Compare commits

...

22 Commits

Author SHA1 Message Date
github-actions[bot]
457196f9c1 ci: update version string in docs 2024-10-31 15:30:06 +00:00
dante
a3c131dac0 feat: lookupless rounding ops (#862) 2024-10-31 11:29:46 -04:00
sebastiandanconia
fd9c2305ac docs: improve cli friendliness (#861)
* Improve clarity of an info!() message

* Replace references to EZKL_REPO_PATH in `--help' output

Command `--help' messages aren't meant to be unduly verbose; we can
write them for common/simple use cases. We continue to support
EZKL_REPO_PATH for users who need it, for example to support
containerized server use cases.

To be clear, by default, EZKL_REPO_PATH = $HOME/.ezkl (see the sketch below the commit list)
2024-10-30 17:25:47 -04:00
dante
a0060f341d chore: rm lookup recip (#859) 2024-10-29 15:57:38 -04:00
dante
17f1d42739 chore: unify leakyrelu and relu (#858) 2024-10-29 10:43:40 -04:00
dante
ebaee9e2b1 feat: lookupless min/max ops (#854) 2024-10-26 08:00:27 -04:00
dante
d51cba589a feat: dynamic lookup overflow (#853) 2024-10-23 23:12:00 -04:00
Artem
1cb1b6e143 feat: iOS Bindings (#846) 2024-10-23 09:58:55 -04:00
Ethan Cemer
d2b683b527 feat: reusable verifier (#821) 2024-10-22 09:10:24 -04:00
Jseam
a06b09ef1f docs: add batch demo (#849) 2024-10-15 08:36:24 +01:00
dante
e5aa48fbd6 chore: support all padding types (#848) 2024-10-05 10:43:12 -04:00
dante
64fbc8a1c9 refactor: lookup-less sign relu abs (#845) 2024-09-17 11:58:58 -04:00
dante
c9f9d17f16 chore: optimized release builds by default (#844) 2024-09-05 15:52:03 +02:00
Ethan Cemer
b49b0487c4 fix: remove console import from index.ts in in-browser-evm-verifier package (#841) 2024-08-30 18:47:59 +01:00
dante
61b7a8e9b5 chore: perf updates (#838) 2024-08-27 09:45:40 -04:00
dante
5dbc7d5176 chore: cache lookup tables (#835) 2024-08-19 00:24:53 -04:00
dante
ada45a3197 chore: swap h2 collections hash for rustc hasher (#832) 2024-08-04 17:28:36 -04:00
dante
616b421967 fix: bump compiler to latest to accomodate latest serde diagnostic (#830) 2024-07-25 07:56:21 -04:00
dante
f64f0ecd23 fix: instance order when using processed params (#829) 2024-07-24 07:58:46 -04:00
dante
5be12b7a54 fix: num groups for conv operations should be specified at load time (#828) 2024-07-18 09:58:41 -04:00
dante
2fd877c716 chore: small worm example (#568) 2024-07-15 09:20:37 -04:00
dante
8197340985 chore: const filtering optimizations (#825) 2024-07-12 12:37:02 +01:00
120 changed files with 8465 additions and 4908 deletions
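
As an aside on the EZKL_REPO_PATH default noted in #861 above, a minimal Python sketch of the documented fallback behavior (illustrative only, not ezkl's actual resolution code):

import os

# Resolve the ezkl repo path as the commit describes:
# honor EZKL_REPO_PATH if set, otherwise fall back to $HOME/.ezkl.
repo_path = os.environ.get(
    "EZKL_REPO_PATH",
    os.path.join(os.path.expanduser("~"), ".ezkl"),
)
print(repo_path)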

View File

@@ -22,15 +22,18 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Install binaryen
run: |
set -e

View File

@@ -11,7 +11,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: nanoGPT Mock

View File

@@ -40,7 +40,7 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
@@ -86,7 +86,7 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy

View File

@@ -45,7 +45,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Checkout repo
@@ -106,27 +106,27 @@ jobs:
include:
- build: windows-msvc
os: windows-latest
rust: nightly-2024-02-06
rust: nightly-2024-07-18
target: x86_64-pc-windows-msvc
- build: macos
os: macos-13
rust: nightly-2024-02-06
rust: nightly-2024-07-18
target: x86_64-apple-darwin
- build: macos-aarch64
os: macos-13
rust: nightly-2024-02-06
rust: nightly-2024-07-18
target: aarch64-apple-darwin
- build: linux-musl
os: ubuntu-22.04
rust: nightly-2024-02-06
rust: nightly-2024-07-18
target: x86_64-unknown-linux-musl
- build: linux-gnu
os: ubuntu-22.04
rust: nightly-2024-02-06
rust: nightly-2024-07-18
target: x86_64-unknown-linux-gnu
- build: linux-aarch64
os: ubuntu-22.04
rust: nightly-2024-02-06
rust: nightly-2024-07-18
target: aarch64-unknown-linux-gnu
steps:
@@ -181,9 +181,14 @@ jobs:
echo "target flag is: ${{ env.TARGET_FLAGS }}"
echo "target dir is: ${{ env.TARGET_DIR }}"
- name: Build release binary
- name: Build release binary (no asm)
if: matrix.build != 'linux-gnu'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry
- name: Build release binary (asm)
if: matrix.build == 'linux-gnu'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features asm
- name: Strip release binary
if: matrix.build != 'windows-msvc' && matrix.build != 'linux-aarch64'
run: strip "target/${{ matrix.target }}/release/ezkl"

View File

@@ -26,7 +26,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Build
@@ -38,7 +38,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Docs
@@ -50,7 +50,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -65,40 +65,40 @@ jobs:
- name: Library tests (original lookup)
run: cargo nextest run --lib --verbose --no-default-features --features ezkl
ultra-overflow-tests-gpu:
runs-on: GPU
env:
ENABLE_ICICLE_GPU: true
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- uses: mwilliamson/setup-wasmtime-action@v2
with:
wasmtime-version: "3.0.1"
- name: Install wasm32-wasi
run: rustup target add wasm32-wasi
- name: Install cargo-wasi
run: cargo install cargo-wasi
# - name: Matmul overflow (wasi)
# run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# - name: Conv overflow (wasi)
# run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
- name: lookup overflow
run: cargo nextest run --release lookup_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Matmul overflow
run: RUST_LOG=debug cargo nextest run matmul_col_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Conv overflow
run: RUST_LOG=debug cargo nextest run conv_col_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Conv + relu overflow
run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture --features icicle -- --include-ignored
# ultra-overflow-tests-gpu:
# runs-on: GPU
# env:
# ENABLE_ICICLE_GPU: true
# steps:
# - uses: actions/checkout@v4
# - uses: actions-rs/toolchain@v1
# with:
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - uses: baptiste0928/cargo-install@v1
# with:
# crate: cargo-nextest
# locked: true
# - uses: mwilliamson/setup-wasmtime-action@v2
# with:
# wasmtime-version: "3.0.1"
# - name: Install wasm32-wasi
# run: rustup target add wasm32-wasi
# - name: Install cargo-wasi
# run: cargo install cargo-wasi
# # - name: Matmul overflow (wasi)
# # run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# # - name: Conv overflow (wasi)
# # run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
# - name: lookup overflow
# run: cargo nextest run lookup_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Matmul overflow
# run: RUST_LOG=debug cargo nextest run matmul_col_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Conv overflow
# run: RUST_LOG=debug cargo nextest run conv_col_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Conv + relu overflow
# run: cargo nextest run conv_relu_col_ultra_overflow --no-capture --features icicle -- --include-ignored
ultra-overflow-tests_og-lookup:
runs-on: non-gpu
@@ -106,7 +106,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -139,7 +139,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -172,7 +172,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -184,22 +184,24 @@ jobs:
wasm32-tests:
runs-on: ubuntu-latest
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- uses: nanasess/setup-chromedriver@v2
# with:
# chromedriver-version: "115.0.5790.102"
- name: Install wasm32-unknown-unknown
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Run wasm verifier tests
# on mac:
# AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
@@ -212,7 +214,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -229,17 +231,19 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
# - name: The Worm Mock
# run: cargo nextest run --release --verbose tests::large_mock_::large_tests_5_expects -- --include-ignored
- name: public outputs and tolerance > 0
run: cargo nextest run --release --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
- name: public outputs + batch size == 10
run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 32
run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 16
- name: kzg inputs
run: cargo nextest run --release --verbose tests::mock_kzg_input_::t --test-threads 32
- name: kzg params
@@ -258,6 +262,8 @@ jobs:
run: cargo nextest run --release --verbose tests::mock_hashed_input_::t --test-threads 32
- name: hashed params
run: cargo nextest run --release --verbose tests::mock_hashed_params_::t --test-threads 32
- name: hashed params public inputs
run: cargo nextest run --release --verbose tests::mock_hashed_params_public_inputs_::t --test-threads 32
- name: hashed outputs
run: cargo nextest run --release --verbose tests::mock_hashed_output_::t --test-threads 32
- name: hashed inputs + params + outputs
@@ -286,7 +292,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -304,7 +310,7 @@ jobs:
node-version: "18.12.1"
cache: "pnpm"
- name: "Add rust-src"
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Install dependencies for js tests and in-browser-evm-verifier package
run: |
pnpm install --frozen-lockfile
@@ -323,12 +329,12 @@ jobs:
cd in-browser-evm-verifier
pnpm build:commonjs
cd ..
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: KZG prove and verify tests (EVM + VK rendered seperately)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_render_seperately_ --test-threads 1
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: KZG prove and verify tests (EVM + reusable verifier + col-overflow)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_reusable_verifier --test-threads 1
- name: KZG prove and verify tests (EVM + kzg all)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_kzg_all_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + kzg inputs)
@@ -365,15 +371,18 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- name: Use pnpm 8
uses: pnpm/action-setup@v2
@@ -431,40 +440,40 @@ jobs:
- name: KZG prove and verify tests (hashed outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed
prove-and-verify-tests-gpu:
runs-on: GPU
env:
ENABLE_ICICLE_GPU: true
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: KZG prove and verify tests (kzg outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + fixed params + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
- name: KZG prove and verify tests (public inputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
- name: KZG prove and verify tests (fixed params)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
- name: KZG prove and verify tests (hashed outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
# prove-and-verify-tests-gpu:
# runs-on: GPU
# env:
# ENABLE_ICICLE_GPU: true
# steps:
# - uses: actions/checkout@v4
# - uses: actions-rs/toolchain@v1
# with:
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - name: Add rust-src
# run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
# - uses: actions/checkout@v3
# - uses: baptiste0928/cargo-install@v1
# with:
# crate: cargo-nextest
# locked: true
# - name: KZG prove and verify tests (kzg outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + fixed params + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# - name: KZG prove and verify tests (public inputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
# - name: KZG prove and verify tests (fixed params)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
# - name: KZG prove and verify tests (hashed outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
prove-and-verify-mock-aggr-tests:
runs-on: self-hosted
@@ -473,7 +482,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -491,7 +500,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -499,7 +508,7 @@ jobs:
crate: cargo-nextest
locked: true
- name: KZG tests
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
prove-and-verify-aggr-tests:
runs-on: large-self-hosted
@@ -508,7 +517,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -525,17 +534,17 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: KZG prove and verify aggr tests
run: cargo nextest run --release --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored
@@ -546,7 +555,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -566,17 +575,17 @@ jobs:
python-version: "3.12"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Install cmake
run: sudo apt-get install -y cmake
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt;
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: Build python ezkl
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
- name: Run pytest
@@ -592,7 +601,7 @@ jobs:
python-version: "3.12"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -641,17 +650,17 @@ jobs:
python-version: "3.11"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: Install pip
run: python -m ensurepip --upgrade
- name: Setup Virtual Env and Install python dependencies
@@ -677,3 +686,68 @@ jobs:
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
# - name: Reusable verifier tutorial
# run: source .env/bin/activate; cargo nextest run py_tests::tests::reusable_
ios-integration-tests:
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: Run ios tests
run: CARGO_BUILD_TARGET=aarch64-apple-darwin RUSTUP_TOOLCHAIN=nightly-2024-07-18-aarch64-apple-darwin cargo test --test ios_integration_tests --features ios-bindings-test --no-default-features
swift-package-tests:
runs-on: macos-latest
needs: [ios-integration-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Build EzklCoreBindings
run: CONFIGURATION=debug cargo run --bin ios_gen_bindings --features "ios-bindings uuid camino uniffi_bindgen" --no-default-features
- name: Clone ezkl-swift-package repository
run: |
git clone https://github.com/zkonduit/ezkl-swift-package.git
- name: Copy EzklCoreBindings
run: |
rm -rf ezkl-swift-package/Sources/EzklCoreBindings
cp -r build/EzklCoreBindings ezkl-swift-package/Sources/
- name: Set up Xcode environment
run: |
sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
sudo xcodebuild -license accept
- name: Run Package Tests
run: |
cd ezkl-swift-package
xcodebuild test \
-scheme EzklPackage \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-resultBundlePath ../testResults
- name: Run Example App Tests
run: |
cd ezkl-swift-package/Example
xcodebuild test \
-project Example.xcodeproj \
-scheme EzklApp \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-parallel-testing-enabled NO \
-resultBundlePath ../../exampleTestResults \
-skip-testing:EzklAppUITests/EzklAppUITests/testButtonClicksInOrder

View File

@@ -0,0 +1,75 @@
name: Build and Publish EZKL iOS SPM package
on:
workflow_dispatch:
inputs:
tag:
description: "The tag to release"
required: true
push:
tags:
- "*"
jobs:
build-and-update:
runs-on: macos-latest
steps:
- name: Checkout EZKL
uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
- name: Build EzklCoreBindings
run: CONFIGURATION=release cargo run --bin ios_gen_bindings --features "ios-bindings uuid camino uniffi_bindgen" --no-default-features
- name: Clone ezkl-swift-package repository
run: |
git clone https://github.com/zkonduit/ezkl-swift-package.git
- name: Copy EzklCoreBindings
run: |
rm -rf ezkl-swift-package/Sources/EzklCoreBindings
cp -r build/EzklCoreBindings ezkl-swift-package/Sources/
- name: Set up Xcode environment
run: |
sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
sudo xcodebuild -license accept
- name: Run Package Tests
run: |
cd ezkl-swift-package
xcodebuild test \
-scheme EzklPackage \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-resultBundlePath ../testResults
- name: Run Example App Tests
run: |
cd ezkl-swift-package/Example
xcodebuild test \
-project Example.xcodeproj \
-scheme EzklApp \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-parallel-testing-enabled NO \
-resultBundlePath ../../exampleTestResults \
-skip-testing:EzklAppUITests/EzklAppUITests/testButtonClicksInOrder
- name: Commit and Push Changes to feat/ezkl-direct-integration
run: |
cd ezkl-swift-package
git config user.name "GitHub Action"
git config user.email "action@github.com"
git add Sources/EzklCoreBindings
git commit -m "Automatically updated EzklCoreBindings for EZKL"
git tag ${{ github.event.inputs.tag }}
git remote set-url origin https://zkonduit:${EZKL_PORTER_TOKEN}@github.com/zkonduit/ezkl-swift-package.git
git push origin
git push origin --tags
env:
EZKL_PORTER_TOKEN: ${{ secrets.EZKL_PORTER_TOKEN }}

6
.gitignore vendored
View File

@@ -46,7 +46,7 @@ var/
node_modules
/dist
timingData.json
!tests/wasm/pk.key
!tests/wasm/vk.key
!tests/assets/pk.key
!tests/assets/vk.key
docs/python/build
!tests/wasm/vk_aggr.key
!tests/assets/vk_aggr.key

897
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -4,6 +4,7 @@ cargo-features = ["profile-rustflags"]
name = "ezkl"
version = "0.0.0"
edition = "2021"
default-run = "ezkl"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -11,86 +12,93 @@ edition = "2021"
# Name to be imported within python
# Example: import ezkl
name = "ezkl"
crate-type = ["cdylib", "rlib"]
crate-type = ["cdylib", "rlib", "staticlib"]
[dependencies]
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "9fff22c", features = [
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "b753a832e92d5c86c5c997327a9cf9de86a18851", features = [
"derive_serde",
] }
rand = { version = "0.8", default_features = false }
itertools = { version = "0.10.3", default_features = false }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete = "4.5.2"
serde = { version = "1.0.126", features = ["derive"], optional = true }
serde_json = { version = "1.0.97", default_features = false, features = [
"float_roundtrip",
"raw_value",
], optional = true }
log = { version = "0.4.17", default_features = false, optional = true }
thiserror = { version = "1.0.38", default_features = false }
hex = { version = "0.4.3", default_features = false }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", package = "halo2_proofs", branch = "ac/cache-lookup-commitments", features = ["circuit-params"] }
rand = { version = "0.8", default-features = false }
itertools = { version = "0.10.3", default-features = false }
clap = { version = "4.5.3", features = ["derive"], optional = true }
serde = { version = "1.0.126", features = ["derive"] }
clap_complete = { version = "4.5.2", optional = true }
log = { version = "0.4.17", default-features = false }
thiserror = { version = "1.0.38", default-features = false }
hex = { version = "0.4.3", default-features = false }
halo2_wrong_ecc = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac/chunked-mv-lookup", package = "ecc" }
snark-verifier = { git = "https://github.com/zkonduit/snark-verifier", branch = "ac/chunked-mv-lookup", features = [
"derive_serde",
] }
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch = "main" }
maybe-rayon = { version = "0.1.1", default_features = false }
bincode = { version = "1.3.3", default_features = false }
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch = "ac/update-h2-curves", optional = true }
maybe-rayon = { version = "0.1.1", default-features = false }
bincode = { version = "1.3.3", default-features = false }
unzip-n = "0.1.2"
num = "0.4.1"
portable-atomic = "1.6.0"
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand" }
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
semver = "1.0.22"
portable-atomic = { version = "1.6.0", optional = true }
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand", optional = true }
semver = { version = "1.0.22", optional = true }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
serde_json = { version = "1.0.97", features = [
"float_roundtrip",
"raw_value",
] }
# evm related deps
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev="5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = ["provider-http", "signers", "contract", "rpc-types-eth", "signer-wallet", "node-bindings"] }
foundry-compilers = {version = "0.4.1", features = ["svm-solc"]}
ethabi = "18"
indicatif = { version = "0.17.5", features = ["rayon"] }
gag = { version = "1.0.0", default_features = false }
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = [
"provider-http",
"signers",
"contract",
"rpc-types-eth",
"signer-wallet",
"node-bindings",
], optional = true }
foundry-compilers = { version = "0.4.1", features = ["svm-solc"], optional = true }
ethabi = { version = "18", optional = true }
indicatif = { version = "0.17.5", features = ["rayon"], optional = true }
gag = { version = "1.0.0", default-features = false, optional = true }
instant = { version = "0.1" }
reqwest = { version = "0.12.4", default-features = false, features = [
"default-tls",
"multipart",
"stream",
] }
openssl = { version = "0.10.55", features = ["vendored"] }
tokio-postgres = "0.7.10"
pg_bigdecimal = "0.1.5"
lazy_static = "1.4.0"
colored_json = { version = "3.0.1", default_features = false, optional = true }
regex = { version = "1", default_features = false }
tokio = { version = "1.35", default_features = false, features = [
"macros",
"rt-multi-thread"
] }
pyo3 = { version = "0.21.2", features = [
"extension-module",
"abi3-py37",
"macros",
], default_features = false, optional = true }
pyo3-asyncio = { git = "https://github.com/jopemachine/pyo3-asyncio/", branch="migration-pyo3-0.21", features = [
"attributes",
"tokio-runtime",
], default_features = false, optional = true }
pyo3-log = { version = "0.10.0", default_features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "7bf303b2ae9bddd5fa6951ae95848c0d52fb7f50", default_features = false, optional = true }
reqwest = { version = "0.12.4", default-features = false, features = ["default-tls", "multipart", "stream"], optional = true }
openssl = { version = "0.10.55", features = ["vendored"], optional = true }
tokio-postgres = { version = "0.7.10", optional = true }
pg_bigdecimal = { version = "0.1.5", optional = true }
lazy_static = { version = "1.4.0", optional = true }
colored_json = { version = "3.0.1", default-features = false, optional = true }
regex = { version = "1", default-features = false, optional = true }
tokio = { version = "1.35.0", default-features = false, features = ["macros", "rt-multi-thread"], optional = true }
pyo3 = { version = "0.21.2", features = ["extension-module", "abi3-py37", "macros"], default-features = false, optional = true }
pyo3-asyncio = { git = "https://github.com/jopemachine/pyo3-asyncio/", branch="migration-pyo3-0.21", features = ["attributes", "tokio-runtime"], default-features = false, optional = true }
pyo3-log = { version = "0.10.0", default-features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "40c64319291184814d9fea5fdf4fa16f5a4f7116", default-features = false, optional = true }
tabled = { version = "0.12.0", optional = true }
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
objc = { version = "0.2.4", optional = true }
mimalloc = { version = "0.1", optional = true }
# universal bindings
uniffi = { version = "=0.28.0", optional = true }
getrandom = { version = "0.2.8", optional = true }
uniffi_bindgen = { version = "=0.28.0", optional = true }
camino = { version = "^1.1", optional = true }
uuid = { version = "1.10.0", features = ["v4"], optional = true }
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies]
colored = { version = "2.0.0", default_features = false, optional = true }
env_logger = { version = "0.10.0", default_features = false, optional = true }
chrono = "0.4.31"
sha256 = "1.4.0"
colored = { version = "2.0.0", default-features = false, optional = true }
env_logger = { version = "0.10.0", default-features = false, optional = true }
chrono = { version = "0.4.31", optional = true }
sha256 = { version = "1.4.0", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
serde_json = { version = "1.0.97", default-features = false, features = [
"float_roundtrip",
"raw_value",
] }
getrandom = { version = "0.2.8", features = ["js"] }
instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] }
@@ -106,6 +114,10 @@ wasm-bindgen-console-logger = "0.1.1"
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dev-dependencies]
criterion = { version = "0.5.1", features = ["html_reports"] }
[build-dependencies]
uniffi = { version = "0.28", features = ["build"], optional = true }
[dev-dependencies]
tempfile = "3.3.0"
lazy_static = "1.4.0"
@@ -157,16 +169,20 @@ harness = false
[[bench]]
name = "relu"
name = "sigmoid"
harness = false
[[bench]]
name = "accum_matmul_relu"
name = "relu_lookupless"
harness = false
[[bench]]
name = "accum_matmul_sigmoid"
harness = false
[[bench]]
name = "accum_matmul_relu_overflow"
name = "accum_matmul_sigmoid_overflow"
harness = false
[[bin]]
@@ -175,42 +191,73 @@ test = false
bench = false
required-features = ["ezkl"]
[[bin]]
name = "ios_gen_bindings"
required-features = ["ios-bindings", "uuid", "camino", "uniffi_bindgen"]
[features]
web = ["wasm-bindgen-rayon"]
default = ["ezkl", "mv-lookup", "no-banner", "parallel-poly-read"]
default = ["ezkl", "mv-lookup", "precompute-coset", "no-banner", "parallel-poly-read"]
onnx = ["dep:tract-onnx"]
python-bindings = ["pyo3", "pyo3-log", "pyo3-asyncio"]
ios-bindings = ["mv-lookup", "precompute-coset", "parallel-poly-read", "uniffi"]
ios-bindings-test = ["ios-bindings", "uniffi/bindgen-tests"]
ezkl = [
"onnx",
"serde",
"serde_json",
"log",
"colored",
"env_logger",
"dep:colored",
"dep:env_logger",
"tabled/color",
"serde_json/std",
"colored_json",
"halo2_proofs/circuit-params",
"dep:alloy",
"dep:foundry-compilers",
"dep:ethabi",
"dep:indicatif",
"dep:gag",
"dep:reqwest",
"dep:openssl",
"dep:tokio-postgres",
"dep:pg_bigdecimal",
"dep:lazy_static",
"dep:regex",
"dep:tokio",
"dep:mimalloc",
"dep:chrono",
"dep:sha256",
"dep:portable-atomic",
"dep:clap_complete",
"dep:halo2_solidity_verifier",
"dep:semver",
"dep:clap",
"dep:tosubcommand",
]
parallel-poly-read = ["halo2_proofs/parallel-poly-read"]
parallel-poly-read = ["halo2_proofs/circuit-params", "halo2_proofs/parallel-poly-read"]
mv-lookup = [
"halo2_proofs/mv-lookup",
"snark-verifier/mv-lookup",
"halo2_solidity_verifier/mv-lookup",
]
asm = ["halo2curves/asm", "halo2_proofs/asm"]
precompute-coset = ["halo2_proofs/precompute-coset"]
det-prove = []
icicle = ["halo2_proofs/icicle_gpu"]
empty-cmd = []
no-banner = []
no-update = []
metal = ["dep:metal", "dep:objc"]
# icicle patch to 0.1.0 if feature icicle is enabled
[patch.'https://github.com/ingonyama-zk/icicle']
icicle = { git = "https://github.com/ingonyama-zk/icicle?rev=45b00fb", package = "icicle", branch = "fix/vhnat/ezkl-build-fix" }
[patch.'https://github.com/zkonduit/halo2']
halo2_proofs = { git = "https://github.com/zkonduit/halo2#8cfca221f53069a0374687654882b99e729041d7", package = "halo2_proofs" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/cache-lookup-commitments#8b13a0d2a7a34d8daab010dadb2c47dfa47d37d0", package = "halo2_proofs", branch = "ac/cache-lookup-commitments" }
[patch.crates-io]
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }
[profile.release]
rustflags = ["-C", "relocation-model=pic"]
lto = "fat"
codegen-units = 1
# panic = "abort"

View File

@@ -64,7 +64,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,
@@ -72,6 +72,7 @@ impl Circuit<Fr> for MyCircuit {
Box::new(PolyOp::Conv {
padding: vec![(0, 0)],
stride: vec![1; 2],
group: 1,
}),
)
.unwrap();

View File

@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,

View File

@@ -57,7 +57,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,

View File

@@ -57,7 +57,15 @@ impl Circuit<Fr> for MyCircuit {
// sets up a new relu table
base_config
.configure_lookup(cs, &b, &output, &a, BITS, K, &LookupOp::ReLU)
.configure_lookup(
cs,
&b,
&output,
&a,
BITS,
K,
&LookupOp::Sigmoid { scale: 1.0.into() },
)
.unwrap();
MyConfig { base_config }
@@ -75,14 +83,18 @@ impl Circuit<Fr> for MyCircuit {
let op = PolyOp::Einsum {
equation: "ij,jk->ik".to_string(),
};
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let output = config
.base_config
.layout(&mut region, &self.inputs, Box::new(op))
.unwrap();
let _output = config
.base_config
.layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[output.unwrap()],
Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
)
.unwrap();
Ok(())
},

View File

@@ -58,7 +58,15 @@ impl Circuit<Fr> for MyCircuit {
// sets up a new relu table
base_config
.configure_lookup(cs, &b, &output, &a, BITS, k, &LookupOp::ReLU)
.configure_lookup(
cs,
&b,
&output,
&a,
BITS,
k,
&LookupOp::Sigmoid { scale: 1.0.into() },
)
.unwrap();
MyConfig { base_config }
@@ -76,14 +84,18 @@ impl Circuit<Fr> for MyCircuit {
let op = PolyOp::Einsum {
equation: "ij,jk->ik".to_string(),
};
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let output = config
.base_config
.layout(&mut region, &self.inputs, Box::new(op))
.unwrap();
let _output = config
.base_config
.layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[output.unwrap()],
Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
)
.unwrap();
Ok(())
},

View File

@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,

View File

@@ -59,7 +59,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,

View File

@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(&mut region, &self.inputs, Box::new(PolyOp::Add))
.unwrap();

View File

@@ -56,7 +56,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(&mut region, &self.inputs, Box::new(PolyOp::Pow(4)))
.unwrap();

150
benches/relu_lookupless.rs Normal file
View File

@@ -0,0 +1,150 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK};
use halo2_proofs::poly::kzg::strategy::SingleStrategy;
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use rand::Rng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
static mut LEN: usize = 4;
const K: usize = 16;
#[derive(Clone)]
struct NLCircuit {
pub input: ValTensor<Fr>,
}
impl Circuit<Fr> for NLCircuit {
type Config = Config<Fr>;
type FloorPlanner = SimpleFloorPlanner;
type Params = ();
fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<Fr>) -> Self::Config {
unsafe {
let advices = (0..3)
.map(|_| VarTensor::new_advice(cs, K, 1, LEN))
.collect::<Vec<_>>();
let mut config = Config::default();
config
.configure_range_check(cs, &advices[0], &advices[1], (-1, 1), K)
.unwrap();
config
.configure_range_check(cs, &advices[0], &advices[1], (0, 1023), K)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K, LEN, false);
config
}
}
fn synthesize(
&self,
mut config: Self::Config,
mut layouter: impl Layouter<Fr>, // layouter is our 'write buffer' for the circuit
) -> Result<(), Error> {
config.layout_range_checks(&mut layouter).unwrap();
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,
&[self.input.clone()],
Box::new(PolyOp::LeakyReLU {
slope: 0.0.into(),
scale: 1,
}),
)
.unwrap();
Ok(())
},
)?;
Ok(())
}
}
fn runrelu(c: &mut Criterion) {
let mut group = c.benchmark_group("relu");
let mut rng = rand::thread_rng();
let params = gen_srs::<KZGCommitmentScheme<_>>(17);
for &len in [4, 8].iter() {
unsafe {
LEN = len;
};
let input: Tensor<Value<Fr>> =
Tensor::<IntegerRep>::from((0..len).map(|_| rng.gen_range(0..10))).into();
let circuit = NLCircuit {
input: ValTensor::from(input.clone()),
};
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, NLCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, NLCircuit>(&circuit, &params, true).unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {
b.iter(|| {
let prover = create_proof_circuit::<
KZGCommitmentScheme<_>,
NLCircuit,
ProverSHPLONK<_>,
VerifierSHPLONK<_>,
SingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit.clone(),
vec![],
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);
prover.unwrap();
});
});
}
group.finish();
}
criterion_group! {
name = benches;
config = Criterion::default().with_plots();
targets = runrelu
}
criterion_main!(benches);

View File

@@ -2,6 +2,7 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Through
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::table::Range;
use ezkl::circuit::{ops::lookup::LookupOp, BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
@@ -41,7 +42,7 @@ impl Circuit<Fr> for NLCircuit {
.map(|_| VarTensor::new_advice(cs, K, 1, LEN))
.collect::<Vec<_>>();
let nl = LookupOp::ReLU;
let nl = LookupOp::Sigmoid { scale: 1.0.into() };
let mut config = Config::default();
@@ -62,9 +63,13 @@ impl Circuit<Fr> for NLCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(&mut region, &[self.input.clone()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[self.input.clone()],
Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
)
.unwrap();
Ok(())
},
@@ -84,7 +89,7 @@ fn runrelu(c: &mut Criterion) {
};
let input: Tensor<Value<Fr>> =
Tensor::<i32>::from((0..len).map(|_| rng.gen_range(0..10))).into();
Tensor::<IntegerRep>::from((0..len).map(|_| rng.gen_range(0..10))).into();
let circuit = NLCircuit {
input: ValTensor::from(input.clone()),

7
build.rs Normal file
View File

@@ -0,0 +1,7 @@
fn main() {
if cfg!(feature = "ios-bindings-test") {
println!("cargo::rustc-env=UNIFFI_CARGO_BUILD_EXTRA_ARGS=--features=ios-bindings --no-default-features");
}
println!("cargo::rerun-if-changed=build.rs");
}

View File

@@ -1,4 +1,4 @@
ezkl==0.0.0
ezkl==15.2.0
sphinx
sphinx-rtd-theme
sphinxcontrib-napoleon

View File

@@ -1,7 +1,7 @@
import ezkl
project = 'ezkl'
release = '0.0.0'
release = '15.2.0'
version = release

View File

@@ -2,8 +2,7 @@ use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{
ops::lookup::LookupOp, ops::poly::PolyOp, BaseConfig as PolyConfig, CheckMode,
};
use ezkl::fieldutils;
use ezkl::fieldutils::i32_to_felt;
use ezkl::fieldutils::{self, integer_rep_to_felt, IntegerRep};
use ezkl::tensor::*;
use halo2_proofs::dev::MockProver;
use halo2_proofs::poly::commitment::Params;
@@ -42,8 +41,8 @@ const NUM_INNER_COLS: usize = 1;
struct Config<
const LEN: usize, //LEN = CHOUT x OH x OW flattened //not supported yet in rust stable
const CLASSES: usize,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -66,8 +65,8 @@ struct Config<
struct MyCircuit<
const LEN: usize, //LEN = CHOUT x OH x OW flattened
const CLASSES: usize,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -90,8 +89,8 @@ struct MyCircuit<
impl<
const LEN: usize,
const CLASSES: usize,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -147,6 +146,8 @@ where
let params = VarTensor::new_advice(cs, K, NUM_INNER_COLS, LEN);
let output = VarTensor::new_advice(cs, K, NUM_INNER_COLS, LEN);
let _constant = VarTensor::constant_cols(cs, K, LEN, false);
println!("INPUT COL {:#?}", input);
let mut layer_config = PolyConfig::configure(
@@ -157,15 +158,11 @@ where
);
layer_config
.configure_lookup(
cs,
&input,
&output,
&params,
(LOOKUP_MIN, LOOKUP_MAX),
K,
&LookupOp::ReLU,
)
.configure_range_check(cs, &input, &params, (-1, 1), K)
.unwrap();
layer_config
.configure_range_check(cs, &input, &params, (0, 1023), K)
.unwrap();
layer_config
@@ -196,15 +193,21 @@ where
) -> Result<(), Error> {
config.layer_config.layout_tables(&mut layouter).unwrap();
config
.layer_config
.layout_range_checks(&mut layouter)
.unwrap();
let x = layouter
.assign_region(
|| "mlp_4d",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 1024, 2);
let op = PolyOp::Conv {
padding: vec![(PADDING, PADDING); 2],
stride: vec![STRIDE; 2],
group: 1,
};
let x = config
.layer_config
@@ -221,7 +224,14 @@ where
let x = config
.layer_config
.layout(&mut region, &[x.unwrap()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[x.unwrap()],
Box::new(PolyOp::LeakyReLU {
slope: 0.0.into(),
scale: 1,
}),
)
.unwrap();
let mut x = config
@@ -281,7 +291,7 @@ where
}
pub fn runconv() {
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
const KERNEL_HEIGHT: usize = 5;
@@ -315,7 +325,11 @@ pub fn runconv() {
.test_set_length(10_000)
.finalize();
let mut train_data = Tensor::from(trn_img.iter().map(|x| i32_to_felt::<F>(*x as i32 / 16)));
let mut train_data = Tensor::from(
trn_img
.iter()
.map(|x| integer_rep_to_felt::<F>(*x as IntegerRep / 16)),
);
train_data.reshape(&[50_000, 28, 28]).unwrap();
let mut train_labels = Tensor::from(trn_lbl.iter().map(|x| *x as f32));
@@ -343,8 +357,8 @@ pub fn runconv() {
.map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
}),
);
@@ -355,7 +369,8 @@ pub fn runconv() {
let l0_kernels = l0_kernels.try_into().unwrap();
let mut l0_bias = Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::i32_to_felt(0)));
let mut l0_bias =
Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::integer_rep_to_felt(0)));
l0_bias.set_visibility(&ezkl::graph::Visibility::Private);
let l0_bias = l0_bias.try_into().unwrap();
@@ -363,8 +378,8 @@ pub fn runconv() {
let mut l2_biases = Tensor::<F>::from(myparams.biases.into_iter().map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
}));
l2_biases.set_visibility(&ezkl::graph::Visibility::Private);
l2_biases.reshape(&[l2_biases.len(), 1]).unwrap();
@@ -374,8 +389,8 @@ pub fn runconv() {
let mut l2_weights = Tensor::<F>::from(myparams.weights.into_iter().flatten().map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
}));
l2_weights.set_visibility(&ezkl::graph::Visibility::Private);
l2_weights.reshape(&[CLASSES, LEN]).unwrap();
@@ -401,13 +416,13 @@ pub fn runconv() {
l2_params: [l2_weights, l2_biases],
};
let public_input: Tensor<i32> = vec![
-25124i32, -19304, -16668, -4399, -6209, -4548, -2317, -8349, -6117, -23461,
let public_input: Tensor<IntegerRep> = vec![
-25124, -19304, -16668, -4399, -6209, -4548, -2317, -8349, -6117, -23461,
]
.into_iter()
.into();
let pi_inner: Tensor<F> = public_input.map(i32_to_felt::<F>);
let pi_inner: Tensor<F> = public_input.map(integer_rep_to_felt::<F>);
println!("MOCK PROVING");
let now = Instant::now();

View File

@@ -2,7 +2,7 @@ use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{
ops::lookup::LookupOp, ops::poly::PolyOp, BaseConfig as PolyConfig, CheckMode,
};
use ezkl::fieldutils::i32_to_felt;
use ezkl::fieldutils::{integer_rep_to_felt, IntegerRep};
use ezkl::tensor::*;
use halo2_proofs::dev::MockProver;
use halo2_proofs::{
@@ -23,8 +23,8 @@ struct MyConfig {
#[derive(Clone)]
struct MyCircuit<
const LEN: usize, //LEN = CHOUT x OH x OW flattened
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
> {
// Given the stateless MyConfig type information, a DNN trace is determined by its input and the parameters of its layers.
// Computing the trace still requires a forward pass. The intermediate activations are stored only by the layouter.
@@ -34,7 +34,7 @@ struct MyCircuit<
_marker: PhantomData<F>,
}
impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRep> Circuit<F>
for MyCircuit<LEN, LOOKUP_MIN, LOOKUP_MAX>
{
type Config = MyConfig;
@@ -53,6 +53,10 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
let output = VarTensor::new_advice(cs, K, 1, LEN);
// tells the config layer to add an affine op to the circuit gate
let _constant = VarTensor::constant_cols(cs, K, LEN, false);
println!("INPUT COL {:#?}", input);
let mut layer_config = PolyConfig::<F>::configure(
cs,
&[input.clone(), params.clone()],
@@ -60,17 +64,12 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
CheckMode::SAFE,
);
// sets up a new ReLU table and resuses it for l1 and l3 non linearities
layer_config
.configure_lookup(
cs,
&input,
&output,
&params,
(LOOKUP_MIN, LOOKUP_MAX),
K,
&LookupOp::ReLU,
)
.configure_range_check(cs, &input, &params, (-1, 1), K)
.unwrap();
layer_config
.configure_range_check(cs, &input, &params, (0, 1023), K)
.unwrap();
// sets up a new ReLU table and resuses it for l1 and l3 non linearities
@@ -104,11 +103,16 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
) -> Result<(), Error> {
config.layer_config.layout_tables(&mut layouter).unwrap();
config
.layer_config
.layout_range_checks(&mut layouter)
.unwrap();
let x = layouter
.assign_region(
|| "mlp_4d",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
let x = config
.layer_config
.layout(
@@ -141,7 +145,14 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
println!("x shape: {:?}", x.dims());
let mut x = config
.layer_config
.layout(&mut region, &[x], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[x],
Box::new(PolyOp::LeakyReLU {
scale: 1,
slope: 0.0.into(),
}),
)
.unwrap()
.unwrap();
println!("3");
@@ -177,7 +188,14 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
println!("x shape: {:?}", x.dims());
let x = config
.layer_config
.layout(&mut region, &[x], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[x],
Box::new(PolyOp::LeakyReLU {
scale: 1,
slope: 0.0.into(),
}),
)
.unwrap();
println!("6");
println!("offset: {}", region.row());
@@ -212,36 +230,36 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
}
pub fn runmlp() {
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
// parameters
let mut l0_kernel: Tensor<F> = Tensor::<i32>::new(
let mut l0_kernel: Tensor<F> = Tensor::<IntegerRep>::new(
Some(&[10, 0, 0, -1, 0, 10, 1, 0, 0, 1, 10, 0, 1, 0, 0, 10]),
&[4, 4],
)
.unwrap()
.map(i32_to_felt);
.map(integer_rep_to_felt);
l0_kernel.set_visibility(&ezkl::graph::Visibility::Private);
let mut l0_bias: Tensor<F> = Tensor::<i32>::new(Some(&[0, 0, 0, 1]), &[4, 1])
let mut l0_bias: Tensor<F> = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1]), &[4, 1])
.unwrap()
.map(i32_to_felt);
.map(integer_rep_to_felt);
l0_bias.set_visibility(&ezkl::graph::Visibility::Private);
let mut l2_kernel: Tensor<F> = Tensor::<i32>::new(
let mut l2_kernel: Tensor<F> = Tensor::<IntegerRep>::new(
Some(&[0, 3, 10, -1, 0, 10, 1, 0, 0, 1, 0, 12, 1, -2, 32, 0]),
&[4, 4],
)
.unwrap()
.map(i32_to_felt);
.map(integer_rep_to_felt);
l2_kernel.set_visibility(&ezkl::graph::Visibility::Private);
// input data, with 1 padding to allow for bias
let input: Tensor<Value<F>> = Tensor::<i32>::new(Some(&[-30, -21, 11, 40]), &[4, 1])
let input: Tensor<Value<F>> = Tensor::<IntegerRep>::new(Some(&[-30, -21, 11, 40]), &[4, 1])
.unwrap()
.into();
let mut l2_bias: Tensor<F> = Tensor::<i32>::new(Some(&[0, 0, 0, 1]), &[4, 1])
let mut l2_bias: Tensor<F> = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1]), &[4, 1])
.unwrap()
.map(i32_to_felt);
.map(integer_rep_to_felt);
l2_bias.set_visibility(&ezkl::graph::Visibility::Private);
let circuit = MyCircuit::<4, -8192, 8192> {
@@ -251,12 +269,12 @@ pub fn runmlp() {
_marker: PhantomData,
};
let public_input: Vec<i32> = unsafe {
let public_input: Vec<IntegerRep> = unsafe {
vec![
(531f32 / 128f32).round().to_int_unchecked::<i32>(),
(103f32 / 128f32).round().to_int_unchecked::<i32>(),
(4469f32 / 128f32).round().to_int_unchecked::<i32>(),
(2849f32 / 128f32).to_int_unchecked::<i32>(),
(531f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(103f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(4469f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(2849f32 / 128f32).to_int_unchecked::<IntegerRep>(),
]
};
@@ -265,7 +283,10 @@ pub fn runmlp() {
let prover = MockProver::run(
K as u32,
&circuit,
vec![public_input.iter().map(|x| i32_to_felt::<F>(*x)).collect()],
vec![public_input
.iter()
.map(|x| integer_rep_to_felt::<F>(*x))
.collect()],
)
.unwrap();
prover.assert_satisfied();

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -232,7 +232,7 @@
"run_args.param_visibility = \"fixed\"\n",
"run_args.output_visibility = \"public\"\n",
"run_args.input_scale = 2\n",
"run_args.logrows = 8\n",
"run_args.logrows = 15\n",
"\n",
"ezkl.get_srs(logrows=run_args.logrows, commitment=ezkl.PyCommitments.KZG)"
]
@@ -404,7 +404,7 @@
"run_args.output_visibility = \"polycommit\"\n",
"run_args.variables = [(\"batch_size\", 1)]\n",
"run_args.input_scale = 2\n",
"run_args.logrows = 8\n"
"run_args.logrows = 15\n"
]
},
{
@@ -466,7 +466,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
"version": "3.12.5"
},
"orig_nbformat": 4
},

View File

@@ -0,0 +1,339 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Reusable Verifiers \n",
"\n",
"This notebook demonstrates how to create and reuse the same set of separated verifiers for different models. Specifically, we will use the same verifier for the following four models:\n",
"\n",
"- `1l_mlp sigmoid`\n",
"- `1l_mlp relu`\n",
"- `1l_conv sigmoid`\n",
"- `1l_conv relu`\n",
"\n",
"When deploying EZKL verifiers on the blockchain, each associated model typically requires its own unique verifier, leading to increased on-chain state usage. \n",
"However, with the reusable verifier, we can deploy a single verifier that can be used to verify proofs for any valid H2 circuit. This notebook shows how to do so. \n",
"\n",
"By reusing the same verifier across multiple models, we significantly reduce the amount of state bloat on the blockchain. Instead of deploying a unique verifier for each model, we deploy a unique and much smaller verifying key artifact (VKA) contract for each model while sharing a common separated verifier. The VKA contains the VK for the model as well circuit specific metadata that was otherwise hardcoded into the stack of the original non-reusable verifier."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.onnx\n",
"\n",
"# Define the models\n",
"class MLP_Sigmoid(nn.Module):\n",
" def __init__(self):\n",
" super(MLP_Sigmoid, self).__init__()\n",
" self.fc = nn.Linear(3, 3)\n",
" self.sigmoid = nn.Sigmoid()\n",
"\n",
" def forward(self, x):\n",
" x = self.fc(x)\n",
" x = self.sigmoid(x)\n",
" return x\n",
"\n",
"class MLP_Relu(nn.Module):\n",
" def __init__(self):\n",
" super(MLP_Relu, self).__init__()\n",
" self.fc = nn.Linear(3, 3)\n",
" self.relu = nn.ReLU()\n",
"\n",
" def forward(self, x):\n",
" x = self.fc(x)\n",
" x = self.relu(x)\n",
" return x\n",
"\n",
"class Conv_Sigmoid(nn.Module):\n",
" def __init__(self):\n",
" super(Conv_Sigmoid, self).__init__()\n",
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
" self.sigmoid = nn.Sigmoid()\n",
"\n",
" def forward(self, x):\n",
" x = self.conv(x)\n",
" x = self.sigmoid(x)\n",
" return x\n",
"\n",
"class Conv_Relu(nn.Module):\n",
" def __init__(self):\n",
" super(Conv_Relu, self).__init__()\n",
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
" self.relu = nn.ReLU()\n",
"\n",
" def forward(self, x):\n",
" x = self.conv(x)\n",
" x = self.relu(x)\n",
" return x\n",
"\n",
"# Instantiate the models\n",
"mlp_sigmoid = MLP_Sigmoid()\n",
"mlp_relu = MLP_Relu()\n",
"conv_sigmoid = Conv_Sigmoid()\n",
"conv_relu = Conv_Relu()\n",
"\n",
"# Dummy input tensor for mlp\n",
"dummy_input_mlp = torch.tensor([[-1.5737053155899048, -1.708398461341858, 0.19544155895709991]])\n",
"input_mlp_path = 'mlp_input.json'\n",
"\n",
"# Dummy input tensor for conv\n",
"dummy_input_conv = torch.tensor([[[1.4124163389205933, 0.6938204169273376, 1.0664031505584717]]])\n",
"input_conv_path = 'conv_input.json'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"names = ['mlp_sigmoid', 'mlp_relu', 'conv_sigmoid', 'conv_relu']\n",
"models = [mlp_sigmoid, mlp_relu, conv_sigmoid, conv_relu]\n",
"inputs = [dummy_input_mlp, dummy_input_mlp, dummy_input_conv, dummy_input_conv]\n",
"input_paths = [input_mlp_path, input_mlp_path, input_conv_path, input_conv_path]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"import torch\n",
"import ezkl\n",
"\n",
"for name, model, x, input_path in zip(names, models, inputs, input_paths):\n",
" # Create a new directory for the model if it doesn't exist\n",
" if not os.path.exists(name):\n",
" os.mkdir(name)\n",
" # Store the paths in each of their respective directories\n",
" model_path = os.path.join(name, \"network.onnx\")\n",
" compiled_model_path = os.path.join(name, \"network.compiled\")\n",
" pk_path = os.path.join(name, \"test.pk\")\n",
" vk_path = os.path.join(name, \"test.vk\")\n",
" settings_path = os.path.join(name, \"settings.json\")\n",
"\n",
" witness_path = os.path.join(name, \"witness.json\")\n",
" sol_code_path = os.path.join(name, 'test.sol')\n",
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
" abi_path = os.path.join(name, 'test.abi')\n",
" proof_path = os.path.join(name, \"proof.json\")\n",
"\n",
" # Flips the neural net into inference mode\n",
" model.eval()\n",
"\n",
" # Export the model\n",
" torch.onnx.export(model, x, model_path, export_params=True, opset_version=10,\n",
" do_constant_folding=True, input_names=['input'],\n",
" output_names=['output'], dynamic_axes={'input': {0: 'batch_size'},\n",
" 'output': {0: 'batch_size'}})\n",
"\n",
" data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
" data = dict(input_data=[data_array])\n",
" json.dump(data, open(input_path, 'w'))\n",
"\n",
" py_run_args = ezkl.PyRunArgs()\n",
" py_run_args.input_visibility = \"private\"\n",
" py_run_args.output_visibility = \"public\"\n",
" py_run_args.param_visibility = \"fixed\" # private by default\n",
"\n",
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)\n",
" assert res == True\n",
"\n",
" await ezkl.calibrate_settings(input_path, model_path, settings_path, \"resources\")\n",
"\n",
" res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
" assert res == True\n",
"\n",
" res = await ezkl.get_srs(settings_path)\n",
" assert res == True\n",
"\n",
" # now generate the witness file\n",
" res = await ezkl.gen_witness(input_path, compiled_model_path, witness_path)\n",
" assert os.path.isfile(witness_path) == True\n",
"\n",
" # SETUP \n",
" # We recommend disabling selector compression for the setup as it decreases the size of the VK artifact\n",
" res = ezkl.setup(compiled_model_path, vk_path, pk_path, disable_selector_compression=True)\n",
" assert res == True\n",
" assert os.path.isfile(vk_path)\n",
" assert os.path.isfile(pk_path)\n",
" assert os.path.isfile(settings_path)\n",
"\n",
" # GENERATE A PROOF\n",
" res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, \"single\")\n",
" assert os.path.isfile(proof_path)\n",
"\n",
" res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)\n",
" assert res == True\n",
"\n",
" res = await ezkl.create_evm_vka(vk_path, settings_path, sol_key_code_path, abi_path)\n",
" assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import subprocess\n",
"import time\n",
"\n",
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"RPC_URL = \"http://localhost:3030\"\n",
"\n",
"# Save process globally\n",
"anvil_process = None\n",
"\n",
"def start_anvil():\n",
" global anvil_process\n",
" if anvil_process is None:\n",
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
" if anvil_process.returncode is not None:\n",
" raise Exception(\"failed to start anvil process\")\n",
" time.sleep(3)\n",
"\n",
"def stop_anvil():\n",
" global anvil_process\n",
" if anvil_process is not None:\n",
" anvil_process.terminate()\n",
" anvil_process = None\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check that the generated verifiers are identical for all models."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"start_anvil()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import filecmp\n",
"\n",
"def compare_files(file1, file2):\n",
" return filecmp.cmp(file1, file2, shallow=False)\n",
"\n",
"sol_code_path_0 = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
"sol_code_path_1 = os.path.join(\"mlp_relu\", 'test.sol')\n",
"\n",
"sol_code_path_2 = os.path.join(\"conv_sigmoid\", 'test.sol')\n",
"sol_code_path_3 = os.path.join(\"conv_relu\", 'test.sol')\n",
"\n",
"\n",
"assert compare_files(sol_code_path_0, sol_code_path_1) == True\n",
"assert compare_files(sol_code_path_2, sol_code_path_3) == True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we deploy separate verifier that will be shared by the four models. We picked the `1l_mlp sigmoid` model as an example but you could have used any of the generated verifiers since they are all identical. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os \n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"sol_code_path = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
"\n",
"res = await ezkl.deploy_evm(\n",
" addr_path_verifier,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030',\n",
" \"verifier/reusable\"\n",
")\n",
"\n",
"assert res == True\n",
"\n",
"with open(addr_path_verifier, 'r') as file:\n",
" addr = file.read().rstrip()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally we deploy each of the unique VK-artifacts and verify them using the shared verifier deployed in the previous step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for name in names:\n",
" addr_path_vk = \"addr_vk.txt\"\n",
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
" res = await ezkl.deploy_evm(addr_path_vk, sol_key_code_path, 'http://127.0.0.1:3030', \"vka\")\n",
" assert res == True\n",
"\n",
" with open(addr_path_vk, 'r') as file:\n",
" addr_vk = file.read().rstrip()\n",
" \n",
" proof_path = os.path.join(name, \"proof.json\")\n",
" sol_code_path = os.path.join(name, 'vk.sol')\n",
" res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" \"http://127.0.0.1:3030\",\n",
" addr_vk = addr_vk\n",
" )\n",
" assert res == True"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".env",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
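
For reference, the whole flow above condenses to a short script: deploy the shared reusable verifier once, then for each model deploy only its small VKA and verify against the shared address. A minimal sketch (assuming the per-model directories and proofs generated by the notebook above, and anvil listening on port 3030):

```python
import asyncio
import os

import ezkl

async def main():
    names = ["mlp_sigmoid", "mlp_relu", "conv_sigmoid", "conv_relu"]
    rpc = "http://127.0.0.1:3030"

    # Deploy the shared reusable verifier once; any model's test.sol works
    # since the generated verifiers are byte-identical.
    assert await ezkl.deploy_evm(
        "addr_verifier.txt", os.path.join(names[0], "test.sol"), rpc, "verifier/reusable"
    )
    with open("addr_verifier.txt") as f:
        addr = f.read().rstrip()

    # Per model: deploy only the VK artifact, then verify the model's proof
    # against the shared verifier address.
    for name in names:
        assert await ezkl.deploy_evm(
            "addr_vk.txt", os.path.join(name, "test_key.sol"), rpc, "vka"
        )
        with open("addr_vk.txt") as f:
            addr_vk = f.read().rstrip()
        assert await ezkl.verify_evm(
            addr, os.path.join(name, "proof.json"), rpc, addr_vk=addr_vk
        )

asyncio.run(main())
```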

Binary file not shown.

View File

@@ -0,0 +1 @@
{"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":7,"param_scale":7,"scale_rebase_multiplier":10,"lookup_range":[0,0],"logrows":13,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private"},"num_constraints":5619,"total_const_size":513,"model_instance_shapes":[[1,3,10,10]],"model_output_scales":[14],"model_input_scales":[7],"module_sizes":{"kzg":[],"poseidon":[0,[0]],"elgamal":[0,[0]]},"required_lookups":[],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null}

View File

@@ -9,7 +9,9 @@ class MyModel(nn.Module):
super(MyModel, self).__init__()
def forward(self, w, x, y, z):
return [((x & y)) == (x & (y | (z ^ w)))]
a = (x & y)
b = (y & (z ^ w))
return [a & b]
circuit = MyModel()

View File

@@ -1 +1 @@
{"input_data": [[false, true, false], [true, false, false], [true, false, false], [false, false, false]]}
{"input_data": [[false, true, true], [false, true, true], [true, false, false], [false, true, true]]}

View File

@@ -1,21 +1,17 @@
pytorch1.12.1:«
+
pytorch2.2.2:„
*
input1
input2
onnx::Equal_4And_0"And
input2
/And_output_0/And"And
)
input3
input
onnx::Or_5Xor_1"Xor
input3
input
/Xor_output_0/Xor"Xor
input2
onnx::Or_5 onnx::And_6Or_2"Or
0
input1
onnx::And_6
onnx::Equal_7And_3"And
6
5
input2
/Xor_output_0/And_1_output_0/And_1"And
5
/And_output_0
/And_1_output_0output/And_2"And

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -21,9 +21,9 @@ def main():
torch_model = Circuit()
# Input to the model
shape = [3, 2, 3]
w = 0.1*torch.rand(1, *shape, requires_grad=True)
x = 0.1*torch.rand(1, *shape, requires_grad=True)
y = 0.1*torch.rand(1, *shape, requires_grad=True)
w = 2 * torch.rand(1, *shape, requires_grad=True) - 1
x = 2 * torch.rand(1, *shape, requires_grad=True) - 1
y = 2 * torch.rand(1, *shape, requires_grad=True) - 1
torch_out = torch_model(w, x, y)
# Export the model
torch.onnx.export(torch_model, # model being run

View File

@@ -1 +1 @@
{"input_shapes": [[3, 2, 3], [3, 2, 3], [3, 2, 3], [3, 2, 3]], "input_data": [[0.0025284828152507544, 0.04976580664515495, 0.025840921327471733, 0.0829394981265068, 0.09595223516225815, 0.08764562010765076, 0.06308566778898239, 0.062386948615312576, 0.08090643584728241, 0.09267748892307281, 0.07428313046693802, 0.08987367898225784, 0.005716216750442982, 0.0666426345705986, 0.012837404385209084, 0.05769496038556099, 0.05761152133345604, 0.08006472885608673], [0.007834953255951405, 0.011380612850189209, 0.08560049533843994, 0.022283583879470825, 0.07879520952701569, 0.04422441124916077, 0.030812596902251244, 0.006081616971641779, 0.011045408435165882, 0.08776585012674332, 0.044985152781009674, 0.015603715553879738, 0.07923348993062973, 0.04872611165046692, 0.0036642670165747404, 0.05142095685005188, 0.0963878259062767, 0.03225792199373245], [0.09952805936336517, 0.002214533044025302, 0.011696457862854004, 0.022422820329666138, 0.04151459410786629, 0.027647346258163452, 0.011919880285859108, 0.006539052817970514, 0.06569185107946396, 0.034328874200582504, 0.0032284557819366455, 0.004105025436729193, 0.022395813837647438, 0.07135921716690063, 0.07882415503263474, 0.09764843434095383, 0.05335796996951103, 0.0525360181927681]], "output_data": [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]}
{"input_shapes": [[3, 2, 3], [3, 2, 3], [3, 2, 3], [3, 2, 3]], "input_data": [[0.6261028051376343, 0.49872446060180664, -0.04514765739440918, 0.5936200618743896, 0.9271858930587769, 0.6688600778579712, -0.20331168174743652, -0.7016235589981079, 0.025863051414489746, -0.19426143169403076, 0.9827852249145508, 0.4897397756576538, 0.2992602586746216, 0.7011144161224365, 0.9278832674026489, 0.5943725109100342, -0.573331356048584, 0.3675816059112549], [0.7803324460983276, -0.9616303443908691, 0.6070173978805542, -0.028337717056274414, -0.5080242156982422, -0.9280107021331787, 0.6150380373001099, 0.3865993022918701, -0.43668973445892334, 0.17152702808380127, 0.5144252777099609, -0.28881049156188965, 0.8932310342788696, 0.059034109115600586, 0.6865451335906982, 0.009820222854614258, 0.23011493682861328, -0.9492779970169067], [-0.21352827548980713, -0.16015326976776123, -0.38964390754699707, 0.13464701175689697, -0.8814496994018555, 0.5037975311279297, -0.804405927658081, 0.9858957529067993, 0.19567716121673584, 0.9777265787124634, 0.6151977777481079, 0.568595290184021, 0.10584986209869385, -0.8975653648376465, 0.6235959529876709, -0.547879695892334, 0.9289869070053101, 0.7567293643951416]], "output_data": [[1.0, 0.0, -0.0, 1.0, 1.0, 1.0, -0.0, -1.0, 0.0, -0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, -1.0, 0.0], [0.0, -1.0, 0.0, -1.0, -1.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [-0.0, -0.0, -0.0, 1.0, -0.0, 1.0, -0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -0.0, 1.0, -0.0, 1.0, 1.0]]}

View File

@@ -1,10 +1,11 @@
pytorch2.0.1:â
pytorch2.2.2:ă

woutput_w/Round"Round

xoutput_x/Floor"Floor

youtput_y/Ceil"Ceil torch_jitZ%
youtput_y/Ceil"Ceil
main_graphZ%
w



View File

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, x):
# reciprocal sqrt
m = 1 / torch.sqrt(x)
return m
circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)
torch.onnx.export(circuit, x, "network.onnx",
export_params=True, # store the trained parameter weights inside the model file
opset_version=17, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=['input'], # the model's input names
output_names=['output'], # the model's output names
dynamic_axes={'input': {0: 'batch_size'}, # variable length axes
'output': {0: 'batch_size'}})
d1 = ((x).detach().numpy()).reshape([-1]).tolist()
data = dict(
input_data=[d1],
)
# Serialize data into file:
json.dump(data, open("input.json", 'w'))

View File

@@ -0,0 +1 @@
{"input_data": [[0.8590779900550842, 0.4029041528701782, 0.6507361531257629, 0.9782488942146301, 0.37392884492874146, 0.6867020726203918, 0.11407750844955444, 0.362740159034729]]}

View File

@@ -0,0 +1,17 @@
pytorch2.2.2:Ź
$
input/Sqrt_output_0/Sqrt"Sqrt
1
/Sqrt_output_0output /Reciprocal"
Reciprocal
main_graphZ!
input


batch_size
b"
output


batch_size
B

View File

@@ -0,0 +1 @@
network.onnx filter=lfs diff=lfs merge=lfs -text

View File

@@ -0,0 +1,47 @@
## The worm
This is an onnx file for a [WormVAE](https://github.com/TuragaLab/wormvae?tab=readme-ov-file) model, which is a VAE / latent-space representation of the C. elegans connectome.
The model "is a large-scale latent variable model with a very high-dimensional latent space
consisting of voltage dynamics of 300 neurons over 5 minutes of time at the simulation frequency
of 160 Hz. The generative model for these latent variables is described by stochastic differential
equations modeling the nonlinear dynamics of the network activity." (see [here](https://openreview.net/pdf?id=CJzi3dRlJE-)).
In effect, this is a generative model of the worm's voltage dynamics: given a previous connectome state, it can generate new worm-like voltage dynamics.
Using ezkl you can create a zk circuit equivalent to the wormvae model, allowing you to "prove" execution of the worm model. If you're feeling particularly adventurous, you can also use the zk circuit to generate new worm-state that can be verified on chain.
To do so you'll first want to fetch the files using git-lfs (as the onnx file is too large to be stored in git).
```bash
git lfs fetch --all
```
You'll then want to use the usual ezkl loop to generate the zk circuit. We recommend using fixed visibility for the model parameters, as the model is quite large and this will prune the circuit significantly.
```bash
ezkl gen-settings --param-visibility=fixed
cp input.json calibration.json
ezkl calibrate-settings
ezkl compile-circuit
ezkl gen-witness
ezkl prove
```
You might also need to aggregate the proof to get it to fit on chain.
```bash
ezkl aggregate
```
You can then create a smart contract that verifies this aggregate proof
```bash
ezkl create-evm-verifier-aggr
```
This can then be deployed on the chain of your choice.
> Note: the model is large, so we recommend a machine with at least 512GB of RAM to run the above commands. If you're ever compute-constrained, you can always use the lilith service to generate the zk circuit. Message us on discord or telegram for more details :)
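
The CLI loop above also has a direct equivalent in the Python bindings. A minimal sketch of the core loop (assuming the default file names `network.onnx` and `input.json`; the aggregation and on-chain steps are left to the CLI commands shown above):

```python
import asyncio

import ezkl

async def main():
    # mirrors `ezkl gen-settings --param-visibility=fixed`; fixed params
    # prune the circuit significantly for a model this large
    run_args = ezkl.PyRunArgs()
    run_args.param_visibility = "fixed"
    assert ezkl.gen_settings("network.onnx", "settings.json", py_run_args=run_args)

    # mirrors `cp input.json calibration.json && ezkl calibrate-settings`
    await ezkl.calibrate_settings("input.json", "network.onnx", "settings.json", "resources")

    # mirrors `ezkl compile-circuit`, `ezkl gen-witness`, and `ezkl prove`
    assert ezkl.compile_circuit("network.onnx", "network.compiled", "settings.json")
    assert await ezkl.get_srs("settings.json")
    await ezkl.gen_witness("input.json", "network.compiled", "witness.json")
    ezkl.setup("network.compiled", "test.vk", "test.pk")
    ezkl.prove("witness.json", "network.compiled", "test.pk", "proof.json", "single")

asyncio.run(main())
```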

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f88c5901d3768ec21e3cf2f2840d255e84fa13c364df86b24d960cca3333769
size 82095882

View File

@@ -0,0 +1 @@
{"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":0,"param_scale":6,"scale_rebase_multiplier":1,"lookup_range":[-32768,32768],"logrows":17,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Fixed"},"num_constraints":367422820,"total_const_size":365577160,"model_instance_shapes":[[1,300,1200]],"model_output_scales":[6],"model_input_scales":[0,0,0],"module_sizes":{"kzg":[],"poseidon":[0,[0]],"elgamal":[0,[0]]},"required_lookups":[{"Div":{"denom":64.0}},"ReLU",{"Ln":{"scale":64.0}},{"Exp":{"scale":64.0}}],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null}

View File

@@ -9,7 +9,6 @@ import { EVM } from '@ethereumjs/evm'
import { buildTransaction, encodeDeployment } from './utils/tx-builder'
import { getAccountNonce, insertAccount } from './utils/account-utils'
import { encodeVerifierCalldata } from '../nodejs/ezkl';
import { error } from 'console'
async function deployContract(
vm: VM,
@@ -66,7 +65,7 @@ async function verify(
vkAddress = new Uint8Array(uint8Array.buffer);
// convert uint8array of length
error('vkAddress', vkAddress)
console.error('vkAddress', vkAddress)
}
const data = encodeVerifierCalldata(proof, vkAddress)

View File

@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2024-02-06"
channel = "nightly-2024-07-18"
components = ["rustfmt", "clippy"]

View File

@@ -1,25 +1,28 @@
// ignore file if compiling for wasm
#[global_allocator]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use clap::{CommandFactory, Parser};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use colored_json::ToColoredJson;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use ezkl::commands::Cli;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use ezkl::execute::run;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use ezkl::logger::init_logger;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use log::{error, info};
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
use rand::prelude::SliceRandom;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
#[cfg(feature = "icicle")]
use std::env;
#[tokio::main(flavor = "current_thread")]
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub async fn main() {
let args = Cli::parse();
@@ -56,7 +59,7 @@ pub async fn main() {
}
}
#[cfg(target_arch = "wasm32")]
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
pub fn main() {}
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]

269
src/bin/ios_gen_bindings.rs Normal file
View File

@@ -0,0 +1,269 @@
use camino::Utf8Path;
use std::fs;
use std::fs::remove_dir_all;
use std::path::{Path, PathBuf};
use std::process::Command;
use uniffi_bindgen::bindings::SwiftBindingGenerator;
use uniffi_bindgen::library_mode::generate_bindings;
use uuid::Uuid;
fn main() {
let library_name = std::env::var("CARGO_PKG_NAME").expect("CARGO_PKG_NAME is not set");
let mode = determine_build_mode();
build_bindings(&library_name, mode);
}
/// Determines the build mode based on the CONFIGURATION environment variable.
/// Defaults to "release" if not set or unrecognized.
/// "release" mode takes longer to build but produces optimized code, which has smaller size and is faster.
fn determine_build_mode() -> &'static str {
match std::env::var("CONFIGURATION").map(|s| s.to_lowercase()) {
Ok(ref config) if config == "debug" => "debug",
_ => "release",
}
}
/// Builds the Swift bindings and XCFramework for the specified library and build mode.
fn build_bindings(library_name: &str, mode: &str) {
// Get the root directory of this Cargo project
let manifest_dir = std::env::var_os("CARGO_MANIFEST_DIR")
.map(PathBuf::from)
.unwrap_or_else(|| std::env::current_dir().unwrap());
// Define the build directory inside the manifest directory
let build_dir = manifest_dir.join("build");
// Create a temporary directory to store the bindings and combined library
let tmp_dir = mktemp_local(&build_dir);
// Define directories for Swift bindings and output bindings
let swift_bindings_dir = tmp_dir.join("SwiftBindings");
let bindings_out = create_bindings_out_dir(&tmp_dir);
let framework_out = bindings_out.join("EzklCore.xcframework");
// Define target architectures for building
// We currently only support iOS devices and simulators running on ARM Macs
// This keeps the library size under 100MB to stay within GitHub's commit size limit
// To support older Macs (Intel), follow the instructions in the comments below
#[allow(clippy::useless_vec)]
let target_archs = vec![
vec!["aarch64-apple-ios"], // iOS device
vec!["aarch64-apple-ios-sim"], // iOS simulator ARM Mac
// vec!["aarch64-apple-ios-sim", "x86_64-apple-ios"], // TODO - replace the above line with this line to allow running on older Macs (Intel)
];
// Build the library for each architecture and combine them
let out_lib_paths: Vec<PathBuf> = target_archs
.iter()
.map(|archs| build_combined_archs(library_name, archs, &build_dir, mode))
.collect();
// Generate the path to the built dynamic library (.dylib)
let out_dylib_path = build_dir.join(format!(
"{}/{}/lib{}.dylib",
target_archs[0][0], mode, library_name
));
// Generate Swift bindings using uniffi_bindgen
generate_ios_bindings(&out_dylib_path, &swift_bindings_dir)
.expect("Failed to generate iOS bindings");
// Move the generated Swift file to the bindings output directory
fs::rename(
swift_bindings_dir.join(format!("{}.swift", library_name)),
bindings_out.join("EzklCore.swift"),
)
.expect("Failed to copy swift bindings file");
// Rename the `ios_ezklFFI.modulemap` file to `module.modulemap`
fs::rename(
swift_bindings_dir.join(format!("{}FFI.modulemap", library_name)),
swift_bindings_dir.join("module.modulemap"),
)
.expect("Failed to rename modulemap file");
// Create the XCFramework from the combined libraries and Swift bindings
create_xcframework(&out_lib_paths, &swift_bindings_dir, &framework_out);
// Define the destination directory for the bindings
let bindings_dest = build_dir.join("EzklCoreBindings");
if bindings_dest.exists() {
fs::remove_dir_all(&bindings_dest).expect("Failed to remove existing bindings directory");
}
// Move the bindings output to the destination directory
fs::rename(&bindings_out, &bindings_dest).expect("Failed to move framework into place");
// Clean up temporary directories
cleanup_temp_dirs(&build_dir);
}
/// Creates the output directory for the bindings.
/// Returns the path to the bindings output directory.
fn create_bindings_out_dir(base_dir: &Path) -> PathBuf {
let bindings_out = base_dir.join("EzklCoreBindings");
fs::create_dir_all(&bindings_out).expect("Failed to create bindings output directory");
bindings_out
}
/// Builds the library for each architecture and combines them into a single library using lipo.
/// Returns the path to the combined library.
fn build_combined_archs(
library_name: &str,
archs: &[&str],
build_dir: &Path,
mode: &str,
) -> PathBuf {
// Build the library for each architecture
let out_lib_paths: Vec<PathBuf> = archs
.iter()
.map(|&arch| {
build_for_arch(arch, build_dir, mode);
build_dir
.join(arch)
.join(mode)
.join(format!("lib{}.a", library_name))
})
.collect();
// Create a unique temporary directory for the combined library
let lib_out = mktemp_local(build_dir).join(format!("lib{}.a", library_name));
// Combine the libraries using lipo
let mut lipo_cmd = Command::new("lipo");
lipo_cmd
.arg("-create")
.arg("-output")
.arg(lib_out.to_str().unwrap());
for lib_path in &out_lib_paths {
lipo_cmd.arg(lib_path.to_str().unwrap());
}
let status = lipo_cmd.status().expect("Failed to run lipo command");
if !status.success() {
panic!("lipo command failed with status: {}", status);
}
lib_out
}
/// Builds the library for a specific architecture.
fn build_for_arch(arch: &str, build_dir: &Path, mode: &str) {
// Ensure the target architecture is installed
install_arch(arch);
// Run cargo build for the specified architecture and mode
let mut build_cmd = Command::new("cargo");
build_cmd
.arg("build")
.arg("--no-default-features")
.arg("--features")
.arg("ios-bindings");
if mode == "release" {
build_cmd.arg("--release");
}
build_cmd
.arg("--lib")
.env("CARGO_BUILD_TARGET_DIR", build_dir)
.env("CARGO_BUILD_TARGET", arch);
let status = build_cmd.status().expect("Failed to run cargo build");
if !status.success() {
panic!("cargo build failed for architecture: {}", arch);
}
}
/// Installs the specified target architecture using rustup.
fn install_arch(arch: &str) {
let status = Command::new("rustup")
.arg("target")
.arg("add")
.arg(arch)
.status()
.expect("Failed to run rustup command");
if !status.success() {
panic!("Failed to install target architecture: {}", arch);
}
}
/// Generates Swift bindings for the iOS library using uniffi_bindgen.
fn generate_ios_bindings(dylib_path: &Path, binding_dir: &Path) -> Result<(), std::io::Error> {
// Remove existing binding directory if it exists
if binding_dir.exists() {
remove_dir_all(binding_dir)?;
}
// Generate the Swift bindings using uniffi_bindgen
generate_bindings(
Utf8Path::from_path(dylib_path).ok_or_else(|| {
std::io::Error::new(std::io::ErrorKind::InvalidInput, "Invalid dylib path")
})?,
None,
&SwiftBindingGenerator,
None,
Utf8Path::from_path(binding_dir).ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Invalid Swift bindings directory",
)
})?,
true,
)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
Ok(())
}
/// Creates an XCFramework from the combined libraries and Swift bindings.
fn create_xcframework(lib_paths: &[PathBuf], swift_bindings_dir: &Path, framework_out: &Path) {
let mut xcbuild_cmd = Command::new("xcodebuild");
xcbuild_cmd.arg("-create-xcframework");
// Add each library and its corresponding headers to the xcodebuild command
for lib_path in lib_paths {
println!("Including library: {:?}", lib_path);
xcbuild_cmd.arg("-library");
xcbuild_cmd.arg(lib_path.to_str().unwrap());
xcbuild_cmd.arg("-headers");
xcbuild_cmd.arg(swift_bindings_dir.to_str().unwrap());
}
xcbuild_cmd.arg("-output");
xcbuild_cmd.arg(framework_out.to_str().unwrap());
let status = xcbuild_cmd.status().expect("Failed to run xcodebuild");
if !status.success() {
panic!("xcodebuild failed with status: {}", status);
}
}
/// Creates a temporary directory inside the build path with a unique UUID.
/// This ensures unique build artifacts for concurrent builds.
fn mktemp_local(build_path: &Path) -> PathBuf {
let dir = tmp_local(build_path).join(Uuid::new_v4().to_string());
fs::create_dir(&dir).expect("Failed to create temporary directory");
dir
}
/// Gets the path to the local temporary directory inside the build path.
fn tmp_local(build_path: &Path) -> PathBuf {
let tmp_path = build_path.join("tmp");
if let Ok(metadata) = fs::metadata(&tmp_path) {
if !metadata.is_dir() {
panic!("Expected 'tmp' to be a directory");
}
} else {
fs::create_dir_all(&tmp_path).expect("Failed to create local temporary directory");
}
tmp_path
}
/// Cleans up temporary directories inside the build path.
fn cleanup_temp_dirs(build_dir: &Path) {
let tmp_dir = build_dir.join("tmp");
if tmp_dir.exists() {
fs::remove_dir_all(tmp_dir).expect("Failed to remove temporary directories");
}
}

12
src/bindings/mod.rs Normal file
View File

@@ -0,0 +1,12 @@
/// Python bindings
#[cfg(feature = "python-bindings")]
pub mod python;
/// Universal bindings for all platforms
#[cfg(any(
feature = "ios-bindings",
all(target_arch = "wasm32", target_os = "unknown")
))]
pub mod universal;
/// wasm prover and verifier
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
pub mod wasm;

View File

@@ -6,7 +6,7 @@ use crate::circuit::modules::poseidon::{
use crate::circuit::modules::Module;
use crate::circuit::{CheckMode, Tolerance};
use crate::commands::*;
use crate::fieldutils::{felt_to_i64, i64_to_felt};
use crate::fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep};
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::TestDataSource;
use crate::graph::{
@@ -191,6 +191,12 @@ struct PyRunArgs {
#[pyo3(get, set)]
/// str: commitment type, accepts `kzg`, `ipa`
pub commitment: PyCommitments,
/// int: The base used for decomposition
#[pyo3(get, set)]
pub decomp_base: usize,
/// int: The number of legs used for decomposition
#[pyo3(get, set)]
pub decomp_legs: usize,
}
/// default instantiation of PyRunArgs
@@ -221,6 +227,8 @@ impl From<PyRunArgs> for RunArgs {
rebase_frac_zero_constants: py_run_args.rebase_frac_zero_constants,
check_mode: py_run_args.check_mode,
commitment: Some(py_run_args.commitment.into()),
decomp_base: py_run_args.decomp_base,
decomp_legs: py_run_args.decomp_legs,
}
}
}
@@ -243,6 +251,8 @@ impl Into<PyRunArgs> for RunArgs {
rebase_frac_zero_constants: self.rebase_frac_zero_constants,
check_mode: self.check_mode,
commitment: self.commitment.into(),
decomp_base: self.decomp_base,
decomp_legs: self.decomp_legs,
}
}
}
@@ -331,9 +341,9 @@ fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
#[pyfunction(signature = (
felt,
))]
fn felt_to_int(felt: PyFelt) -> PyResult<i64> {
fn felt_to_int(felt: PyFelt) -> PyResult<IntegerRep> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let int_rep = felt_to_i64(felt);
let int_rep = felt_to_integer_rep(felt);
Ok(int_rep)
}
@@ -357,7 +367,7 @@ fn felt_to_int(felt: PyFelt) -> PyResult<i64> {
))]
fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let int_rep = felt_to_i64(felt);
let int_rep = felt_to_integer_rep(felt);
let multiplier = scale_to_multiplier(scale);
let float_rep = int_rep as f64 / multiplier;
Ok(float_rep)
@@ -385,7 +395,7 @@ fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
let int_rep = quantize_float(&input, 0.0, scale)
.map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
let felt = i64_to_felt(int_rep);
let felt = integer_rep_to_felt(int_rep);
Ok(crate::pfsys::field_to_string::<Fr>(&felt))
}
@@ -1490,8 +1500,8 @@ fn encode_evm_calldata<'a>(
/// srs_path: str
/// The path to the SRS file
///
/// render_vk_separately: bool
/// Whether the verifier key should be rendered as a separate contract. We recommend disabling selector compression if this is enabled. To save the verifier key as a separate contract, set this to true and then call the create_evm_vk command
/// reusable: bool
/// Whether the verifier should be rendered as a reusable contract. If so, you will need to deploy the VK artifact (VKA) separately, which you can generate using the create_evm_vka command
///
/// Returns
/// -------
@@ -1503,7 +1513,7 @@ fn encode_evm_calldata<'a>(
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
srs_path=None,
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
reusable = DEFAULT_RENDER_REUSABLE.parse().unwrap(),
))]
fn create_evm_verifier(
py: Python,
@@ -1512,7 +1522,7 @@ fn create_evm_verifier(
sol_code_path: PathBuf,
abi_path: PathBuf,
srs_path: Option<PathBuf>,
render_vk_seperately: bool,
reusable: bool,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_verifier(
@@ -1521,7 +1531,7 @@ fn create_evm_verifier(
settings_path,
sol_code_path,
abi_path,
render_vk_seperately,
reusable,
)
.await
.map_err(|e| {
@@ -1533,7 +1543,8 @@ fn create_evm_verifier(
})
}
/// Creates an Evm verifier key. This command should be called after create_evm_verifier with the render_vk_separately arg set to true. By rendering a verification key separately you can reuse the same verifier for similar circuit setups with different verifying keys, helping to reduce the amount of state our verifiers store on the blockchain.
/// Creates an Evm VK artifact (VKA). This command generates a VK with circuit-specific metadata encoded in memory for use by the reusable H2 verifier.
/// This is useful for deploying verifiers that would otherwise be too big to fit on-chain and would require aggregation.
///
/// Arguments
/// ---------
@@ -1563,7 +1574,7 @@ fn create_evm_verifier(
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
srs_path=None
))]
fn create_evm_vk(
fn create_evm_vka(
py: Python,
vk_path: PathBuf,
settings_path: PathBuf,
@@ -1572,7 +1583,7 @@ fn create_evm_vk(
srs_path: Option<PathBuf>,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_vk(vk_path, srs_path, settings_path, sol_code_path, abi_path)
crate::execute::create_evm_vka(vk_path, srs_path, settings_path, sol_code_path, abi_path)
.await
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier: {}", e);
@@ -1703,6 +1714,7 @@ fn setup_test_evm_witness(
addr_path,
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
rpc_url=None,
contract_type=ContractType::default(),
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
@@ -1711,6 +1723,7 @@ fn deploy_evm(
addr_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,
contract_type: ContractType,
optimizer_runs: usize,
private_key: Option<String>,
) -> PyResult<Bound<'_, PyAny>> {
@@ -1721,42 +1734,7 @@ fn deploy_evm(
addr_path,
optimizer_runs,
private_key,
"Halo2Verifier",
)
.await
.map_err(|e| {
let err_str = format!("Failed to run deploy_evm: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
})
}
/// deploys the solidity vk verifier
#[pyfunction(signature = (
addr_path,
sol_code_path=PathBuf::from(DEFAULT_VK_SOL),
rpc_url=None,
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
fn deploy_vk_evm(
py: Python,
addr_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,
optimizer_runs: usize,
private_key: Option<String>,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::deploy_evm(
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
"Halo2VerifyingKey",
contract_type,
)
.await
.map_err(|e| {
@@ -1892,8 +1870,8 @@ fn verify_evm<'a>(
/// srs_path: str
/// The path to the SRS file
///
/// render_vk_separately: bool
/// Whether the verifier key should be rendered as a separate contract. We recommend disabling selector compression if this is enabled. To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command
/// reusable: bool
/// Whether the verifier should be rendered as a reusable contract. If so, you will need to deploy the VK artifact (VKA) separately, which you can generate using the create_evm_vka command
///
/// Returns
/// -------
@@ -1906,7 +1884,7 @@ fn verify_evm<'a>(
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
srs_path=None,
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
reusable = DEFAULT_RENDER_REUSABLE.parse().unwrap(),
))]
fn create_evm_verifier_aggr(
py: Python,
@@ -1916,7 +1894,7 @@ fn create_evm_verifier_aggr(
abi_path: PathBuf,
logrows: u32,
srs_path: Option<PathBuf>,
render_vk_seperately: bool,
reusable: bool,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_aggregate_verifier(
@@ -1926,7 +1904,7 @@ fn create_evm_verifier_aggr(
abi_path,
aggregation_settings,
logrows,
render_vk_seperately,
reusable,
)
.await
.map_err(|e| {
@@ -1975,9 +1953,8 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(compile_circuit, m)?)?;
m.add_function(wrap_pyfunction!(verify_aggr, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_verifier, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_vk, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_vka, m)?)?;
m.add_function(wrap_pyfunction!(deploy_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_vk_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
m.add_function(wrap_pyfunction!(setup_test_evm_witness, m)?)?;
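
The net effect of these renames on the Python API: `create_evm_vk` becomes `create_evm_vka`, the standalone `deploy_vk_evm` is folded into `deploy_evm` via a new `contract_type` argument, and `render_vk_seperately` is replaced by `reusable`. A sketch of the new call pattern (notebook-style top-level `await`; the paths are placeholders):

```python
import ezkl

# Render the verifier as a reusable contract, then emit the per-circuit
# VK artifact that carries the circuit-specific metadata.
await ezkl.create_evm_verifier("test.vk", "settings.json", "test.sol", "test.abi", reusable=True)
await ezkl.create_evm_vka("test.vk", "settings.json", "test_key.sol", "test.abi")

# One deploy entry point; the contract type selects verifier vs. VKA.
await ezkl.deploy_evm("addr_verifier.txt", "test.sol", "http://127.0.0.1:3030", "verifier/reusable")
await ezkl.deploy_evm("addr_vka.txt", "test_key.sol", "http://127.0.0.1:3030", "vka")
```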

579
src/bindings/universal.rs Normal file
View File

@@ -0,0 +1,579 @@
use halo2_proofs::{
plonk::*,
poly::{
commitment::{CommitmentScheme, ParamsProver},
ipa::{
commitment::{IPACommitmentScheme, ParamsIPA},
multiopen::{ProverIPA, VerifierIPA},
strategy::SingleStrategy as IPASingleStrategy,
},
kzg::{
commitment::{KZGCommitmentScheme, ParamsKZG},
multiopen::{ProverSHPLONK, VerifierSHPLONK},
strategy::SingleStrategy as KZGSingleStrategy,
},
VerificationStrategy,
},
};
use std::fmt::Display;
use std::io::BufReader;
use std::str::FromStr;
use crate::{
circuit::region::RegionSettings,
graph::GraphSettings,
pfsys::{
create_proof_circuit,
evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript},
verify_proof_circuit, TranscriptType,
},
tensor::TensorType,
CheckMode, Commitments, EZKLError as InnerEZKLError,
};
use crate::graph::{GraphCircuit, GraphWitness};
use halo2_solidity_verifier::encode_calldata;
use halo2curves::{
bn256::{Bn256, Fr, G1Affine},
ff::{FromUniformBytes, PrimeField},
};
use snark_verifier::{loader::native::NativeLoader, system::halo2::transcript::evm::EvmTranscript};
/// Wrapper around the Error Message
#[cfg_attr(feature = "ios-bindings", derive(uniffi::Error))]
#[derive(Debug)]
pub enum EZKLError {
/// An internal error carrying a descriptive message
InternalError(String),
}
impl Display for EZKLError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
EZKLError::InternalError(e) => write!(f, "Internal error: {}", e),
}
}
}
impl From<InnerEZKLError> for EZKLError {
fn from(e: InnerEZKLError) -> Self {
EZKLError::InternalError(e.to_string())
}
}
/// Encode verifier calldata from proof and ethereum vk_address
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn encode_verifier_calldata(
// TODO - should it be pub(crate) or pub or pub(super)?
proof: Vec<u8>,
vk_address: Option<Vec<u8>>,
) -> Result<Vec<u8>, EZKLError> {
let snark: crate::pfsys::Snark<Fr, G1Affine> =
serde_json::from_slice(&proof[..]).map_err(InnerEZKLError::from)?;
let vk_address: Option<[u8; 20]> = if let Some(vk_address) = vk_address {
let array: [u8; 20] =
serde_json::from_slice(&vk_address[..]).map_err(InnerEZKLError::from)?;
Some(array)
} else {
None
};
let flattened_instances = snark.instances.into_iter().flatten();
let encoded = encode_calldata(
vk_address,
&snark.proof,
&flattened_instances.collect::<Vec<_>>(),
);
Ok(encoded)
}
/// Generate witness from compiled circuit and input json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn gen_witness(compiled_circuit: Vec<u8>, input: Vec<u8>) -> Result<Vec<u8>, EZKLError> {
let mut circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| {
EZKLError::InternalError(format!("Failed to deserialize compiled model: {}", e))
})?;
let input: crate::graph::input::GraphData = serde_json::from_slice(&input[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize input: {}", e)))?;
let mut input = circuit
.load_graph_input(&input)
.map_err(|e| EZKLError::InternalError(format!("{}", e)))?;
let witness = circuit
.forward::<KZGCommitmentScheme<Bn256>>(
&mut input,
None,
None,
RegionSettings::all_true(
circuit.settings().run_args.decomp_base,
circuit.settings().run_args.decomp_legs,
),
)
.map_err(|e| EZKLError::InternalError(format!("{}", e)))?;
serde_json::to_vec(&witness)
.map_err(|e| EZKLError::InternalError(format!("Failed to serialize witness: {}", e)))
}
/// Generate verifying key from compiled circuit, and parameters srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn gen_vk(
compiled_circuit: Vec<u8>,
srs: Vec<u8>,
compress_selectors: bool,
) -> Result<Vec<u8>, EZKLError> {
let mut reader = BufReader::new(&srs[..]);
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
let circuit: GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize circuit: {}", e)))?;
let vk = create_vk_lean::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&params,
compress_selectors,
)
.map_err(|e| EZKLError::InternalError(format!("Failed to create verifying key: {}", e)))?;
let mut serialized_vk = Vec::new();
vk.write(&mut serialized_vk, halo2_proofs::SerdeFormat::RawBytes)
.map_err(|e| {
EZKLError::InternalError(format!("Failed to serialize verifying key: {}", e))
})?;
Ok(serialized_vk)
}
/// Generate proving key from vk, compiled circuit and parameters srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn gen_pk(
vk: Vec<u8>,
compiled_circuit: Vec<u8>,
srs: Vec<u8>,
) -> Result<Vec<u8>, EZKLError> {
let mut reader = BufReader::new(&srs[..]);
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
let circuit: GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize circuit: {}", e)))?;
let mut reader = BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit.settings().clone(),
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize verifying key: {}", e)))?;
let pk = create_pk_lean::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk, &circuit, &params)
.map_err(|e| EZKLError::InternalError(format!("Failed to create proving key: {}", e)))?;
let mut serialized_pk = Vec::new();
pk.write(&mut serialized_pk, halo2_proofs::SerdeFormat::RawBytes)
.map_err(|e| EZKLError::InternalError(format!("Failed to serialize proving key: {}", e)))?;
Ok(serialized_pk)
}
/// Verify proof with vk, proof json, circuit settings json and srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn verify(
proof: Vec<u8>,
vk: Vec<u8>,
settings: Vec<u8>,
srs: Vec<u8>,
) -> Result<bool, EZKLError> {
let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize settings: {}", e)))?;
let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proof: {}", e)))?;
let mut reader = BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings.clone(),
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize vk: {}", e)))?;
let orig_n = 1 << circuit_settings.run_args.logrows;
let commitment = circuit_settings.run_args.commitment.into();
let mut reader = BufReader::new(&srs[..]);
let result = match commitment {
Commitments::KZG => {
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
let strategy = KZGSingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
Commitments::IPA => {
let params: ParamsIPA<_> = get_params(&mut reader)?;
let strategy = IPASingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
};
match result {
Ok(_) => Ok(true),
Err(e) => Err(EZKLError::InternalError(format!(
"Verification failed: {}",
e
))),
}
}
/// Verify aggregate proof with vk, proof, circuit settings and srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn verify_aggr(
proof: Vec<u8>,
vk: Vec<u8>,
logrows: u64,
srs: Vec<u8>,
commitment: &str,
) -> Result<bool, EZKLError> {
let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proof: {}", e)))?;
let mut reader = BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, AggregationCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
(),
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize vk: {}", e)))?;
let commit = Commitments::from_str(commitment)
.map_err(|e| EZKLError::InternalError(format!("Invalid commitment: {}", e)))?;
let orig_n = 1 << logrows;
let mut reader = BufReader::new(&srs[..]);
let result = match commit {
Commitments::KZG => {
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
let strategy = KZGSingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
Commitments::IPA => {
let params: ParamsIPA<_> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(
|e| EZKLError::InternalError(format!("Failed to deserialize params: {}", e)),
)?;
let strategy = IPASingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
};
result
.map(|_| true)
.map_err(|e| EZKLError::InternalError(format!("{}", e)))
}
/// Prove in browser with compiled circuit, witness json, proving key, and srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn prove(
witness: Vec<u8>,
pk: Vec<u8>,
compiled_circuit: Vec<u8>,
srs: Vec<u8>,
) -> Result<Vec<u8>, EZKLError> {
#[cfg(feature = "det-prove")]
log::set_max_level(log::LevelFilter::Debug);
#[cfg(not(feature = "det-prove"))]
log::set_max_level(log::LevelFilter::Info);
let mut circuit: GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize circuit: {}", e)))?;
let data: GraphWitness = serde_json::from_slice(&witness[..]).map_err(InnerEZKLError::from)?;
let mut reader = BufReader::new(&pk[..]);
let pk = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit.settings().clone(),
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proving key: {}", e)))?;
circuit
.load_graph_witness(&data)
.map_err(InnerEZKLError::from)?;
let public_inputs = circuit
.prepare_public_inputs(&data)
.map_err(InnerEZKLError::from)?;
let proof_split_commits: Option<crate::pfsys::ProofSplitCommit> = data.into();
let mut reader = BufReader::new(&srs[..]);
let commitment = circuit.settings().run_args.commitment.into();
let proof = match commitment {
Commitments::KZG => {
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(
|e| EZKLError::InternalError(format!("Failed to deserialize srs: {}", e)),
)?;
create_proof_circuit::<
KZGCommitmentScheme<Bn256>,
_,
ProverSHPLONK<_>,
VerifierSHPLONK<_>,
KZGSingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit,
vec![public_inputs],
&params,
&pk,
CheckMode::UNSAFE,
Commitments::KZG,
TranscriptType::EVM,
proof_split_commits,
None,
)
}
Commitments::IPA => {
let params: ParamsIPA<_> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(
|e| EZKLError::InternalError(format!("Failed to deserialize srs: {}", e)),
)?;
create_proof_circuit::<
IPACommitmentScheme<G1Affine>,
_,
ProverIPA<_>,
VerifierIPA<_>,
IPASingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit,
vec![public_inputs],
&params,
&pk,
CheckMode::UNSAFE,
Commitments::IPA,
TranscriptType::EVM,
proof_split_commits,
None,
)
}
}
.map_err(InnerEZKLError::from)?;
Ok(serde_json::to_vec(&proof).map_err(InnerEZKLError::from)?)
}
/// Validate the witness json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn witness_validation(witness: Vec<u8>) -> Result<bool, EZKLError> {
let _: GraphWitness = serde_json::from_slice(&witness[..]).map_err(InnerEZKLError::from)?;
Ok(true)
}
/// Validate the compiled circuit
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn compiled_circuit_validation(compiled_circuit: Vec<u8>) -> Result<bool, EZKLError> {
let _: GraphCircuit = bincode::deserialize(&compiled_circuit[..]).map_err(|e| {
EZKLError::InternalError(format!("Failed to deserialize compiled circuit: {}", e))
})?;
Ok(true)
}
/// Validate the input json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn input_validation(input: Vec<u8>) -> Result<bool, EZKLError> {
let _: crate::graph::input::GraphData =
serde_json::from_slice(&input[..]).map_err(InnerEZKLError::from)?;
Ok(true)
}
/// Validate the proof json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn proof_validation(proof: Vec<u8>) -> Result<bool, EZKLError> {
let _: crate::pfsys::Snark<Fr, G1Affine> =
serde_json::from_slice(&proof[..]).map_err(InnerEZKLError::from)?;
Ok(true)
}
/// Validate the verifying key given the settings json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn vk_validation(vk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKLError> {
let circuit_settings: GraphSettings =
serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
let mut reader = BufReader::new(&vk[..]);
let _ = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize verifying key: {}", e)))?;
Ok(true)
}
/// Validate the proving key given the settings json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn pk_validation(pk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKLError> {
let circuit_settings: GraphSettings =
serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
let mut reader = BufReader::new(&pk[..]);
let _ = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proving key: {}", e)))?;
Ok(true)
}
/// Validate the settings json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn settings_validation(settings: Vec<u8>) -> Result<bool, EZKLError> {
let _: GraphSettings = serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
Ok(true)
}
/// Validate the srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn srs_validation(srs: Vec<u8>) -> Result<bool, EZKLError> {
let mut reader = BufReader::new(&srs[..]);
let _: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(|e| {
EZKLError::InternalError(format!("Failed to deserialize params: {}", e))
})?;
Ok(true)
}
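// Sketch: the validators above are cheap deserialization checks, so a host
// application can vet artifacts before attempting an expensive prove. The
// buffer names below are illustrative placeholders.
//
// fn preflight(witness: Vec<u8>, settings: Vec<u8>, srs: Vec<u8>) -> Result<(), EZKLError> {
//     witness_validation(witness)?;
//     settings_validation(settings)?;
//     srs_validation(srs)?;
//     Ok(())
// }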
// HELPER FUNCTIONS
fn get_params<
Scheme: for<'a> halo2_proofs::poly::commitment::Params<'a, halo2curves::bn256::G1Affine>,
>(
mut reader: &mut BufReader<&[u8]>,
) -> Result<Scheme, EZKLError> {
halo2_proofs::poly::commitment::Params::<G1Affine>::read(&mut reader)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize params: {}", e)))
}
/// Creates a [VerifyingKey] for a [GraphCircuit] (`circuit`) with specific [CommitmentScheme] parameters (`params`) for the WASM target
pub fn create_vk_lean<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
circuit: &C,
params: &'_ Scheme::ParamsProver,
compress_selectors: bool,
) -> Result<VerifyingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
<Scheme as CommitmentScheme>::Scalar: FromUniformBytes<64>,
{
// Build an empty copy of the circuit (no witnesses) for key generation
let empty_circuit = <C as Circuit<F>>::without_witnesses(circuit);
// Initialize the verifying key
let vk = keygen_vk_custom(params, &empty_circuit, compress_selectors)?;
Ok(vk)
}
/// Creates a [ProvingKey] from a [VerifyingKey] for a [GraphCircuit] (`circuit`) with specific [CommitmentScheme] parameters (`params`) for the WASM target
pub fn create_pk_lean<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
vk: VerifyingKey<Scheme::Curve>,
circuit: &C,
params: &'_ Scheme::ParamsProver,
) -> Result<ProvingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
<Scheme as CommitmentScheme>::Scalar: FromUniformBytes<64>,
{
// Build an empty copy of the circuit (no witnesses) for key generation
let empty_circuit = <C as Circuit<F>>::without_witnesses(circuit);
// Initialize the proving key
let pk = keygen_pk(params, vk, &empty_circuit)?;
Ok(pk)
}
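// Intended keygen flow (sketch, assuming a KZG setup; the concrete types in
// the turbofish are illustrative and must match the caller's circuit):
//
// let vk = create_vk_lean::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(&circuit, &params, true)?;
// let pk = create_pk_lean::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk, &circuit, &params)?;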

src/bindings/wasm.rs (new file, 372 lines)

@@ -0,0 +1,372 @@
use crate::{
circuit::modules::{
polycommit::PolyCommitChip,
poseidon::{
spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH},
PoseidonChip,
},
Module,
},
fieldutils::{felt_to_integer_rep, integer_rep_to_felt},
graph::{
modules::POSEIDON_LEN_GRAPH, quantize_float, scale_to_multiplier, GraphCircuit,
GraphSettings,
},
};
use console_error_panic_hook;
use halo2_proofs::{
plonk::*,
poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG},
};
use halo2curves::{
bn256::{Bn256, Fr, G1Affine},
ff::PrimeField,
};
use wasm_bindgen::prelude::*;
use wasm_bindgen_console_logger::DEFAULT_LOGGER;
use crate::bindings::universal::{
compiled_circuit_validation, encode_verifier_calldata, gen_pk, gen_vk, gen_witness,
input_validation, pk_validation, proof_validation, settings_validation, srs_validation,
verify_aggr, vk_validation, witness_validation, EZKLError as ExternalEZKLError,
};
#[cfg(feature = "web")]
pub use wasm_bindgen_rayon::init_thread_pool;
impl From<ExternalEZKLError> for JsError {
fn from(e: ExternalEZKLError) -> Self {
JsError::new(&format!("{}", e))
}
}
#[wasm_bindgen]
/// Initialize logger for wasm
pub fn init_logger() {
log::set_logger(&DEFAULT_LOGGER).unwrap();
}
#[wasm_bindgen]
/// Initialize panic hook for wasm
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
/// Wrapper around the universal encode_verifier_calldata function
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn encodeVerifierCalldata(
proof: wasm_bindgen::Clamped<Vec<u8>>,
vk_address: Option<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
encode_verifier_calldata(proof.0, vk_address).map_err(JsError::from)
}
/// Converts a felt to a big endian string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToBigEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(format!("{:?}", felt))
}
/// Converts a felt to a little endian string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToLittleEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
let repr = serde_json::to_string(&felt).unwrap();
let b: String = serde_json::from_str(&repr).unwrap();
Ok(b)
}
/// Converts a felt to its integer representation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToInt(
array: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&felt_to_integer_rep(felt))
.map_err(|e| JsError::new(&format!("Failed to serialize integer: {}", e)))?,
))
}
/// Converts a felt to a floating point number
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToFloat(
array: wasm_bindgen::Clamped<Vec<u8>>,
scale: crate::Scale,
) -> Result<f64, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
let int_rep = felt_to_integer_rep(felt);
let multiplier = scale_to_multiplier(scale);
Ok(int_rep as f64 / multiplier)
}
/// Converts a floating point number to a hex string representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn floatToFelt(
input: f64,
scale: crate::Scale,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let int_rep =
quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
let felt = integer_rep_to_felt(int_rep);
let vec = crate::pfsys::field_to_string::<halo2curves::bn256::Fr>(&felt);
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&vec).map_err(
|e| JsError::new(&format!("Failed to serialize a float to felt{}", e)),
)?))
}
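// Worked example of the fixed-point convention shared by floatToFelt and
// feltToFloat (assuming scale_to_multiplier(s) == 2^s): with scale = 7, the
// float 0.5 quantizes to round(0.5 * 2^7) = 64, and 64 converts back to
// 64 / 2^7 = 0.5. Inputs that are not multiples of 2^-7 round, so the round
// trip is exact only up to quantization error.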
/// Generate a kzg commitment.
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn kzgCommit(
message: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let message: Vec<Fr> = serde_json::from_slice(&message[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
let mut reader = std::io::BufReader::new(&params_ser[..]);
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
let mut reader = std::io::BufReader::new(&vk[..]);
let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
let output = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
message,
(vk.cs().blinding_factors() + 1) as u32,
&params,
);
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&output).map_err(|e| JsError::new(&format!("{}", e)))?,
))
}
/// Converts a byte buffer to a vector of field elements, reading it as 16-byte little-endian chunks
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn bufferToVecOfFelt(
buffer: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
// Convert the buffer to a slice
let buffer: &[u8] = &buffer;
// Divide the buffer into chunks of 16 bytes
let chunks = buffer.chunks_exact(16);
// Get the remainder
let remainder = chunks.remainder();
// Copy the remainder so it can be zero-padded to 16 bytes below
let mut remainder = remainder.to_vec();
// Collect chunks into a Vec<[u8; 16]>.
let chunks: Result<Vec<[u8; 16]>, JsError> = chunks
.map(|slice| {
let array: [u8; 16] = slice
.try_into()
.map_err(|_| JsError::new("failed to slice input chunks"))?;
Ok(array)
})
.collect();
let mut chunks = chunks?;
if !remainder.is_empty() {
remainder.resize(16, 0);
// Convert the Vec<u8> to [u8; 16]
let remainder_array: [u8; 16] = remainder
.try_into()
.map_err(|_| JsError::new("failed to slice remainder"))?;
// append the remainder to the chunks
chunks.push(remainder_array);
}
// Convert each chunk to a field element
let field_elements: Vec<Fr> = chunks
.iter()
.map(|x| PrimeField::from_u128(u8_array_to_u128_le(*x)))
.collect();
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&field_elements)
.map_err(|e| JsError::new(&format!("Failed to serialize field elements: {}", e)))?,
))
}
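// Example of the chunking above: a 20-byte buffer yields two field elements,
// one from bytes 0..16 and one from bytes 16..20 zero-padded to 16 bytes,
// each read as a little-endian u128 via u8_array_to_u128_le below.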
/// Generate a poseidon hash in browser from a JSON-serialized vector of field elements
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn poseidonHash(
message: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let message: Vec<Fr> = serde_json::from_slice(&message[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
let output =
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
message.clone(),
)
.map_err(|e| JsError::new(&format!("{}", e)))?;
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&output).map_err(
|e| JsError::new(&format!("Failed to serialize poseidon hash output: {}", e)),
)?))
}
/// Generate a witness from an input json and a compiled circuit.
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genWitness(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
input: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
gen_witness(compiled_circuit.0, input.0).map_err(JsError::from)
}
/// Generate verifying key in browser
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genVk(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
compress_selectors: bool,
) -> Result<Vec<u8>, JsError> {
gen_vk(compiled_circuit.0, params_ser.0, compress_selectors).map_err(JsError::from)
}
/// Generate proving key in browser
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genPk(
vk: wasm_bindgen::Clamped<Vec<u8>>,
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
gen_pk(vk.0, compiled_circuit.0, params_ser.0).map_err(JsError::from)
}
/// Verify proof in browser using wasm
#[wasm_bindgen]
pub fn verify(
proof_js: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
srs: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
super::universal::verify(proof_js.0, vk.0, settings.0, srs.0).map_err(JsError::from)
}
/// Verify aggregate proof in browser using wasm
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn verifyAggr(
proof_js: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
logrows: u64,
srs: wasm_bindgen::Clamped<Vec<u8>>,
commitment: &str,
) -> Result<bool, JsError> {
verify_aggr(proof_js.0, vk.0, logrows, srs.0, commitment).map_err(JsError::from)
}
/// Prove in browser using wasm
#[wasm_bindgen]
pub fn prove(
witness: wasm_bindgen::Clamped<Vec<u8>>,
pk: wasm_bindgen::Clamped<Vec<u8>>,
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
srs: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
super::universal::prove(witness.0, pk.0, compiled_circuit.0, srs.0).map_err(JsError::from)
}
// VALIDATION FUNCTIONS
/// Witness file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn witnessValidation(witness: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
witness_validation(witness.0).map_err(JsError::from)
}
/// Compiled circuit validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn compiledCircuitValidation(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
compiled_circuit_validation(compiled_circuit.0).map_err(JsError::from)
}
/// Input file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn inputValidation(input: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
input_validation(input.0).map_err(JsError::from)
}
/// Proof file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn proofValidation(proof: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
proof_validation(proof.0).map_err(JsError::from)
}
/// Vk file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn vkValidation(
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
vk_validation(vk.0, settings.0).map_err(JsError::from)
}
/// Pk file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn pkValidation(
pk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
pk_validation(pk.0, settings.0).map_err(JsError::from)
}
/// Settings file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn settingsValidation(settings: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
settings_validation(settings.0).map_err(JsError::from)
}
/// Srs file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn srsValidation(srs: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
srs_validation(srs.0).map_err(JsError::from)
}
// HELPER FUNCTIONS
pub fn u8_array_to_u128_le(arr: [u8; 16]) -> u128 {
let mut n: u128 = 0;
for &b in arr.iter().rev() {
n <<= 8;
n |= b as u128;
}
n
}
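// A quick check of the little-endian semantics above: byte 0 is the least
// significant, matching the standard library's u128::from_le_bytes.
#[cfg(test)]
mod le_helper_tests {
    use super::u8_array_to_u128_le;

    #[test]
    fn reads_little_endian() {
        let mut arr = [0u8; 16];
        arr[0] = 0x01;
        arr[1] = 0x02;
        assert_eq!(u8_array_to_u128_le(arr), 0x0201);
        assert_eq!(u8_array_to_u128_le(arr), u128::from_le_bytes(arr));
    }
}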


@@ -219,7 +219,7 @@ mod tests {
fn polycommit_chip_for_a_range_of_input_sizes() {
let rng = rand::rngs::OsRng;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
{
@@ -247,7 +247,7 @@ mod tests {
#[test]
#[ignore]
fn polycommit_chip_much_longer_input() {
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
let rng = rand::rngs::OsRng;


@@ -560,7 +560,7 @@ mod tests {
fn hash_for_a_range_of_input_sizes() {
let rng = rand::rngs::OsRng;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
{


@@ -14,6 +14,7 @@ use pyo3::{
types::PyString,
};
use serde::{Deserialize, Serialize};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;
use crate::{
@@ -22,7 +23,7 @@ use crate::{
table::{Range, RangeCheck, Table},
utils,
},
tensor::{IntoI64, Tensor, TensorType, ValTensor, VarTensor},
tensor::{Tensor, TensorType, ValTensor, VarTensor},
};
use std::{collections::BTreeMap, marker::PhantomData};
@@ -49,6 +50,7 @@ impl std::fmt::Display for CheckMode {
}
}
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl ToFlags for CheckMode {
/// Convert the struct to a subcommand string
fn to_flags(&self) -> Vec<String> {
@@ -88,6 +90,7 @@ impl std::fmt::Display for Tolerance {
}
}
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl ToFlags for Tolerance {
/// Convert the struct to a subcommand string
fn to_flags(&self) -> Vec<String> {
@@ -174,7 +177,7 @@ impl<'source> FromPyObject<'source> for Tolerance {
#[derive(Clone, Debug, Default)]
pub struct DynamicLookups {
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
pub lookup_selectors: BTreeMap<(usize, usize), Selector>,
pub lookup_selectors: BTreeMap<(usize, (usize, usize)), Selector>,
/// Selectors for the dynamic lookup tables
pub table_selectors: Vec<Selector>,
/// Inputs:
@@ -206,7 +209,7 @@ impl DynamicLookups {
#[derive(Clone, Debug, Default)]
pub struct Shuffles {
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
pub input_selectors: BTreeMap<(usize, usize), Selector>,
pub input_selectors: BTreeMap<(usize, (usize, usize)), Selector>,
/// Selectors for the dynamic lookup tables
pub reference_selectors: Vec<Selector>,
/// Inputs:
@@ -327,7 +330,7 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> BaseConfig<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
/// Returns a new [BaseConfig] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
Self {
@@ -643,57 +646,73 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> BaseCo
}
for t in tables.iter() {
if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
if !t.is_advice() || t.num_inner_cols() > 1 {
return Err(CircuitError::WrongDynamicColumnType(t.name().to_string()));
}
}
// assert all tables have the same number of blocks
if tables
.iter()
.map(|t| t.num_blocks())
.collect::<Vec<_>>()
.windows(2)
.any(|w| w[0] != w[1])
{
return Err(CircuitError::WrongDynamicColumnType(
"tables inner cols".to_string(),
));
}
let one = Expression::Constant(F::ONE);
let s_ltable = cs.complex_selector();
for q in 0..tables[0].num_blocks() {
let s_ltable = cs.complex_selector();
for x in 0..lookups[0].num_blocks() {
for y in 0..lookups[0].num_inner_cols() {
let s_lookup = cs.complex_selector();
for x in 0..lookups[0].num_blocks() {
for y in 0..lookups[0].num_inner_cols() {
let s_lookup = cs.complex_selector();
cs.lookup_any("lookup", |cs| {
let s_lookupq = cs.query_selector(s_lookup);
let mut expression = vec![];
let s_ltableq = cs.query_selector(s_ltable);
let mut lookup_queries = vec![one.clone()];
cs.lookup_any("lookup", |cs| {
let s_lookupq = cs.query_selector(s_lookup);
let mut expression = vec![];
let s_ltableq = cs.query_selector(s_ltable);
let mut lookup_queries = vec![one.clone()];
for lookup in lookups {
lookup_queries.push(match lookup {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
});
}
for lookup in lookups {
lookup_queries.push(match lookup {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
});
}
let mut table_queries = vec![one.clone()];
for table in tables {
table_queries.push(match table {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[0][0], Rotation(0))
}
_ => unreachable!(),
});
}
let mut table_queries = vec![one.clone()];
for table in tables {
table_queries.push(match table {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[q][0], Rotation(0))
}
_ => unreachable!(),
});
}
let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone());
let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone());
expression.extend(lhs.zip(rhs));
let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone());
let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone());
expression.extend(lhs.zip(rhs));
expression
});
self.dynamic_lookups
.lookup_selectors
.entry((x, y))
.or_insert(s_lookup);
expression
});
self.dynamic_lookups
.lookup_selectors
.entry((q, (x, y)))
.or_insert(s_lookup);
}
}
self.dynamic_lookups.table_selectors.push(s_ltable);
}
self.dynamic_lookups.table_selectors.push(s_ltable);
// if we haven't previously initialized the input/output, do so now
if self.dynamic_lookups.tables.is_empty() {
@@ -726,57 +745,72 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> BaseCo
}
for t in references.iter() {
if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
if !t.is_advice() || t.num_inner_cols() > 1 {
return Err(CircuitError::WrongDynamicColumnType(t.name().to_string()));
}
}
// assert all references have the same number of blocks
if references
.iter()
.map(|t| t.num_blocks())
.collect::<Vec<_>>()
.windows(2)
.any(|w| w[0] != w[1])
{
return Err(CircuitError::WrongDynamicColumnType(
"references inner cols".to_string(),
));
}
let one = Expression::Constant(F::ONE);
let s_reference = cs.complex_selector();
for q in 0..references[0].num_blocks() {
let s_reference = cs.complex_selector();
for x in 0..inputs[0].num_blocks() {
for y in 0..inputs[0].num_inner_cols() {
let s_input = cs.complex_selector();
for x in 0..inputs[0].num_blocks() {
for y in 0..inputs[0].num_inner_cols() {
let s_input = cs.complex_selector();
cs.lookup_any("lookup", |cs| {
let s_inputq = cs.query_selector(s_input);
let mut expression = vec![];
let s_referenceq = cs.query_selector(s_reference);
let mut input_queries = vec![one.clone()];
cs.lookup_any("lookup", |cs| {
let s_inputq = cs.query_selector(s_input);
let mut expression = vec![];
let s_referenceq = cs.query_selector(s_reference);
let mut input_queries = vec![one.clone()];
for input in inputs {
input_queries.push(match input {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
});
}
for input in inputs {
input_queries.push(match input {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
});
}
let mut ref_queries = vec![one.clone()];
for reference in references {
ref_queries.push(match reference {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[0][0], Rotation(0))
}
_ => unreachable!(),
});
}
let mut ref_queries = vec![one.clone()];
for reference in references {
ref_queries.push(match reference {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[q][0], Rotation(0))
}
_ => unreachable!(),
});
}
let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone());
let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone());
expression.extend(lhs.zip(rhs));
let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone());
let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone());
expression.extend(lhs.zip(rhs));
expression
});
self.shuffles
.input_selectors
.entry((x, y))
.or_insert(s_input);
expression
});
self.shuffles
.input_selectors
.entry((q, (x, y)))
.or_insert(s_input);
}
}
self.shuffles.reference_selectors.push(s_reference);
}
self.shuffles.reference_selectors.push(s_reference);
// if we haven't previously initialized the input/output, do so now
if self.shuffles.references.is_empty() {


@@ -1,6 +1,6 @@
use std::convert::Infallible;
use crate::tensor::TensorError;
use crate::{fieldutils::IntegerRep, tensor::TensorError};
use halo2_proofs::plonk::Error as PlonkError;
use thiserror::Error;
@@ -57,7 +57,7 @@ pub enum CircuitError {
InvalidConversion(#[from] Infallible),
/// Invalid min/max lookup range
#[error("invalid min/max lookup range: min: {0}, max: {1}")]
InvalidMinMaxRange(i64, i64),
InvalidMinMaxRange(IntegerRep, IntegerRep),
/// Missing product in einsum
#[error("missing product in einsum")]
MissingEinsumProduct,
@@ -81,7 +81,7 @@ pub enum CircuitError {
MissingSelectors(String),
/// Table lookup error
#[error("value ({0}) out of range: ({1}, {2})")]
TableOOR(i64, i64, i64),
TableOOR(IntegerRep, IntegerRep, IntegerRep),
/// Lookup not configured
#[error("lookup not configured: {0}")]
LookupNotConfigured(String),
@@ -91,4 +91,10 @@ pub enum CircuitError {
/// Missing layout
#[error("missing layout for op: {0}")]
MissingLayout(String),
#[error("[io] {0}")]
/// IO error
IoError(#[from] std::io::Error),
/// Invalid scale
#[error("negative scale for an op that requires positive inputs {0}")]
NegativeScale(String),
}


@@ -1,7 +1,7 @@
use super::*;
use crate::{
circuit::{layouts, utils, Tolerance},
fieldutils::i64_to_felt,
fieldutils::integer_rep_to_felt,
graph::multiplier_to_scale,
tensor::{self, Tensor, TensorType, ValTensor},
};
@@ -13,10 +13,21 @@ use serde::{Deserialize, Serialize};
/// An enum representing the operations that consist of both lookups and arithmetic operations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum HybridOp {
Ceil {
scale: utils::F32,
legs: usize,
},
Floor {
scale: utils::F32,
legs: usize,
},
Round {
scale: utils::F32,
legs: usize,
},
Recip {
input_scale: utils::F32,
output_scale: utils::F32,
use_range_check_for_int: bool,
},
Div {
denom: utils::F32,
@@ -45,6 +56,8 @@ pub enum HybridOp {
ReduceArgMin {
dim: usize,
},
Max,
Min,
Softmax {
input_scale: utils::F32,
output_scale: utils::F32,
@@ -71,12 +84,19 @@ pub enum HybridOp {
},
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for HybridOp {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for HybridOp {
///
fn requires_homogenous_input_scales(&self) -> Vec<usize> {
match self {
HybridOp::Greater | HybridOp::Less | HybridOp::Equals => vec![0, 1],
HybridOp::GreaterEqual | HybridOp::LessEqual => vec![0, 1],
HybridOp::Greater { .. }
| HybridOp::Less { .. }
| HybridOp::Equals { .. }
| HybridOp::GreaterEqual { .. }
| HybridOp::Max
| HybridOp::Min
| HybridOp::LessEqual { .. } => {
vec![0, 1]
}
_ => vec![],
}
}
@@ -88,13 +108,17 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
fn as_string(&self) -> String {
match self {
HybridOp::Ceil { scale, legs } => format!("CEIL(scale={}, legs={})", scale, legs),
HybridOp::Floor { scale, legs } => format!("FLOOR(scale={}, legs={})", scale, legs),
HybridOp::Round { scale, legs } => format!("ROUND(scale={}, legs={})", scale, legs),
HybridOp::Max => format!("MAX"),
HybridOp::Min => format!("MIN"),
HybridOp::Recip {
input_scale,
output_scale,
use_range_check_for_int,
} => format!(
"RECIP (input_scale={}, output_scale={}, use_range_check_for_int={})",
input_scale, output_scale, use_range_check_for_int
"RECIP (input_scale={}, output_scale={})",
input_scale, output_scale
),
HybridOp::Div {
denom,
@@ -135,10 +159,10 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
)
}
HybridOp::RangeCheck(p) => format!("RANGECHECK (tol={:?})", p),
HybridOp::Greater => "GREATER".into(),
HybridOp::GreaterEqual => "GREATEREQUAL".into(),
HybridOp::Less => "LESS".into(),
HybridOp::LessEqual => "LESSEQUAL".into(),
HybridOp::Greater => "GREATER".to_string(),
HybridOp::GreaterEqual => "GREATEREQUAL".to_string(),
HybridOp::Less => "LESS".to_string(),
HybridOp::LessEqual => "LESSEQUAL".to_string(),
HybridOp::Equals => "EQUALS".into(),
HybridOp::Gather { dim, .. } => format!("GATHER (dim={})", dim),
HybridOp::TopK { k, dim, largest } => {
@@ -157,6 +181,17 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
values: &[ValTensor<F>],
) -> Result<Option<ValTensor<F>>, CircuitError> {
Ok(Some(match self {
HybridOp::Ceil { scale, legs } => {
layouts::ceil(config, region, values[..].try_into()?, *scale, *legs)?
}
HybridOp::Floor { scale, legs } => {
layouts::floor(config, region, values[..].try_into()?, *scale, *legs)?
}
HybridOp::Round { scale, legs } => {
layouts::round(config, region, values[..].try_into()?, *scale, *legs)?
}
HybridOp::Max => layouts::max_comp(config, region, values[..].try_into()?)?,
HybridOp::Min => layouts::min_comp(config, region, values[..].try_into()?)?,
HybridOp::SumPool {
padding,
stride,
@@ -174,31 +209,13 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
HybridOp::Recip {
input_scale,
output_scale,
use_range_check_for_int,
} => {
if input_scale.0.fract() == 0.0
&& output_scale.0.fract() == 0.0
&& *use_range_check_for_int
{
layouts::recip(
config,
region,
values[..].try_into()?,
i64_to_felt(input_scale.0 as i64),
i64_to_felt(output_scale.0 as i64),
)?
} else {
layouts::nonlinearity(
config,
region,
values.try_into()?,
&LookupOp::Recip {
input_scale: *input_scale,
output_scale: *output_scale,
},
)?
}
}
} => layouts::recip(
config,
region,
values[..].try_into()?,
integer_rep_to_felt(input_scale.0 as i128),
integer_rep_to_felt(output_scale.0 as i128),
)?,
HybridOp::Div {
denom,
use_range_check_for_int,
@@ -209,7 +226,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
config,
region,
values[..].try_into()?,
i64_to_felt(denom.0 as i64),
integer_rep_to_felt(denom.0 as i128),
)?
} else {
layouts::nonlinearity(

File diff suppressed because it is too large.


@@ -3,9 +3,9 @@ use serde::{Deserialize, Serialize};
use crate::{
circuit::{layouts, table::Range, utils},
fieldutils::{felt_to_i64, i64_to_felt},
fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep},
graph::multiplier_to_scale,
tensor::{self, IntoI64, Tensor, TensorError, TensorType},
tensor::{self, Tensor, TensorError, TensorType},
};
use super::Op;
@@ -15,225 +15,154 @@ use halo2curves::ff::PrimeField;
/// An enum representing the operations that can be used to express more complex operations via accumulation
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize)]
pub enum LookupOp {
Abs,
Div {
denom: utils::F32,
},
Cast {
scale: utils::F32,
},
ReLU,
Max {
scale: utils::F32,
a: utils::F32,
},
Min {
scale: utils::F32,
a: utils::F32,
},
Ceil {
scale: utils::F32,
},
Floor {
scale: utils::F32,
},
Round {
scale: utils::F32,
},
RoundHalfToEven {
scale: utils::F32,
},
Sqrt {
scale: utils::F32,
},
Rsqrt {
scale: utils::F32,
},
Recip {
input_scale: utils::F32,
output_scale: utils::F32,
},
LeakyReLU {
slope: utils::F32,
},
Sigmoid {
scale: utils::F32,
},
Ln {
scale: utils::F32,
},
Exp {
scale: utils::F32,
},
Cos {
scale: utils::F32,
},
ACos {
scale: utils::F32,
},
Cosh {
scale: utils::F32,
},
ACosh {
scale: utils::F32,
},
Sin {
scale: utils::F32,
},
ASin {
scale: utils::F32,
},
Sinh {
scale: utils::F32,
},
ASinh {
scale: utils::F32,
},
Tan {
scale: utils::F32,
},
ATan {
scale: utils::F32,
},
Tanh {
scale: utils::F32,
},
ATanh {
scale: utils::F32,
},
Erf {
scale: utils::F32,
},
GreaterThan {
a: utils::F32,
},
LessThan {
a: utils::F32,
},
GreaterThanEqual {
a: utils::F32,
},
LessThanEqual {
a: utils::F32,
},
Sign,
KroneckerDelta,
Pow {
scale: utils::F32,
a: utils::F32,
},
HardSwish {
scale: utils::F32,
},
Div { denom: utils::F32 },
Cast { scale: utils::F32 },
RoundHalfToEven { scale: utils::F32 },
Sqrt { scale: utils::F32 },
Rsqrt { scale: utils::F32 },
Sigmoid { scale: utils::F32 },
Ln { scale: utils::F32 },
Exp { scale: utils::F32 },
Cos { scale: utils::F32 },
ACos { scale: utils::F32 },
Cosh { scale: utils::F32 },
ACosh { scale: utils::F32 },
Sin { scale: utils::F32 },
ASin { scale: utils::F32 },
Sinh { scale: utils::F32 },
ASinh { scale: utils::F32 },
Tan { scale: utils::F32 },
ATan { scale: utils::F32 },
Tanh { scale: utils::F32 },
ATanh { scale: utils::F32 },
Erf { scale: utils::F32 },
Pow { scale: utils::F32, a: utils::F32 },
HardSwish { scale: utils::F32 },
}
impl LookupOp {
/// Returns the range of values that can be represented by the table
pub fn bit_range(max_len: usize) -> Range {
let range = (max_len - 1) as f64 / 2_f64;
let range = range as i64;
let range = range as IntegerRep;
(-range, range)
}
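// Worked example: a table with 2^12 = 4096 rows covers the symmetric range
// (-2047, 2047), since (4096 - 1) / 2 = 2047.5 truncates to 2047.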
/// Returns a path-friendly string identifier for the op
pub fn as_path(&self) -> String {
match self {
LookupOp::RoundHalfToEven { scale } => format!("round_half_to_even_{}", scale),
LookupOp::Pow { scale, a } => format!("pow_{}_{}", scale, a),
LookupOp::Div { denom } => format!("div_{}", denom),
LookupOp::Cast { scale } => format!("cast_{}", scale),
LookupOp::Sigmoid { scale } => format!("sigmoid_{}", scale),
LookupOp::Sqrt { scale } => format!("sqrt_{}", scale),
LookupOp::Rsqrt { scale } => format!("rsqrt_{}", scale),
LookupOp::Erf { scale } => format!("erf_{}", scale),
LookupOp::Exp { scale } => format!("exp_{}", scale),
LookupOp::Ln { scale } => format!("ln_{}", scale),
LookupOp::Cos { scale } => format!("cos_{}", scale),
LookupOp::ACos { scale } => format!("acos_{}", scale),
LookupOp::Cosh { scale } => format!("cosh_{}", scale),
LookupOp::ACosh { scale } => format!("acosh_{}", scale),
LookupOp::Sin { scale } => format!("sin_{}", scale),
LookupOp::ASin { scale } => format!("asin_{}", scale),
LookupOp::Sinh { scale } => format!("sinh_{}", scale),
LookupOp::ASinh { scale } => format!("asinh_{}", scale),
LookupOp::Tan { scale } => format!("tan_{}", scale),
LookupOp::ATan { scale } => format!("atan_{}", scale),
LookupOp::ATanh { scale } => format!("atanh_{}", scale),
LookupOp::Tanh { scale } => format!("tanh_{}", scale),
LookupOp::HardSwish { scale } => format!("hardswish_{}", scale),
}
}
/// Matches a [Op] to an operation in the `tensor::ops` module.
pub(crate) fn f<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64>(
pub(crate) fn f<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&self,
x: &[Tensor<F>],
) -> Result<ForwardResult<F>, TensorError> {
let x = x[0].clone().map(|x| felt_to_i64(x));
let res = match &self {
LookupOp::Abs => Ok(tensor::ops::abs(&x)?),
LookupOp::Ceil { scale } => Ok(tensor::ops::nonlinearities::ceil(&x, scale.into())),
LookupOp::Floor { scale } => Ok(tensor::ops::nonlinearities::floor(&x, scale.into())),
LookupOp::Round { scale } => Ok(tensor::ops::nonlinearities::round(&x, scale.into())),
LookupOp::RoundHalfToEven { scale } => Ok(
tensor::ops::nonlinearities::round_half_to_even(&x, scale.into()),
),
LookupOp::Pow { scale, a } => Ok(tensor::ops::nonlinearities::pow(
&x,
scale.0.into(),
a.0.into(),
)),
LookupOp::KroneckerDelta => Ok(tensor::ops::nonlinearities::kronecker_delta(&x)),
LookupOp::Max { scale, a } => Ok(tensor::ops::nonlinearities::max(
&x,
scale.0.into(),
a.0.into(),
)),
LookupOp::Min { scale, a } => Ok(tensor::ops::nonlinearities::min(
&x,
scale.0.into(),
a.0.into(),
)),
LookupOp::Sign => Ok(tensor::ops::nonlinearities::sign(&x)),
LookupOp::LessThan { a } => Ok(tensor::ops::nonlinearities::less_than(
&x,
f32::from(*a).into(),
)),
LookupOp::LessThanEqual { a } => Ok(tensor::ops::nonlinearities::less_than_equal(
&x,
f32::from(*a).into(),
)),
LookupOp::GreaterThan { a } => Ok(tensor::ops::nonlinearities::greater_than(
&x,
f32::from(*a).into(),
)),
LookupOp::GreaterThanEqual { a } => Ok(
tensor::ops::nonlinearities::greater_than_equal(&x, f32::from(*a).into()),
),
LookupOp::Div { denom } => Ok(tensor::ops::nonlinearities::const_div(
&x,
f32::from(*denom).into(),
)),
LookupOp::Cast { scale } => Ok(tensor::ops::nonlinearities::const_div(
&x,
f32::from(*scale).into(),
)),
LookupOp::Recip {
input_scale,
output_scale,
} => Ok(tensor::ops::nonlinearities::recip(
&x,
input_scale.into(),
output_scale.into(),
)),
LookupOp::ReLU => Ok(tensor::ops::nonlinearities::leakyrelu(&x, 0_f64)),
let x = x[0].clone().map(|x| felt_to_integer_rep(x));
let res =
match &self {
LookupOp::RoundHalfToEven { scale } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::round_half_to_even(&x, scale.into()),
),
LookupOp::Pow { scale, a } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::pow(&x, scale.0.into(), a.0.into()),
),
LookupOp::Div { denom } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::const_div(&x, f32::from(*denom).into()),
),
LookupOp::Cast { scale } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::const_div(&x, f32::from(*scale).into()),
),
LookupOp::Sigmoid { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sigmoid(&x, scale.into()))
}
LookupOp::Sqrt { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sqrt(&x, scale.into()))
}
LookupOp::Rsqrt { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::rsqrt(&x, scale.into()))
}
LookupOp::Erf { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::erffunc(&x, scale.into()))
}
LookupOp::Exp { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::exp(&x, scale.into()))
}
LookupOp::Ln { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::ln(&x, scale.into()))
}
LookupOp::Cos { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::cos(&x, scale.into()))
}
LookupOp::ACos { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::acos(&x, scale.into()))
}
LookupOp::Cosh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::cosh(&x, scale.into()))
}
LookupOp::ACosh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::acosh(&x, scale.into()))
}
LookupOp::Sin { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sin(&x, scale.into()))
}
LookupOp::ASin { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::asin(&x, scale.into()))
}
LookupOp::Sinh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sinh(&x, scale.into()))
}
LookupOp::ASinh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::asinh(&x, scale.into()))
}
LookupOp::Tan { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::tan(&x, scale.into()))
}
LookupOp::ATan { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::atan(&x, scale.into()))
}
LookupOp::ATanh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::atanh(&x, scale.into()))
}
LookupOp::Tanh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::tanh(&x, scale.into()))
}
LookupOp::HardSwish { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::hardswish(&x, scale.into()))
}
}?;
LookupOp::LeakyReLU { slope: a } => {
Ok(tensor::ops::nonlinearities::leakyrelu(&x, a.0.into()))
}
LookupOp::Sigmoid { scale } => {
Ok(tensor::ops::nonlinearities::sigmoid(&x, scale.into()))
}
LookupOp::Sqrt { scale } => Ok(tensor::ops::nonlinearities::sqrt(&x, scale.into())),
LookupOp::Rsqrt { scale } => Ok(tensor::ops::nonlinearities::rsqrt(&x, scale.into())),
LookupOp::Erf { scale } => Ok(tensor::ops::nonlinearities::erffunc(&x, scale.into())),
LookupOp::Exp { scale } => Ok(tensor::ops::nonlinearities::exp(&x, scale.into())),
LookupOp::Ln { scale } => Ok(tensor::ops::nonlinearities::ln(&x, scale.into())),
LookupOp::Cos { scale } => Ok(tensor::ops::nonlinearities::cos(&x, scale.into())),
LookupOp::ACos { scale } => Ok(tensor::ops::nonlinearities::acos(&x, scale.into())),
LookupOp::Cosh { scale } => Ok(tensor::ops::nonlinearities::cosh(&x, scale.into())),
LookupOp::ACosh { scale } => Ok(tensor::ops::nonlinearities::acosh(&x, scale.into())),
LookupOp::Sin { scale } => Ok(tensor::ops::nonlinearities::sin(&x, scale.into())),
LookupOp::ASin { scale } => Ok(tensor::ops::nonlinearities::asin(&x, scale.into())),
LookupOp::Sinh { scale } => Ok(tensor::ops::nonlinearities::sinh(&x, scale.into())),
LookupOp::ASinh { scale } => Ok(tensor::ops::nonlinearities::asinh(&x, scale.into())),
LookupOp::Tan { scale } => Ok(tensor::ops::nonlinearities::tan(&x, scale.into())),
LookupOp::ATan { scale } => Ok(tensor::ops::nonlinearities::atan(&x, scale.into())),
LookupOp::ATanh { scale } => Ok(tensor::ops::nonlinearities::atanh(&x, scale.into())),
LookupOp::Tanh { scale } => Ok(tensor::ops::nonlinearities::tanh(&x, scale.into())),
LookupOp::HardSwish { scale } => {
Ok(tensor::ops::nonlinearities::hardswish(&x, scale.into()))
}
}?;
let output = res.map(|x| i64_to_felt(x));
let output = res.map(|x| integer_rep_to_felt(x));
Ok(ForwardResult { output })
}
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for LookupOp {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for LookupOp {
/// Returns a reference to the Any trait.
fn as_any(&self) -> &dyn Any {
self
@@ -242,32 +171,11 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
/// Returns the name of the operation
fn as_string(&self) -> String {
match self {
LookupOp::Abs => "ABS".into(),
LookupOp::Ceil { scale } => format!("CEIL(scale={})", scale),
LookupOp::Floor { scale } => format!("FLOOR(scale={})", scale),
LookupOp::Round { scale } => format!("ROUND(scale={})", scale),
LookupOp::RoundHalfToEven { scale } => format!("ROUND_HALF_TO_EVEN(scale={})", scale),
LookupOp::Pow { a, scale } => format!("POW(scale={}, exponent={})", scale, a),
LookupOp::KroneckerDelta => "K_DELTA".into(),
LookupOp::Max { scale, a } => format!("MAX(scale={}, a={})", scale, a),
LookupOp::Min { scale, a } => format!("MIN(scale={}, a={})", scale, a),
LookupOp::Sign => "SIGN".into(),
LookupOp::GreaterThan { a } => format!("GREATER_THAN(a={})", a),
LookupOp::GreaterThanEqual { a } => format!("GREATER_THAN_EQUAL(a={})", a),
LookupOp::LessThan { a } => format!("LESS_THAN(a={})", a),
LookupOp::LessThanEqual { a } => format!("LESS_THAN_EQUAL(a={})", a),
LookupOp::Recip {
input_scale,
output_scale,
} => format!(
"RECIP(input_scale={}, output_scale={})",
input_scale, output_scale
),
LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
LookupOp::Cast { scale } => format!("CAST(scale={})", scale),
LookupOp::Ln { scale } => format!("LN(scale={})", scale),
LookupOp::ReLU => "RELU".to_string(),
LookupOp::LeakyReLU { slope: a } => format!("L_RELU(slope={})", a),
LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
LookupOp::Sqrt { scale } => format!("SQRT(scale={})", scale),
LookupOp::Erf { scale } => format!("ERF(scale={})", scale),
@@ -310,13 +218,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
let in_scale = inputs_scale[0];
in_scale + multiplier_to_scale(1. / scale.0 as f64)
}
LookupOp::Recip { output_scale, .. } => multiplier_to_scale(output_scale.into()),
LookupOp::Sign
| LookupOp::GreaterThan { .. }
| LookupOp::LessThan { .. }
| LookupOp::GreaterThanEqual { .. }
| LookupOp::LessThanEqual { .. }
| LookupOp::KroneckerDelta => 0,
_ => inputs_scale[0],
};
Ok(scale)


@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};
use crate::{
graph::quantize_tensor,
tensor::{self, IntoI64, Tensor, TensorType, ValTensor},
tensor::{self, Tensor, TensorType, ValTensor},
};
use halo2curves::ff::PrimeField;
@@ -31,12 +31,12 @@ pub use errors::CircuitError;
/// A struct representing the result of a forward pass.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> {
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
pub(crate) output: Tensor<F>,
}
/// A trait representing operations that can be represented as constraints in a circuit.
pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64>:
pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>:
std::fmt::Debug + Send + Sync + Any
{
/// Returns a string representation of the operation.
@@ -75,7 +75,7 @@ pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64
fn as_any(&self) -> &dyn Any;
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Clone for Box<dyn Op<F>> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Clone for Box<dyn Op<F>> {
fn clone(&self) -> Self {
self.clone_dyn()
}
@@ -142,7 +142,7 @@ pub struct Input {
pub datum_type: InputType,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for Input {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input {
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
Ok(self.scale)
}
@@ -197,7 +197,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Unknown;
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for Unknown {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Unknown {
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
Ok(0)
}
@@ -224,7 +224,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F>
///
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> {
pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
///
pub quantized_values: Tensor<F>,
///
@@ -234,7 +234,7 @@ pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash +
pub pre_assigned_val: Option<ValTensor<F>>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Constant<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
///
pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>) -> Self {
Self {
@@ -255,7 +255,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Consta
self.raw_values = Tensor::new(None, &[0]).unwrap();
}
///
/// Pre-assign a value
pub fn pre_assign(&mut self, val: ValTensor<F>) {
self.pre_assigned_val = Some(val)
}
@@ -267,8 +267,7 @@ impl<
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>
+ IntoI64,
+ for<'de> Deserialize<'de>,
> Op<F> for Constant<F>
{
fn as_any(&self) -> &dyn Any {


@@ -1,5 +1,8 @@
use crate::{
circuit::layouts,
circuit::{
layouts,
utils::{self, F32},
},
tensor::{self, Tensor, TensorError},
};
@@ -9,6 +12,12 @@ use super::{base::BaseOp, *};
/// An enum representing the operations that can be expressed as arithmetic (non lookup) operations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum PolyOp {
Abs,
Sign,
LeakyReLU {
slope: utils::F32,
scale: i32,
},
GatherElements {
dim: usize,
constant_idx: Option<Tensor<usize>>,
@@ -33,6 +42,7 @@ pub enum PolyOp {
Conv {
padding: Vec<(usize, usize)>,
stride: Vec<usize>,
group: usize,
},
Downsample {
axis: usize,
@@ -43,6 +53,7 @@ pub enum PolyOp {
padding: Vec<(usize, usize)>,
output_padding: Vec<usize>,
stride: Vec<usize>,
group: usize,
},
Add,
Sub,
@@ -97,8 +108,7 @@ impl<
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>
+ IntoI64,
+ for<'de> Deserialize<'de>,
> Op<F> for PolyOp
{
/// Returns a reference to the Any trait.
@@ -108,6 +118,9 @@ impl<
fn as_string(&self) -> String {
match &self {
PolyOp::LeakyReLU { slope: a, .. } => format!("LEAKYRELU (slope={})", a),
PolyOp::Abs => "ABS".to_string(),
PolyOp::Sign => "SIGN".to_string(),
PolyOp::GatherElements { dim, constant_idx } => format!(
"GATHERELEMENTS (dim={}, constant_idx{})",
dim,
@@ -148,17 +161,25 @@ impl<
PolyOp::Sum { axes } => format!("SUM (axes={:?})", axes),
PolyOp::Prod { .. } => "PROD".into(),
PolyOp::Pow(_) => "POW".into(),
PolyOp::Conv { stride, padding } => {
format!("CONV (stride={:?}, padding={:?})", stride, padding)
PolyOp::Conv {
stride,
padding,
group,
} => {
format!(
"CONV (stride={:?}, padding={:?}, group={})",
stride, padding, group
)
}
PolyOp::DeConv {
stride,
padding,
output_padding,
group,
} => {
format!(
"DECONV (stride={:?}, padding={:?}, output_padding={:?})",
stride, padding, output_padding
"DECONV (stride={:?}, padding={:?}, output_padding={:?}, group={})",
stride, padding, output_padding, group
)
}
PolyOp::Concat { axis } => format!("CONCAT (axis={})", axis),
@@ -181,6 +202,11 @@ impl<
values: &[ValTensor<F>],
) -> Result<Option<ValTensor<F>>, CircuitError> {
Ok(Some(match self {
PolyOp::Abs => layouts::abs(config, region, values[..].try_into()?)?,
PolyOp::Sign => layouts::sign(config, region, values[..].try_into()?)?,
PolyOp::LeakyReLU { slope, scale } => {
layouts::leaky_relu(config, region, values[..].try_into()?, slope, scale)?
}
PolyOp::MultiBroadcastTo { shape } => {
layouts::expand(config, region, values[..].try_into()?, shape)?
}
@@ -212,9 +238,18 @@ impl<
PolyOp::Prod { axes, .. } => {
layouts::prod_axes(config, region, values[..].try_into()?, axes)?
}
PolyOp::Conv { padding, stride } => {
layouts::conv(config, region, values[..].try_into()?, padding, stride)?
}
PolyOp::Conv {
padding,
stride,
group,
} => layouts::conv(
config,
region,
values[..].try_into()?,
padding,
stride,
*group,
)?,
PolyOp::GatherElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into()
@@ -261,6 +296,7 @@ impl<
padding,
output_padding,
stride,
group,
} => layouts::deconv(
config,
region,
@@ -268,6 +304,7 @@ impl<
padding,
output_padding,
stride,
*group,
)?,
PolyOp::Add => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Add)?,
PolyOp::Sub => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Sub)?,
@@ -300,6 +337,12 @@ impl<
fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
let scale = match self {
// this corresponds to the relu operation
PolyOp::LeakyReLU {
slope: F32(0.0), ..
} => in_scales[0],
// this corresponds to the leaky relu operation with a slope which induces a change in scale
PolyOp::LeakyReLU { scale, .. } => in_scales[0] + *scale,
PolyOp::MeanOfSquares { .. } => 2 * in_scales[0],
PolyOp::Xor | PolyOp::Or | PolyOp::And | PolyOp::Not => 0,
PolyOp::Iff => in_scales[1],
@@ -347,6 +390,7 @@ impl<
PolyOp::Reshape(_) | PolyOp::Flatten(_) => in_scales[0],
PolyOp::Pow(pow) => in_scales[0] * (*pow as crate::Scale),
PolyOp::Identity { out_scale } => out_scale.unwrap_or(in_scales[0]),
PolyOp::Sign { .. } => 0,
_ => in_scales[0],
};
Ok(scale)


@@ -1,15 +1,17 @@
use crate::{
circuit::table::Range,
fieldutils::IntegerRep,
tensor::{Tensor, TensorType, ValTensor, ValType, VarTensor},
};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use colored::Colorize;
use halo2_proofs::{
circuit::Region,
plonk::{Error, Selector},
};
use halo2curves::ff::PrimeField;
use portable_atomic::AtomicI64 as AtomicInt;
use itertools::Itertools;
use maybe_rayon::iter::ParallelExtend;
use std::{
cell::RefCell,
collections::{HashMap, HashSet},
@@ -84,6 +86,88 @@ impl ShuffleIndex {
}
}
#[derive(Debug, Clone)]
/// Settings for a region, distinguishing it across the different phases of proof generation
pub struct RegionSettings {
/// whether we are in witness generation mode
pub witness_gen: bool,
/// whether we should check range checks for validity
pub check_range: bool,
/// base for decompositions
pub base: usize,
/// number of legs for decompositions
pub legs: usize,
}
#[allow(unsafe_code)]
unsafe impl Sync for RegionSettings {}
#[allow(unsafe_code)]
unsafe impl Send for RegionSettings {}
impl RegionSettings {
/// Create a new region settings
pub fn new(witness_gen: bool, check_range: bool, base: usize, legs: usize) -> RegionSettings {
RegionSettings {
witness_gen,
check_range,
base,
legs,
}
}
/// Create a new region settings with all true
pub fn all_true(base: usize, legs: usize) -> RegionSettings {
RegionSettings {
witness_gen: true,
check_range: true,
base,
legs,
}
}
/// Create a new region settings with all false
pub fn all_false(base: usize, legs: usize) -> RegionSettings {
RegionSettings {
witness_gen: false,
check_range: false,
base,
legs,
}
}
}
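// Sketch: witness generation typically runs with both flags on, while dummy
// layout passes run with both off. The base/legs values here are
// illustrative, not defaults taken from the codebase.
//
// let prove_pass = RegionSettings::all_true(16384, 2);
// let dummy_pass = RegionSettings::all_false(16384, 2);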
#[derive(Debug, Default, Clone)]
/// Region statistics
pub struct RegionStatistics {
/// the current maximum value of the lookup inputs
pub max_lookup_inputs: IntegerRep,
/// the current minimum value of the lookup inputs
pub min_lookup_inputs: IntegerRep,
/// the current maximum value of the range size
pub max_range_size: IntegerRep,
/// the current set of used lookups
pub used_lookups: HashSet<LookupOp>,
/// the current set of used range checks
pub used_range_checks: HashSet<Range>,
}
impl RegionStatistics {
/// update the statistics with another set of statistics
pub fn update(&mut self, other: &RegionStatistics) {
self.max_lookup_inputs = self.max_lookup_inputs.max(other.max_lookup_inputs);
self.min_lookup_inputs = self.min_lookup_inputs.min(other.min_lookup_inputs);
self.max_range_size = self.max_range_size.max(other.max_range_size);
self.used_lookups.extend(other.used_lookups.clone());
self.used_range_checks
.extend(other.used_range_checks.clone());
}
}
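// Sketch: folding per-chunk statistics back into a global accumulator after
// a parallel layout pass (illustrative; the merge later in this file happens
// under a Mutex):
//
// let mut global = RegionStatistics::default();
// for local in per_chunk_stats.iter() {
//     global.update(local);
// }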
#[allow(unsafe_code)]
unsafe impl Sync for RegionStatistics {}
#[allow(unsafe_code)]
unsafe impl Send for RegionStatistics {}
#[derive(Debug)]
/// A context for a region
pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
@@ -93,22 +177,33 @@ pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
num_inner_cols: usize,
dynamic_lookup_index: DynamicLookupIndex,
shuffle_index: ShuffleIndex,
used_lookups: HashSet<LookupOp>,
used_range_checks: HashSet<Range>,
max_lookup_inputs: i64,
min_lookup_inputs: i64,
max_range_size: i64,
witness_gen: bool,
check_lookup_range: bool,
statistics: RegionStatistics,
settings: RegionSettings,
assigned_constants: ConstantsMap<F>,
max_dynamic_input_len: usize,
}
impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a, F> {
#[cfg(not(target_arch = "wasm32"))]
/// get the region's decomposition base
pub fn base(&self) -> usize {
self.settings.base
}
/// get the region's decomposition legs
pub fn legs(&self) -> usize {
self.settings.legs
}
/// get the max dynamic input len
pub fn max_dynamic_input_len(&self) -> usize {
self.max_dynamic_input_len
}
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
///
pub fn debug_report(&self) {
log::debug!(
"(rows={}, coord={}, constants={}, max_lookup_inputs={}, min_lookup_inputs={}, max_range_size={}, dynamic_lookup_col_coord={}, shuffle_col_coord={})",
"(rows={}, coord={}, constants={}, max_lookup_inputs={}, min_lookup_inputs={}, max_range_size={}, dynamic_lookup_col_coord={}, shuffle_col_coord={}, max_dynamic_input_len={})",
self.row().to_string().blue(),
self.linear_coord().to_string().yellow(),
self.total_constants().to_string().red(),
@@ -116,7 +211,9 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
self.min_lookup_inputs().to_string().green(),
self.max_range_size().to_string().green(),
self.dynamic_lookup_col_coord().to_string().green(),
self.shuffle_col_coord().to_string().green());
self.shuffle_col_coord().to_string().green(),
self.max_dynamic_input_len().to_string().green()
);
}
///
@@ -134,6 +231,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
self.dynamic_lookup_index.index += n;
}
/// increment the max dynamic input len
pub fn update_max_dynamic_input_len(&mut self, n: usize) {
self.max_dynamic_input_len = self.max_dynamic_input_len.max(n);
}
///
pub fn increment_dynamic_lookup_col_coord(&mut self, n: usize) {
self.dynamic_lookup_index.col_coord += n;
@@ -151,16 +253,27 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
///
pub fn witness_gen(&self) -> bool {
self.witness_gen
self.settings.witness_gen
}
///
pub fn check_lookup_range(&self) -> bool {
self.check_lookup_range
pub fn check_range(&self) -> bool {
self.settings.check_range
}
///
pub fn statistics(&self) -> &RegionStatistics {
&self.statistics
}
/// Create a new region context
pub fn new(region: Region<'a, F>, row: usize, num_inner_cols: usize) -> RegionCtx<'a, F> {
pub fn new(
region: Region<'a, F>,
row: usize,
num_inner_cols: usize,
decomp_base: usize,
decomp_legs: usize,
) -> RegionCtx<'a, F> {
let region = Some(RefCell::new(region));
let linear_coord = row * num_inner_cols;
@@ -171,14 +284,10 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
linear_coord,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen: true,
check_lookup_range: true,
statistics: RegionStatistics::default(),
settings: RegionSettings::all_true(decomp_base, decomp_legs),
assigned_constants: HashMap::new(),
max_dynamic_input_len: 0,
}
}
@@ -187,45 +296,20 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
region: Region<'a, F>,
row: usize,
num_inner_cols: usize,
decomp_base: usize,
decomp_legs: usize,
constants: ConstantsMap<F>,
) -> RegionCtx<'a, F> {
let mut new_self = Self::new(region, row, num_inner_cols);
let mut new_self = Self::new(region, row, num_inner_cols, decomp_base, decomp_legs);
new_self.assigned_constants = constants;
new_self
}
/// Create a new region context from a wrapped region
pub fn from_wrapped_region(
region: Option<RefCell<Region<'a, F>>>,
row: usize,
num_inner_cols: usize,
dynamic_lookup_index: DynamicLookupIndex,
shuffle_index: ShuffleIndex,
) -> RegionCtx<'a, F> {
let linear_coord = row * num_inner_cols;
RegionCtx {
region,
num_inner_cols,
linear_coord,
row,
dynamic_lookup_index,
shuffle_index,
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen: false,
check_lookup_range: false,
assigned_constants: HashMap::new(),
}
}
/// Create a new region context
pub fn new_dummy(
row: usize,
num_inner_cols: usize,
witness_gen: bool,
check_lookup_range: bool,
settings: RegionSettings,
) -> RegionCtx<'a, F> {
let region = None;
let linear_coord = row * num_inner_cols;
@@ -237,14 +321,10 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
row,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen,
check_lookup_range,
statistics: RegionStatistics::default(),
settings,
assigned_constants: HashMap::new(),
max_dynamic_input_len: 0,
}
}
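A sketch of a dummy (region-less) context for a dry-run pass, assuming RegionSettings::all_true is the convenience constructor used by RegionCtx::new above:
let settings = RegionSettings::all_true(128, 2); // witness_gen and check_range both enabled
let mut dummy: RegionCtx<F> = RegionCtx::new_dummy(0, 1, settings);
assert!(dummy.witness_gen());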
@@ -253,8 +333,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
row: usize,
linear_coord: usize,
num_inner_cols: usize,
witness_gen: bool,
check_lookup_range: bool,
settings: RegionSettings,
) -> RegionCtx<'a, F> {
let region = None;
RegionCtx {
@@ -264,14 +343,10 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
row,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen,
check_lookup_range,
statistics: RegionStatistics::default(),
settings,
assigned_constants: HashMap::new(),
max_dynamic_input_len: 0,
}
}
@@ -321,12 +396,9 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
) -> Result<(), CircuitError> {
let row = AtomicUsize::new(self.row());
let linear_coord = AtomicUsize::new(self.linear_coord());
let max_lookup_inputs = AtomicInt::new(self.max_lookup_inputs());
let min_lookup_inputs = AtomicInt::new(self.min_lookup_inputs());
let lookups = Arc::new(Mutex::new(self.used_lookups.clone()));
let range_checks = Arc::new(Mutex::new(self.used_range_checks.clone()));
let dynamic_lookup_index = Arc::new(Mutex::new(self.dynamic_lookup_index.clone()));
let statistics = Arc::new(Mutex::new(self.statistics.clone()));
let shuffle_index = Arc::new(Mutex::new(self.shuffle_index.clone()));
let dynamic_lookup_index = Arc::new(Mutex::new(self.dynamic_lookup_index.clone()));
let constants = Arc::new(Mutex::new(self.assigned_constants.clone()));
*output = output.par_enum_map(|idx, _| {
@@ -340,8 +412,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
starting_offset,
starting_linear_coord,
self.num_inner_cols,
self.witness_gen,
self.check_lookup_range,
self.settings.clone(),
);
let res = inner_loop_function(idx, &mut local_reg);
// we update the offset and constants
@@ -351,14 +422,9 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
Ordering::SeqCst,
);
max_lookup_inputs.fetch_max(local_reg.max_lookup_inputs(), Ordering::SeqCst);
min_lookup_inputs.fetch_min(local_reg.min_lookup_inputs(), Ordering::SeqCst);
// update the lookups
let mut lookups = lookups.lock().unwrap();
lookups.extend(local_reg.used_lookups());
// update the range checks
let mut range_checks = range_checks.lock().unwrap();
range_checks.extend(local_reg.used_range_checks());
let mut statistics = statistics.lock().unwrap();
statistics.update(local_reg.statistics());
// update the dynamic lookup index
let mut dynamic_lookup_index = dynamic_lookup_index.lock().unwrap();
dynamic_lookup_index.update(&local_reg.dynamic_lookup_index);
@@ -372,20 +438,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
res
})?;
self.linear_coord = linear_coord.into_inner();
#[allow(trivial_numeric_casts)]
{
self.max_lookup_inputs = max_lookup_inputs.into_inner();
self.min_lookup_inputs = min_lookup_inputs.into_inner();
}
self.row = row.into_inner();
self.used_lookups = Arc::try_unwrap(lookups)
self.statistics = Arc::try_unwrap(statistics)
.map_err(|e| CircuitError::GetLookupsError(format!("{:?}", e)))?
.into_inner()
.map_err(|e| CircuitError::GetLookupsError(format!("{:?}", e)))?;
self.used_range_checks = Arc::try_unwrap(range_checks)
.map_err(|e| CircuitError::GetRangeChecksError(format!("{:?}", e)))?
.into_inner()
.map_err(|e| CircuitError::GetRangeChecksError(format!("{:?}", e)))?;
self.dynamic_lookup_index = Arc::try_unwrap(dynamic_lookup_index)
.map_err(|e| CircuitError::GetDynamicLookupError(format!("{:?}", e)))?
.into_inner()
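The loop above fans work out across threads and then folds each worker's RegionStatistics back into the parent; a minimal sketch of that merge discipline, with the worker bodies elided:
use std::sync::{Arc, Mutex};

let stats = Arc::new(Mutex::new(RegionStatistics::default()));
for _ in 0..4 {
    let local = RegionStatistics::default(); // produced by a worker region
    stats.lock().unwrap().update(&local);    // fold into the shared copy
}
let merged = Arc::try_unwrap(stats).unwrap().into_inner().unwrap();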
@@ -409,11 +466,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
) -> Result<(), CircuitError> {
let (mut min, mut max) = (0, 0);
for i in inputs {
max = max.max(i.get_int_evals()?.into_iter().max().unwrap_or_default());
min = min.min(i.get_int_evals()?.into_iter().min().unwrap_or_default());
max = max.max(i.int_evals()?.into_iter().max().unwrap_or_default());
min = min.min(i.int_evals()?.into_iter().min().unwrap_or_default());
}
self.max_lookup_inputs = self.max_lookup_inputs.max(max);
self.min_lookup_inputs = self.min_lookup_inputs.min(min);
self.statistics.max_lookup_inputs = self.statistics.max_lookup_inputs.max(max);
self.statistics.min_lookup_inputs = self.statistics.min_lookup_inputs.min(min);
Ok(())
}
@@ -425,7 +482,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
let range_size = (range.1 - range.0).abs();
self.max_range_size = self.max_range_size.max(range_size);
self.statistics.max_range_size = self.statistics.max_range_size.max(range_size);
Ok(())
}
@@ -440,13 +497,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
lookup: LookupOp,
inputs: &[ValTensor<F>],
) -> Result<(), CircuitError> {
self.used_lookups.insert(lookup);
self.statistics.used_lookups.insert(lookup);
self.update_max_min_lookup_inputs(inputs)
}
/// add used range check
pub fn add_used_range_check(&mut self, range: Range) -> Result<(), CircuitError> {
self.used_range_checks.insert(range);
self.statistics.used_range_checks.insert(range);
self.update_max_min_lookup_range(range)
}
@@ -487,27 +544,27 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
/// get used lookups
pub fn used_lookups(&self) -> HashSet<LookupOp> {
self.used_lookups.clone()
self.statistics.used_lookups.clone()
}
/// get used range checks
pub fn used_range_checks(&self) -> HashSet<Range> {
self.used_range_checks.clone()
self.statistics.used_range_checks.clone()
}
/// max lookup inputs
pub fn max_lookup_inputs(&self) -> i64 {
self.max_lookup_inputs
pub fn max_lookup_inputs(&self) -> IntegerRep {
self.statistics.max_lookup_inputs
}
/// min lookup inputs
pub fn min_lookup_inputs(&self) -> i64 {
self.min_lookup_inputs
pub fn min_lookup_inputs(&self) -> IntegerRep {
self.statistics.min_lookup_inputs
}
/// max range check
pub fn max_range_size(&self) -> i64 {
self.max_range_size
pub fn max_range_size(&self) -> IntegerRep {
self.statistics.max_range_size
}
/// Assign a valtensor to a vartensor
@@ -515,18 +572,18 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, Error> {
) -> Result<ValTensor<F>, CircuitError> {
if let Some(region) = &self.region {
var.assign(
Ok(var.assign(
&mut region.borrow_mut(),
self.linear_coord,
values,
&mut self.assigned_constants,
)
)?)
} else {
if !values.is_instance() {
let values_map = values.create_constants_map_iterator();
self.assigned_constants.extend(values_map);
self.assigned_constants.par_extend(values_map);
}
Ok(values.clone())
}
@@ -542,20 +599,27 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, Error> {
) -> Result<(ValTensor<F>, usize), CircuitError> {
self.update_max_dynamic_input_len(values.len());
if let Some(region) = &self.region {
var.assign(
Ok(var.assign_exact_column(
&mut region.borrow_mut(),
self.combined_dynamic_shuffle_coord(),
values,
&mut self.assigned_constants,
)
)?)
} else {
if !values.is_instance() {
let values_map = values.create_constants_map_iterator();
self.assigned_constants.extend(values_map);
self.assigned_constants.par_extend(values_map);
}
Ok(values.clone())
let flush_len = var.get_column_flush(self.combined_dynamic_shuffle_coord(), values)?;
// flush_len is the gap between the current coordinate and the start of the next column
Ok((values.clone(), flush_len))
}
}
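Per the else-branch above, flush_len is the padding needed so the next dynamic-lookup assignment starts at a fresh column boundary; a hedged sketch of how a caller might consume the pair (the exact increment is an assumption):
let (assigned, flush_len) = region.assign_dynamic_lookup(&var, &values)?;
region.increment_dynamic_lookup_col_coord(assigned.len() + flush_len);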
@@ -564,7 +628,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, Error> {
) -> Result<(ValTensor<F>, usize), CircuitError> {
self.assign_dynamic_lookup(var, values)
}
@@ -573,27 +637,24 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
ommissions: &HashSet<&usize>,
) -> Result<ValTensor<F>, Error> {
ommissions: &HashSet<usize>,
) -> Result<ValTensor<F>, CircuitError> {
if let Some(region) = &self.region {
var.assign_with_omissions(
Ok(var.assign_with_omissions(
&mut region.borrow_mut(),
self.linear_coord,
values,
ommissions,
&mut self.assigned_constants,
)
)?)
} else {
let inner_tensor = values.get_inner_tensor().unwrap();
let mut values_map = values.create_constants_map();
let mut values_clone = values.clone();
let mut indices = ommissions.clone().into_iter().collect_vec();
values_clone.remove_indices(&mut indices, false)?;
for o in ommissions {
if let ValType::Constant(value) = inner_tensor.get_flat_index(**o) {
values_map.remove(&value);
}
}
let values_map = values.create_constants_map();
self.assigned_constants.extend(values_map);
self.assigned_constants.par_extend(values_map);
Ok(values.clone())
}

View File

@@ -11,20 +11,33 @@ use maybe_rayon::prelude::{IntoParallelIterator, ParallelIterator};
use crate::{
circuit::CircuitError,
fieldutils::i64_to_felt,
tensor::{IntoI64, Tensor, TensorType},
fieldutils::{integer_rep_to_felt, IntegerRep},
tensor::{Tensor, TensorType},
};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::execute::EZKL_REPO_PATH;
use crate::circuit::lookup::LookupOp;
/// The range of the lookup table.
pub type Range = (i64, i64);
pub type Range = (IntegerRep, IntegerRep);
/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: i64 = 2;
pub const RANGE_MULTIPLIER: IntegerRep = 2;
/// The safety factor offset for the number of rows in the lookup table.
pub const RESERVED_BLINDING_ROWS_PAD: usize = 3;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
lazy_static::lazy_static! {
/// an optional directory to read and write the lookup table cache
pub static ref LOOKUP_CACHE: String = format!("{}/cache", *EZKL_REPO_PATH);
}
/// The lookup table cache is disabled on wasm32 target.
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
pub const LOOKUP_CACHE: &str = "";
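A sketch of how the cache root resolves per the cfg-gated definitions above: with the `ezkl` feature on a non-wasm target it is EZKL_REPO_PATH/cache (by default ~/.ezkl/cache), while on wasm32 the empty string disables caching. The helper name here is hypothetical:
fn cache_dir_for(table_name: &str) -> Option<std::path::PathBuf> {
    // hypothetical helper mirroring the lookup in Table::layout below
    if LOOKUP_CACHE.is_empty() {
        return None; // caching disabled (wasm32 or no `ezkl` feature)
    }
    Some(std::path::Path::new(&*LOOKUP_CACHE).join(table_name))
}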
#[derive(Debug, Clone)]
///
pub struct SelectorConstructor<F: PrimeField> {
@@ -96,21 +109,22 @@ pub struct Table<F: PrimeField> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
/// get column index given input
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
let chunk =
(crate::fieldutils::felt_to_i64(input) - self.range.0).abs() / (self.col_size as i64);
let chunk = (crate::fieldutils::felt_to_integer_rep(input) - self.range.0).abs()
/ (self.col_size as IntegerRep);
i64_to_felt(chunk)
integer_rep_to_felt(chunk)
}
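A worked instance of the chunk arithmetic, assuming IntegerRep is a signed integer type and a table with range.0 = -128 and col_size = 100:
// an input whose integer representation is 72 lands in column 2:
let (range_start, col_size) = (-128i128, 100i128); // i128 stands in for IntegerRep
assert_eq!((72i128 - range_start).abs() / col_size, 2);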
/// get first_element of column
pub fn get_first_element(&self, chunk: usize) -> (F, F) {
let chunk = chunk as i64;
let chunk = chunk as IntegerRep;
// we index from 1 to prevent soundness issues
let first_element = i64_to_felt(chunk * (self.col_size as i64) + self.range.0);
let first_element =
integer_rep_to_felt(chunk * (self.col_size as IntegerRep) + self.range.0);
let op_f = self
.nonlinearity
.f(&[Tensor::from(vec![first_element].into_iter())])
@@ -130,12 +144,20 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<
}
///
pub fn num_cols_required(range_len: i64, col_size: usize) -> usize {
pub fn num_cols_required(range_len: IntegerRep, col_size: usize) -> usize {
// number of cols needed to store the range
(range_len / (col_size as i64)) as usize + 1
(range_len / (col_size as IntegerRep)) as usize + 1
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
fn name(&self) -> String {
format!(
"{}_{}_{}",
self.nonlinearity.as_path(),
self.range.0,
self.range.1
)
}
/// Configures the table.
pub fn configure(
cs: &mut ConstraintSystem<F>,
@@ -202,8 +224,51 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<
let smallest = self.range.0;
let largest = self.range.1;
let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i64_to_felt(x));
let evals = self.nonlinearity.f(&[inputs.clone()])?;
let gen_table = || -> Result<(Tensor<F>, Tensor<F>), crate::tensor::TensorError> {
let inputs = Tensor::from(smallest..=largest)
.par_enum_map(|_, x| Ok::<_, crate::tensor::TensorError>(integer_rep_to_felt(x)))?;
let evals = self.nonlinearity.f(&[inputs.clone()])?;
Ok((inputs, evals.output))
};
let (inputs, evals) = if !LOOKUP_CACHE.is_empty() {
let cache = std::path::Path::new(&*LOOKUP_CACHE);
let cache_path = cache.join(self.name());
let input_path = cache_path.join("inputs");
let output_path = cache_path.join("outputs");
if cache_path.exists() {
log::info!("Loading lookup table from cache: {:?}", cache_path);
let (input_cache, output_cache) =
(Tensor::load(&input_path)?, Tensor::load(&output_path)?);
(input_cache, output_cache)
} else {
log::info!(
"Generating lookup table and saving to cache: {:?}",
cache_path
);
// mkdir -p cache_path
std::fs::create_dir_all(&cache_path).map_err(|e| {
CircuitError::TensorError(crate::tensor::TensorError::FileSaveError(
e.to_string(),
))
})?;
let (inputs, evals) = gen_table()?;
inputs.save(&input_path)?;
evals.save(&output_path)?;
(inputs, evals)
}
} else {
log::info!(
"Generating lookup table {} without cache",
self.nonlinearity.as_path()
);
gen_table()?
};
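// resulting on-disk layout (sketch), rooted at LOOKUP_CACHE:
//   <LOOKUP_CACHE>/<nonlinearity_as_path>_<range.0>_<range.1>/inputs
//   <LOOKUP_CACHE>/<nonlinearity_as_path>_<range.0>_<range.1>/outputs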
let chunked_inputs = inputs.chunks(self.col_size);
self.is_assigned = true;
@@ -235,7 +300,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<
)?;
}
let output = evals.output[row_offset];
let output = evals[row_offset];
table.assign_cell(
|| format!("nl_o_col row {}", row_offset),
@@ -272,12 +337,17 @@ pub struct RangeCheck<F: PrimeField> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> RangeCheck<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
/// as path
pub fn as_path(&self) -> String {
format!("rangecheck_{}_{}", self.range.0, self.range.1)
}
/// get first_element of column
pub fn get_first_element(&self, chunk: usize) -> F {
let chunk = chunk as i64;
let chunk = chunk as IntegerRep;
// we index from 1 to prevent soundness issues
i64_to_felt(chunk * (self.col_size as i64) + self.range.0)
integer_rep_to_felt(chunk * (self.col_size as IntegerRep) + self.range.0)
}
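This is the inverse of the chunk computation shown earlier, as a worked instance with the same assumed range.0 = -128 and col_size = 100:
let (range_start, col_size) = (-128i128, 100i128); // i128 stands in for IntegerRep
let chunk = 2i128;
assert_eq!(chunk * col_size + range_start, 72); // chunk 2 starts at 72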
///
@@ -293,14 +363,14 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> RangeC
/// get column index given input
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
let chunk =
(crate::fieldutils::felt_to_i64(input) - self.range.0).abs() / (self.col_size as i64);
let chunk = (crate::fieldutils::felt_to_integer_rep(input) - self.range.0).abs()
/ (self.col_size as IntegerRep);
i64_to_felt(chunk)
integer_rep_to_felt(chunk)
}
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> RangeCheck<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
/// Configures the table.
pub fn configure(cs: &mut ConstraintSystem<F>, range: Range, logrows: usize) -> RangeCheck<F> {
log::debug!("range check range: {:?}", range);
@@ -350,7 +420,32 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> RangeC
let smallest = self.range.0;
let largest = self.range.1;
let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i64_to_felt(x));
let inputs: Tensor<F> = if !LOOKUP_CACHE.is_empty() {
let cache = std::path::Path::new(&*LOOKUP_CACHE);
let cache_path = cache.join(self.as_path());
let input_path = cache_path.join("inputs");
if cache_path.exists() {
log::info!("Loading range check table from cache: {:?}", cache_path);
Tensor::load(&input_path)?
} else {
log::info!(
"Generating range check table and saving to cache: {:?}",
cache_path
);
// mkdir -p cache_path
std::fs::create_dir_all(&cache_path)?;
let inputs = Tensor::from(smallest..=largest).map(|x| integer_rep_to_felt(x));
inputs.save(&input_path)?;
inputs
}
} else {
log::info!("Generating range check {} without cache", self.as_path());
Tensor::from(smallest..=largest).map(|x| integer_rep_to_felt(x))
};
let chunked_inputs = inputs.chunks(self.col_size);
self.is_assigned = true;

View File

@@ -8,6 +8,10 @@ use halo2_proofs::{
};
use halo2curves::bn256::Fr as F;
use halo2curves::ff::{Field, PrimeField};
#[cfg(not(any(
all(target_arch = "wasm32", target_os = "unknown"),
not(feature = "ezkl")
)))]
use ops::lookup::LookupOp;
use ops::region::RegionCtx;
use rand::rngs::OsRng;
@@ -55,7 +59,7 @@ mod matmul {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -132,7 +136,7 @@ mod matmul_col_overflow_double_col {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 128, 2);
config
.layout(
&mut region,
@@ -206,7 +210,7 @@ mod matmul_col_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -243,7 +247,10 @@ mod matmul_col_overflow {
}
#[cfg(test)]
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
mod matmul_col_ultra_overflow_double_col {
use halo2_proofs::poly::kzg::{
@@ -256,7 +263,7 @@ mod matmul_col_ultra_overflow_double_col {
use super::*;
const K: usize = 4;
const LEN: usize = 20;
const LEN: usize = 10;
const NUM_INNER_COLS: usize = 2;
#[derive(Clone)]
@@ -290,7 +297,7 @@ mod matmul_col_ultra_overflow_double_col {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 128, 2);
config
.layout(
&mut region,
@@ -361,7 +368,10 @@ mod matmul_col_ultra_overflow_double_col {
}
#[cfg(test)]
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
mod matmul_col_ultra_overflow {
use halo2_proofs::poly::kzg::{
@@ -374,7 +384,7 @@ mod matmul_col_ultra_overflow {
use super::*;
const K: usize = 4;
const LEN: usize = 20;
const LEN: usize = 10;
#[derive(Clone)]
struct MatmulCircuit<F: PrimeField + TensorType + PartialOrd> {
@@ -407,7 +417,7 @@ mod matmul_col_ultra_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -518,7 +528,7 @@ mod dot {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -595,7 +605,7 @@ mod dot_col_overflow_triple_col {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 3);
let mut region = RegionCtx::new(region, 0, 3, 128, 2);
config
.layout(
&mut region,
@@ -668,7 +678,7 @@ mod dot_col_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -741,7 +751,7 @@ mod sum {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -811,7 +821,7 @@ mod sum_col_overflow_double_col {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 128, 2);
config
.layout(
&mut region,
@@ -880,7 +890,7 @@ mod sum_col_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -951,7 +961,7 @@ mod composition {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let _ = config
.layout(
&mut region,
@@ -1042,7 +1052,7 @@ mod conv {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -1050,6 +1060,7 @@ mod conv {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
group: 1,
}),
)
.map_err(|_| Error::Synthesis)
@@ -1143,7 +1154,10 @@ mod conv {
}
#[cfg(test)]
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
mod conv_col_ultra_overflow {
use halo2_proofs::poly::{
@@ -1158,7 +1172,7 @@ mod conv_col_ultra_overflow {
use super::*;
const K: usize = 4;
const LEN: usize = 28;
const LEN: usize = 10;
#[derive(Clone)]
struct ConvCircuit<F: PrimeField + TensorType + PartialOrd> {
@@ -1192,7 +1206,7 @@ mod conv_col_ultra_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(
&mut region,
@@ -1200,6 +1214,7 @@ mod conv_col_ultra_overflow {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
group: 1,
}),
)
.map_err(|_| Error::Synthesis)
@@ -1283,7 +1298,10 @@ mod conv_col_ultra_overflow {
#[cfg(test)]
// not wasm 32 unknown
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
mod conv_relu_col_ultra_overflow {
use halo2_proofs::poly::kzg::{
@@ -1295,8 +1313,8 @@ mod conv_relu_col_ultra_overflow {
use super::*;
const K: usize = 4;
const LEN: usize = 28;
const K: usize = 8;
const LEN: usize = 15;
#[derive(Clone)]
struct ConvCircuit<F: PrimeField + TensorType + PartialOrd> {
@@ -1315,15 +1333,23 @@ mod conv_relu_col_ultra_overflow {
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let a = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
let b = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
let output = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
let a = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * 4);
let b = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * 4);
let output = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * 4);
let mut base_config =
Self::Config::configure(cs, &[a.clone(), b.clone()], &output, CheckMode::SAFE);
// sets up a new relu table
base_config
.configure_lookup(cs, &b, &output, &a, (-3, 3), K, &LookupOp::ReLU)
.configure_range_check(cs, &a, &b, (-1, 1), K)
.unwrap();
base_config
.configure_range_check(cs, &a, &b, (0, 1), K)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K, 8, false);
base_config.clone()
}
@@ -1332,12 +1358,12 @@ mod conv_relu_col_ultra_overflow {
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
config.layout_tables(&mut layouter).unwrap();
config.layout_range_checks(&mut layouter).unwrap();
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 2, 2);
let output = config
.layout(
&mut region,
@@ -1345,6 +1371,7 @@ mod conv_relu_col_ultra_overflow {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
group: 1,
}),
)
.map_err(|_| Error::Synthesis);
@@ -1352,7 +1379,10 @@ mod conv_relu_col_ultra_overflow {
.layout(
&mut region,
&[output.unwrap().unwrap()],
Box::new(LookupOp::ReLU),
Box::new(PolyOp::LeakyReLU {
slope: 0.0.into(),
scale: 1,
}),
)
.unwrap();
Ok(())
@@ -1473,7 +1503,7 @@ mod add_w_shape_casting {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Add))
.map_err(|_| Error::Synthesis)
@@ -1489,7 +1519,7 @@ mod add_w_shape_casting {
// parameters
let a = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1))));
let b = Tensor::from((0..1).map(|i| Value::known(F::from(i as u64 + 1))));
let b = Tensor::from((0..1).map(|i| Value::known(F::from(i + 1))));
let circuit = MyCircuit::<F> {
inputs: [ValTensor::from(a), ValTensor::from(b)],
@@ -1540,7 +1570,7 @@ mod add {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Add))
.map_err(|_| Error::Synthesis)
@@ -1624,7 +1654,7 @@ mod dynamic_lookup {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
for i in 0..NUM_LOOP {
layouts::dynamic_lookup(
&config,
@@ -1766,7 +1796,7 @@ mod shuffle {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
for i in 0..NUM_LOOP {
layouts::shuffles(
&config,
@@ -1881,7 +1911,7 @@ mod add_with_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Add))
.map_err(|_| Error::Synthesis)
@@ -1983,7 +2013,7 @@ mod add_with_overflow_and_poseidon {
layouter.assign_region(
|| "model",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.base
.layout(&mut region, &inputs, Box::new(PolyOp::Add))
@@ -2089,7 +2119,7 @@ mod sub {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Sub))
.map_err(|_| Error::Synthesis)
@@ -2156,7 +2186,7 @@ mod mult {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Mult))
.map_err(|_| Error::Synthesis)
@@ -2223,7 +2253,7 @@ mod pow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Pow(5)))
.map_err(|_| Error::Synthesis)
@@ -2255,7 +2285,6 @@ mod matmul_relu {
const K: usize = 18;
const LEN: usize = 32;
use crate::circuit::lookup::LookupOp;
#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
@@ -2285,11 +2314,17 @@ mod matmul_relu {
let mut base_config =
BaseConfig::configure(cs, &[a.clone(), b.clone()], &output, CheckMode::SAFE);
// sets up a new relu table
base_config
.configure_lookup(cs, &b, &output, &a, (-32768, 32768), K, &LookupOp::ReLU)
.configure_range_check(cs, &a, &b, (-1, 1), K)
.unwrap();
base_config
.configure_range_check(cs, &a, &b, (0, 1023), K)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K, 8, false);
MyConfig { base_config }
}
@@ -2298,11 +2333,14 @@ mod matmul_relu {
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
config.base_config.layout_tables(&mut layouter).unwrap();
config
.base_config
.layout_range_checks(&mut layouter)
.unwrap();
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
let op = PolyOp::Einsum {
equation: "ij,jk->ik".to_string(),
};
@@ -2312,7 +2350,14 @@ mod matmul_relu {
.unwrap();
let _output = config
.base_config
.layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[output.unwrap()],
Box::new(PolyOp::LeakyReLU {
slope: 0.0.into(),
scale: 1,
}),
)
.unwrap();
Ok(())
},
@@ -2351,6 +2396,8 @@ mod relu {
plonk::{Circuit, ConstraintSystem, Error},
};
const K: u32 = 8;
#[derive(Clone)]
struct ReLUCircuit<F: PrimeField + TensorType + PartialOrd> {
pub input: ValTensor<F>,
@@ -2367,16 +2414,26 @@ mod relu {
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let advices = (0..3)
.map(|_| VarTensor::new_advice(cs, 4, 1, 3))
.map(|_| VarTensor::new_advice(cs, 8, 1, 3))
.collect::<Vec<_>>();
let nl = LookupOp::ReLU;
let mut config = BaseConfig::default();
let mut config = BaseConfig::configure(
cs,
&[advices[0].clone(), advices[1].clone()],
&advices[2],
CheckMode::SAFE,
);
config
.configure_lookup(cs, &advices[0], &advices[1], &advices[2], (-6, 6), 4, &nl)
.configure_range_check(cs, &advices[0], &advices[1], (-1, 1), K as usize)
.unwrap();
config
.configure_range_check(cs, &advices[0], &advices[1], (0, 1), K as usize)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K as usize, 8, false);
config
}
@@ -2385,15 +2442,22 @@ mod relu {
mut config: Self::Config,
mut layouter: impl Layouter<F>, // layouter is our 'write buffer' for the circuit
) -> Result<(), Error> {
config.layout_tables(&mut layouter).unwrap();
config.layout_range_checks(&mut layouter).unwrap();
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &[self.input.clone()], Box::new(LookupOp::ReLU))
.map_err(|_| Error::Synthesis)
let mut region = RegionCtx::new(region, 0, 1, 2, 2);
Ok(config
.layout(
&mut region,
&[self.input.clone()],
Box::new(PolyOp::LeakyReLU {
slope: 0.0.into(),
scale: 1,
}),
)
.unwrap())
},
)
.unwrap();
@@ -2411,13 +2475,16 @@ mod relu {
input: ValTensor::from(input),
};
let prover = MockProver::run(4_u32, &circuit, vec![]).unwrap();
let prover = MockProver::run(K, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}
}
#[cfg(test)]
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
mod lookup_ultra_overflow {
use super::*;
use halo2_proofs::{
@@ -2432,11 +2499,11 @@ mod lookup_ultra_overflow {
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
#[derive(Clone)]
struct ReLUCircuit<F: PrimeField + TensorType + PartialOrd> {
struct SigmoidCircuit<F: PrimeField + TensorType + PartialOrd> {
pub input: ValTensor<F>,
}
impl Circuit<F> for ReLUCircuit<F> {
impl Circuit<F> for SigmoidCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;
@@ -2450,7 +2517,7 @@ mod lookup_ultra_overflow {
.map(|_| VarTensor::new_advice(cs, 4, 1, 3))
.collect::<Vec<_>>();
let nl = LookupOp::ReLU;
let nl = LookupOp::Sigmoid { scale: 1.0.into() };
let mut config = BaseConfig::default();
@@ -2478,9 +2545,13 @@ mod lookup_ultra_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
config
.layout(&mut region, &[self.input.clone()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[self.input.clone()],
Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
)
.map_err(|_| Error::Synthesis)
},
)
@@ -2492,13 +2563,13 @@ mod lookup_ultra_overflow {
#[test]
#[ignore]
fn relucircuit() {
fn sigmoidcircuit() {
// get some logs fam
crate::logger::init_logger();
// parameters
let a = Tensor::from((0..4).map(|i| Value::known(F::from(i + 1))));
let circuit = ReLUCircuit::<F> {
let circuit = SigmoidCircuit::<F> {
input: ValTensor::from(a),
};
@@ -2508,7 +2579,7 @@ mod lookup_ultra_overflow {
let pk = crate::pfsys::create_keys::<
halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme<halo2curves::bn256::Bn256>,
ReLUCircuit<F>,
SigmoidCircuit<F>,
>(&circuit, &params, true)
.unwrap();

View File

@@ -141,23 +141,23 @@ mod tests {
#[test]
fn f32_eq() {
assert!(F32(std::f32::NAN) == F32(std::f32::NAN));
assert!(F32(std::f32::NAN) != F32(5.0));
assert!(F32(5.0) != F32(std::f32::NAN));
assert!(F32(f32::NAN) == F32(f32::NAN));
assert!(F32(f32::NAN) != F32(5.0));
assert!(F32(5.0) != F32(f32::NAN));
assert!(F32(0.0) == F32(-0.0));
}
#[test]
fn f32_cmp() {
assert!(F32(std::f32::NAN) == F32(std::f32::NAN));
assert!(F32(std::f32::NAN) < F32(5.0));
assert!(F32(5.0) > F32(std::f32::NAN));
assert!(F32(f32::NAN) == F32(f32::NAN));
assert!(F32(f32::NAN) < F32(5.0));
assert!(F32(5.0) > F32(f32::NAN));
assert!(F32(0.0) == F32(-0.0));
}
#[test]
fn f32_hash() {
assert!(calculate_hash(&F32(0.0)) == calculate_hash(&F32(-0.0)));
assert!(calculate_hash(&F32(std::f32::NAN)) == calculate_hash(&F32(-std::f32::NAN)));
assert!(calculate_hash(&F32(f32::NAN)) == calculate_hash(&F32(-f32::NAN)));
}
}
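These asserts only hold because F32 defines a custom total equality and ordering in which NaN equals NaN and sorts below every number; a sketch of the rule the tests pin down (not the crate's actual impl):
fn f32_total_lt(a: f32, b: f32) -> bool {
    match (a.is_nan(), b.is_nan()) {
        (true, true) => false,   // NaN == NaN, so not less-than
        (true, false) => true,   // NaN sorts below all numbers
        (false, true) => false,
        (false, false) => a < b, // ordinary IEEE comparison otherwise
    }
}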

View File

@@ -1,4 +1,3 @@
#[cfg(not(target_arch = "wasm32"))]
use alloy::primitives::Address as H160;
use clap::{Command, Parser, Subcommand};
use clap_complete::{generate, Generator, Shell};
@@ -17,7 +16,6 @@ use tosubcommand::{ToFlags, ToSubcommand};
use crate::{pfsys::ProofType, Commitments, RunArgs};
use crate::circuit::CheckMode;
#[cfg(not(target_arch = "wasm32"))]
use crate::graph::TestDataSource;
use crate::pfsys::TranscriptType;
@@ -81,8 +79,10 @@ pub const DEFAULT_CALIBRATION_FILE: &str = "calibration.json";
pub const DEFAULT_LOOKUP_SAFETY_MARGIN: &str = "2";
/// Default Compress selectors
pub const DEFAULT_DISABLE_SELECTOR_COMPRESSION: &str = "false";
/// Default render vk separately
pub const DEFAULT_RENDER_VK_SEPERATELY: &str = "false";
/// Default render reusable verifier
pub const DEFAULT_RENDER_REUSABLE: &str = "false";
/// Default contract deployment type
pub const DEFAULT_CONTRACT_DEPLOYMENT_TYPE: &str = "verifier";
/// Default VK sol path
pub const DEFAULT_VK_SOL: &str = "vk.sol";
/// Default VK abi path
@@ -181,28 +181,85 @@ impl From<&str> for CalibrationTarget {
}
}
#[cfg(not(target_arch = "wasm32"))]
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
/// Determines what type of contract (verifier, verifier/reusable, vka) should be deployed
pub enum ContractType {
/// Deploys a verifier contract tailored to the circuit; it is not reusable
Verifier {
/// Whether to deploy a reusable verifier. This can reduce on-chain state bloat, since for a given circuit you need only deploy a verifying key artifact (vka), which is significantly smaller than the verifier contract (up to 4 times smaller for large circuits).
/// Can also be used as an alternative to aggregation for verifiers that are otherwise too large to fit on-chain.
reusable: bool,
},
/// Deploys a verifying key artifact that the reusable verifier loads into memory at runtime. Encodes the circuit-specific data that was otherwise hardcoded onto the stack.
VerifyingKeyArtifact,
}
impl Default for ContractType {
fn default() -> Self {
ContractType::Verifier {
reusable: false,
}
}
}
impl std::fmt::Display for ContractType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
ContractType::Verifier { reusable: true } => {
"verifier/reusable".to_string()
},
ContractType::Verifier {
reusable: false,
} => "verifier".to_string(),
ContractType::VerifyingKeyArtifact => "vka".to_string(),
}
)
}
}
impl ToFlags for ContractType {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl From<&str> for ContractType {
fn from(s: &str) -> Self {
match s {
"verifier" => ContractType::Verifier { reusable: false },
"verifier/reusable" => ContractType::Verifier { reusable: true },
"vka" => ContractType::VerifyingKeyArtifact,
_ => {
log::error!("Invalid value for ContractType");
log::warn!("Defaulting to verifier");
ContractType::default()
}
}
}
}
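The string round-trip these impls establish (and which the --contract-type flag relies on), as a quick sketch:
assert_eq!(ContractType::from("verifier/reusable"), ContractType::Verifier { reusable: true });
assert_eq!(ContractType::from("vka"), ContractType::VerifyingKeyArtifact);
assert_eq!(ContractType::Verifier { reusable: false }.to_string(), "verifier");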
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
/// wrapper for H160 to make it easy to parse into flag vals
pub struct H160Flag {
inner: H160,
}
#[cfg(not(target_arch = "wasm32"))]
impl From<H160Flag> for H160 {
fn from(val: H160Flag) -> H160 {
val.inner
}
}
#[cfg(not(target_arch = "wasm32"))]
impl ToFlags for H160Flag {
fn to_flags(&self) -> Vec<String> {
vec![format!("{:#x}", self.inner)]
}
}
#[cfg(not(target_arch = "wasm32"))]
impl From<&str> for H160Flag {
fn from(s: &str) -> Self {
Self {
@@ -243,6 +300,39 @@ impl<'source> FromPyObject<'source> for CalibrationTarget {
}
}
}
#[cfg(feature = "python-bindings")]
/// Converts ContractType into a PyObject (Required for ContractType to be compatible with Python)
impl IntoPy<PyObject> for ContractType {
fn into_py(self, py: Python) -> PyObject {
match self {
ContractType::Verifier { reusable: true } => {
"verifier/reusable".to_object(py)
}
ContractType::Verifier {
reusable: false,
} => "verifier".to_object(py),
ContractType::VerifyingKeyArtifact => "vka".to_object(py),
}
}
}
#[cfg(feature = "python-bindings")]
/// Obtains ContractType from PyObject (Required for ContractType to be compatible with Python)
impl<'source> FromPyObject<'source> for ContractType {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
let trystr = <PyString as PyTryFrom>::try_from(ob)?;
let strval = trystr.to_string();
match strval.to_lowercase().as_str() {
"verifier" => Ok(ContractType::Verifier {
reusable: false,
}),
"verifier/reusable" => Ok(ContractType::Verifier { reusable: true }),
"vka" => Ok(ContractType::VerifyingKeyArtifact),
_ => Err(PyValueError::new_err("Invalid value for ContractType")),
}
}
}
// not wasm
use lazy_static::lazy_static;
@@ -365,8 +455,7 @@ pub enum Commands {
},
/// Calibrates the proving scale, lookup bits and logrows from a circuit settings file.
#[cfg(not(target_arch = "wasm32"))]
CalibrateSettings {
CalibrateSettings {
/// The path to the .json calibration data file.
#[arg(short = 'D', long, default_value = DEFAULT_CALIBRATION_FILE, value_hint = clap::ValueHint::FilePath)]
data: Option<PathBuf>,
@@ -416,11 +505,10 @@ pub enum Commands {
commitment: Option<Commitments>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Gets an SRS from a circuit settings file.
/// Gets an SRS from a circuit settings file.
#[command(name = "get-srs")]
GetSrs {
/// The path to output the desired srs file, if set to None will save to $EZKL_REPO_PATH/srs
/// The path to output the desired srs file, if set to None will save to ~/.ezkl/srs
#[arg(long, default_value = None, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
/// Path to the circuit settings .json file to read in logrows from. Overriden by logrows if specified.
@@ -467,7 +555,7 @@ pub enum Commands {
/// The path to save the proving key to
#[arg(long, default_value = DEFAULT_PK_AGGREGATED, value_hint = clap::ValueHint::FilePath)]
pk_path: Option<PathBuf>,
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
/// logrows used for aggregation circuit
@@ -494,7 +582,7 @@ pub enum Commands {
/// The path to output the proof file to
#[arg(long, default_value = DEFAULT_PROOF_AGGREGATED, value_hint = clap::ValueHint::FilePath)]
proof_path: Option<PathBuf>,
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
#[arg(
@@ -536,7 +624,7 @@ pub enum Commands {
/// The path to the compiled model file (generated using the compile-circuit command)
#[arg(short = 'M', long, default_value = DEFAULT_COMPILED_CIRCUIT, value_hint = clap::ValueHint::FilePath)]
compiled_circuit: Option<PathBuf>,
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
/// The path to output the verification key file to
@@ -552,8 +640,7 @@ pub enum Commands {
#[arg(long, default_value = DEFAULT_DISABLE_SELECTOR_COMPRESSION, action = clap::ArgAction::SetTrue)]
disable_selector_compression: Option<bool>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys a test contract that the data attester reads from and creates a data attestation formatted input.json file that contains call data information
/// Deploys a test contract that the data attester reads from and creates a data attestation formatted input.json file that contains call data information
#[command(arg_required_else_help = true)]
SetupTestEvmData {
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
@@ -577,8 +664,7 @@ pub enum Commands {
#[arg(long, default_value = "on-chain", value_hint = clap::ValueHint::Other)]
output_source: TestDataSource,
},
#[cfg(not(target_arch = "wasm32"))]
/// The Data Attestation Verifier contract stores the account calls to fetch data to feed into ezkl. This call data can be updated by an admin account. This tests that the admin account is able to update this call data.
/// The Data Attestation Verifier contract stores the account calls to fetch data to feed into ezkl. This call data can be updated by an admin account. This tests that the admin account is able to update this call data.
#[command(arg_required_else_help = true)]
TestUpdateAccountCalls {
/// The path to the verifier contract's address
@@ -591,8 +677,7 @@ pub enum Commands {
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
rpc_url: Option<String>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Swaps the positions in the transcript that correspond to commitments
/// Swaps the positions in the transcript that correspond to commitments
SwapProofCommitments {
/// The path to the proof file
#[arg(short = 'P', long, default_value = DEFAULT_PROOF, value_hint = clap::ValueHint::FilePath)]
@@ -602,8 +687,7 @@ pub enum Commands {
witness_path: Option<PathBuf>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Loads model, data, and creates proof
/// Loads model, data, and creates proof
Prove {
/// The path to the .json witness file (generated using the gen-witness command)
#[arg(short = 'W', long, default_value = DEFAULT_WITNESS, value_hint = clap::ValueHint::FilePath)]
@@ -617,7 +701,7 @@ pub enum Commands {
/// The path to output the proof file to
#[arg(long, default_value = DEFAULT_PROOF, value_hint = clap::ValueHint::FilePath)]
proof_path: Option<PathBuf>,
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
#[arg(
@@ -633,8 +717,7 @@ pub enum Commands {
#[arg(long, default_value = DEFAULT_CHECKMODE, value_hint = clap::ValueHint::Other)]
check_mode: Option<CheckMode>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Encodes a proof into evm calldata
/// Encodes a proof into evm calldata
#[command(name = "encode-evm-calldata")]
EncodeEvmCalldata {
/// The path to the proof file (generated using the prove command)
@@ -647,11 +730,10 @@ pub enum Commands {
#[arg(long, value_hint = clap::ValueHint::Other)]
addr_vk: Option<H160Flag>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an Evm verifier for a single proof
/// Creates an Evm verifier for a single proof
#[command(name = "create-evm-verifier")]
CreateEvmVerifier {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
/// The path to load circuit settings .json file from (generated using the gen-settings command)
@@ -666,17 +748,14 @@ pub enum Commands {
/// The path to output the Solidity verifier ABI
#[arg(long, default_value = DEFAULT_VERIFIER_ABI, value_hint = clap::ValueHint::FilePath)]
abi_path: Option<PathBuf>,
/// Whether the verifier key should be rendered as a separate contract.
/// We recommend disabling selector compression if this is enabled.
/// To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command.
#[arg(long, default_value = DEFAULT_RENDER_VK_SEPERATELY, action = clap::ArgAction::SetTrue)]
render_vk_seperately: Option<bool>,
/// Whether to render the verifier as reusable or not. If true, you will need to deploy a VK artifact, passing it as part of the calldata to the verifier.
#[arg(long, default_value = DEFAULT_RENDER_REUSABLE, action = clap::ArgAction::SetTrue)]
reusable: Option<bool>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an Evm verifier for a single proof
#[command(name = "create-evm-vk")]
CreateEvmVK {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// Creates an Evm verifier artifact for a single proof to be used by the reusable verifier
#[command(name = "create-evm-vka")]
CreateEvmVKArtifact {
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
/// The path to load circuit settings .json file from (generated using the gen-settings command)
@@ -692,8 +771,7 @@ pub enum Commands {
#[arg(long, default_value = DEFAULT_VK_ABI, value_hint = clap::ValueHint::FilePath)]
abi_path: Option<PathBuf>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an Evm verifier that attests to on-chain inputs for a single proof
/// Creates an Evm verifier that attests to on-chain inputs for a single proof
#[command(name = "create-evm-da")]
CreateEvmDataAttestation {
/// The path to load circuit settings .json file from (generated using the gen-settings command)
@@ -717,11 +795,10 @@ pub enum Commands {
witness: Option<PathBuf>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an Evm verifier for an aggregate proof
/// Creates an Evm verifier for an aggregate proof
#[command(name = "create-evm-verifier-aggr")]
CreateEvmVerifierAggr {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
/// The path to load the desired verification key file
@@ -739,11 +816,9 @@ pub enum Commands {
// logrows used for aggregation circuit
#[arg(long, default_value = DEFAULT_AGGREGATED_LOGROWS, value_hint = clap::ValueHint::Other)]
logrows: Option<u32>,
/// Whether the verifier key should be rendered as a separate contract.
/// We recommend disabling selector compression if this is enabled.
/// To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command.
#[arg(long, default_value = DEFAULT_RENDER_VK_SEPERATELY, action = clap::ArgAction::SetTrue)]
render_vk_seperately: Option<bool>,
/// Whether to render the verifier as reusable or not. If true, you will need to deploy a VK artifact, passing it as part of the calldata to the verifier.
#[arg(long, default_value = DEFAULT_RENDER_REUSABLE, action = clap::ArgAction::SetTrue)]
reusable: Option<bool>,
},
/// Verifies a proof, returning accept or reject
Verify {
@@ -756,7 +831,7 @@ pub enum Commands {
/// The path to the verification key file (generated using the setup command)
#[arg(long, default_value = DEFAULT_VK, value_hint = clap::ValueHint::FilePath)]
vk_path: Option<PathBuf>,
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
/// Reduce SRS logrows to the number of instances rather than the number of logrows used for proofs (only works if the srs were generated in the same ceremony)
@@ -774,7 +849,7 @@ pub enum Commands {
/// reduced srs
#[arg(long, default_value = DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION, action = clap::ArgAction::SetTrue)]
reduced_srs: Option<bool>,
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
/// The path to SRS, if None will use ~/.ezkl/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
/// logrows used for aggregation circuit
@@ -784,9 +859,8 @@ pub enum Commands {
#[arg(long, default_value = DEFAULT_COMMITMENT, value_hint = clap::ValueHint::Other)]
commitment: Option<Commitments>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys an evm verifier that is generated by ezkl
DeployEvmVerifier {
/// Deploys an evm contract (verifier, reusable verifier, or vk artifact) that is generated by ezkl
DeployEvm {
/// The path to the Solidity code (generated using the create-evm-verifier command)
#[arg(long, default_value = DEFAULT_SOL_CODE, value_hint = clap::ValueHint::FilePath)]
sol_code_path: Option<PathBuf>,
@@ -802,28 +876,11 @@ pub enum Commands {
/// Private secp256k1 key in hex format, 64 chars, no 0x prefix, of the account signing transactions. If None the private key will be generated by Anvil
#[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
private_key: Option<String>,
/// Contract type to be deployed
#[arg(long = "contract-type", short = 'C', default_value = DEFAULT_CONTRACT_DEPLOYMENT_TYPE, value_hint = clap::ValueHint::Other)]
contract: ContractType,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys an evm verifier that is generated by ezkl
DeployEvmVK {
/// The path to the Solidity code (generated using the create-evm-verifier command)
#[arg(long, default_value = DEFAULT_VK_SOL, value_hint = clap::ValueHint::FilePath)]
sol_code_path: Option<PathBuf>,
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
rpc_url: Option<String>,
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS_VK, value_hint = clap::ValueHint::Other)]
/// The path to output the contract address
addr_path: Option<PathBuf>,
/// The optimizer runs to set on the verifier. Lower values optimize for deployment cost, while higher values optimize for gas cost.
#[arg(long, default_value = DEFAULT_OPTIMIZER_RUNS, value_hint = clap::ValueHint::Other)]
optimizer_runs: usize,
/// Private secp256k1 key in hex format, 64 chars, no 0x prefix, of the account signing transactions. If None the private key will be generated by Anvil
#[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
private_key: Option<String>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys an evm verifier that allows for data attestation
/// Deploys an evm verifier that allows for data attestation
#[command(name = "deploy-evm-da")]
DeployEvmDataAttestation {
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
@@ -848,8 +905,7 @@ pub enum Commands {
#[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
private_key: Option<String>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Verifies a proof using a local Evm executor, returning accept or reject
/// Verifies a proof using a local Evm executor, returning accept or reject
#[command(name = "verify-evm")]
VerifyEvm {
/// The path to the proof file (generated using the prove command)

View File

@@ -1,7 +1,6 @@
use crate::graph::input::{CallsToAccount, FileSourceInner, GraphData};
use crate::graph::modules::POSEIDON_INSTANCES;
use crate::graph::DataSource;
#[cfg(not(target_arch = "wasm32"))]
use crate::graph::GraphSettings;
use crate::pfsys::evm::EvmVerificationError;
use crate::pfsys::Snark;
@@ -11,8 +10,6 @@ use alloy::core::primitives::Bytes;
use alloy::core::primitives::U256;
use alloy::dyn_abi::abi::token::{DynSeqToken, PackedSeqToken, WordToken};
use alloy::dyn_abi::abi::TokenSeq;
#[cfg(target_arch = "wasm32")]
use alloy::prelude::Wallet;
// use alloy::providers::Middleware;
use alloy::json_abi::JsonAbi;
use alloy::node_bindings::Anvil;
@@ -285,7 +282,6 @@ pub type EthersClient = Arc<
pub type ContractFactory<M> = CallBuilder<Http<Client>, Arc<M>, ()>;
/// Return an instance of Anvil and a client for the given RPC URL. If none is provided, a local client is used.
#[cfg(not(target_arch = "wasm32"))]
pub async fn setup_eth_backend(
rpc_url: Option<&str>,
private_key: Option<&str>,
@@ -614,7 +610,6 @@ pub async fn update_account_calls(
}
/// Verify a proof using a Solidity verifier contract
#[cfg(not(target_arch = "wasm32"))]
pub async fn verify_proof_via_solidity(
proof: Snark<Fr, G1Affine>,
addr: H160,
@@ -716,7 +711,6 @@ pub async fn setup_test_contract<M: 'static + Provider<Http<Client>, Ethereum>>(
/// Verify a proof using a Solidity DataAttestation contract.
/// Used for testing purposes.
#[cfg(not(target_arch = "wasm32"))]
pub async fn verify_proof_with_data_attestation(
proof: Snark<Fr, G1Affine>,
addr_verifier: H160,
@@ -731,7 +725,7 @@ pub async fn verify_proof_with_data_attestation(
for val in flattened_instances.clone() {
let bytes = val.to_repr();
let u = U256::from_le_slice(bytes.as_slice());
let u = U256::from_le_slice(bytes.inner().as_slice());
public_inputs.push(u);
}
@@ -829,7 +823,6 @@ pub async fn test_on_chain_data<M: 'static + Provider<Http<Client>, Ethereum>>(
}
/// Reads on-chain inputs, returning the raw encoded data returned from making all the calls in on_chain_input_data
#[cfg(not(target_arch = "wasm32"))]
pub async fn read_on_chain_inputs<M: 'static + Provider<Http<Client>, Ethereum>>(
client: Arc<M>,
address: H160,
@@ -863,7 +856,6 @@ pub async fn read_on_chain_inputs<M: 'static + Provider<Http<Client>, Ethereum>>
}
///
#[cfg(not(target_arch = "wasm32"))]
pub async fn evm_quantize<M: 'static + Provider<Http<Client>, Ethereum>>(
client: Arc<M>,
scales: Vec<crate::Scale>,
@@ -964,7 +956,6 @@ fn get_sol_contract_factory<'a, M: 'static + Provider<Http<Client>, Ethereum>, T
}
/// Compiles a solidity verifier contract and returns the abi, bytecode, and runtime bytecode
#[cfg(not(target_arch = "wasm32"))]
pub async fn get_contract_artifacts(
sol_code_path: PathBuf,
contract_name: &str,

View File

@@ -1,17 +1,13 @@
use crate::circuit::region::RegionSettings;
use crate::circuit::CheckMode;
#[cfg(not(target_arch = "wasm32"))]
use crate::commands::CalibrationTarget;
#[cfg(not(target_arch = "wasm32"))]
use crate::eth::{deploy_contract_via_solidity, deploy_da_verifier_via_solidity};
#[cfg(not(target_arch = "wasm32"))]
#[allow(unused_imports)]
use crate::eth::{fix_da_sol, get_contract_artifacts, verify_proof_via_solidity};
use crate::graph::input::GraphData;
use crate::graph::{GraphCircuit, GraphSettings, GraphWitness, Model};
#[cfg(not(target_arch = "wasm32"))]
use crate::graph::{TestDataSource, TestSources};
use crate::pfsys::evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript};
#[cfg(not(target_arch = "wasm32"))]
use crate::pfsys::{
create_keys, load_pk, load_vk, save_params, save_pk, Snark, StrategyType, TranscriptType,
};
@@ -20,11 +16,9 @@ use crate::pfsys::{
};
use crate::pfsys::{save_vk, srs::*};
use crate::tensor::TensorError;
#[cfg(not(target_arch = "wasm32"))]
use crate::EZKL_BUF_CAPACITY;
use crate::{commands::*, EZKLError};
use crate::{Commitments, RunArgs};
#[cfg(not(target_arch = "wasm32"))]
use colored::Colorize;
#[cfg(unix)]
use gag::Gag;
@@ -44,17 +38,13 @@ use halo2_proofs::poly::kzg::{
};
use halo2_proofs::poly::VerificationStrategy;
use halo2_proofs::transcript::{EncodedChallenge, TranscriptReadBuffer};
#[cfg(not(target_arch = "wasm32"))]
use halo2_solidity_verifier;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
use halo2curves::ff::{FromUniformBytes, WithSmallOrderMulGroup};
use halo2curves::serde::SerdeObject;
#[cfg(not(target_arch = "wasm32"))]
use indicatif::{ProgressBar, ProgressStyle};
use instant::Instant;
#[cfg(not(target_arch = "wasm32"))]
use itertools::Itertools;
#[cfg(not(target_arch = "wasm32"))]
use log::debug;
use log::{info, trace, warn};
use serde::de::DeserializeOwned;
@@ -64,9 +54,7 @@ use snark_verifier::system::halo2::compile;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use snark_verifier::system::halo2::Config;
use std::fs::File;
#[cfg(not(target_arch = "wasm32"))]
use std::io::BufWriter;
#[cfg(not(target_arch = "wasm32"))]
use std::io::{Cursor, Write};
use std::path::Path;
use std::path::PathBuf;
@@ -127,7 +115,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
logrows as u32,
commitment.unwrap_or(Commitments::from_str(DEFAULT_COMMITMENT).unwrap()),
),
#[cfg(not(target_arch = "wasm32"))]
Commands::GetSrs {
srs_path,
settings_path,
@@ -144,7 +131,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
settings_path.unwrap_or(DEFAULT_SETTINGS.into()),
args,
),
#[cfg(not(target_arch = "wasm32"))]
Commands::CalibrateSettings {
model,
settings_path,
@@ -187,14 +173,13 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
model.unwrap_or(DEFAULT_MODEL.into()),
witness.unwrap_or(DEFAULT_WITNESS.into()),
),
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEvmVerifier {
vk_path,
srs_path,
settings_path,
sol_code_path,
abi_path,
render_vk_seperately,
reusable,
} => {
create_evm_verifier(
vk_path.unwrap_or(DEFAULT_VK.into()),
@@ -202,11 +187,10 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
settings_path.unwrap_or(DEFAULT_SETTINGS.into()),
sol_code_path.unwrap_or(DEFAULT_SOL_CODE.into()),
abi_path.unwrap_or(DEFAULT_VERIFIER_ABI.into()),
render_vk_seperately.unwrap_or(DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap()),
reusable.unwrap_or(DEFAULT_RENDER_REUSABLE.parse().unwrap()),
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::EncodeEvmCalldata {
proof_path,
calldata_path,
@@ -218,14 +202,14 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
)
.map(|e| serde_json::to_string(&e).unwrap()),
Commands::CreateEvmVK {
Commands::CreateEvmVKArtifact {
vk_path,
srs_path,
settings_path,
sol_code_path,
abi_path,
} => {
create_evm_vk(
create_evm_vka(
vk_path.unwrap_or(DEFAULT_VK.into()),
srs_path,
settings_path.unwrap_or(DEFAULT_SETTINGS.into()),
@@ -234,7 +218,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEvmDataAttestation {
settings_path,
sol_code_path,
@@ -251,7 +234,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEvmVerifierAggr {
vk_path,
srs_path,
@@ -259,7 +241,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
abi_path,
aggregation_settings,
logrows,
render_vk_seperately,
reusable,
} => {
create_evm_aggregate_verifier(
vk_path.unwrap_or(DEFAULT_VK.into()),
@@ -268,7 +250,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
abi_path.unwrap_or(DEFAULT_VERIFIER_AGGREGATED_ABI.into()),
aggregation_settings,
logrows.unwrap_or(DEFAULT_AGGREGATED_LOGROWS.parse().unwrap()),
render_vk_seperately.unwrap_or(DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap()),
reusable.unwrap_or(DEFAULT_RENDER_REUSABLE.parse().unwrap()),
)
.await
}
@@ -297,7 +279,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
disable_selector_compression
.unwrap_or(DEFAULT_DISABLE_SELECTOR_COMPRESSION.parse().unwrap()),
),
#[cfg(not(target_arch = "wasm32"))]
Commands::SetupTestEvmData {
data,
compiled_circuit,
@@ -316,13 +297,11 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::TestUpdateAccountCalls {
addr,
data,
rpc_url,
} => test_update_account_calls(addr, data.unwrap_or(DEFAULT_DATA.into()), rpc_url).await,
#[cfg(not(target_arch = "wasm32"))]
Commands::SwapProofCommitments {
proof_path,
witness_path,
@@ -332,7 +311,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
)
.map(|e| serde_json::to_string(&e).unwrap()),
#[cfg(not(target_arch = "wasm32"))]
Commands::Prove {
witness,
compiled_circuit,
@@ -432,13 +410,13 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
commitment.into(),
)
.map(|e| serde_json::to_string(&e).unwrap()),
#[cfg(not(target_arch = "wasm32"))]
Commands::DeployEvmVerifier {
Commands::DeployEvm {
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
contract,
} => {
deploy_evm(
sol_code_path.unwrap_or(DEFAULT_SOL_CODE.into()),
@@ -446,29 +424,10 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
addr_path.unwrap_or(DEFAULT_CONTRACT_ADDRESS.into()),
optimizer_runs,
private_key,
"Halo2Verifier",
contract,
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::DeployEvmVK {
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
} => {
deploy_evm(
sol_code_path.unwrap_or(DEFAULT_VK_SOL.into()),
rpc_url,
addr_path.unwrap_or(DEFAULT_CONTRACT_ADDRESS_VK.into()),
optimizer_runs,
private_key,
"Halo2VerifyingKey",
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::DeployEvmDataAttestation {
data,
settings_path,
@@ -489,7 +448,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::VerifyEvm {
proof_path,
addr_verifier,
@@ -609,7 +567,6 @@ pub(crate) fn gen_srs_cmd(
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
async fn fetch_srs(uri: &str) -> Result<Vec<u8>, EZKLError> {
let pb = {
let pb = init_spinner();
@@ -629,7 +586,6 @@ async fn fetch_srs(uri: &str) -> Result<Vec<u8>, EZKLError> {
Ok(std::mem::take(&mut buf))
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) fn get_file_hash(path: &PathBuf) -> Result<String, EZKLError> {
use std::io::Read;
let file = std::fs::File::open(path)?;
@@ -648,7 +604,6 @@ pub(crate) fn get_file_hash(path: &PathBuf) -> Result<String, EZKLError> {
Ok(hash)
}
#[cfg(not(target_arch = "wasm32"))]
fn check_srs_hash(
logrows: u32,
srs_path: Option<PathBuf>,
@@ -674,7 +629,6 @@ fn check_srs_hash(
Ok(hash)
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn get_srs_cmd(
srs_path: Option<PathBuf>,
settings_path: Option<PathBuf>,
@@ -717,20 +671,18 @@ pub(crate) async fn get_srs_cmd(
let srs_uri = format!("{}{}", PUBLIC_SRS_URL, k);
let mut reader = Cursor::new(fetch_srs(&srs_uri).await?);
// check the SRS
#[cfg(not(target_arch = "wasm32"))]
let pb = init_spinner();
#[cfg(not(target_arch = "wasm32"))]
pb.set_message("Validating SRS (this may take a while) ...");
let pb = init_spinner();
pb.set_message("Validating SRS (this may take a while) ...");
let params = ParamsKZG::<Bn256>::read(&mut reader)?;
#[cfg(not(target_arch = "wasm32"))]
pb.finish_with_message("SRS validated.");
pb.finish_with_message("SRS validated.");
info!("Saving SRS to disk...");
let mut file = std::fs::File::create(get_srs_path(k, srs_path.clone(), commitment))?;
let computed_srs_path = get_srs_path(k, srs_path.clone(), commitment);
let mut file = std::fs::File::create(&computed_srs_path)?;
let mut buffer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, &mut file);
params.write(&mut buffer)?;
info!("Saved SRS to disk.");
info!("Saved SRS to {}.", computed_srs_path.as_os_str().to_str().unwrap_or("disk"));
info!("SRS downloaded");
} else {
@@ -776,15 +728,17 @@ pub(crate) async fn gen_witness(
None
};
#[cfg(not(target_arch = "wasm32"))]
let mut input = circuit.load_graph_input(&data).await?;
#[cfg(target_arch = "wasm32")]
let mut input = circuit.load_graph_input(&data).await?;
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
let mut input = circuit.load_graph_input(&data)?;
// if any of the settings have kzg visibility then we need to load the srs
let commitment: Commitments = settings.run_args.commitment.into();
let region_settings =
RegionSettings::all_true(settings.run_args.decomp_base, settings.run_args.decomp_legs);
let start_time = Instant::now();
let witness = if settings.module_requires_polycommit() {
if get_srs_path(settings.run_args.logrows, srs_path.clone(), commitment).exists() {
@@ -799,8 +753,7 @@ pub(crate) async fn gen_witness(
&mut input,
vk.as_ref(),
Some(&srs),
true,
true,
region_settings,
)?
}
Commitments::IPA => {
@@ -814,8 +767,7 @@ pub(crate) async fn gen_witness(
&mut input,
vk.as_ref(),
Some(&srs),
true,
true,
region_settings,
)?
}
}
@@ -825,12 +777,16 @@ pub(crate) async fn gen_witness(
&mut input,
vk.as_ref(),
None,
true,
true,
region_settings,
)?
}
} else {
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, true, true)?
circuit.forward::<KZGCommitmentScheme<Bn256>>(
&mut input,
vk.as_ref(),
None,
region_settings,
)?
};
// print each variable tuple (symbol, value) as symbol=value
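The hunks above replace the old `witness_gen: bool, check_lookup: bool` argument pair with a single RegionSettings value. A minimal sketch of that refactor, where the field names are assumptions mirroring the `all_true`/`all_false` constructors seen in the diff:

#[derive(Clone, Copy, Debug)]
struct RegionSettings {
    witness_gen: bool,
    check_lookup_range: bool,
    decomp_base: usize,
    decomp_legs: usize,
}

impl RegionSettings {
    // enable witness generation and lookup range checks (witness-gen path)
    fn all_true(decomp_base: usize, decomp_legs: usize) -> Self {
        Self { witness_gen: true, check_lookup_range: true, decomp_base, decomp_legs }
    }
    // disable both (dummy-layout / settings-generation path)
    fn all_false(decomp_base: usize, decomp_legs: usize) -> Self {
        Self { witness_gen: false, check_lookup_range: false, decomp_base, decomp_legs }
    }
}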
@@ -869,7 +825,6 @@ pub(crate) fn gen_circuit_settings(
}
// not for wasm targets
#[cfg(not(target_arch = "wasm32"))]
pub(crate) fn init_spinner() -> ProgressBar {
let pb = indicatif::ProgressBar::new_spinner();
pb.set_draw_target(indicatif::ProgressDrawTarget::stdout());
@@ -891,7 +846,6 @@ pub(crate) fn init_spinner() -> ProgressBar {
}
// not for wasm targets
#[cfg(not(target_arch = "wasm32"))]
pub(crate) fn init_bar(len: u64) -> ProgressBar {
let pb = ProgressBar::new(len);
pb.set_draw_target(indicatif::ProgressDrawTarget::stdout());
@@ -905,7 +859,6 @@ pub(crate) fn init_bar(len: u64) -> ProgressBar {
pb
}
#[cfg(not(target_arch = "wasm32"))]
use colored_json::ToColoredJson;
#[derive(Debug, Clone, Tabled)]
@@ -1005,7 +958,6 @@ impl AccuracyResults {
}
/// Calibrate the circuit parameters to a given dataset
#[cfg(not(target_arch = "wasm32"))]
#[allow(trivial_casts)]
#[allow(clippy::too_many_arguments)]
pub(crate) async fn calibrate(
@@ -1023,6 +975,8 @@ pub(crate) async fn calibrate(
use std::collections::HashMap;
use tabled::Table;
use crate::fieldutils::IntegerRep;
let data = GraphData::from_path(data)?;
// load the pre-generated settings
let settings = GraphSettings::load(&settings_path)?;
@@ -1131,17 +1085,17 @@ pub(crate) async fn calibrate(
param_scale,
scale_rebase_multiplier,
div_rebasing,
lookup_range: (i64::MIN, i64::MAX),
lookup_range: (IntegerRep::MIN, IntegerRep::MAX),
..settings.run_args.clone()
};
// if on unix, get a gag
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
let _r = match Gag::stdout() {
Ok(g) => Some(g),
_ => None,
};
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
let _g = match Gag::stderr() {
Ok(g) => Some(g),
_ => None,
@@ -1171,8 +1125,10 @@ pub(crate) async fn calibrate(
&mut data.clone(),
None,
None,
true,
false,
RegionSettings::all_true(
settings.run_args.decomp_base,
settings.run_args.decomp_legs,
),
)
.map_err(|e| format!("failed to forward: {}", e))?;
@@ -1198,9 +1154,9 @@ pub(crate) async fn calibrate(
}
// drop the gag
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
drop(_r);
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
drop(_g);
let result = forward_pass_res.get(&key).ok_or("key not found")?;
@@ -1248,6 +1204,7 @@ pub(crate) async fn calibrate(
num_rows: new_settings.num_rows,
total_assignments: new_settings.total_assignments,
total_const_size: new_settings.total_const_size,
total_dynamic_col_size: new_settings.total_dynamic_col_size,
..settings.clone()
};
@@ -1365,9 +1322,13 @@ pub(crate) async fn calibrate(
let lookup_log_rows = best_params.lookup_log_rows_with_blinding();
let module_log_row = best_params.module_constraint_logrows_with_blinding();
let instance_logrows = best_params.log2_total_instances_with_blinding();
let dynamic_lookup_logrows = best_params.dynamic_lookup_and_shuffle_logrows_with_blinding();
let dynamic_lookup_logrows =
best_params.min_dynamic_lookup_and_shuffle_logrows_with_blinding();
let range_check_logrows = best_params.range_check_log_rows_with_blinding();
let mut reduction = std::cmp::max(lookup_log_rows, module_log_row);
reduction = std::cmp::max(reduction, range_check_logrows);
reduction = std::cmp::max(reduction, instance_logrows);
reduction = std::cmp::max(reduction, dynamic_lookup_logrows);
reduction = std::cmp::max(reduction, crate::graph::MIN_LOGROWS);
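The reduction above is just a running maximum over the per-component logrows lower bounds (lookup, module, range-check, instance, dynamic-lookup), clamped below by MIN_LOGROWS. A standalone sketch, where the MIN_LOGROWS value is assumed for illustration only:

const MIN_LOGROWS: u32 = 6; // assumed value, not the crate's actual constant

fn min_logrows(candidates: &[u32]) -> u32 {
    // the circuit must be large enough for every component, so take the max
    candidates.iter().copied().fold(MIN_LOGROWS, std::cmp::max)
}

fn main() {
    // lookup, module, range-check, instance, dynamic-lookup lower bounds
    assert_eq!(min_logrows(&[10, 8, 12, 9, 11]), 12);
}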
@@ -1413,14 +1374,13 @@ pub(crate) fn mock(
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn create_evm_verifier(
vk_path: PathBuf,
srs_path: Option<PathBuf>,
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
render_vk_seperately: bool,
reusable: bool,
) -> Result<String, EZKLError> {
let settings = GraphSettings::load(&settings_path)?;
let commitment: Commitments = settings.run_args.commitment.into();
@@ -1442,24 +1402,23 @@ pub(crate) async fn create_evm_verifier(
halo2_solidity_verifier::BatchOpenScheme::Bdfg21,
num_instance,
);
let verifier_solidity = if render_vk_seperately {
generator.render_separately()?.0 // ignore the rendered vk for now and generate it in create_evm_vk
let (verifier_solidity, name) = if reusable {
(generator.render_separately()?.0, "Halo2VerifierReusable") // ignore the rendered vk artifact for now and generate it in create_evm_vka
} else {
generator.render()?
(generator.render()?, "Halo2Verifier")
};
File::create(sol_code_path.clone())?.write_all(verifier_solidity.as_bytes())?;
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, "Halo2Verifier", 0).await?;
let (abi, _, _) = get_contract_artifacts(sol_code_path, name, 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn create_evm_vk(
pub(crate) async fn create_evm_vka(
vk_path: PathBuf,
srs_path: Option<PathBuf>,
settings_path: PathBuf,
@@ -1492,20 +1451,19 @@ pub(crate) async fn create_evm_vk(
File::create(sol_code_path.clone())?.write_all(vk_solidity.as_bytes())?;
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, "Halo2VerifyingKey", 0).await?;
let (abi, _, _) = get_contract_artifacts(sol_code_path, "Halo2VerifyingArtifact", 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn create_evm_data_attestation(
settings_path: PathBuf,
_sol_code_path: PathBuf,
_abi_path: PathBuf,
_input: PathBuf,
_witness: Option<PathBuf>,
sol_code_path: PathBuf,
abi_path: PathBuf,
input: PathBuf,
witness: Option<PathBuf>,
) -> Result<String, EZKLError> {
#[allow(unused_imports)]
use crate::graph::{DataSource, VarVisibility};
@@ -1517,7 +1475,7 @@ pub(crate) async fn create_evm_data_attestation(
trace!("params computed");
// if input is not provided, we just instantiate dummy input data
let data = GraphData::from_path(_input).unwrap_or(GraphData::new(DataSource::File(vec![])));
let data = GraphData::from_path(input).unwrap_or(GraphData::new(DataSource::File(vec![])));
let output_data = if let Some(DataSource::OnChain(source)) = data.output_data {
if visibility.output.is_private() {
@@ -1552,7 +1510,7 @@ pub(crate) async fn create_evm_data_attestation(
|| settings.run_args.output_visibility == Visibility::KZGCommit
|| settings.run_args.param_visibility == Visibility::KZGCommit
{
let witness = GraphWitness::from_path(_witness.unwrap_or(DEFAULT_WITNESS.into()))?;
let witness = GraphWitness::from_path(witness.unwrap_or(DEFAULT_WITNESS.into()))?;
let commitments = witness.get_polycommitments();
let proof_first_bytes = get_proof_commitments::<
KZGCommitmentScheme<Bn256>,
@@ -1566,17 +1524,16 @@ pub(crate) async fn create_evm_data_attestation(
};
let output = fix_da_sol(input_data, output_data, commitment_bytes)?;
let mut f = File::create(_sol_code_path.clone())?;
let mut f = File::create(sol_code_path.clone())?;
let _ = f.write(output.as_bytes());
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(_sol_code_path, "DataAttestation", 0).await?;
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestation", 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(_abi_path)?, &abi)?;
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn deploy_da_evm(
data: PathBuf,
settings_path: PathBuf,
@@ -1603,15 +1560,19 @@ pub(crate) async fn deploy_da_evm(
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn deploy_evm(
sol_code_path: PathBuf,
rpc_url: Option<String>,
addr_path: PathBuf,
runs: usize,
private_key: Option<String>,
contract_name: &str,
contract: ContractType,
) -> Result<String, EZKLError> {
let contract_name = match contract {
ContractType::Verifier { reusable: false } => "Halo2Verifier",
ContractType::Verifier { reusable: true } => "Halo2VerifierReusable",
ContractType::VerifyingKeyArtifact => "Halo2VerifyingArtifact",
};
let contract_address = deploy_contract_via_solidity(
sol_code_path,
rpc_url.as_deref(),
@@ -1654,7 +1615,6 @@ pub(crate) fn encode_evm_calldata(
Ok(encoded)
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn verify_evm(
proof_path: PathBuf,
addr_verifier: H160Flag,
@@ -1694,7 +1654,6 @@ pub(crate) async fn verify_evm(
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn create_evm_aggregate_verifier(
vk_path: PathBuf,
srs_path: Option<PathBuf>,
@@ -1702,7 +1661,7 @@ pub(crate) async fn create_evm_aggregate_verifier(
abi_path: PathBuf,
circuit_settings: Vec<PathBuf>,
logrows: u32,
render_vk_seperately: bool,
reusable: bool,
) -> Result<String, EZKLError> {
let srs_path = get_srs_path(logrows, srs_path, Commitments::KZG);
let params: ParamsKZG<Bn256> = load_srs_verifier::<KZGCommitmentScheme<Bn256>>(srs_path)?;
@@ -1740,8 +1699,8 @@ pub(crate) async fn create_evm_aggregate_verifier(
generator = generator.set_acc_encoding(Some(acc_encoding));
let verifier_solidity = if render_vk_seperately {
generator.render_separately()?.0 // ignore the rendered vk for now and generate it in create_evm_vk
let verifier_solidity = if reusable {
generator.render_separately()?.0 // ignore the rendered vk artifact for now and generate it in create_evm_vka
} else {
generator.render()?
};
@@ -1818,7 +1777,6 @@ pub(crate) fn setup(
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn setup_test_evm_witness(
data_path: PathBuf,
compiled_circuit_path: PathBuf,
@@ -1854,9 +1812,7 @@ pub(crate) async fn setup_test_evm_witness(
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
use crate::pfsys::ProofType;
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn test_update_account_calls(
addr: H160Flag,
data: PathBuf,
@@ -1869,7 +1825,6 @@ pub(crate) async fn test_update_account_calls(
Ok(String::new())
}
#[cfg(not(target_arch = "wasm32"))]
#[allow(clippy::too_many_arguments)]
pub(crate) fn prove(
data_path: PathBuf,
@@ -2067,8 +2022,7 @@ pub(crate) fn mock_aggregate(
}
}
// proof aggregation
#[cfg(not(target_arch = "wasm32"))]
let pb = {
let pb = {
let pb = init_spinner();
pb.set_message("Aggregating (may take a while)...");
pb
@@ -2079,8 +2033,7 @@ pub(crate) fn mock_aggregate(
let prover = halo2_proofs::dev::MockProver::run(logrows, &circuit, vec![circuit.instances()])
.map_err(|e| ExecutionError::MockProverError(e.to_string()))?;
prover.verify().map_err(ExecutionError::VerifyError)?;
#[cfg(not(target_arch = "wasm32"))]
pb.finish_with_message("Done.");
pb.finish_with_message("Done.");
Ok(String::new())
}
@@ -2174,8 +2127,7 @@ pub(crate) fn aggregate(
}
// proof aggregation
#[cfg(not(target_arch = "wasm32"))]
let pb = {
let pb = {
let pb = init_spinner();
pb.set_message("Aggregating (may take a while)...");
pb
@@ -2324,8 +2276,7 @@ pub(crate) fn aggregate(
);
snark.save(&proof_path)?;
#[cfg(not(target_arch = "wasm32"))]
pb.finish_with_message("Done.");
pb.finish_with_message("Done.");
Ok(snark)
}
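The DeployEvmVerifier/DeployEvmVK pair seen earlier collapses into a single DeployEvm command whose ContractType argument selects which artifact to deploy. A self-contained sketch of that mapping; the enum shape is inferred from the match arms in the diff and is otherwise an assumption:

enum ContractType {
    Verifier { reusable: bool },
    VerifyingKeyArtifact,
}

// maps the CLI-selected contract type to the Solidity contract name to deploy
fn contract_name(contract: &ContractType) -> &'static str {
    match contract {
        ContractType::Verifier { reusable: false } => "Halo2Verifier",
        ContractType::Verifier { reusable: true } => "Halo2VerifierReusable",
        ContractType::VerifyingKeyArtifact => "Halo2VerifyingArtifact",
    }
}

fn main() {
    assert_eq!(
        contract_name(&ContractType::Verifier { reusable: true }),
        "Halo2VerifierReusable"
    );
}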

View File

@@ -2,42 +2,21 @@ use halo2_proofs::arithmetic::Field;
/// Utilities for converting from Halo2 PrimeField types to integers (and vice-versa).
use halo2curves::ff::PrimeField;
/// Converts an i32 to a PrimeField element.
pub fn i32_to_felt<F: PrimeField>(x: i32) -> F {
if x >= 0 {
F::from(x as u64)
} else {
-F::from(x.unsigned_abs() as u64)
}
}
/// Integer representation of a PrimeField element.
pub type IntegerRep = i128;
/// Converts an IntegerRep to a PrimeField element.
pub fn i64_to_felt<F: PrimeField>(x: i64) -> F {
pub fn integer_rep_to_felt<F: PrimeField>(x: IntegerRep) -> F {
if x >= 0 {
F::from_u128(x as u128)
} else {
-F::from_u128((-x) as u128)
}
}
/// Converts a PrimeField element to an i32.
pub fn felt_to_i32<F: PrimeField + PartialOrd + Field>(x: F) -> i32 {
if x > F::from(i32::MAX as u64) {
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_32 = u32::from_le_bytes(negtmp[..4].try_into().unwrap());
-(lower_32 as i32)
} else {
let rep = (x).to_repr();
let tmp: &[u8] = rep.as_ref();
let lower_32 = u32::from_le_bytes(tmp[..4].try_into().unwrap());
lower_32 as i32
-F::from_u128(x.saturating_neg() as u128)
}
}
/// Converts a PrimeField element to an f64.
pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
if x > F::from_u128(i64::MAX as u128) {
if x > F::from_u128(IntegerRep::MAX as u128) {
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(negtmp[..16].try_into().unwrap());
@@ -51,17 +30,17 @@ pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
}
/// Converts a PrimeField element to an IntegerRep.
pub fn felt_to_i64<F: PrimeField + PartialOrd + Field>(x: F) -> i64 {
if x > F::from_u128(i64::MAX as u128) {
pub fn felt_to_integer_rep<F: PrimeField + PartialOrd + Field>(x: F) -> IntegerRep {
if x > F::from_u128(IntegerRep::MAX as u128) {
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(negtmp[..16].try_into().unwrap());
-(lower_128 as i64)
-(lower_128 as IntegerRep)
} else {
let rep = (x).to_repr();
let tmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(tmp[..16].try_into().unwrap());
lower_128 as i64
lower_128 as IntegerRep
}
}
@@ -73,33 +52,24 @@ mod test {
#[test]
fn test_conv() {
let res: F = i32_to_felt(-15i32);
let res: F = integer_rep_to_felt(-15);
assert_eq!(res, -F::from(15));
let res: F = i32_to_felt(2_i32.pow(17));
let res: F = integer_rep_to_felt(2_i128.pow(17));
assert_eq!(res, F::from(131072));
let res: F = i64_to_felt(-15i64);
let res: F = integer_rep_to_felt(-15);
assert_eq!(res, -F::from(15));
let res: F = i64_to_felt(2_i64.pow(17));
let res: F = integer_rep_to_felt(2_i128.pow(17));
assert_eq!(res, F::from(131072));
}
#[test]
fn felttoi32() {
for x in -(2i32.pow(16))..(2i32.pow(16)) {
let fieldx: F = i32_to_felt::<F>(x);
let xf: i32 = felt_to_i32::<F>(fieldx);
assert_eq!(x, xf);
}
}
#[test]
fn felttoi64() {
for x in -(2i64.pow(20))..(2i64.pow(20)) {
let fieldx: F = i64_to_felt::<F>(x);
let xf: i64 = felt_to_i64::<F>(fieldx);
fn felttointegerrep() {
for x in -(2_i128.pow(16))..(2_i128.pow(16)) {
let fieldx: F = integer_rep_to_felt::<F>(x);
let xf: i128 = felt_to_integer_rep::<F>(fieldx);
assert_eq!(x, xf);
}
}
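A round-trip sketch for the IntegerRep conversions above, written as it would appear inside this test module (F = halo2curves::bn256::Fr, as in the existing tests):

#[test]
fn integer_rep_round_trip() {
    // a negative value x is stored as p - |x| in the field, and recovered by
    // checking whether the element exceeds IntegerRep::MAX
    let x: IntegerRep = -42;
    let f: F = integer_rep_to_felt::<F>(x);
    assert_eq!(f, -F::from(42));
    assert_eq!(felt_to_integer_rep::<F>(f), x);
}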

View File

@@ -48,7 +48,10 @@ pub enum GraphError {
#[error("failed to ser/deser model: {0}")]
ModelSerialize(#[from] bincode::Error),
/// Tract error
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
#[error("[tract] {0}")]
TractError(#[from] tract_onnx::prelude::TractError),
/// Packing exponent is too large
@@ -85,11 +88,17 @@ pub enum GraphError {
#[error("unknown dimension batch_size in model inputs, set batch_size in variables")]
MissingBatchSize,
/// Tokio postgres error
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
#[error("[tokio postgres] {0}")]
TokioPostgresError(#[from] tokio_postgres::Error),
/// Eth error
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
#[error("[eth] {0}")]
EthError(#[from] crate::eth::EthError),
/// Json error

View File

@@ -1,10 +1,10 @@
use super::errors::GraphError;
use super::quantize_float;
use crate::circuit::InputType;
use crate::fieldutils::i64_to_felt;
#[cfg(not(target_arch = "wasm32"))]
use crate::fieldutils::integer_rep_to_felt;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::graph::postgres::Client;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::tensor::Tensor;
use crate::EZKL_BUF_CAPACITY;
use halo2curves::bn256::Fr as Fp;
@@ -20,12 +20,12 @@ use std::io::BufReader;
use std::io::BufWriter;
use std::io::Read;
use std::panic::UnwindSafe;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::tract_core::{
tract_data::{prelude::Tensor as TractTensor, TVec},
value::TValue,
};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::tract_hir::tract_num_traits::ToPrimitive;
type Decimals = u8;
@@ -128,7 +128,7 @@ impl FileSourceInner {
/// Convert to a field element
pub fn to_field(&self, scale: crate::Scale) -> Fp {
match self {
FileSourceInner::Float(f) => i64_to_felt(quantize_float(f, 0.0, scale).unwrap()),
FileSourceInner::Float(f) => integer_rep_to_felt(quantize_float(f, 0.0, scale).unwrap()),
FileSourceInner::Bool(f) => {
if *f {
Fp::one()
@@ -150,7 +150,7 @@ impl FileSourceInner {
0.0
}
}
FileSourceInner::Field(f) => crate::fieldutils::felt_to_i64(*f) as f64,
FileSourceInner::Field(f) => crate::fieldutils::felt_to_integer_rep(*f) as f64,
}
}
}
@@ -171,7 +171,7 @@ impl OnChainSource {
}
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Inner elements of inputs/outputs coming from postgres DB
#[derive(Clone, Debug, Deserialize, Serialize, Default, PartialOrd, PartialEq)]
pub struct PostgresSource {
@@ -189,7 +189,7 @@ pub struct PostgresSource {
pub port: String,
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl PostgresSource {
/// Create a new PostgresSource
pub fn new(
@@ -268,7 +268,7 @@ impl PostgresSource {
}
impl OnChainSource {
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Create dummy local on-chain data to test the OnChain data source
pub async fn test_from_file_data(
data: &FileSource,
@@ -359,7 +359,7 @@ pub enum DataSource {
/// On-chain data source. The first element is the calls to the account, and the second is the RPC url.
OnChain(OnChainSource),
/// Postgres DB
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
DB(PostgresSource),
}
@@ -419,7 +419,7 @@ impl<'de> Deserialize<'de> for DataSource {
if let Ok(t) = second_try {
return Ok(DataSource::OnChain(t));
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
{
let third_try: Result<PostgresSource, _> = serde_json::from_str(this_json.get());
if let Ok(t) = third_try {
@@ -445,7 +445,7 @@ impl UnwindSafe for GraphData {}
impl GraphData {
// not wasm
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Convert the input data to tract data
pub fn to_tract_data(
&self,
@@ -530,7 +530,7 @@ impl GraphData {
"on-chain data cannot be split into batches".to_string(),
))
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
GraphData {
input_data: DataSource::DB(data),
output_data: _,

View File

@@ -7,7 +7,7 @@ pub mod modules;
/// Inner elements of a computational graph that represent a single operation / constraints.
pub mod node;
/// postgres helper functions
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub mod postgres;
/// Helper functions
pub mod utilities;
@@ -17,27 +17,28 @@ pub mod vars;
/// errors for the graph
pub mod errors;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use colored_json::ToColoredJson;
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
use gag::Gag;
use halo2_proofs::plonk::VerifyingKey;
use halo2_proofs::poly::commitment::CommitmentScheme;
pub use input::DataSource;
use itertools::Itertools;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;
use self::errors::GraphError;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use self::input::OnChainSource;
use self::input::{FileSource, GraphData};
use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSizes};
use crate::circuit::lookup::LookupOp;
use crate::circuit::modules::ModulePlanner;
use crate::circuit::region::ConstantsMap;
use crate::circuit::region::{ConstantsMap, RegionSettings};
use crate::circuit::table::{num_cols_required, Range, Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::{CheckMode, InputType};
use crate::fieldutils::felt_to_f64;
use crate::fieldutils::{felt_to_f64, IntegerRep};
use crate::pfsys::PrettyElements;
use crate::tensor::{Tensor, ValTensor};
use crate::{RunArgs, EZKL_BUF_CAPACITY};
@@ -48,7 +49,7 @@ use halo2_proofs::{
};
use halo2curves::bn256::{self, Fr as Fp, G1Affine};
use halo2curves::ff::{Field, PrimeField};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use lazy_static::lazy_static;
use log::{debug, error, trace, warn};
use maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
@@ -69,15 +70,16 @@ pub use vars::*;
use crate::pfsys::field_to_string;
/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: i64 = 2;
pub const RANGE_MULTIPLIER: IntegerRep = 2;
/// The maximum number of columns in a lookup table.
pub const MAX_NUM_LOOKUP_COLS: usize = 12;
/// Max representation of a lookup table input
pub const MAX_LOOKUP_ABS: i64 = (MAX_NUM_LOOKUP_COLS as i64) * 2_i64.pow(MAX_PUBLIC_SRS);
pub const MAX_LOOKUP_ABS: IntegerRep =
(MAX_NUM_LOOKUP_COLS as IntegerRep) * 2_i128.pow(MAX_PUBLIC_SRS);
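For a sense of scale, the bound above is the column count times a power-of-two table size. Worked arithmetic as a standalone sketch; the MAX_PUBLIC_SRS value of 26 is assumed here purely for illustration:

fn main() {
    const MAX_NUM_LOOKUP_COLS: usize = 12;
    const MAX_PUBLIC_SRS: u32 = 26; // assumed value for illustration
    let max_lookup_abs: i128 = (MAX_NUM_LOOKUP_COLS as i128) * 2_i128.pow(MAX_PUBLIC_SRS);
    // 12 * 2^26 = 805_306_368; i128 leaves ample headroom for the
    // RANGE_MULTIPLIER safety factor applied during calibration
    assert_eq!(max_lookup_abs, 805_306_368);
}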
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
lazy_static! {
/// Max circuit area
pub static ref EZKL_MAX_CIRCUIT_AREA: Option<usize> =
@@ -88,7 +90,7 @@ lazy_static! {
};
}
#[cfg(target_arch = "wasm32")]
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
const EZKL_MAX_CIRCUIT_AREA: Option<usize> = None;
///
@@ -126,11 +128,11 @@ pub struct GraphWitness {
/// Any hashes of outputs generated during the forward pass
pub processed_outputs: Option<ModuleForwardResult>,
/// max lookup input
pub max_lookup_inputs: i64,
pub max_lookup_inputs: IntegerRep,
/// min lookup input
pub min_lookup_inputs: i64,
pub min_lookup_inputs: IntegerRep,
/// max range check size
pub max_range_size: i64,
pub max_range_size: IntegerRep,
}
impl GraphWitness {
@@ -383,7 +385,7 @@ fn insert_poseidon_hash_pydict(pydict: &PyDict, poseidon_hash: &Vec<Fp>) -> Resu
#[cfg(feature = "python-bindings")]
fn insert_polycommit_pydict(pydict: &PyDict, commits: &Vec<Vec<G1Affine>>) -> Result<(), PyErr> {
use crate::python::PyG1Affine;
use crate::bindings::python::PyG1Affine;
let poseidon_hash: Vec<Vec<PyG1Affine>> = commits
.iter()
.map(|c| c.iter().map(|x| PyG1Affine::from(*x)).collect())
@@ -406,6 +408,8 @@ pub struct GraphSettings {
pub total_const_size: usize,
/// total dynamic column size
pub total_dynamic_col_size: usize,
/// max dynamic column input length
pub max_dynamic_input_len: usize,
/// number of dynamic lookups
pub num_dynamic_lookups: usize,
/// number of shuffles
@@ -450,6 +454,18 @@ impl GraphSettings {
.ceil() as u32
}
/// Calculate the number of rows required for the range checks
pub fn range_check_log_rows_with_blinding(&self) -> u32 {
let max_range = self
.required_range_checks
.iter()
.map(|x| x.1 - x.0)
.max()
.unwrap_or(0);
(max_range as f32).log2().ceil() as u32
}
fn model_constraint_logrows_with_blinding(&self) -> u32 {
(self.num_rows as f64 + RESERVED_BLINDING_ROWS as f64)
.log2()
@@ -471,6 +487,13 @@ impl GraphSettings {
.ceil() as u32
}
/// calculate the number of rows required for the dynamic lookup and shuffle
pub fn min_dynamic_lookup_and_shuffle_logrows_with_blinding(&self) -> u32 {
(self.max_dynamic_input_len as f64 + RESERVED_BLINDING_ROWS as f64)
.log2()
.ceil() as u32
}
fn dynamic_lookup_and_shuffle_col_size(&self) -> usize {
self.total_dynamic_col_size + self.total_shuffle_col_size
}
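The new minimum-logrows helper above is a ceil-log2 over the largest dynamic input length plus blinding rows. A standalone sketch; the RESERVED_BLINDING_ROWS value is assumed for illustration:

const RESERVED_BLINDING_ROWS: usize = 7; // assumed value for illustration

fn min_dynamic_lookup_logrows(max_dynamic_input_len: usize) -> u32 {
    (max_dynamic_input_len as f64 + RESERVED_BLINDING_ROWS as f64)
        .log2()
        .ceil() as u32
}

fn main() {
    // 1000 + 7 = 1007 rows -> ceil(log2(1007)) = 10, i.e. 2^10 = 1024 rows suffice
    assert_eq!(min_dynamic_lookup_logrows(1000), 10);
}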
@@ -684,6 +707,7 @@ impl std::fmt::Display for TestDataSource {
}
}
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl ToFlags for TestDataSource {}
impl From<String> for TestDataSource {
@@ -804,18 +828,26 @@ impl GraphCircuit {
// the ordering here is important, we want the inputs to come before the outputs
// as they are configured in that order as Column<Instances>
let mut public_inputs: Vec<Fp> = vec![];
if self.settings().run_args.input_visibility.is_public() {
public_inputs.extend(self.graph_witness.inputs.clone().into_iter().flatten())
} else if let Some(processed_inputs) = &data.processed_inputs {
// we first process the inputs
if let Some(processed_inputs) = &data.processed_inputs {
public_inputs.extend(processed_inputs.get_instances().into_iter().flatten());
}
// we then process the params
if let Some(processed_params) = &data.processed_params {
public_inputs.extend(processed_params.get_instances().into_iter().flatten());
}
// if the inputs are public, we add them to the public inputs AFTER the processed params as they are configured in that order as Column<Instances>
if self.settings().run_args.input_visibility.is_public() {
public_inputs.extend(self.graph_witness.inputs.clone().into_iter().flatten())
}
// if the outputs are public, we add them to the public inputs
if self.settings().run_args.output_visibility.is_public() {
public_inputs.extend(self.graph_witness.outputs.clone().into_iter().flatten());
// if the outputs are processed, we add the processed outputs to the public inputs
} else if let Some(processed_outputs) = &data.processed_outputs {
public_inputs.extend(processed_outputs.get_instances().into_iter().flatten());
}
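A minimal sketch of the instance ordering established above: processed inputs, then processed params, then raw public inputs, then (processed) outputs. Types are simplified stand-ins, not the real ezkl structs:

fn assemble_instances(
    processed_inputs: Option<Vec<u64>>,
    processed_params: Option<Vec<u64>>,
    public_inputs: Option<Vec<u64>>,
    outputs: Vec<u64>,
) -> Vec<u64> {
    let mut instances = vec![];
    instances.extend(processed_inputs.unwrap_or_default());
    instances.extend(processed_params.unwrap_or_default());
    // raw public inputs come AFTER processed params, matching the order in
    // which the instance columns are configured
    instances.extend(public_inputs.unwrap_or_default());
    instances.extend(outputs);
    instances
}

fn main() {
    let instances = assemble_instances(Some(vec![1]), Some(vec![2, 3]), Some(vec![4]), vec![5]);
    assert_eq!(instances, vec![1, 2, 3, 4, 5]);
}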
@@ -864,7 +896,7 @@ impl GraphCircuit {
public_inputs.processed_outputs = elements.processed_outputs.clone();
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
debug!(
"rescaled and processed public inputs: {}",
serde_json::to_string(&public_inputs)?.to_colored_json_auto()?
@@ -874,7 +906,7 @@ impl GraphCircuit {
}
///
#[cfg(target_arch = "wasm32")]
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
pub fn load_graph_input(&mut self, data: &GraphData) -> Result<Vec<Tensor<Fp>>, GraphError> {
let shapes = self.model().graph.input_shapes()?;
let scales = self.model().graph.get_input_scales();
@@ -901,7 +933,7 @@ impl GraphCircuit {
}
///
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub async fn load_graph_input(
&mut self,
data: &GraphData,
@@ -915,7 +947,7 @@ impl GraphCircuit {
.await
}
#[cfg(target_arch = "wasm32")]
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
/// Process the data source for the model
fn process_data_source(
&mut self,
@@ -932,7 +964,7 @@ impl GraphCircuit {
}
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Process the data source for the model
async fn process_data_source(
&mut self,
@@ -962,7 +994,7 @@ impl GraphCircuit {
}
/// Prepare on chain test data
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub async fn load_on_chain_data(
&mut self,
source: OnChainSource,
@@ -1036,12 +1068,12 @@ impl GraphCircuit {
fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: f64) -> Range {
(
(lookup_safety_margin * min_max_lookup.0 as f64).floor() as i64,
(lookup_safety_margin * min_max_lookup.1 as f64).ceil() as i64,
(lookup_safety_margin * min_max_lookup.0 as f64).floor() as IntegerRep,
(lookup_safety_margin * min_max_lookup.1 as f64).ceil() as IntegerRep,
)
}
fn calc_num_cols(range_len: i64, max_logrows: u32) -> usize {
fn calc_num_cols(range_len: IntegerRep, max_logrows: u32) -> usize {
let max_col_size = Table::<Fp>::cal_col_size(max_logrows as usize, RESERVED_BLINDING_ROWS);
num_cols_required(range_len, max_col_size)
}
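A worked example of the safety-margin scaling above, as a standalone sketch (Range is the (IntegerRep, IntegerRep) pair used throughout this file):

type IntegerRep = i128;
type Range = (IntegerRep, IntegerRep);

fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: f64) -> Range {
    (
        (lookup_safety_margin * min_max_lookup.0 as f64).floor() as IntegerRep,
        (lookup_safety_margin * min_max_lookup.1 as f64).ceil() as IntegerRep,
    )
}

fn main() {
    // with a margin of 2.0, an observed lookup range of (-100, 250)
    // is widened to (-200, 500)
    assert_eq!(calc_safe_lookup_range((-100, 250), 2.0), (-200, 500));
}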
@@ -1049,7 +1081,7 @@ impl GraphCircuit {
fn table_size_logrows(
&self,
safe_lookup_range: Range,
max_range_size: i64,
max_range_size: IntegerRep,
) -> Result<u32, GraphError> {
// pick whichever of safe_lookup_range or max_range_size has the larger absolute size
let safe_range = std::cmp::max(
@@ -1068,7 +1100,7 @@ impl GraphCircuit {
pub fn calc_min_logrows(
&mut self,
min_max_lookup: Range,
max_range_size: i64,
max_range_size: IntegerRep,
max_logrows: Option<u32>,
lookup_safety_margin: f64,
) -> Result<(), GraphError> {
@@ -1086,7 +1118,7 @@ impl GraphCircuit {
(safe_lookup_range.1.saturating_sub(safe_lookup_range.0)).saturating_abs();
// check if has overflowed max lookup input
if lookup_size > (MAX_LOOKUP_ABS as f64 / lookup_safety_margin).floor() as i64 {
if lookup_size > (MAX_LOOKUP_ABS as f64 / lookup_safety_margin).floor() as IntegerRep {
return Err(GraphError::LookupRangeTooLarge(
lookup_size.unsigned_abs() as usize
));
@@ -1166,7 +1198,7 @@ impl GraphCircuit {
&self,
k: u32,
safe_lookup_range: Range,
max_range_size: i64,
max_range_size: IntegerRep,
) -> bool {
// if num cols is too large then the extended k is too large
if Self::calc_num_cols(safe_lookup_range.1 - safe_lookup_range.0, k) > MAX_NUM_LOOKUP_COLS
@@ -1181,12 +1213,12 @@ impl GraphCircuit {
settings.required_range_checks = vec![(0, max_range_size)];
let mut cs = ConstraintSystem::default();
// if on unix, get a gag
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
let _r = match Gag::stdout() {
Ok(g) => Some(g),
_ => None,
};
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
let _g = match Gag::stderr() {
Ok(g) => Some(g),
_ => None,
@@ -1195,9 +1227,9 @@ impl GraphCircuit {
Self::configure_with_params(&mut cs, settings);
// drop the gag
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
drop(_r);
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
drop(_g);
#[cfg(feature = "mv-lookup")]
@@ -1224,8 +1256,7 @@ impl GraphCircuit {
inputs: &mut [Tensor<Fp>],
vk: Option<&VerifyingKey<G1Affine>>,
srs: Option<&Scheme::ParamsProver>,
witness_gen: bool,
check_lookup: bool,
region_settings: RegionSettings,
) -> Result<GraphWitness, GraphError> {
let original_inputs = inputs.to_vec();
@@ -1274,7 +1305,7 @@ impl GraphCircuit {
let mut model_results =
self.model()
.forward(inputs, &self.settings().run_args, witness_gen, check_lookup)?;
.forward(inputs, &self.settings().run_args, region_settings)?;
if visibility.output.requires_processing() {
let module_outlets = visibility.output.overwrites_inputs();
@@ -1327,7 +1358,7 @@ impl GraphCircuit {
visibility,
);
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
log::trace!(
"witness: \n {}",
&witness.as_json()?.to_colored_json_auto()?
@@ -1337,7 +1368,7 @@ impl GraphCircuit {
}
/// Create a new circuit from a set of input data and [RunArgs].
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub fn from_run_args(
run_args: &RunArgs,
model_path: &std::path::Path,
@@ -1347,7 +1378,7 @@ impl GraphCircuit {
}
/// Create a new circuit from a set of input data and [GraphSettings].
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub fn from_settings(
params: &GraphSettings,
model_path: &std::path::Path,
@@ -1362,7 +1393,7 @@ impl GraphCircuit {
}
///
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub async fn populate_on_chain_test_data(
&mut self,
data: &mut GraphData,
@@ -1455,7 +1486,7 @@ impl CircuitSize {
}
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Export the ezkl configuration as json
pub fn as_json(&self) -> Result<String, GraphError> {
let serialized = match serde_json::to_string(&self) {
@@ -1543,7 +1574,7 @@ impl Circuit<Fp> for GraphCircuit {
let circuit_size = CircuitSize::from_cs(cs, params.run_args.logrows);
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
debug!(
"circuit size: \n {}",
circuit_size

View File

@@ -7,10 +7,12 @@ use super::GraphSettings;
use crate::circuit::hybrid::HybridOp;
use crate::circuit::region::ConstantsMap;
use crate::circuit::region::RegionCtx;
use crate::circuit::region::RegionSettings;
use crate::circuit::table::Range;
use crate::circuit::Input;
use crate::circuit::InputType;
use crate::circuit::Unknown;
use crate::fieldutils::IntegerRep;
use crate::tensor::ValType;
use crate::{
circuit::{lookup::LookupOp, BaseConfig as PolyConfig, CheckMode, Op},
@@ -19,9 +21,9 @@ use crate::{
};
use halo2curves::bn256::Fr as Fp;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use super::input::GraphData;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use colored::Colorize;
use halo2_proofs::{
circuit::{Layouter, Value},
@@ -34,29 +36,29 @@ use log::{debug, info, trace};
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use std::collections::HashMap;
use std::collections::HashSet;
use std::fs;
use std::io::Read;
use std::path::PathBuf;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tabled::Table;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::prelude::{
Framework, Graph, InferenceFact, InferenceModelExt, SymbolValues, TypedFact, TypedOp,
};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::tract_core::internal::DatumType;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::tract_hir::ops::scan::Scan;
use unzip_n::unzip_n;
unzip_n!(pub 3);
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
type TractResult = (Graph<TypedFact, Box<dyn TypedOp>>, SymbolValues);
/// The result of a forward pass.
#[derive(Clone, Debug)]
@@ -64,11 +66,11 @@ pub struct ForwardResult {
/// The outputs of the forward pass.
pub outputs: Vec<Tensor<Fp>>,
/// The maximum value of any input to a lookup operation.
pub max_lookup_inputs: i64,
pub max_lookup_inputs: IntegerRep,
/// The minimum value of any input to a lookup operation.
pub min_lookup_inputs: i64,
pub min_lookup_inputs: IntegerRep,
/// The max range check size
pub max_range_size: i64,
pub max_range_size: IntegerRep,
}
impl From<DummyPassRes> for ForwardResult {
@@ -101,6 +103,8 @@ pub struct DummyPassRes {
pub num_rows: usize,
/// num dynamic lookups
pub num_dynamic_lookups: usize,
/// max dynamic lookup input len
pub max_dynamic_input_len: usize,
/// dynamic lookup col size
pub dynamic_lookup_col_coord: usize,
/// num shuffles
@@ -116,11 +120,11 @@ pub struct DummyPassRes {
/// range checks
pub range_checks: HashSet<Range>,
/// max lookup inputs
pub max_lookup_inputs: i64,
pub max_lookup_inputs: IntegerRep,
/// min lookup inputs
pub min_lookup_inputs: i64,
pub min_lookup_inputs: IntegerRep,
/// max range check size
pub max_range_size: i64,
pub max_range_size: IntegerRep,
/// outputs
pub outputs: Vec<Tensor<Fp>>,
}
@@ -358,6 +362,14 @@ impl NodeType {
NodeType::SubGraph { .. } => SupportedOp::Unknown(Unknown),
}
}
/// check if it is a softmax
pub fn is_softmax(&self) -> bool {
match self {
NodeType::Node(n) => n.is_softmax(),
NodeType::SubGraph { .. } => false,
}
}
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
@@ -468,7 +480,7 @@ impl Model {
/// # Arguments
/// * `reader` - A reader for an Onnx file.
/// * `run_args` - [RunArgs]
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub fn new(reader: &mut dyn std::io::Read, run_args: &RunArgs) -> Result<Self, GraphError> {
let visibility = VarVisibility::from_args(run_args)?;
@@ -515,7 +527,7 @@ impl Model {
check_mode: CheckMode,
) -> Result<GraphSettings, GraphError> {
let instance_shapes = self.instance_shapes()?;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
debug!(
"{} {} {}",
"model has".blue(),
@@ -545,7 +557,11 @@ impl Model {
})
.collect::<Result<Vec<_>, GraphError>>()?;
let res = self.dummy_layout(run_args, &inputs, false, false)?;
let res = self.dummy_layout(
run_args,
&inputs,
RegionSettings::all_false(run_args.decomp_base, run_args.decomp_legs),
)?;
// if we're using percentage tolerance, we need to add the necessary range check ops for it.
@@ -556,6 +572,7 @@ impl Model {
num_rows: res.num_rows,
total_assignments: res.linear_coord,
required_lookups: res.lookup_ops.into_iter().collect(),
max_dynamic_input_len: res.max_dynamic_input_len,
required_range_checks: res.range_checks.into_iter().collect(),
model_output_scales: self.graph.get_output_scales()?,
model_input_scales: self.graph.get_input_scales(),
@@ -568,13 +585,13 @@ impl Model {
version: env!("CARGO_PKG_VERSION").to_string(),
num_blinding_factors: None,
// unix time timestamp
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
timestamp: Some(
instant::SystemTime::now()
.duration_since(instant::SystemTime::UNIX_EPOCH)?
.as_millis(),
),
#[cfg(target_arch = "wasm32")]
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
timestamp: None,
})
}
@@ -588,14 +605,13 @@ impl Model {
&self,
model_inputs: &[Tensor<Fp>],
run_args: &RunArgs,
witness_gen: bool,
check_lookup: bool,
region_settings: RegionSettings,
) -> Result<ForwardResult, GraphError> {
let valtensor_inputs: Vec<ValTensor<Fp>> = model_inputs
.iter()
.map(|x| x.map(|elem| ValType::Value(Value::known(elem))).into())
.collect();
let res = self.dummy_layout(run_args, &valtensor_inputs, witness_gen, check_lookup)?;
let res = self.dummy_layout(run_args, &valtensor_inputs, region_settings)?;
Ok(res.into())
}
@@ -604,14 +620,12 @@ impl Model {
/// * `reader` - A reader for an Onnx file.
/// * `run_args` - [RunArgs] containing the parsed arguments.
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
fn load_onnx_using_tract(
reader: &mut dyn std::io::Read,
run_args: &RunArgs,
) -> Result<TractResult, GraphError> {
use tract_onnx::{
tract_core::internal::IntoArcTensor, tract_hir::internal::GenericFactoid,
};
use tract_onnx::tract_hir::internal::GenericFactoid;
let mut model = tract_onnx::onnx().model_for_read(reader)?;
@@ -648,29 +662,11 @@ impl Model {
}
// Note: do not optimize the model, as the layout will depend on underlying hardware
let mut typed_model = model
let typed_model = model
.into_typed()?
.concretize_dims(&symbol_values)?
.into_decluttered()?;
// concretize constants
for node in typed_model.eval_order()? {
let node = typed_model.node_mut(node);
if let Some(op) = node.op_as_mut::<tract_onnx::tract_core::ops::konst::Const>() {
if op.0.datum_type() == DatumType::TDim {
// get inner value to Arc<Tensor>
let mut constant = op.0.as_ref().clone();
// Generally a shape or hyperparam
constant
.as_slice_mut::<tract_onnx::prelude::TDim>()?
.iter_mut()
.for_each(|x| *x = x.eval(&symbol_values));
op.0 = constant.into_arc_tensor();
}
}
}
Ok((typed_model, symbol_values))
}
@@ -679,7 +675,7 @@ impl Model {
/// * `reader` - A reader for an Onnx file.
/// * `run_args` - [RunArgs] containing the parsed arguments.
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
fn load_onnx_model(
reader: &mut dyn std::io::Read,
run_args: &RunArgs,
@@ -715,7 +711,7 @@ impl Model {
}
/// Formats nodes (including subgraphs) into tables!
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub fn table_nodes(&self) -> String {
let mut node_accumulator = vec![];
let mut string = String::new();
@@ -757,7 +753,7 @@ impl Model {
/// * `visibility` - Which inputs to the model are public and private (params, inputs, outputs) using [VarVisibility].
/// * `input_scales` - The scales of the model's inputs.
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub fn nodes_from_graph(
graph: &Graph<TypedFact, Box<dyn TypedOp>>,
run_args: &RunArgs,
@@ -902,16 +898,8 @@ impl Model {
);
}
None => {
let mut n = Node::new(
n.clone(),
&mut nodes,
scales,
&run_args.param_visibility,
i,
symbol_values,
run_args.div_rebasing,
run_args.rebase_frac_zero_constants,
)?;
let mut n =
Node::new(n.clone(), &mut nodes, scales, i, symbol_values, run_args)?;
if let Some(ref scales) = override_input_scales {
if let Some(inp) = n.opkind.get_input() {
let scale = scales[input_idx];
@@ -954,7 +942,7 @@ impl Model {
Ok(nodes)
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Removes all nodes that are consts with 0 uses
fn remove_unused_nodes(nodes: &mut BTreeMap<usize, NodeType>) {
// remove all nodes that are consts with 0 uses now
@@ -973,7 +961,7 @@ impl Model {
});
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Run tract onnx model on sample data!
pub fn run_onnx_predictions(
run_args: &RunArgs,
@@ -1014,7 +1002,7 @@ impl Model {
/// Creates a `Model` from parsed run_args
/// # Arguments
/// * `params` - A [GraphSettings] struct holding parsed CLI arguments.
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub fn from_run_args(run_args: &RunArgs, model: &std::path::Path) -> Result<Self, GraphError> {
let mut file = std::fs::File::open(model).map_err(|e| {
GraphError::ReadWriteFileError(model.display().to_string(), e.to_string())
@@ -1131,6 +1119,8 @@ impl Model {
region,
0,
run_args.num_inner_cols,
run_args.decomp_base,
run_args.decomp_legs,
original_constants.clone(),
);
// we need to do this as this loop is called multiple times
@@ -1187,7 +1177,7 @@ impl Model {
})?;
}
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
thread_safe_region.debug_report();
*constants = thread_safe_region.assigned_constants().clone();
@@ -1218,7 +1208,7 @@ impl Model {
for (idx, node) in self.graph.nodes.iter() {
debug!("laying out {}: {}", idx, node.as_str(),);
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
region.debug_report();
debug!("input indices: {:?}", node.inputs());
debug!("output scales: {:?}", node.out_scales());
@@ -1410,8 +1400,7 @@ impl Model {
&self,
run_args: &RunArgs,
inputs: &[ValTensor<Fp>],
witness_gen: bool,
check_lookup: bool,
region_settings: RegionSettings,
) -> Result<DummyPassRes, GraphError> {
debug!("calculating num of constraints using dummy model layout...");
@@ -1430,8 +1419,7 @@ impl Model {
vars: ModelVars::new_dummy(),
};
let mut region =
RegionCtx::new_dummy(0, run_args.num_inner_cols, witness_gen, check_lookup);
let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols, region_settings);
let outputs = self.layout_nodes(&mut model_config, &mut region, &mut results)?;
@@ -1474,7 +1462,7 @@ impl Model {
trace!("dummy model layout took: {:?}", duration);
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
region.debug_report();
let outputs = outputs
@@ -1488,6 +1476,7 @@ impl Model {
let res = DummyPassRes {
num_rows: region.row(),
linear_coord: region.linear_coord(),
max_dynamic_input_len: region.max_dynamic_input_len(),
total_const_size: region.total_constants(),
lookup_ops: region.used_lookups(),
range_checks: region.used_range_checks(),

View File

@@ -1,9 +1,9 @@
use super::scale_to_multiplier;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use super::utilities::node_output_shapes;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use super::VarScales;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use super::Visibility;
use crate::circuit::hybrid::HybridOp;
use crate::circuit::lookup::LookupOp;
@@ -13,29 +13,29 @@ use crate::circuit::Constant;
use crate::circuit::Input;
use crate::circuit::Op;
use crate::circuit::Unknown;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::graph::errors::GraphError;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::graph::new_op_from_onnx;
use crate::tensor::TensorError;
use halo2curves::bn256::Fr as Fp;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use log::trace;
use serde::Deserialize;
use serde::Serialize;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use std::collections::BTreeMap;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use std::fmt;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tabled::Tabled;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::{
self,
prelude::{Node as OnnxNode, SymbolValues, TypedFact, TypedOp},
};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
fn display_vector<T: fmt::Debug>(v: &Vec<T>) -> String {
if !v.is_empty() {
format!("{:?}", v)
@@ -44,7 +44,7 @@ fn display_vector<T: fmt::Debug>(v: &Vec<T>) -> String {
}
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
fn display_opkind(v: &SupportedOp) -> String {
v.as_string()
}
@@ -125,6 +125,7 @@ impl RebaseScale {
if (op_out_scale > (global_scale * scale_rebase_multiplier as i32))
&& !inner.is_constant()
&& !inner.is_input()
&& !inner.is_identity()
{
let multiplier =
scale_to_multiplier(op_out_scale - global_scale * scale_rebase_multiplier as i32);
@@ -302,7 +303,7 @@ impl SupportedOp {
}
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
fn homogenous_rescale(
&self,
in_scales: Vec<crate::Scale>,
@@ -326,6 +327,19 @@ impl SupportedOp {
SupportedOp::RebaseScale(op) => op,
}
}
/// Check if this is the identity operation
/// # Returns
/// * `true` if the operation is the identity operation
/// * `false` otherwise
pub fn is_identity(&self) -> bool {
match self {
SupportedOp::Linear(op) => matches!(op, PolyOp::Identity { .. }),
SupportedOp::Rescaled(op) => op.inner.is_identity(),
SupportedOp::RebaseScale(op) => op.inner.is_identity(),
_ => false,
}
}
}
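The `is_identity` check above unwraps recursively, so an identity wrapped in Rescaled or RebaseScale any number of times is still recognized (which is what lets the rebasing condition earlier skip it). A minimal sketch of the pattern with simplified stand-in types:

enum SketchOp {
    Identity,
    Add,
    Rescaled(Box<SketchOp>),
    RebaseScale(Box<SketchOp>),
}

fn is_identity(op: &SketchOp) -> bool {
    match op {
        SketchOp::Identity => true,
        // wrapper ops delegate to their inner op
        SketchOp::Rescaled(inner) | SketchOp::RebaseScale(inner) => is_identity(inner),
        SketchOp::Add => false,
    }
}

fn main() {
    let wrapped =
        SketchOp::RebaseScale(Box::new(SketchOp::Rescaled(Box::new(SketchOp::Identity))));
    assert!(is_identity(&wrapped));
}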
impl From<Box<dyn Op<Fp>>> for SupportedOp {
@@ -427,7 +441,7 @@ pub struct Node {
pub num_uses: usize,
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl Tabled for Node {
const LENGTH: usize = 6;
@@ -467,17 +481,15 @@ impl Node {
/// * `other_nodes` - [BTreeMap] of other previously initialized [Node]s in the computational graph.
/// * `public_params` - flag if parameters of model are public
/// * `idx` - The node's unique identifier.
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
#[allow(clippy::too_many_arguments)]
pub fn new(
node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
other_nodes: &mut BTreeMap<usize, super::NodeType>,
scales: &VarScales,
param_visibility: &Visibility,
idx: usize,
symbol_values: &SymbolValues,
div_rebasing: bool,
rebase_frac_zero_constants: bool,
run_args: &crate::RunArgs,
) -> Result<Self, GraphError> {
trace!("Create {:?}", node);
trace!("Create op {:?}", node.op);
@@ -517,11 +529,10 @@ impl Node {
let (mut opkind, deleted_indices) = new_op_from_onnx(
idx,
scales,
param_visibility,
node.clone(),
&mut inputs,
symbol_values,
rebase_frac_zero_constants,
run_args,
)?; // parses the op name
// we can only take the inputs as mutable once -- so we need to collect them first
@@ -569,7 +580,7 @@ impl Node {
rescale_const_with_single_use(
constant,
in_scales.clone(),
param_visibility,
&run_args.param_visibility,
input_node.num_uses(),
)?;
input_node.replace_opkind(constant.clone_dyn().into());
@@ -589,7 +600,7 @@ impl Node {
global_scale,
out_scale,
scales.rebase_multiplier,
div_rebasing,
run_args.div_rebasing,
);
out_scale = opkind.out_scale(in_scales)?;
@@ -612,9 +623,18 @@ impl Node {
num_uses,
})
}
/// check if it is a softmax node
pub fn is_softmax(&self) -> bool {
if let SupportedOp::Hybrid(HybridOp::Softmax { .. }) = self.opkind {
true
} else {
false
}
}
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
fn rescale_const_with_single_use(
constant: &mut Constant<Fp>,
in_scales: Vec<crate::Scale>,

View File

@@ -1,7 +1,7 @@
use log::{debug, error, info};
use std::fmt::Debug;
use std::net::IpAddr;
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
@@ -150,7 +150,7 @@ impl Config {
/// Adds a Unix socket host to the configuration.
///
/// Unlike `host`, this method allows non-UTF8 paths.
#[cfg(unix)]
#[cfg(all(not(not(feature = "ezkl")), unix))]
pub fn host_path<T>(&mut self, host: T) -> &mut Config
where
T: AsRef<Path>,

View File

@@ -1,25 +1,26 @@
use super::errors::GraphError;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use super::VarScales;
use super::{Rescaled, SupportedOp, Visibility};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::circuit::hybrid::HybridOp;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::circuit::lookup::LookupOp;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::circuit::poly::PolyOp;
use crate::circuit::Op;
use crate::fieldutils::IntegerRep;
use crate::tensor::{Tensor, TensorError, TensorType};
use halo2curves::bn256::Fr as Fp;
use halo2curves::ff::PrimeField;
use itertools::Itertools;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use log::{debug, warn};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use std::sync::Arc;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::prelude::{DatumType, Node as OnnxNode, TypedFact, TypedOp};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::tract_core::ops::{
array::{
Gather, GatherElements, GatherNd, MultiBroadcastTo, OneHot, ScatterElements, ScatterNd,
@@ -32,7 +33,7 @@ use tract_onnx::tract_core::ops::{
nn::{LeakyRelu, Reduce, Softmax},
Downsample,
};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::tract_hir::{
internal::DimLike,
ops::array::{Pad, PadMode, TypedConcat},
@@ -40,7 +41,7 @@ use tract_onnx::tract_hir::{
ops::konst::Const,
ops::nn::DataFormat,
tract_core::ops::cast::Cast,
tract_core::ops::cnn::{conv::KernelFormat, MaxPool, PaddingSpec, SumPool},
tract_core::ops::cnn::{conv::KernelFormat, MaxPool, SumPool},
};
/// Quantizes an iterable of f32s to a [Tensor] of i32s using a fixed point representation.
@@ -50,16 +51,20 @@ use tract_onnx::tract_hir::{
/// * `dims` - the dimensionality of the resulting [Tensor].
/// * `shift` - offset used in the fixed point representation.
/// * `scale` - `2^scale` used in the fixed point representation.
pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i64, TensorError> {
pub fn quantize_float(
elem: &f64,
shift: f64,
scale: crate::Scale,
) -> Result<IntegerRep, TensorError> {
let mult = scale_to_multiplier(scale);
let max_value = ((i64::MAX as f64 - shift) / mult).round(); // the maximum value that can be represented w/o sig bit truncation
let max_value = ((IntegerRep::MAX as f64 - shift) / mult).round(); // the maximum value that can be represented w/o sig bit truncation
if *elem > max_value {
return Err(TensorError::SigBitTruncationError);
}
// we parallelize the quantization process as it seems to be quite slow at times
let scaled = (mult * *elem + shift).round() as i64;
let scaled = (mult * *elem + shift).round() as IntegerRep;
Ok(scaled)
}
@@ -70,7 +75,7 @@ pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i64
/// * `scale` - `2^scale` used in the fixed point representation.
/// * `shift` - offset used in the fixed point representation.
pub fn dequantize(felt: Fp, scale: crate::Scale, shift: f64) -> f64 {
let int_rep = crate::fieldutils::felt_to_i64(felt);
let int_rep = crate::fieldutils::felt_to_integer_rep(felt);
let multiplier = scale_to_multiplier(scale);
int_rep as f64 / multiplier - shift
}
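// --- Editor's sketch (not part of the diff): a standalone illustration of the
// fixed-point round trip performed by `quantize_float` and `dequantize` above,
// using plain i64/f64 in place of IntegerRep and Fp and omitting the
// SigBitTruncationError bound check. The helper names here are hypothetical.
fn sketch_quantize(x: f64, shift: f64, scale: i32) -> i64 {
    let mult = 2f64.powi(scale); // scale_to_multiplier: 2^scale
    (mult * x + shift).round() as i64
}

fn sketch_dequantize(int_rep: i64, scale: i32, shift: f64) -> f64 {
    let mult = 2f64.powi(scale);
    int_rep as f64 / mult - shift
}

fn main() {
    // At scale 7 the multiplier is 2^7 = 128, so 0.5 quantizes to 64.
    assert_eq!(sketch_quantize(0.5, 0.0, 7), 64);
    // Dequantizing recovers the value to within 1/128.
    assert!((sketch_dequantize(64, 7, 0.0) - 0.5).abs() < 1.0 / 128.0);
}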
@@ -85,25 +90,26 @@ pub fn multiplier_to_scale(mult: f64) -> crate::Scale {
mult.log2().round() as crate::Scale
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Extracts padding from an onnx node.
pub fn extract_padding(
pool_spec: &PoolSpec,
num_dims: usize,
image_size: &[usize],
) -> Result<Vec<(usize, usize)>, GraphError> {
let padding = match &pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
PaddingSpec::Valid => vec![(0, 0); num_dims],
_ => {
return Err(GraphError::MissingParams("padding".to_string()));
}
};
let num_relevant_dims = pool_spec.kernel_shape.len();
// get the last num_relevant_dims of the image size
let image_size = &image_size[image_size.len() - num_relevant_dims..];
let dims = pool_spec.computed_padding(image_size);
let mut padding = Vec::new();
for dim in dims {
padding.push((dim.pad_before, dim.pad_after));
}
Ok(padding)
}
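// --- Editor's sketch (not part of the diff): `computed_padding` is tract's own
// routine and covers every PaddingSpec. For intuition only, this is how ONNX-style
// SAME_UPPER padding is typically derived for one spatial dimension; it is an
// illustrative assumption, not tract's implementation.
fn same_upper_padding(in_dim: usize, kernel: usize, stride: usize) -> (usize, usize) {
    let out = in_dim.div_ceil(stride);
    let total = ((out - 1) * stride + kernel).saturating_sub(in_dim);
    // SAME_UPPER puts the odd extra cell after the input.
    (total / 2, total - total / 2)
}

fn main() {
    // A 3x3 kernel, stride 1, over a 32-wide axis needs (1, 1) padding -- the same
    // (pad_before, pad_after) pairs that `extract_padding` collects per dimension.
    assert_eq!(same_upper_padding(32, 3, 1), (1, 1));
}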
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Extracts the strides from an onnx node.
pub fn extract_strides(pool_spec: &PoolSpec) -> Result<Vec<usize>, GraphError> {
Ok(pool_spec
@@ -114,7 +120,7 @@ pub fn extract_strides(pool_spec: &PoolSpec) -> Result<Vec<usize>, GraphError> {
}
/// Gets the shape of an onnx node's outlets.
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub fn node_output_shapes(
node: &OnnxNode<TypedFact, Box<dyn TypedOp>>,
symbol_values: &SymbolValues,
@@ -129,9 +135,9 @@ pub fn node_output_shapes(
}
Ok(shapes)
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::prelude::SymbolValues;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
/// Extracts the raw values from a tensor.
pub fn extract_tensor_value(
input: Arc<tract_onnx::prelude::Tensor>,
@@ -240,7 +246,7 @@ pub fn extract_tensor_value(
Ok(const_value)
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
fn load_op<C: tract_onnx::prelude::Op + Clone>(
op: &dyn tract_onnx::prelude::Op,
idx: usize,
@@ -264,15 +270,14 @@ fn load_op<C: tract_onnx::prelude::Op + Clone>(
/// * `param_visibility` - [Visibility] of the node.
/// * `node` - the [OnnxNode] to be matched.
/// * `inputs` - the node's inputs.
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub fn new_op_from_onnx(
idx: usize,
scales: &VarScales,
param_visibility: &Visibility,
node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
inputs: &mut [super::NodeType],
symbol_values: &SymbolValues,
rebase_frac_zero_constants: bool,
run_args: &crate::RunArgs,
) -> Result<(SupportedOp, Vec<usize>), GraphError> {
use tract_onnx::tract_core::ops::array::Trilu;
@@ -283,11 +288,7 @@ pub fn new_op_from_onnx(
.flat_map(|x| x.out_scales())
.collect::<Vec<_>>();
let input_dims = inputs
.iter()
.map(|x| x.out_dims())
.flatten()
.collect::<Vec<_>>();
let input_dims = inputs.iter().flat_map(|x| x.out_dims()).collect::<Vec<_>>();
let mut replace_const = |scale: crate::Scale,
index: usize,
@@ -343,12 +344,9 @@ pub fn new_op_from_onnx(
}
}
"MultiBroadcastTo" => {
let op = load_op::<MultiBroadcastTo>(node.op(), idx, node.op().name().to_string())?;
let shape = op.shape.clone();
let shape = shape
.iter()
.map(|x| x.to_usize())
.collect::<Result<Vec<_>, _>>()?;
let _op = load_op::<MultiBroadcastTo>(node.op(), idx, node.op().name().to_string())?;
let shapes = node_output_shapes(&node, symbol_values)?;
let shape = shapes[0].clone();
SupportedOp::Linear(PolyOp::MultiBroadcastTo { shape })
}
@@ -666,13 +664,16 @@ pub fn new_op_from_onnx(
// if all raw_values are round then set scale to 0
let all_round = raw_value.iter().all(|x| (x).fract() == 0.0);
if all_round && rebase_frac_zero_constants {
if all_round && run_args.rebase_frac_zero_constants {
constant_scale = 0;
}
// Quantize the raw value
let quantized_value =
quantize_tensor(raw_value.clone(), constant_scale, param_visibility)?;
let quantized_value = quantize_tensor(
raw_value.clone(),
constant_scale,
&run_args.param_visibility,
)?;
let c = crate::circuit::ops::Constant::new(quantized_value, raw_value);
// Create a constant op
SupportedOp::Constant(c)
@@ -762,81 +763,41 @@ pub fn new_op_from_onnx(
.map(|(i, _)| i)
.collect::<Vec<_>>();
if const_inputs.len() != 1 {
return Err(GraphError::OpMismatch(idx, "Max".to_string()));
}
let const_idx = const_inputs[0];
let boxed_op = inputs[const_idx].opkind();
let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
if c.len() == 1 {
c[0]
} else {
return Err(GraphError::InvalidDims(idx, "max".to_string()));
}
} else {
return Err(GraphError::OpMismatch(idx, "Max".to_string()));
};
if inputs.len() == 2 {
if let Some(node) = inputs.get_mut(const_idx) {
node.decrement_use();
deleted_indices.push(const_idx);
}
if unit == 0. {
SupportedOp::Nonlinear(LookupOp::ReLU)
if const_inputs.len() > 0 {
let const_idx = const_inputs[0];
let boxed_op = inputs[const_idx].opkind();
let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
if c.len() == 1 {
c[0]
} else {
return Err(GraphError::InvalidDims(idx, "max".to_string()));
}
} else {
return Err(GraphError::OpMismatch(idx, "Max".to_string()));
};
if unit == 0. {
if let Some(node) = inputs.get_mut(const_idx) {
node.decrement_use();
deleted_indices.push(const_idx);
}
SupportedOp::Linear(PolyOp::LeakyReLU {
slope: 0.0.into(),
scale: 1,
})
} else {
SupportedOp::Hybrid(HybridOp::Max)
}
} else {
// get the non-constant index
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
SupportedOp::Nonlinear(LookupOp::Max {
scale: scale_to_multiplier(inputs[non_const_idx].out_scales()[0]).into(),
a: crate::circuit::utils::F32(unit),
})
SupportedOp::Hybrid(HybridOp::Max)
}
} else {
return Err(GraphError::InvalidDims(idx, "max".to_string()));
}
}
"Min" => {
// Extract the min value
// first find the input that is a constant
// and then extract the value
let const_inputs = inputs
.iter()
.enumerate()
.filter(|(_, n)| n.is_constant())
.map(|(i, _)| i)
.collect::<Vec<_>>();
if const_inputs.len() != 1 {
return Err(GraphError::OpMismatch(idx, "Min".to_string()));
}
let const_idx = const_inputs[0];
let boxed_op = inputs[const_idx].opkind();
let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
if c.len() == 1 {
c[0]
} else {
return Err(GraphError::InvalidDims(idx, "min".to_string()));
}
} else {
return Err(GraphError::OpMismatch(idx, "Min".to_string()));
};
if inputs.len() == 2 {
if let Some(node) = inputs.get_mut(const_idx) {
node.decrement_use();
deleted_indices.push(const_idx);
}
// get the non-constant index
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
SupportedOp::Nonlinear(LookupOp::Min {
scale: scale_to_multiplier(inputs[non_const_idx].out_scales()[0]).into(),
a: crate::circuit::utils::F32(unit),
})
SupportedOp::Hybrid(HybridOp::Min)
} else {
return Err(GraphError::InvalidDims(idx, "min".to_string()));
}
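// --- Editor's note (not part of the diff): the Max/Min lowering above keeps one
// special case: max(x, 0) is exactly ReLU, so it is emitted as a LeakyReLU with
// slope 0 rather than a generic hybrid Max. A quick standalone check of that identity:
fn leaky_relu(x: f64, slope: f64) -> f64 {
    if x >= 0.0 {
        x
    } else {
        slope * x
    }
}

fn main() {
    for x in [-2.5, -0.1, 0.0, 0.1, 3.0] {
        assert_eq!(x.max(0.0), leaky_relu(x, 0.0));
    }
}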
@@ -848,7 +809,6 @@ pub fn new_op_from_onnx(
SupportedOp::Hybrid(HybridOp::Recip {
input_scale: (scale_to_multiplier(in_scale) as f32).into(),
output_scale: (scale_to_multiplier(max_scale) as f32).into(),
use_range_check_for_int: true,
})
}
@@ -863,8 +823,9 @@ pub fn new_op_from_onnx(
}
};
SupportedOp::Nonlinear(LookupOp::LeakyReLU {
SupportedOp::Linear(PolyOp::LeakyReLU {
slope: crate::circuit::utils::F32(leaky_op.alpha),
scale: scales.params,
})
}
"Scan" => {
@@ -873,7 +834,7 @@ pub fn new_op_from_onnx(
"QuantizeLinearU8" | "DequantizeLinearF32" => {
SupportedOp::Linear(PolyOp::Identity { out_scale: None })
}
"Abs" => SupportedOp::Nonlinear(LookupOp::Abs),
"Abs" => SupportedOp::Linear(PolyOp::Abs),
"Neg" => SupportedOp::Linear(PolyOp::Neg),
"HardSwish" => SupportedOp::Nonlinear(LookupOp::HardSwish {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
@@ -1016,8 +977,13 @@ pub fn new_op_from_onnx(
if raw_values.log2().fract() == 0.0 {
inputs[const_idx].decrement_use();
deleted_indices.push(const_idx);
// get the non constant index
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
op = SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(input_scales[0] + raw_values.log2() as i32),
out_scale: Some(
input_scales[non_const_idx] + raw_values.log2() as i32,
),
});
}
}
@@ -1107,8 +1073,8 @@ pub fn new_op_from_onnx(
));
}
let stride = extract_strides(&pool_spec)?;
let padding = extract_padding(&pool_spec, input_dims[0].len())?;
let stride = extract_strides(pool_spec)?;
let padding = extract_padding(pool_spec, &input_dims[0])?;
let kernel_shape = &pool_spec.kernel_shape;
SupportedOp::Hybrid(HybridOp::MaxPool {
@@ -1117,19 +1083,22 @@ pub fn new_op_from_onnx(
pool_dims: kernel_shape.to_vec(),
})
}
"Ceil" => SupportedOp::Nonlinear(LookupOp::Ceil {
"Ceil" => SupportedOp::Hybrid(HybridOp::Ceil {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
legs: run_args.decomp_legs,
}),
"Floor" => SupportedOp::Nonlinear(LookupOp::Floor {
"Floor" => SupportedOp::Hybrid(HybridOp::Floor {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
legs: run_args.decomp_legs,
}),
"Round" => SupportedOp::Nonlinear(LookupOp::Round {
"Round" => SupportedOp::Hybrid(HybridOp::Round {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
legs: run_args.decomp_legs,
}),
"RoundHalfToEven" => SupportedOp::Nonlinear(LookupOp::RoundHalfToEven {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Sign" => SupportedOp::Nonlinear(LookupOp::Sign),
"Sign" => SupportedOp::Linear(PolyOp::Sign),
"Pow" => {
// Extract the slope layer hyperparams from a const
@@ -1140,10 +1109,17 @@ pub fn new_op_from_onnx(
if c.raw_values.len() > 1 {
unimplemented!("only support scalar pow")
}
SupportedOp::Nonlinear(LookupOp::Pow {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
a: crate::circuit::utils::F32(c.raw_values[0]),
})
let exponent = c.raw_values[0];
if exponent.fract() == 0.0 {
SupportedOp::Linear(PolyOp::Pow(exponent as u32))
} else {
SupportedOp::Nonlinear(LookupOp::Pow {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
a: crate::circuit::utils::F32(exponent),
})
}
} else {
unimplemented!("only support constant pow for now")
}
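// --- Editor's sketch (not part of the diff): the dispatch above sends integer
// exponents to PolyOp::Pow, which needs only repeated in-circuit multiplication,
// while fractional exponents fall back to a lookup table. A standalone illustration
// of why an integer exponent is "polynomial":
fn pow_by_mul(x: i64, exp: u32) -> i64 {
    (0..exp).fold(1, |acc, _| acc * x)
}

fn main() {
    assert_eq!(pow_by_mul(3, 4), 81); // x^4 = ((x * x) * x) * x
    // x^0.5, by contrast, has no polynomial form and stays on LookupOp::Pow.
}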
@@ -1177,8 +1153,8 @@ pub fn new_op_from_onnx(
let pool_spec = &conv_node.pool_spec;
let stride = extract_strides(&pool_spec)?;
let padding = extract_padding(&pool_spec, input_dims[0].len())?;
let stride = extract_strides(pool_spec)?;
let padding = extract_padding(pool_spec, &input_dims[0])?;
// if bias exists then rescale it to the input + kernel scale
if input_scales.len() == 3 {
@@ -1196,7 +1172,13 @@ pub fn new_op_from_onnx(
}
}
SupportedOp::Linear(PolyOp::Conv { padding, stride })
let group = conv_node.group;
SupportedOp::Linear(PolyOp::Conv {
padding,
stride,
group,
})
}
"Not" => SupportedOp::Linear(PolyOp::Not),
"And" => SupportedOp::Linear(PolyOp::And),
@@ -1229,8 +1211,8 @@ pub fn new_op_from_onnx(
let pool_spec = &deconv_node.pool_spec;
let stride = extract_strides(&pool_spec)?;
let padding = extract_padding(&pool_spec, input_dims[0].len())?;
let stride = extract_strides(pool_spec)?;
let padding = extract_padding(pool_spec, &input_dims[0])?;
// if bias exists then rescale it to the input + kernel scale
if input_scales.len() == 3 {
let bias_scale = input_scales[2];
@@ -1251,6 +1233,7 @@ pub fn new_op_from_onnx(
padding,
output_padding: deconv_node.adjustments.to_vec(),
stride,
group: deconv_node.group,
})
}
"Downsample" => {
@@ -1341,8 +1324,8 @@ pub fn new_op_from_onnx(
));
}
let stride = extract_strides(&pool_spec)?;
let padding = extract_padding(&pool_spec, input_dims[0].len())?;
let stride = extract_strides(pool_spec)?;
let padding = extract_padding(pool_spec, &input_dims[0])?;
SupportedOp::Hybrid(HybridOp::SumPool {
padding,
@@ -1351,11 +1334,6 @@ pub fn new_op_from_onnx(
normalized: sumpool_node.normalize,
})
}
// "GlobalAvgPool" => SupportedOp::Linear(PolyOp::SumPool {
// padding: [(0, 0); 2],
// stride: (1, 1),
// kernel_shape: (inputs[0].out_dims()[0][1], inputs[0].out_dims()[0][2]),
// }),
"Pad" => {
let pad_node: &Pad = match node.op().downcast_ref::<Pad>() {
Some(b) => b,
@@ -1424,7 +1402,7 @@ pub fn quantize_tensor<F: PrimeField + TensorType + PartialOrd>(
visibility: &Visibility,
) -> Result<Tensor<F>, TensorError> {
let mut value: Tensor<F> = const_value.par_enum_map(|_, x| {
Ok::<_, TensorError>(crate::fieldutils::i64_to_felt::<F>(quantize_float(
Ok::<_, TensorError>(crate::fieldutils::integer_rep_to_felt::<F>(quantize_float(
&(x).into(),
0.0,
scale,

View File

@@ -14,6 +14,7 @@ use pyo3::{
};
use serde::{Deserialize, Serialize};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;
use self::errors::GraphError;
@@ -64,6 +65,7 @@ impl Display for Visibility {
}
}
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl ToFlags for Visibility {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
@@ -441,7 +443,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
let dynamic_lookup =
VarTensor::new_advice(cs, logrows, 1, dynamic_lookup_and_shuffle_size);
if dynamic_lookup.num_blocks() > 1 {
panic!("dynamic lookup or shuffle should only have one block");
warn!("dynamic lookup has {} blocks", dynamic_lookup.num_blocks());
};
advices.push(dynamic_lookup);
}

View File

@@ -23,18 +23,23 @@
)]
// we allow this for our dynamic range based indexing scheme
#![allow(clippy::single_range_in_vec_init)]
#![feature(buf_read_has_data_left)]
#![feature(stmt_expr_attributes)]
//! A library for turning computational graphs, such as neural networks, into ZK-circuits.
//!
/// Error type
// #[cfg_attr(not(feature = "ezkl"), derive(uniffi::Error))]
#[derive(thiserror::Error, Debug)]
#[allow(missing_docs)]
pub enum EZKLError {
#[error("[aggregation] {0}")]
AggregationError(#[from] pfsys::evm::aggregation_kzg::AggregationError),
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
#[error("[eth] {0}")]
EthError(#[from] eth::EthError),
#[error("[graph] {0}")]
@@ -53,7 +58,10 @@ pub enum EZKLError {
JsonError(#[from] serde_json::Error),
#[error("[utf8] {0}")]
Utf8Error(#[from] std::str::Utf8Error),
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
#[error("[reqwest] {0}")]
ReqwestError(#[from] reqwest::Error),
#[error("[fmt] {0}")]
@@ -62,7 +70,10 @@ pub enum EZKLError {
Halo2Error(#[from] halo2_proofs::plonk::Error),
#[error("[Uncategorized] {0}")]
UncategorizedError(String),
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
#[error("[execute] {0}")]
ExecutionError(#[from] execute::ExecutionError),
#[error("[srs] {0}")]
@@ -84,59 +95,72 @@ impl From<String> for EZKLError {
use std::str::FromStr;
use circuit::{table::Range, CheckMode, Tolerance};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use clap::Args;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use fieldutils::IntegerRep;
use graph::Visibility;
use halo2_proofs::poly::{
ipa::commitment::IPACommitmentScheme, kzg::commitment::KZGCommitmentScheme,
};
use halo2curves::bn256::{Bn256, G1Affine};
use serde::{Deserialize, Serialize};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;
/// Bindings management
#[cfg(any(
feature = "ios-bindings",
all(target_arch = "wasm32", target_os = "unknown"),
feature = "python-bindings"
))]
pub mod bindings;
/// Methods for configuring tensor operations and assigning values to them in a Halo2 circuit.
pub mod circuit;
/// CLI commands.
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub mod commands;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
// abigen doesn't generate docs for this module
#[allow(missing_docs)]
/// Utility functions for contracts
pub mod eth;
/// Command execution
///
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub mod execute;
/// Utilities for converting from Halo2 Field types to integers (and vice-versa).
pub mod fieldutils;
/// Methods for loading onnx format models and automatically laying them out in
/// a Halo2 circuit.
#[cfg(feature = "onnx")]
#[cfg(any(feature = "onnx", not(feature = "ezkl")))]
pub mod graph;
/// beautiful logging
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
pub mod logger;
/// Tools for proofs and verification used by cli
pub mod pfsys;
/// Python bindings
#[cfg(feature = "python-bindings")]
pub mod python;
/// srs sha hashes
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[cfg(all(
feature = "ezkl",
not(all(target_arch = "wasm32", target_os = "unknown"))
))]
pub mod srs_sha;
/// An implementation of multi-dimensional tensors.
pub mod tensor;
/// wasm prover and verifier
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
pub mod wasm;
#[cfg(feature = "ios-bindings")]
uniffi::setup_scaffolding!();
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use lazy_static::lazy_static;
/// The denominator in the fixed point representation used when quantizing inputs
pub type Scale = i32;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
// Buf writer capacity
lazy_static! {
/// The capacity of the buffer used for writing to disk
@@ -148,12 +172,13 @@ lazy_static! {
/// The serialization format for the keys
pub static ref EZKL_KEY_FORMAT: String = std::env::var("EZKL_KEY_FORMAT")
.unwrap_or("raw-bytes".to_string());
}
#[cfg(target_arch = "wasm32")]
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
const EZKL_KEY_FORMAT: &str = "raw-bytes";
#[cfg(target_arch = "wasm32")]
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
const EZKL_BUF_CAPACITY: &usize = &8000;
#[derive(
@@ -206,6 +231,7 @@ impl std::fmt::Display for Commitments {
}
}
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl ToFlags for Commitments {
/// Convert the struct to a subcommand string
fn to_flags(&self) -> Vec<String> {
@@ -228,53 +254,69 @@ impl From<String> for Commitments {
}
/// Parameters specific to a proving run
#[derive(Debug, Args, Deserialize, Serialize, Clone, PartialEq, PartialOrd, ToFlags)]
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, PartialOrd)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(Args, ToFlags)
)]
pub struct RunArgs {
/// The tolerance for error on model outputs
#[arg(short = 'T', long, default_value = "0", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'T', long, default_value = "0", value_hint = clap::ValueHint::Other))]
pub tolerance: Tolerance,
/// The denominator in the fixed point representation used when quantizing inputs
#[arg(short = 'S', long, default_value = "7", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'S', long, default_value = "7", value_hint = clap::ValueHint::Other))]
pub input_scale: Scale,
/// The denominator in the fixed point representation used when quantizing parameters
#[arg(long, default_value = "7", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "7", value_hint = clap::ValueHint::Other))]
pub param_scale: Scale,
/// if the scale is ever > scale_rebase_multiplier * input_scale then the scale is rebased to input_scale (this is a more advanced parameter, use with caution)
#[arg(long, default_value = "1", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "1", value_hint = clap::ValueHint::Other))]
pub scale_rebase_multiplier: u32,
/// The min and max elements in the lookup table input column
#[arg(short = 'B', long, value_parser = parse_key_val::<i64, i64>, default_value = "-32768->32768")]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'B', long, value_parser = parse_key_val::<IntegerRep, IntegerRep>, default_value = "-32768->32768"))]
pub lookup_range: Range,
/// The log_2 number of rows
#[arg(short = 'K', long, default_value = "17", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'K', long, default_value = "17", value_hint = clap::ValueHint::Other))]
pub logrows: u32,
/// The number of inner columns
#[arg(short = 'N', long, default_value = "2", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'N', long, default_value = "2", value_hint = clap::ValueHint::Other))]
pub num_inner_cols: usize,
/// Hand-written parser for graph variables, e.g. batch_size=1
#[arg(short = 'V', long, value_parser = parse_key_val::<String, usize>, default_value = "batch_size->1", value_delimiter = ',', value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(short = 'V', long, value_parser = parse_key_val::<String, usize>, default_value = "batch_size->1", value_delimiter = ',', value_hint = clap::ValueHint::Other))]
pub variables: Vec<(String, usize)>,
/// Flags whether inputs are public, private, fixed, hashed, polycommit
#[arg(long, default_value = "private", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "private", value_hint = clap::ValueHint::Other))]
pub input_visibility: Visibility,
/// Flags whether outputs are public, private, fixed, hashed, polycommit
#[arg(long, default_value = "public", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "public", value_hint = clap::ValueHint::Other))]
pub output_visibility: Visibility,
/// Flags whether params are fixed, private, hashed, polycommit
#[arg(long, default_value = "private", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "private", value_hint = clap::ValueHint::Other))]
pub param_visibility: Visibility,
#[arg(long, default_value = "false")]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
arg(long, default_value = "false")
)]
/// Rebase the scale using a lookup table for division instead of using a range check
pub div_rebasing: bool,
/// Should constants with 0.0 fraction be rebased to scale 0
#[arg(long, default_value = "false")]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
arg(long, default_value = "false")
)]
pub rebase_frac_zero_constants: bool,
/// check mode (safe, unsafe, etc)
#[arg(long, default_value = "unsafe", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "unsafe", value_hint = clap::ValueHint::Other))]
pub check_mode: CheckMode,
/// commitment scheme
#[arg(long, default_value = "kzg", value_hint = clap::ValueHint::Other)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "kzg", value_hint = clap::ValueHint::Other))]
pub commitment: Option<Commitments>,
/// the base used for decompositions
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "16384", value_hint = clap::ValueHint::Other))]
pub decomp_base: usize,
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "2", value_hint = clap::ValueHint::Other))]
/// the number of legs used for decompositions
pub decomp_legs: usize,
}
impl Default for RunArgs {
@@ -295,6 +337,8 @@ impl Default for RunArgs {
rebase_frac_zero_constants: false,
check_mode: CheckMode::UNSAFE,
commitment: None,
decomp_base: 16384,
decomp_legs: 2,
}
}
}
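// --- Editor's sketch (not part of the diff): `decomp_base` and `decomp_legs` bound
// the magnitudes that the lookup-free, decomposition-based ops can handle. With the
// defaults above, base 16384 = 2^14 and 2 legs, the largest magnitude expressible in
// the digit legs is base^legs - 1 = 2^28 - 1; widening either field trades more
// advice cells per decomposition for a larger range.
fn max_decomposable(base: u64, legs: u32) -> u64 {
    base.pow(legs) - 1
}

fn main() {
    assert_eq!(max_decomposable(16384, 2), (1u64 << 28) - 1);
}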
@@ -343,6 +387,7 @@ impl RunArgs {
}
/// Parse a single key-value pair
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
fn parse_key_val<T, U>(
s: &str,
) -> Result<(T, U), Box<dyn std::error::Error + Send + Sync + 'static>>

View File

@@ -76,7 +76,7 @@ pub fn init_logger() {
prefix_token(&record.level()),
// pretty print UTC time
chrono::Utc::now()
.format("%Y-%m-%d %H:%M:%S")
.format("%Y-%m-%d %H:%M:%S:%3f")
.to_string()
.bright_magenta(),
record.metadata().target(),

View File

@@ -1,7 +1,7 @@
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::graph::CircuitSize;
use crate::pfsys::{Snark, SnarkWitness};
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use colored_json::ToColoredJson;
use halo2_proofs::circuit::AssignedCell;
use halo2_proofs::plonk::{self};
@@ -20,7 +20,7 @@ use halo2_wrong_ecc::{
use halo2curves::bn256::{Bn256, Fq, Fr, G1Affine};
use halo2curves::ff::PrimeField;
use itertools::Itertools;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use log::debug;
use log::trace;
use rand::rngs::OsRng;
@@ -200,7 +200,7 @@ impl AggregationConfig {
let range_config =
RangeChip::<F>::configure(meta, &main_gate_config, composition_bits, overflow_bits);
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
{
let circuit_size = CircuitSize::from_cs(meta, 23);

View File

@@ -13,6 +13,7 @@ use crate::circuit::CheckMode;
use crate::graph::GraphWitness;
use crate::pfsys::evm::aggregation_kzg::PoseidonTranscript;
use crate::{Commitments, EZKL_BUF_CAPACITY, EZKL_KEY_FORMAT};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use clap::ValueEnum;
use halo2_proofs::circuit::Value;
use halo2_proofs::plonk::{
@@ -42,6 +43,7 @@ use std::io::{self, BufReader, BufWriter, Cursor, Write};
use std::ops::Deref;
use std::path::PathBuf;
use thiserror::Error as thisError;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
@@ -56,8 +58,10 @@ fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
}
#[allow(missing_docs)]
#[derive(
ValueEnum, Copy, Clone, Default, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd,
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
pub enum ProofType {
#[default]
@@ -77,7 +81,7 @@ impl std::fmt::Display for ProofType {
)
}
}
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl ToFlags for ProofType {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
@@ -129,17 +133,38 @@ impl<'source> pyo3::FromPyObject<'source> for ProofType {
}
#[allow(missing_docs)]
#[derive(ValueEnum, Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
pub enum StrategyType {
Single,
Accum,
}
impl std::fmt::Display for StrategyType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.to_possible_value()
.expect("no values are skipped")
.get_name()
.fmt(f)
// When the `ezkl` feature is disabled or we're targeting `wasm32`, use basic string representation.
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
{
write!(
f,
"{}",
match self {
StrategyType::Single => "single",
StrategyType::Accum => "accum",
}
)
}
// When the `ezkl` feature is enabled and we're not targeting `wasm32`, use `to_possible_value`.
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
{
self.to_possible_value()
.expect("no values are skipped")
.get_name()
.fmt(f)
}
}
}
#[cfg(feature = "python-bindings")]
@@ -177,8 +202,10 @@ pub enum PfSysError {
}
#[allow(missing_docs)]
#[derive(
ValueEnum, Default, Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd,
#[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
pub enum TranscriptType {
Poseidon,
@@ -198,7 +225,7 @@ impl std::fmt::Display for TranscriptType {
)
}
}
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl ToFlags for TranscriptType {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
@@ -558,7 +585,8 @@ where
+ PrimeField
+ FromUniformBytes<64>
+ WithSmallOrderMulGroup<3>,
Scheme::Curve: Serialize + DeserializeOwned,
Scheme::Curve: Serialize + DeserializeOwned + SerdeObject,
Scheme::ParamsProver: Send + Sync,
{
let strategy = Strategy::new(params.verifier_params());
let mut transcript = TranscriptWriterBuffer::<_, Scheme::Curve, _>::init(vec![]);
@@ -861,7 +889,7 @@ pub fn save_params<Scheme: CommitmentScheme>(
////////////////////////
#[cfg(test)]
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
mod tests {
use super::*;

View File

@@ -1,5 +1,7 @@
use thiserror::Error;
use super::ops::DecompositionError;
/// A wrapper for tensor related errors.
#[derive(Debug, Error)]
pub enum TensorError {
@@ -27,4 +29,13 @@ pub enum TensorError {
/// Unset visibility
#[error("unset visibility")]
UnsetVisibility,
/// File save error
#[error("save error: {0}")]
FileSaveError(String),
/// File load error
#[error("load error: {0}")]
FileLoadError(String),
/// Decomposition error
#[error("decomposition error: {0}")]
DecompositionError(#[from] DecompositionError),
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,12 +1,12 @@
use core::{iter::FilterMap, slice::Iter};
use crate::circuit::region::ConstantsMap;
use crate::{circuit::region::ConstantsMap, fieldutils::felt_to_integer_rep};
use maybe_rayon::slice::Iter;
use super::{
ops::{intercalate_values, pad, resize},
*,
};
use halo2_proofs::{arithmetic::Field, circuit::Cell, plonk::Instance};
use maybe_rayon::iter::{FilterMap, IntoParallelIterator, ParallelIterator};
pub(crate) fn create_constant_tensor<
F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
@@ -54,6 +54,44 @@ pub enum ValType<F: PrimeField + TensorType + std::marker::Send + std::marker::S
AssignedConstant(AssignedCell<F, F>, F),
}
impl<F: PrimeField + TensorType + PartialOrd> From<ValType<F>> for IntegerRep {
fn from(val: ValType<F>) -> Self {
match val {
ValType::Value(v) => {
let mut output = 0;
let mut i = 0;
v.map(|y| {
let e = felt_to_integer_rep(y);
output = e;
i += 1;
});
output
}
ValType::AssignedValue(v) => {
let mut output = 0;
let mut i = 0;
v.evaluate().map(|y| {
let e = felt_to_integer_rep(y);
output = e;
i += 1;
});
output
}
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
let mut output = 0;
let mut i = 0;
v.value().map(|y| {
let e = felt_to_integer_rep(*y);
output = e;
i += 1;
});
output
}
ValType::Constant(v) => felt_to_integer_rep(v),
}
}
}
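// --- Editor's sketch (not part of the diff): the conversions above go through
// felt_to_integer_rep, which (by assumption here) uses the usual signed embedding:
// field elements in the upper half of the field map back to negative integers.
// Illustrated over a toy prime rather than BN254's scalar field:
fn toy_felt_to_integer_rep(f: u64, p: u64) -> i64 {
    if f > p / 2 {
        -((p - f) as i64)
    } else {
        f as i64
    }
}

fn main() {
    let p = 97;
    assert_eq!(toy_felt_to_integer_rep(3, p), 3);
    assert_eq!(toy_felt_to_integer_rep(p - 3, p), -3); // 94 = -3 mod 97
}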
impl<F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd> ValType<F> {
/// Returns the inner cell of the [ValType].
pub fn cell(&self) -> Option<Cell> {
@@ -121,44 +159,6 @@ impl<F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + Partia
}
}
impl<F: PrimeField + TensorType + PartialOrd> From<ValType<F>> for i32 {
fn from(val: ValType<F>) -> Self {
match val {
ValType::Value(v) => {
let mut output = 0_i32;
let mut i = 0;
v.map(|y| {
let e = felt_to_i32(y);
output = e;
i += 1;
});
output
}
ValType::AssignedValue(v) => {
let mut output = 0_i32;
let mut i = 0;
v.evaluate().map(|y| {
let e = felt_to_i32(y);
output = e;
i += 1;
});
output
}
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
let mut output = 0_i32;
let mut i = 0;
v.value().map(|y| {
let e = felt_to_i32(*y);
output = e;
i += 1;
});
output
}
ValType::Constant(v) => felt_to_i32(v),
}
}
}
impl<F: PrimeField + TensorType + PartialOrd> From<F> for ValType<F> {
fn from(t: F) -> ValType<F> {
ValType::Constant(t)
@@ -317,8 +317,8 @@ impl<F: PrimeField + TensorType + PartialOrd> From<Tensor<AssignedCell<F, F>>> f
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
/// Allocate a new [ValTensor::Value] from the given [Tensor] of [IntegerRep].
pub fn from_i64_tensor(t: Tensor<i64>) -> ValTensor<F> {
let inner = t.map(|x| ValType::Value(Value::known(i64_to_felt(x))));
pub fn from_integer_rep_tensor(t: Tensor<IntegerRep>) -> ValTensor<F> {
let inner = t.map(|x| ValType::Value(Value::known(integer_rep_to_felt(x))));
inner.into()
}
@@ -460,7 +460,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
&self,
) -> FilterMap<Iter<'_, ValType<F>>, fn(&ValType<F>) -> Option<(F, ValType<F>)>> {
match self {
ValTensor::Value { inner, .. } => inner.iter().filter_map(|x| {
ValTensor::Value { inner, .. } => inner.par_iter().filter_map(|x| {
if let ValType::Constant(v) = x {
Some((*v, x.clone()))
} else {
@@ -520,10 +520,58 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
}
}
/// Get the sign of the inner values
pub fn sign(&self) -> Result<Self, TensorError> {
let evals = self.int_evals()?;
Ok(evals
.par_enum_map(|_, val| {
Ok::<_, TensorError>(ValType::Value(Value::known(integer_rep_to_felt(
val.signum(),
))))
})?
.into())
}
/// Decompose the inner values into base `base` and `n` legs.
pub fn decompose(&self, base: usize, n: usize) -> Result<Self, TensorError> {
let res = self
.get_inner()?
.par_iter()
.map(|x| {
let mut is_empty = true;
x.map(|_| is_empty = false);
if is_empty {
Ok::<_, TensorError>(vec![Value::<F>::unknown(); n + 1])
} else {
let mut res = vec![Value::unknown(); n + 1];
let mut int_rep = 0;
x.map(|f| {
int_rep = crate::fieldutils::felt_to_integer_rep(f);
});
let decompe = crate::tensor::ops::get_rep(&int_rep, base, n)?;
for (i, x) in decompe.iter().enumerate() {
res[i] = Value::known(crate::fieldutils::integer_rep_to_felt(*x));
}
Ok(res)
}
})
.collect::<Result<Vec<_>, _>>();
let mut tensor = Tensor::from(res?.into_iter().flatten().collect::<Vec<_>>().into_iter());
let mut dims = self.dims().to_vec();
dims.push(n + 1);
tensor.reshape(&dims)?;
Ok(tensor.into())
}
/// Returns the integer evaluations of the inner tensor.
pub fn get_int_evals(&self) -> Result<Tensor<i64>, TensorError> {
pub fn int_evals(&self) -> Result<Tensor<IntegerRep>, TensorError> {
// finally convert to vector of integers
let mut integer_evals: Vec<i64> = vec![];
let mut integer_evals: Vec<IntegerRep> = vec![];
match self {
ValTensor::Value {
inner: v, dims: _, ..
@@ -531,25 +579,26 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
// we have to push to an externally created vector or else vaf.map() returns an evaluation wrapped in Value<> (which we don't want)
let _ = v.map(|vaf| match vaf {
ValType::Value(v) => v.map(|f| {
integer_evals.push(crate::fieldutils::felt_to_i64(f));
integer_evals.push(crate::fieldutils::felt_to_integer_rep(f));
}),
ValType::AssignedValue(v) => v.map(|f| {
integer_evals.push(crate::fieldutils::felt_to_i64(f.evaluate()));
integer_evals.push(crate::fieldutils::felt_to_integer_rep(f.evaluate()));
}),
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
v.value_field().map(|f| {
integer_evals.push(crate::fieldutils::felt_to_i64(f.evaluate()));
integer_evals
.push(crate::fieldutils::felt_to_integer_rep(f.evaluate()));
})
}
ValType::Constant(v) => {
integer_evals.push(crate::fieldutils::felt_to_i64(v));
integer_evals.push(crate::fieldutils::felt_to_integer_rep(v));
Value::unknown()
}
});
}
_ => return Err(TensorError::WrongMethod),
};
let mut tensor: Tensor<i64> = integer_evals.into_iter().into();
let mut tensor: Tensor<IntegerRep> = integer_evals.into_iter().into();
match tensor.reshape(self.dims()) {
_ => {}
};
@@ -573,6 +622,48 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
Ok(())
}
/// Calls `last` on the inner tensor.
pub fn last(&self) -> Result<ValTensor<F>, TensorError> {
let slice = match self {
ValTensor::Value {
inner: v,
dims: _,
scale,
} => {
let inner = v.last()?;
let dims = inner.dims().to_vec();
ValTensor::Value {
inner,
dims,
scale: *scale,
}
}
_ => return Err(TensorError::WrongMethod),
};
Ok(slice)
}
/// Calls `first` on the inner tensor.
pub fn first(&self) -> Result<ValTensor<F>, TensorError> {
let slice = match self {
ValTensor::Value {
inner: v,
dims: _,
scale,
} => {
let inner = v.first()?;
let dims = inner.dims().to_vec();
ValTensor::Value {
inner,
dims,
scale: *scale,
}
}
_ => return Err(TensorError::WrongMethod),
};
Ok(slice)
}
/// Calls `get_slice` on the inner tensor.
pub fn get_slice(&self, indices: &[Range<usize>]) -> Result<ValTensor<F>, TensorError> {
if indices.iter().map(|x| x.end - x.start).collect::<Vec<_>>() == self.dims() {
@@ -753,43 +844,104 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
Ok(())
}
/// gets constants
pub fn get_const_zero_indices(&self) -> Result<Vec<usize>, TensorError> {
/// Calls `get_every_n` on the inner [Tensor].
pub fn get_every_n(&mut self, n: usize) -> Result<(), TensorError> {
match self {
ValTensor::Value { inner: v, .. } => {
let mut indices = vec![];
for (i, e) in v.iter().enumerate() {
if let ValType::Constant(r) = e {
if *r == F::ZERO {
indices.push(i);
}
} else if let ValType::AssignedConstant(_, r) = e {
if *r == F::ZERO {
indices.push(i);
}
}
}
Ok(indices)
ValTensor::Value {
inner: v, dims: d, ..
} => {
*v = v.get_every_n(n)?;
*d = v.dims().to_vec();
}
ValTensor::Instance { .. } => Ok(vec![]),
ValTensor::Instance { .. } => {
return Err(TensorError::WrongMethod);
}
}
Ok(())
}
/// Calls `exclude_every_n` on the inner [Tensor].
pub fn exclude_every_n(&mut self, n: usize) -> Result<(), TensorError> {
match self {
ValTensor::Value {
inner: v, dims: d, ..
} => {
*v = v.exclude_every_n(n)?;
*d = v.dims().to_vec();
}
ValTensor::Instance { .. } => {
return Err(TensorError::WrongMethod);
}
}
Ok(())
}
/// removes constant zero values
pub fn remove_const_zero_values(&mut self) {
match self {
ValTensor::Value { inner: v, dims, .. } => {
*v = v
.clone()
.into_par_iter()
.filter_map(|e| {
if let ValType::Constant(r) = e {
if r == F::ZERO {
return None;
}
} else if let ValType::AssignedConstant(_, r) = e {
if r == F::ZERO {
return None;
}
}
Some(e)
})
.collect();
*dims = v.dims().to_vec();
}
ValTensor::Instance { .. } => {}
}
}
/// gets constants
pub fn get_const_indices(&self) -> Result<Vec<usize>, TensorError> {
pub fn get_const_zero_indices(&self) -> Vec<usize> {
match self {
ValTensor::Value { inner: v, .. } => {
let mut indices = vec![];
for (i, e) in v.iter().enumerate() {
if let ValType::Constant(_) = e {
indices.push(i);
} else if let ValType::AssignedConstant(_, _) = e {
indices.push(i);
ValTensor::Value { inner: v, .. } => v
.par_iter()
.enumerate()
.filter_map(|(i, e)| {
if let ValType::Constant(r) = e {
if *r == F::ZERO {
return Some(i);
}
} else if let ValType::AssignedConstant(_, r) = e {
if *r == F::ZERO {
return Some(i);
}
}
}
Ok(indices)
}
ValTensor::Instance { .. } => Ok(vec![]),
None
})
.collect(),
ValTensor::Instance { .. } => vec![],
}
}
/// gets constants
pub fn get_const_indices(&self) -> Vec<usize> {
match self {
ValTensor::Value { inner: v, .. } => v
.par_iter()
.enumerate()
.filter_map(|(i, e)| {
if let ValType::Constant(_) = e {
Some(i)
} else if let ValType::AssignedConstant(_, _) = e {
Some(i)
} else {
None
}
})
.collect(),
ValTensor::Instance { .. } => vec![],
}
}
@@ -952,25 +1104,22 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
}
/// A [String] representation of the [ValTensor] for display, for example in showing intermediate values in a computational graph.
pub fn show(&self) -> String {
match self.clone() {
ValTensor::Value {
inner: v, dims: _, ..
} => {
let r: Tensor<i32> = v.map(|x| x.into());
if r.len() > 10 {
let start = r[..5].to_vec();
let end = r[r.len() - 5..].to_vec();
// print the two split by ... in the middle
format!(
"[{} ... {}]",
start.iter().map(|x| format!("{}", x)).join(", "),
end.iter().map(|x| format!("{}", x)).join(", ")
)
} else {
format!("{:?}", r)
}
}
_ => "ValTensor not PrevAssigned".into(),
let r = match self.int_evals() {
Ok(v) => v,
Err(_) => return "ValTensor not PrevAssigned".into(),
};
if r.len() > 10 {
let start = r[..5].to_vec();
let end = r[r.len() - 5..].to_vec();
// print the two split by ... in the middle
format!(
"[{} ... {}]",
start.iter().map(|x| format!("{}", x)).join(", "),
end.iter().map(|x| format!("{}", x)).join(", ")
)
} else {
format!("{:?}", r)
}
}
}

View File

@@ -319,7 +319,7 @@ impl VarTensor {
region: &mut Region<F>,
offset: usize,
values: &ValTensor<F>,
omissions: &HashSet<&usize>,
omissions: &HashSet<usize>,
constants: &mut ConstantsMap<F>,
) -> Result<ValTensor<F>, halo2_proofs::plonk::Error> {
let mut assigned_coord = 0;
@@ -368,7 +368,7 @@ impl VarTensor {
.sum::<usize>();
let dims = &dims[*idx];
// this should never ever fail
let t: Tensor<i32> = Tensor::new(None, dims).unwrap();
let t: Tensor<IntegerRep> = Tensor::new(None, dims).unwrap();
Ok(t.enum_map(|coord, _| {
let (x, y, z) = self.cartesian_coord(offset + coord);
region.assign_advice_from_instance(
@@ -396,6 +396,53 @@ impl VarTensor {
Ok(res)
}
/// Helper function to get the number of cells to flush (skip) so that the values do not straddle a column boundary
pub fn get_column_flush<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&self,
offset: usize,
values: &ValTensor<F>,
) -> Result<usize, halo2_proofs::plonk::Error> {
if values.len() > self.col_size() {
error!("Values are too large for the column");
return Err(halo2_proofs::plonk::Error::Synthesis);
}
// this can only be called on columns that have a single inner column
if self.num_inner_cols() != 1 {
error!("This function can only be called on columns with a single inner column");
return Err(halo2_proofs::plonk::Error::Synthesis);
}
// check if the values fit in the remaining space of the column
let current_cartesian = self.cartesian_coord(offset);
let final_cartesian = self.cartesian_coord(offset + values.len());
let mut flush_len = 0;
if current_cartesian.0 != final_cartesian.0 {
debug!("Values overflow the column, flushing to next column");
// diff is the number of values that overflow the column
flush_len += self.col_size() - current_cartesian.2;
}
Ok(flush_len)
}
/// Assigns a [ValTensor] to the columns of the inner tensor, keeping the values inside a single column rather than letting them wrap.
/// For instance, if we are assigning 10 values starting at index 18 of a column of length 20, we skip the last 2 rows of the current column and start from the beginning of the next column.
pub fn assign_exact_column<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&self,
region: &mut Region<F>,
offset: usize,
values: &ValTensor<F>,
constants: &mut ConstantsMap<F>,
) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
let flush_len = self.get_column_flush(offset, values)?;
let assigned_vals = self.assign(region, offset + flush_len, values, constants)?;
Ok((assigned_vals, flush_len))
}
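// --- Editor's sketch (not part of the diff): the flush computation above, reduced to
// plain indices under the single-inner-column assumption that `get_column_flush`
// enforces. With columns of length 20, assigning 10 values starting at offset 18
// would straddle a column boundary, so the 2 remaining cells are flushed and the
// assignment begins at the top of the next column.
fn sketch_column_flush(offset: usize, len: usize, col_size: usize) -> usize {
    let start_col = offset / col_size;
    let end_col = (offset + len) / col_size;
    if start_col != end_col {
        col_size - offset % col_size
    } else {
        0
    }
}

fn main() {
    assert_eq!(sketch_column_flush(18, 10, 20), 2);
    assert_eq!(sketch_column_flush(0, 10, 20), 0); // already fits, nothing to flush
}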
/// Assigns specific values (`ValTensor`) to the columns of the inner tensor but allows for column wrapping for accumulated operations.
/// Duplication occurs by copying the last cell of the column to the first cell of the next column and creating a copy constraint between the two.
pub fn dummy_assign_with_duplication<
@@ -497,7 +544,7 @@ impl VarTensor {
let (x, y, z) = self.cartesian_coord(offset + coord * step);
if matches!(check_mode, CheckMode::SAFE) && coord > 0 && z == 0 && y == 0 {
// assert that duplication occurred correctly
assert_eq!(Into::<i32>::into(k.clone()), Into::<i32>::into(v[coord - 1].clone()));
assert_eq!(Into::<IntegerRep>::into(k.clone()), Into::<IntegerRep>::into(v[coord - 1].clone()));
};
let cell = self.assign_value(region, offset, k.clone(), coord * step, constants)?;
@@ -533,13 +580,14 @@ impl VarTensor {
if matches!(check_mode, CheckMode::SAFE) {
// during key generation this will be 0 so we use this as a flag to check
// TODO: this isn't very safe and would be better to get the phase directly
let is_assigned = !Into::<Tensor<i32>>::into(res.clone().get_inner().unwrap())
let res_evals = res.int_evals().unwrap();
let is_assigned = res_evals
.iter()
.all(|&x| x == 0);
if is_assigned {
if !is_assigned {
assert_eq!(
Into::<Tensor<i32>>::into(values.get_inner().unwrap()),
Into::<Tensor<i32>>::into(res.get_inner().unwrap())
values.int_evals().unwrap(),
res_evals
)};
}

View File

@@ -1,774 +0,0 @@
use crate::circuit::modules::polycommit::PolyCommitChip;
use crate::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use crate::circuit::modules::poseidon::PoseidonChip;
use crate::circuit::modules::Module;
use crate::fieldutils::felt_to_i64;
use crate::fieldutils::i64_to_felt;
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::quantize_float;
use crate::graph::scale_to_multiplier;
use crate::graph::{GraphCircuit, GraphSettings};
use crate::pfsys::create_proof_circuit;
use crate::pfsys::evm::aggregation_kzg::AggregationCircuit;
use crate::pfsys::evm::aggregation_kzg::PoseidonTranscript;
use crate::pfsys::verify_proof_circuit;
use crate::pfsys::TranscriptType;
use crate::tensor::TensorType;
use crate::CheckMode;
use crate::Commitments;
use console_error_panic_hook;
use halo2_proofs::plonk::*;
use halo2_proofs::poly::commitment::{CommitmentScheme, ParamsProver};
use halo2_proofs::poly::ipa::multiopen::{ProverIPA, VerifierIPA};
use halo2_proofs::poly::ipa::{
commitment::{IPACommitmentScheme, ParamsIPA},
strategy::SingleStrategy as IPASingleStrategy,
};
use halo2_proofs::poly::kzg::multiopen::ProverSHPLONK;
use halo2_proofs::poly::kzg::multiopen::VerifierSHPLONK;
use halo2_proofs::poly::kzg::{
commitment::{KZGCommitmentScheme, ParamsKZG},
strategy::SingleStrategy as KZGSingleStrategy,
};
use halo2_proofs::poly::VerificationStrategy;
use halo2_solidity_verifier::encode_calldata;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
use halo2curves::ff::{FromUniformBytes, PrimeField};
use snark_verifier::loader::native::NativeLoader;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::str::FromStr;
use wasm_bindgen::prelude::*;
use wasm_bindgen_console_logger::DEFAULT_LOGGER;
#[cfg(feature = "web")]
pub use wasm_bindgen_rayon::init_thread_pool;
#[wasm_bindgen]
/// Initialize logger for wasm
pub fn init_logger() {
log::set_logger(&DEFAULT_LOGGER).unwrap();
}
#[wasm_bindgen]
/// Initialize panic hook for wasm
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
/// Wrapper around the halo2 encode call data method
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn encodeVerifierCalldata(
proof: wasm_bindgen::Clamped<Vec<u8>>,
vk_address: Option<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
let snark: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
let vk_address: Option<[u8; 20]> = if let Some(vk_address) = vk_address {
let array: [u8; 20] = serde_json::from_slice(&vk_address[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize vk address: {}", e)))?;
Some(array)
} else {
None
};
let flattened_instances = snark.instances.into_iter().flatten();
let encoded = encode_calldata(
vk_address,
&snark.proof,
&flattened_instances.collect::<Vec<_>>(),
);
Ok(encoded)
}
/// Converts a hex string to a byte array
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToBigEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(format!("{:?}", felt))
}
/// Converts a felt to a little endian string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToLittleEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
let repr = serde_json::to_string(&felt).unwrap();
let b: String = serde_json::from_str(&repr).unwrap();
Ok(b)
}
/// Converts a hex string to a byte array
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToInt(
array: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&felt_to_i64(felt))
.map_err(|e| JsError::new(&format!("Failed to serialize integer: {}", e)))?,
))
}
/// Converts felts to a floating point element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToFloat(
array: wasm_bindgen::Clamped<Vec<u8>>,
scale: crate::Scale,
) -> Result<f64, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
let int_rep = felt_to_i64(felt);
let multiplier = scale_to_multiplier(scale);
Ok(int_rep as f64 / multiplier)
}
/// Converts a floating point number to a hex string representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn floatToFelt(
input: f64,
scale: crate::Scale,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let int_rep =
quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
let felt = i64_to_felt(int_rep);
let vec = crate::pfsys::field_to_string::<halo2curves::bn256::Fr>(&felt);
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&vec).map_err(
|e| JsError::new(&format!("Failed to serialize a float to felt{}", e)),
)?))
}
/// Generate a kzg commitment.
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn kzgCommit(
message: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let message: Vec<Fr> = serde_json::from_slice(&message[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
let mut reader = std::io::BufReader::new(&params_ser[..]);
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
let mut reader = std::io::BufReader::new(&vk[..]);
let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
let output = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
message,
(vk.cs().blinding_factors() + 1) as u32,
&params,
);
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&output).map_err(|e| JsError::new(&format!("{}", e)))?,
))
}
/// Converts a buffer to vector of 4 u64s representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn bufferToVecOfFelt(
buffer: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
// Convert the buffer to a slice
let buffer: &[u8] = &buffer;
// Divide the buffer into chunks of 64 bytes
let chunks = buffer.chunks_exact(16);
// Get the remainder
let remainder = chunks.remainder();
// Add 0s to the remainder to make it 64 bytes
let mut remainder = remainder.to_vec();
// Collect chunks into a Vec<[u8; 16]>.
let chunks: Result<Vec<[u8; 16]>, JsError> = chunks
.map(|slice| {
let array: [u8; 16] = slice
.try_into()
.map_err(|_| JsError::new("failed to slice input chunks"))?;
Ok(array)
})
.collect();
let mut chunks = chunks?;
if remainder.len() != 0 {
remainder.resize(16, 0);
// Convert the Vec<u8> to [u8; 16]
let remainder_array: [u8; 16] = remainder
.try_into()
.map_err(|_| JsError::new("failed to slice remainder"))?;
// append the remainder to the chunks
chunks.push(remainder_array);
}
// Convert each chunk to a field element
let field_elements: Vec<Fr> = chunks
.iter()
.map(|x| PrimeField::from_u128(u8_array_to_u128_le(*x)))
.collect();
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&field_elements)
.map_err(|e| JsError::new(&format!("Failed to serialize field elements: {}", e)))?,
))
}
/// Generate a poseidon hash in browser. Input message
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn poseidonHash(
message: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let message: Vec<Fr> = serde_json::from_slice(&message[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
let output =
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
message.clone(),
)
.map_err(|e| JsError::new(&format!("{}", e)))?;
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&output).map_err(
|e| JsError::new(&format!("Failed to serialize poseidon hash output: {}", e)),
)?))
}
/// Generate a witness from a JSON input and a compiled circuit (the circuit settings are embedded in the compiled circuit).
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genWitness(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
input: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
let mut circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize compiled model: {}", e)))?;
let input: crate::graph::input::GraphData = serde_json::from_slice(&input[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize input: {}", e)))?;
let mut input = circuit
.load_graph_input(&input)
.map_err(|e| JsError::new(&format!("{}", e)))?;
let witness = circuit
.forward::<KZGCommitmentScheme<Bn256>>(&mut input, None, None, false, false)
.map_err(|e| JsError::new(&format!("{}", e)))?;
serde_json::to_vec(&witness)
.map_err(|e| JsError::new(&format!("Failed to serialize witness: {}", e)))
}
/// Generate verifying key in browser
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genVk(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
compress_selectors: bool,
) -> Result<Vec<u8>, JsError> {
// Read in kzg params
let mut reader = std::io::BufReader::new(&params_ser[..]);
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
// Read in compiled circuit
let circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize compiled model: {}", e)))?;
// Create verifying key
let vk = create_vk_wasm::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&params,
compress_selectors,
)
.map_err(Box::<dyn std::error::Error>::from)
.map_err(|e| JsError::new(&format!("Failed to create verifying key: {}", e)))?;
let mut serialized_vk = Vec::new();
vk.write(&mut serialized_vk, halo2_proofs::SerdeFormat::RawBytes)
.map_err(|e| JsError::new(&format!("Failed to serialize vk: {}", e)))?;
Ok(serialized_vk)
}
/// Generate proving key in browser
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genPk(
vk: wasm_bindgen::Clamped<Vec<u8>>,
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
// Read in kzg params
let mut reader = std::io::BufReader::new(&params_ser[..]);
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
// Read in compiled circuit
let circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize compiled model: {}", e)))?;
// Read in verifying key
let mut reader = std::io::BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit.settings().clone(),
)
.map_err(|e| JsError::new(&format!("Failed to deserialize verifying key: {}", e)))?;
// Create proving key
let pk = create_pk_wasm::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk, &circuit, &params)
.map_err(Box::<dyn std::error::Error>::from)
.map_err(|e| JsError::new(&format!("Failed to create proving key: {}", e)))?;
let mut serialized_pk = Vec::new();
pk.write(&mut serialized_pk, halo2_proofs::SerdeFormat::RawBytes)
.map_err(|e| JsError::new(&format!("Failed to serialize pk: {}", e)))?;
Ok(serialized_pk)
}
/// Verify proof in browser using wasm
#[wasm_bindgen]
pub fn verify(
proof_js: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
srs: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof_js[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
let mut reader = std::io::BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings.clone(),
)
.map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
let orig_n = 1 << circuit_settings.run_args.logrows;
let commitment = circuit_settings.run_args.commitment.into();
let mut reader = std::io::BufReader::new(&srs[..]);
let result = match commitment {
Commitments::KZG => {
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
let strategy = KZGSingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
Commitments::IPA => {
let params: ParamsIPA<_> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
let strategy = IPASingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
};
match result {
Ok(_) => Ok(true),
Err(e) => Err(JsError::new(&format!("{}", e))),
}
}
/// Verify aggregate proof in browser using wasm
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn verifyAggr(
proof_js: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
logrows: u64,
srs: wasm_bindgen::Clamped<Vec<u8>>,
commitment: &str,
) -> Result<bool, JsError> {
let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof_js[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
let mut reader = std::io::BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, AggregationCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
(),
)
.map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
let commit = Commitments::from_str(commitment).map_err(|e| JsError::new(&format!("{}", e)))?;
let orig_n = 1 << logrows;
let mut reader = std::io::BufReader::new(&srs[..]);
let result = match commit {
Commitments::KZG => {
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
let strategy = KZGSingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
Commitments::IPA => {
let params: ParamsIPA<_> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
let strategy = IPASingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
};
match result {
Ok(_) => Ok(true),
Err(e) => Err(JsError::new(&format!("{}", e))),
}
}
/// Prove in browser using wasm
#[wasm_bindgen]
pub fn prove(
witness: wasm_bindgen::Clamped<Vec<u8>>,
pk: wasm_bindgen::Clamped<Vec<u8>>,
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
srs: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
#[cfg(feature = "det-prove")]
log::set_max_level(log::LevelFilter::Debug);
#[cfg(not(feature = "det-prove"))]
log::set_max_level(log::LevelFilter::Info);
// read in circuit
let mut circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize circuit: {}", e)))?;
// read in model input
let data: crate::graph::GraphWitness = serde_json::from_slice(&witness[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize witness: {}", e)))?;
// read in proving key
let mut reader = std::io::BufReader::new(&pk[..]);
let pk = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit.settings().clone(),
)
.map_err(|e| JsError::new(&format!("Failed to deserialize proving key: {}", e)))?;
// prep public inputs
circuit
.load_graph_witness(&data)
.map_err(|e| JsError::new(&format!("{}", e)))?;
let public_inputs = circuit
.prepare_public_inputs(&data)
.map_err(|e| JsError::new(&format!("{}", e)))?;
let proof_split_commits: Option<crate::pfsys::ProofSplitCommit> = data.into();
// read in kzg params
let mut reader = std::io::BufReader::new(&srs[..]);
let commitment = circuit.settings().run_args.commitment.into();
// create the proof (CheckMode::UNSAFE skips the additional witness sanity checks)
let proof = match commitment {
Commitments::KZG => {
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize srs: {}", e)))?;
create_proof_circuit::<
KZGCommitmentScheme<Bn256>,
_,
ProverSHPLONK<_>,
VerifierSHPLONK<_>,
KZGSingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit,
vec![public_inputs],
&params,
&pk,
CheckMode::UNSAFE,
crate::Commitments::KZG,
TranscriptType::EVM,
proof_split_commits,
None,
)
}
Commitments::IPA => {
let params: ParamsIPA<_> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize srs: {}", e)))?;
create_proof_circuit::<
IPACommitmentScheme<G1Affine>,
_,
ProverIPA<_>,
VerifierIPA<_>,
IPASingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit,
vec![public_inputs],
&params,
&pk,
CheckMode::UNSAFE,
crate::Commitments::IPA,
TranscriptType::EVM,
proof_split_commits,
None,
)
}
}
.map_err(|e| JsError::new(&format!("{}", e)))?;
Ok(serde_json::to_string(&proof)
.map_err(|e| JsError::new(&format!("{}", e)))?
.into_bytes())
}
// VALIDATION FUNCTIONS
/// Witness file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn witnessValidation(witness: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
let _: crate::graph::GraphWitness = serde_json::from_slice(&witness[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize witness: {}", e)))?;
Ok(true)
}
/// Compiled circuit validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn compiledCircuitValidation(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
let _: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize compiled circuit: {}", e)))?;
Ok(true)
}
/// Input file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn inputValidation(input: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
let _: crate::graph::input::GraphData = serde_json::from_slice(&input[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize input: {}", e)))?;
Ok(true)
}
/// Proof file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn proofValidation(proof: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
let _: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
Ok(true)
}
/// Vk file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn vkValidation(
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
let mut reader = std::io::BufReader::new(&vk[..]);
let _ = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
Ok(true)
}
/// Pk file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn pkValidation(
pk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
let mut reader = std::io::BufReader::new(&pk[..]);
let _ = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| JsError::new(&format!("Failed to deserialize proving key: {}", e)))?;
Ok(true)
}
/// Settings file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn settingsValidation(settings: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
let _: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
Ok(true)
}
/// Srs file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn srsValidation(srs: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
let mut reader = std::io::BufReader::new(&srs[..]);
let _: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
Ok(true)
}
// HELPER FUNCTIONS
/// Creates a [VerifyingKey] for a [GraphCircuit] (`circuit`) with specific [CommitmentScheme] parameters (`params`) for the WASM target
#[cfg(target_arch = "wasm32")]
pub fn create_vk_wasm<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
circuit: &C,
params: &'_ Scheme::ParamsProver,
compress_selectors: bool,
) -> Result<VerifyingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
<Scheme as CommitmentScheme>::Scalar: FromUniformBytes<64>,
{
// Key generation uses a witness-free copy of the circuit
let empty_circuit = <C as Circuit<F>>::without_witnesses(circuit);
// Initialize the verifying key
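// `compress_selectors` tells halo2 whether to fold binary selector columns into fewer fixed columns, which changes the key (and circuit) layout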
let vk = keygen_vk_custom(params, &empty_circuit, compress_selectors)?;
Ok(vk)
}
/// Creates a [ProvingKey] from a [VerifyingKey] for a [GraphCircuit] (`circuit`) with specific [CommitmentScheme] parameters (`params`) for the WASM target
#[cfg(target_arch = "wasm32")]
pub fn create_pk_wasm<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
vk: VerifyingKey<Scheme::Curve>,
circuit: &C,
params: &'_ Scheme::ParamsProver,
) -> Result<ProvingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
<Scheme as CommitmentScheme>::Scalar: FromUniformBytes<64>,
{
// Key generation uses a witness-free copy of the circuit
let empty_circuit = <C as Circuit<F>>::without_witnesses(circuit);
// Initialize the proving key
let pk = keygen_pk(params, vk, &empty_circuit)?;
Ok(pk)
}
/// Converts a 16-byte little-endian array into a `u128`.
pub fn u8_array_to_u128_le(arr: [u8; 16]) -> u128 {
let mut n: u128 = 0;
for &b in arr.iter().rev() {
n <<= 8;
n |= b as u128;
}
n
}
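// A minimal, test-only sketch (the module name `u128_le_sketch` is illustrative) of the byte
// ordering above: the loop folds the array from the last byte down to the first, so index 0
// ends up as the least significant byte.
#[cfg(test)]
mod u128_le_sketch {
    use super::u8_array_to_u128_le;

    #[test]
    fn index_zero_is_least_significant() {
        let mut bytes = [0u8; 16];
        bytes[0] = 0x01;
        bytes[15] = 0x02;
        assert_eq!(u8_array_to_u128_le(bytes), (2u128 << 120) | 1);
    }
}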

tests/assets/pk.key: new binary file (not shown)
tests/assets/proof.json: new file (diff suppressed because one or more lines are too long)
Some files were not shown because too many files have changed in this diff.