Compare commits

...

46 Commits

Author SHA1 Message Date
dante d51cba589a feat: dynamic lookup overflow (#853) 2024-10-23 23:12:00 -04:00
Artem 1cb1b6e143 feat: iOS Bindings (#846) 2024-10-23 09:58:55 -04:00
Ethan Cemer d2b683b527 feat: reusable verifier (#821) 2024-10-22 09:10:24 -04:00
Jseam a06b09ef1f docs: add batch demo (#849) 2024-10-15 08:36:24 +01:00
dante e5aa48fbd6 chore: support all padding types (#848) 2024-10-05 10:43:12 -04:00
dante 64fbc8a1c9 refactor: lookup-less sign relu abs (#845) 2024-09-17 11:58:58 -04:00
dante c9f9d17f16 chore: optimized release builds by default (#844) 2024-09-05 15:52:03 +02:00
Ethan Cemer b49b0487c4 fix: remove console import from index.ts in in-browser-evm-verifier package (#841) 2024-08-30 18:47:59 +01:00
dante 61b7a8e9b5 chore: perf updates (#838) 2024-08-27 09:45:40 -04:00
dante 5dbc7d5176 chore: cache lookup tables (#835) 2024-08-19 00:24:53 -04:00
dante ada45a3197 chore: swap h2 collections hash for rustc hasher (#832) 2024-08-04 17:28:36 -04:00
dante 616b421967 fix: bump compiler to latest to accomodate latest serde diagnostic (#830) 2024-07-25 07:56:21 -04:00
dante f64f0ecd23 fix: instance order when using processed params (#829) 2024-07-24 07:58:46 -04:00
dante 5be12b7a54 fix: num groups for conv operations should be specified at load time (#828) 2024-07-18 09:58:41 -04:00
dante 2fd877c716 chore: small worm example (#568) 2024-07-15 09:20:37 -04:00
dante 8197340985 chore: const filtering optimizations (#825) 2024-07-12 12:37:02 +01:00
dante 6855ea1947 feat: parallel polynomial reads in halo2 (#826) 2024-07-12 00:33:14 +01:00
dante 2ca57bde2c chore: bump tract (#823) 2024-06-29 01:53:18 +01:00
Ethan Cemer 390de88194 feat: create_evm_vk python bindings (#818) 2024-06-23 22:21:59 -04:00
dante cd91f0af26 chore: allow for float lookup safety margins (#817) 2024-06-19 09:12:01 -04:00
dante 4771192823 fix: more verbose io / rw errors (#815) 2024-06-14 12:31:50 -04:00
dante a863ccc868 chore: update cmd feature flag (#814) 2024-06-11 16:07:49 -04:00
Ethan Cemer 8e6ccc863d feat: all file source kzg commit DA (#812) 2024-06-11 09:32:54 -04:00
dante 00d6873f9a fix: should update using bash when possible (#813) 2024-06-10 12:13:59 -04:00
dante c97ff84198 refactor: rm boxed errors (opaque) (#810) 2024-06-08 22:41:47 -04:00
dante f5f8ef56f7 chore: ezkl self update (#809) 2024-06-07 10:30:21 -04:00
Ethan Cemer 685487c853 feat: DA swap proof commitments (#807) 2024-06-07 10:19:38 -04:00
dante 33d41c7e49 fix: calldata cmd 2024-05-31 13:19:33 -04:00
dante e04c959662 fix: sequential dependence between JS releases (#806) 2024-05-31 08:34:51 -04:00
dante 27b1f2e9d4 fix: move banner to command loop 2024-05-30 17:01:40 -04:00
dante 4a172877af feat: add autocompletion tooling to cli (#805) 2024-05-30 16:44:24 -04:00
dante 5a8498894d feat: create encode-evm-calldata in cli and python (#803) 2024-05-29 21:03:16 -04:00
Snoppy 095c0ca5b4 chore: fix typos (#802) 2024-05-29 11:32:11 -04:00
dante 3fa482c9ef fix: remove error messages in calibration loop (confusing) (#800) 2024-05-22 09:32:52 +01:00
dante 6be3b1d663 chore: update alloy (#798) 2024-05-16 12:10:27 +09:00
dante d5a1d1439c fix: elif syntax in install script 2024-05-14 03:31:26 +09:00
Ethan Cemer ff8fd01f86 fix: patch invalid ${{ github.ref_name#v }} syntax on verify release script (#791) 2024-05-14 02:25:12 +09:00
dante e9020f942e fix: python build matrix (#797) 2024-05-13 13:29:51 +09:00
dante e7f54cb6ac fix: windows build (#796) 2024-05-13 12:47:36 +09:00
dante ed65e8c090 Revert "fix: revert maturin version (#795)" (This reverts commit d9f2adad99.) 2024-05-13 12:41:02 +09:00
dante d9f2adad99 fix: revert maturin version (#795) 2024-05-13 12:21:35 +09:00
dante 5125aaa090 chore: add aarch64 linux to release pipeline (#788) 2024-05-13 11:29:49 +09:00
Jseam f1950e6cd0 docs: add polycommit to RunArgs (#794) 2024-05-10 22:19:05 +09:00
dante 998ca22c2a chore: make most command struct args Option (#793) 2024-05-10 22:02:34 +09:00
dante 5c574adc31 chore: logistic regression example (#792) 2024-05-08 20:30:13 +09:00
dante 749e0ba652 chore: update h2 solidity verifier (#787) 2024-05-03 01:25:14 +01:00
166 changed files with 13754 additions and 7723 deletions


@@ -22,15 +22,18 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Install binaryen
run: |
set -e
@@ -178,3 +181,58 @@ jobs:
npm publish
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
in-browser-evm-ver-publish:
name: publish-in-browser-evm-verifier-package
needs: [publish-wasm-bindings]
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
steps:
- uses: actions/checkout@v4
- name: Update version in package.json
shell: bash
env:
RELEASE_TAG: ${{ github.ref_name }}
run: |
sed -i "s|\"version\": \".*\"|\"version\": \"${{ github.ref_name }}\"|" in-browser-evm-verifier/package.json
- name: Prepare tag and fetch package integrity
run: |
CLEANED_TAG=${{ github.ref_name }} # Get the tag from ref_name
CLEANED_TAG="${CLEANED_TAG#v}" # Remove leading 'v'
echo "CLEANED_TAG=${CLEANED_TAG}" >> $GITHUB_ENV # Set it as an environment variable for later steps
ENGINE_INTEGRITY=$(npm view @ezkljs/engine@$CLEANED_TAG dist.integrity)
echo "ENGINE_INTEGRITY=$ENGINE_INTEGRITY" >> $GITHUB_ENV
- name: Update @ezkljs/engine version in package.json
shell: bash
env:
RELEASE_TAG: ${{ github.ref_name }}
run: |
sed -i "s|\"@ezkljs/engine\": \".*\"|\"@ezkljs/engine\": \"$CLEANED_TAG\"|" in-browser-evm-verifier/package.json
- name: Update the engine import in in-browser-evm-verifier to use @ezkljs/engine package instead of the local one;
run: |
sed -i "s|import { encodeVerifierCalldata } from '../nodejs/ezkl';|import { encodeVerifierCalldata } from '@ezkljs/engine';|" in-browser-evm-verifier/src/index.ts
- name: Update pnpm-lock.yaml versions and integrity
run: |
awk -v integrity="$ENGINE_INTEGRITY" -v tag="$CLEANED_TAG" '
NR==30{$0=" specifier: \"" tag "\""}
NR==31{$0=" version: \"" tag "\""}
NR==400{$0=" /@ezkljs/engine@" tag ":"}
NR==401{$0=" resolution: {integrity: \"" integrity "\"}"} 1' in-browser-evm-verifier/pnpm-lock.yaml > temp.yaml && mv temp.yaml in-browser-evm-verifier/pnpm-lock.yaml
- name: Use pnpm 8
uses: pnpm/action-setup@v2
with:
version: 8
- name: Set up Node.js
uses: actions/setup-node@v3
with:
node-version: "18.12.1"
registry-url: "https://registry.npmjs.org"
- name: Publish to npm
run: |
cd in-browser-evm-verifier
pnpm install --frozen-lockfile
pnpm run build
pnpm publish --no-git-checks
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
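
The `CLEANED_TAG` step above uses bash parameter expansion (`${CLEANED_TAG#v}`) to strip the leading `v` from the release tag before pinning the `@ezkljs/engine` version. A minimal sketch of the same transformation in Rust, purely as illustration:

```rust
/// Strip a leading 'v' from a release tag: "v12.0.1" -> "12.0.1".
/// Tags without the prefix pass through unchanged, matching ${TAG#v}.
fn clean_tag(tag: &str) -> &str {
    tag.strip_prefix('v').unwrap_or(tag)
}

fn main() {
    assert_eq!(clean_tag("v12.0.1"), "12.0.1");
    assert_eq!(clean_tag("12.0.1"), "12.0.1");
}
```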


@@ -11,7 +11,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: nanoGPT Mock


@@ -40,7 +40,7 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-06-27
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
@@ -50,6 +50,7 @@ jobs:
target: ${{ matrix.target }}
args: --release --out dist --features python-bindings
- name: Install built wheel
if: matrix.target == 'universal2-apple-darwin'
run: |
pip install ezkl --no-index --find-links dist --force-reinstall
python -c "import ezkl"
@@ -85,7 +86,7 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-06-27
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
@@ -110,7 +111,7 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
strategy:
matrix:
target: [x86_64, i686]
target: [x86_64]
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v4


@@ -45,7 +45,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Checkout repo
@@ -102,28 +102,32 @@ jobs:
PCRE2_SYS_STATIC: 1
strategy:
matrix:
build: [windows-msvc, macos, macos-aarch64, linux-musl, linux-gnu]
build: [windows-msvc, macos, macos-aarch64, linux-musl, linux-gnu, linux-aarch64]
include:
- build: windows-msvc
os: windows-latest
rust: nightly-2023-06-27
rust: nightly-2024-07-18
target: x86_64-pc-windows-msvc
- build: macos
os: macos-13
rust: nightly-2023-06-27
rust: nightly-2024-07-18
target: x86_64-apple-darwin
- build: macos-aarch64
os: macos-13
rust: nightly-2023-06-27
rust: nightly-2024-07-18
target: aarch64-apple-darwin
- build: linux-musl
os: ubuntu-22.04
rust: nightly-2023-06-27
rust: nightly-2024-07-18
target: x86_64-unknown-linux-musl
- build: linux-gnu
os: ubuntu-22.04
rust: nightly-2023-06-27
rust: nightly-2024-07-18
target: x86_64-unknown-linux-gnu
- build: linux-aarch64
os: ubuntu-22.04
rust: nightly-2024-07-18
target: aarch64-unknown-linux-gnu
steps:
- name: Checkout repo
@@ -177,11 +181,16 @@ jobs:
echo "target flag is: ${{ env.TARGET_FLAGS }}"
echo "target dir is: ${{ env.TARGET_DIR }}"
- name: Build release binary
- name: Build release binary (no asm)
if: matrix.build != 'linux-gnu'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry
- name: Build release binary (asm)
if: matrix.build == 'linux-gnu'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features asm
- name: Strip release binary
if: matrix.build != 'windows-msvc'
if: matrix.build != 'windows-msvc' && matrix.build != 'linux-aarch64'
run: strip "target/${{ matrix.target }}/release/ezkl"
- name: Strip release binary (Windows)


@@ -26,7 +26,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Build
@@ -38,7 +38,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Docs
@@ -50,7 +50,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -65,40 +65,40 @@ jobs:
- name: Library tests (original lookup)
run: cargo nextest run --lib --verbose --no-default-features --features ezkl
ultra-overflow-tests-gpu:
runs-on: GPU
env:
ENABLE_ICICLE_GPU: true
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- uses: mwilliamson/setup-wasmtime-action@v2
with:
wasmtime-version: "3.0.1"
- name: Install wasm32-wasi
run: rustup target add wasm32-wasi
- name: Install cargo-wasi
run: cargo install cargo-wasi
# - name: Matmul overflow (wasi)
# run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# - name: Conv overflow (wasi)
# run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
- name: lookup overflow
run: cargo nextest run --release lookup_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Matmul overflow
run: RUST_LOG=debug cargo nextest run matmul_col_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Conv overflow
run: RUST_LOG=debug cargo nextest run conv_col_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Conv + relu overflow
run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture --features icicle -- --include-ignored
# ultra-overflow-tests-gpu:
# runs-on: GPU
# env:
# ENABLE_ICICLE_GPU: true
# steps:
# - uses: actions/checkout@v4
# - uses: actions-rs/toolchain@v1
# with:
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - uses: baptiste0928/cargo-install@v1
# with:
# crate: cargo-nextest
# locked: true
# - uses: mwilliamson/setup-wasmtime-action@v2
# with:
# wasmtime-version: "3.0.1"
# - name: Install wasm32-wasi
# run: rustup target add wasm32-wasi
# - name: Install cargo-wasi
# run: cargo install cargo-wasi
# # - name: Matmul overflow (wasi)
# # run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# # - name: Conv overflow (wasi)
# # run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
# - name: lookup overflow
# run: cargo nextest run lookup_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Matmul overflow
# run: RUST_LOG=debug cargo nextest run matmul_col_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Conv overflow
# run: RUST_LOG=debug cargo nextest run conv_col_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Conv + relu overflow
# run: cargo nextest run conv_relu_col_ultra_overflow --no-capture --features icicle -- --include-ignored
ultra-overflow-tests_og-lookup:
runs-on: non-gpu
@@ -106,7 +106,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -139,7 +139,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -172,7 +172,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -184,22 +184,24 @@ jobs:
wasm32-tests:
runs-on: ubuntu-latest
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- uses: nanasess/setup-chromedriver@v2
# with:
# chromedriver-version: "115.0.5790.102"
- name: Install wasm32-unknown-unknown
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Run wasm verifier tests
# on mac:
# AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
@@ -212,7 +214,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -229,17 +231,19 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
# - name: The Worm Mock
# run: cargo nextest run --release --verbose tests::large_mock_::large_tests_5_expects -- --include-ignored
- name: public outputs and tolerance > 0
run: cargo nextest run --release --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
- name: public outputs + batch size == 10
run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 32
run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 16
- name: kzg inputs
run: cargo nextest run --release --verbose tests::mock_kzg_input_::t --test-threads 32
- name: kzg params
@@ -258,6 +262,8 @@ jobs:
run: cargo nextest run --release --verbose tests::mock_hashed_input_::t --test-threads 32
- name: hashed params
run: cargo nextest run --release --verbose tests::mock_hashed_params_::t --test-threads 32
- name: hashed params public inputs
run: cargo nextest run --release --verbose tests::mock_hashed_params_public_inputs_::t --test-threads 32
- name: hashed outputs
run: cargo nextest run --release --verbose tests::mock_hashed_output_::t --test-threads 32
- name: hashed inputs + params + outputs
@@ -286,7 +292,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -304,7 +310,7 @@ jobs:
node-version: "18.12.1"
cache: "pnpm"
- name: "Add rust-src"
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Install dependencies for js tests and in-browser-evm-verifier package
run: |
pnpm install --frozen-lockfile
@@ -323,12 +329,12 @@ jobs:
cd in-browser-evm-verifier
pnpm build:commonjs
cd ..
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: KZG prove and verify tests (EVM + VK rendered seperately)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_render_seperately_ --test-threads 1
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: KZG prove and verify tests (EVM + reusable verifier + col-overflow)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_reusable_verifier --test-threads 1
- name: KZG prove and verify tests (EVM + kzg all)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_kzg_all_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + kzg inputs)
@@ -341,6 +347,12 @@ jobs:
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_output_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain inputs & outputs)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_input_output_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain inputs & kzg outputs + params)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_input_kzg_output_kzg_params_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain outputs & kzg inputs + params)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_output_kzg_input_kzg_params_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain all kzg)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_all_kzg_params_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + on chain inputs & outputs hashes)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_on_chain_input_output_hashed_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM)
@@ -359,15 +371,18 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- name: Use pnpm 8
uses: pnpm/action-setup@v2
@@ -425,40 +440,40 @@ jobs:
- name: KZG prove and verify tests (hashed outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed
prove-and-verify-tests-gpu:
runs-on: GPU
env:
ENABLE_ICICLE_GPU: true
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: KZG prove and verify tests (kzg outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + fixed params + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
- name: KZG prove and verify tests (public inputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
- name: KZG prove and verify tests (fixed params)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
- name: KZG prove and verify tests (hashed outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
# prove-and-verify-tests-gpu:
# runs-on: GPU
# env:
# ENABLE_ICICLE_GPU: true
# steps:
# - uses: actions/checkout@v4
# - uses: actions-rs/toolchain@v1
# with:
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - name: Add rust-src
# run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
# - uses: actions/checkout@v3
# - uses: baptiste0928/cargo-install@v1
# with:
# crate: cargo-nextest
# locked: true
# - name: KZG prove and verify tests (kzg outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + fixed params + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# - name: KZG prove and verify tests (public inputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
# - name: KZG prove and verify tests (fixed params)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
# - name: KZG prove and verify tests (hashed outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
prove-and-verify-mock-aggr-tests:
runs-on: self-hosted
@@ -467,7 +482,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -485,7 +500,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -493,7 +508,7 @@ jobs:
crate: cargo-nextest
locked: true
- name: KZG tests
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
prove-and-verify-aggr-tests:
runs-on: large-self-hosted
@@ -502,7 +517,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -519,17 +534,17 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: KZG prove and verify aggr tests
run: cargo nextest run --release --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored
@@ -540,7 +555,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -560,21 +575,21 @@ jobs:
python-version: "3.12"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Install cmake
run: sudo apt-get install -y cmake
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt;
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: Build python ezkl
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
- name: Run pytest
run: source .env/bin/activate; pytest -vv
run: source .env/bin/activate; pip install pytest-asyncio; pytest -vv
accuracy-measurement-tests:
runs-on: ubuntu-latest-32-cores
@@ -586,7 +601,7 @@ jobs:
python-version: "3.12"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -635,17 +650,17 @@ jobs:
python-version: "3.11"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
- name: Install pip
run: python -m ensurepip --upgrade
- name: Setup Virtual Env and Install python dependencies
@@ -671,3 +686,68 @@ jobs:
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
# - name: Reusable verifier tutorial
# run: source .env/bin/activate; cargo nextest run py_tests::tests::reusable_
ios-integration-tests:
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: Run ios tests
run: CARGO_BUILD_TARGET=aarch64-apple-darwin RUSTUP_TOOLCHAIN=nightly-2024-07-18-aarch64-apple-darwin cargo test --test ios_integration_tests --features ios-bindings-test --no-default-features
swift-package-tests:
runs-on: macos-latest
needs: [ios-integration-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Build EzklCoreBindings
run: CONFIGURATION=debug cargo run --bin ios_gen_bindings --features "ios-bindings uuid camino uniffi_bindgen" --no-default-features
- name: Clone ezkl-swift-package repository
run: |
git clone https://github.com/zkonduit/ezkl-swift-package.git
- name: Copy EzklCoreBindings
run: |
rm -rf ezkl-swift-package/Sources/EzklCoreBindings
cp -r build/EzklCoreBindings ezkl-swift-package/Sources/
- name: Set up Xcode environment
run: |
sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
sudo xcodebuild -license accept
- name: Run Package Tests
run: |
cd ezkl-swift-package
xcodebuild test \
-scheme EzklPackage \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-resultBundlePath ../testResults
- name: Run Example App Tests
run: |
cd ezkl-swift-package/Example
xcodebuild test \
-project Example.xcodeproj \
-scheme EzklApp \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-parallel-testing-enabled NO \
-resultBundlePath ../../exampleTestResults \
-skip-testing:EzklAppUITests/EzklAppUITests/testButtonClicksInOrder


@@ -0,0 +1,75 @@
name: Build and Publish EZKL iOS SPM package
on:
workflow_dispatch:
inputs:
tag:
description: "The tag to release"
required: true
push:
tags:
- "*"
jobs:
build-and-update:
runs-on: macos-latest
steps:
- name: Checkout EZKL
uses: actions/checkout@v3
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
- name: Build EzklCoreBindings
run: CONFIGURATION=release cargo run --bin ios_gen_bindings --features "ios-bindings uuid camino uniffi_bindgen" --no-default-features
- name: Clone ezkl-swift-package repository
run: |
git clone https://github.com/zkonduit/ezkl-swift-package.git
- name: Copy EzklCoreBindings
run: |
rm -rf ezkl-swift-package/Sources/EzklCoreBindings
cp -r build/EzklCoreBindings ezkl-swift-package/Sources/
- name: Set up Xcode environment
run: |
sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
sudo xcodebuild -license accept
- name: Run Package Tests
run: |
cd ezkl-swift-package
xcodebuild test \
-scheme EzklPackage \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-resultBundlePath ../testResults
- name: Run Example App Tests
run: |
cd ezkl-swift-package/Example
xcodebuild test \
-project Example.xcodeproj \
-scheme EzklApp \
-destination 'platform=iOS Simulator,name=iPhone 15 Pro,OS=17.5' \
-parallel-testing-enabled NO \
-resultBundlePath ../../exampleTestResults \
-skip-testing:EzklAppUITests/EzklAppUITests/testButtonClicksInOrder
- name: Commit and Push Changes to feat/ezkl-direct-integration
run: |
cd ezkl-swift-package
git config user.name "GitHub Action"
git config user.email "action@github.com"
git add Sources/EzklCoreBindings
git commit -m "Automatically updated EzklCoreBindings for EZKL"
git tag ${{ github.event.inputs.tag }}
git remote set-url origin https://zkonduit:${EZKL_PORTER_TOKEN}@github.com/zkonduit/ezkl-swift-package.git
git push origin
git push origin --tags
env:
EZKL_PORTER_TOKEN: ${{ secrets.EZKL_PORTER_TOKEN }}


@@ -1,65 +0,0 @@
name: Build and Publish EZKL npm packages (wasm bindings and in-browser evm verifier)
on:
workflow_dispatch:
inputs:
tag:
description: "The tag to release"
required: true
push:
tags:
- "*"
defaults:
run:
working-directory: .
jobs:
in-browser-evm-ver-publish:
name: publish-in-browser-evm-verifier-package
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
steps:
- uses: actions/checkout@v4
- name: Update version in package.json
shell: bash
env:
RELEASE_TAG: ${{ github.ref_name }}
run: |
sed -i "s|\"version\": \".*\"|\"version\": \"${{ github.ref_name }}\"|" in-browser-evm-verifier/package.json
- name: Update @ezkljs/engine version in package.json
shell: bash
env:
RELEASE_TAG: ${{ github.ref_name }}
run: |
sed -i "s|\"@ezkljs/engine\": \".*\"|\"@ezkljs/engine\": \"${{ github.ref_name#v }}\"|" in-browser-evm-verifier/package.json
- name: Update the engine import in in-browser-evm-verifier to use @ezkljs/engine package instead of the local one;
run: |
sed -i "s|import { encodeVerifierCalldata } from '../nodejs/ezkl';|import { encodeVerifierCalldata } from '@ezkljs/engine';|" in-browser-evm-verifier/src/index.ts
- name: Fetch integrity
run: |
ENGINE_INTEGRITY=$(npm view @ezkljs/engine@${{ github.ref_name#v }} dist.integrity)
echo "ENGINE_INTEGRITY=$ENGINE_INTEGRITY" >> $GITHUB_ENV
- name: Update pnpm-lock.yaml versions and integrity
run: |
awk -v integrity="$ENGINE_INTEGRITY" -v tag="${{ github.ref_name#v }}" '
NR==30{$0=" specifier: \"" tag "\""}
NR==31{$0=" version: \"" tag "\""}
NR==400{$0=" /@ezkljs/engine@" tag ":"}
NR==401{$0=" resolution: {integrity: \"" integrity "\"}"} 1' in-browser-evm-verifier/pnpm-lock.yaml > temp.yaml && mv temp.yaml in-browser-evm-verifier/pnpm-lock.yaml
- name: Use pnpm 8
uses: pnpm/action-setup@v2
with:
version: 8
- name: Set up Node.js
uses: actions/setup-node@v3
with:
node-version: "18.12.1"
registry-url: "https://registry.npmjs.org"
- name: Publish to npm
run: |
cd in-browser-evm-verifier
pnpm install --frozen-lockfile
pnpm run build
pnpm publish
env:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
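
A note on this deletion: GitHub Actions expression syntax does not support shell-style parameter expansion, so `${{ github.ref_name#v }}` in this file was invalid and never stripped the leading `v`; #791 in the commit list above ("patch invalid ${{ github.ref_name#v }} syntax") is the relevant fix. The job now lives in the first workflow as `in-browser-evm-ver-publish`, where a bash step computes `CLEANED_TAG="${CLEANED_TAG#v}"` before the `sed` and `awk` rewrites.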

.gitignore (vendored): 6 lines changed

@@ -46,7 +46,7 @@ var/
node_modules
/dist
timingData.json
!tests/wasm/pk.key
!tests/wasm/vk.key
!tests/assets/pk.key
!tests/assets/vk.key
docs/python/build
!tests/wasm/vk_aggr.key
!tests/assets/vk_aggr.key

Cargo.lock (generated): 2506 lines changed (diff suppressed because it is too large)


@@ -4,6 +4,7 @@ cargo-features = ["profile-rustflags"]
name = "ezkl"
version = "0.0.0"
edition = "2021"
default-run = "ezkl"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -11,89 +12,93 @@ edition = "2021"
# Name to be imported within python
# Example: import ezkl
name = "ezkl"
crate-type = ["cdylib", "rlib"]
crate-type = ["cdylib", "rlib", "staticlib"]
[dependencies]
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "9fff22c", features = [
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "b753a832e92d5c86c5c997327a9cf9de86a18851", features = [
"derive_serde",
] }
rand = { version = "0.8", default_features = false }
itertools = { version = "0.10.3", default_features = false }
clap = { version = "4.5.3", features = ["derive"] }
serde = { version = "1.0.126", features = ["derive"], optional = true }
serde_json = { version = "1.0.97", default_features = false, features = [
"float_roundtrip",
"raw_value",
], optional = true }
log = { version = "0.4.17", default_features = false, optional = true }
thiserror = { version = "1.0.38", default_features = false }
hex = { version = "0.4.3", default_features = false }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", package = "halo2_proofs", branch = "ac/cache-lookup-commitments", features = ["circuit-params"] }
rand = { version = "0.8", default-features = false }
itertools = { version = "0.10.3", default-features = false }
clap = { version = "4.5.3", features = ["derive"], optional = true }
serde = { version = "1.0.126", features = ["derive"] }
clap_complete = { version = "4.5.2", optional = true }
log = { version = "0.4.17", default-features = false }
thiserror = { version = "1.0.38", default-features = false }
hex = { version = "0.4.3", default-features = false }
halo2_wrong_ecc = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac/chunked-mv-lookup", package = "ecc" }
snark-verifier = { git = "https://github.com/zkonduit/snark-verifier", branch = "ac/chunked-mv-lookup", features = [
"derive_serde",
] }
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch = "main" }
maybe-rayon = { version = "0.1.1", default_features = false }
bincode = { version = "1.3.3", default_features = false }
ark-std = { version = "^0.3.0", default-features = false }
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch = "ac/update-h2-curves", optional = true }
maybe-rayon = { version = "0.1.1", default-features = false }
bincode = { version = "1.3.3", default-features = false }
unzip-n = "0.1.2"
num = "0.4.1"
portable-atomic = "1.6.0"
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand" }
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
portable-atomic = { version = "1.6.0", optional = true }
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand", optional = true }
semver = { version = "1.0.22", optional = true }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
serde_json = { version = "1.0.97", features = [
"float_roundtrip",
"raw_value",
] }
# evm related deps
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ethers = { version = "2.0.11", default_features = false, features = [
"ethers-solc",
] }
indicatif = { version = "0.17.5", features = ["rayon"] }
gag = { version = "1.0.0", default_features = false }
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = [
"provider-http",
"signers",
"contract",
"rpc-types-eth",
"signer-wallet",
"node-bindings",
], optional = true }
foundry-compilers = { version = "0.4.1", features = ["svm-solc"], optional = true }
ethabi = { version = "18", optional = true }
indicatif = { version = "0.17.5", features = ["rayon"], optional = true }
gag = { version = "1.0.0", default-features = false, optional = true }
instant = { version = "0.1" }
reqwest = { version = "0.11.14", default-features = false, features = [
"default-tls",
"multipart",
"stream",
] }
openssl = { version = "0.10.55", features = ["vendored"] }
postgres = "0.19.5"
pg_bigdecimal = "0.1.5"
lazy_static = "1.4.0"
colored_json = { version = "3.0.1", default_features = false, optional = true }
plotters = { version = "0.3.0", default_features = false, optional = true }
regex = { version = "1", default_features = false }
tokio = { version = "1.26.0", default_features = false, features = [
"macros",
"rt",
] }
tokio-util = { version = "0.7.9", features = ["codec"] }
pyo3 = { version = "0.20.2", features = [
"extension-module",
"abi3-py37",
"macros",
], default_features = false, optional = true }
pyo3-asyncio = { version = "0.20.0", features = [
"attributes",
"tokio-runtime",
], default_features = false, optional = true }
pyo3-log = { version = "0.9.0", default_features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "05ebf550aa9922b221af4635c21a67a8d2af12a9", default_features = false, optional = true }
reqwest = { version = "0.12.4", default-features = false, features = ["default-tls", "multipart", "stream"], optional = true }
openssl = { version = "0.10.55", features = ["vendored"], optional = true }
tokio-postgres = { version = "0.7.10", optional = true }
pg_bigdecimal = { version = "0.1.5", optional = true }
lazy_static = { version = "1.4.0", optional = true }
colored_json = { version = "3.0.1", default-features = false, optional = true }
regex = { version = "1", default-features = false, optional = true }
tokio = { version = "1.35.0", default-features = false, features = ["macros", "rt-multi-thread"], optional = true }
pyo3 = { version = "0.21.2", features = ["extension-module", "abi3-py37", "macros"], default-features = false, optional = true }
pyo3-asyncio = { git = "https://github.com/jopemachine/pyo3-asyncio/", branch="migration-pyo3-0.21", features = ["attributes", "tokio-runtime"], default-features = false, optional = true }
pyo3-log = { version = "0.10.0", default-features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "40c64319291184814d9fea5fdf4fa16f5a4f7116", default-features = false, optional = true }
tabled = { version = "0.12.0", optional = true }
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
objc = { version = "0.2.4", optional = true }
mimalloc = { version = "0.1", optional = true }
# universal bindings
uniffi = { version = "=0.28.0", optional = true }
getrandom = { version = "0.2.8", optional = true }
uniffi_bindgen = { version = "=0.28.0", optional = true }
camino = { version = "^1.1", optional = true }
uuid = { version = "1.10.0", features = ["v4"], optional = true }
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies]
colored = { version = "2.0.0", default_features = false, optional = true }
env_logger = { version = "0.10.0", default_features = false, optional = true }
chrono = "0.4.31"
sha256 = "1.4.0"
colored = { version = "2.0.0", default-features = false, optional = true }
env_logger = { version = "0.10.0", default-features = false, optional = true }
chrono = { version = "0.4.31", optional = true }
sha256 = { version = "1.4.0", optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
serde_json = { version = "1.0.97", default-features = false, features = [
"float_roundtrip",
"raw_value",
] }
getrandom = { version = "0.2.8", features = ["js"] }
instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] }
@@ -106,8 +111,14 @@ console_error_panic_hook = "0.1.7"
wasm-bindgen-console-logger = "0.1.1"
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dev-dependencies]
criterion = { version = "0.5.1", features = ["html_reports"] }
[build-dependencies]
uniffi = { version = "0.28", features = ["build"], optional = true }
[dev-dependencies]
criterion = { version = "0.3", features = ["html_reports"] }
tempfile = "3.3.0"
lazy_static = "1.4.0"
mnist = "0.5"
@@ -161,6 +172,10 @@ harness = false
name = "relu"
harness = false
[[bench]]
name = "relu_lookupless"
harness = false
[[bench]]
name = "accum_matmul_relu"
harness = false
@@ -176,40 +191,73 @@ test = false
bench = false
required-features = ["ezkl"]
[[bin]]
name = "ios_gen_bindings"
required-features = ["ios-bindings", "uuid", "camino", "uniffi_bindgen"]
[features]
web = ["wasm-bindgen-rayon"]
default = ["ezkl", "mv-lookup"]
default = ["ezkl", "mv-lookup", "precompute-coset", "no-banner", "parallel-poly-read"]
onnx = ["dep:tract-onnx"]
python-bindings = ["pyo3", "pyo3-log", "pyo3-asyncio"]
ios-bindings = ["mv-lookup", "precompute-coset", "parallel-poly-read", "uniffi"]
ios-bindings-test = ["ios-bindings", "uniffi/bindgen-tests"]
ezkl = [
"onnx",
"serde",
"serde_json",
"log",
"colored",
"env_logger",
"dep:colored",
"dep:env_logger",
"tabled/color",
"serde_json/std",
"colored_json",
"halo2_proofs/circuit-params",
"dep:alloy",
"dep:foundry-compilers",
"dep:ethabi",
"dep:indicatif",
"dep:gag",
"dep:reqwest",
"dep:openssl",
"dep:tokio-postgres",
"dep:pg_bigdecimal",
"dep:lazy_static",
"dep:regex",
"dep:tokio",
"dep:mimalloc",
"dep:chrono",
"dep:sha256",
"dep:portable-atomic",
"dep:clap_complete",
"dep:halo2_solidity_verifier",
"dep:semver",
"dep:clap",
"dep:tosubcommand",
]
parallel-poly-read = ["halo2_proofs/circuit-params", "halo2_proofs/parallel-poly-read"]
mv-lookup = [
"halo2_proofs/mv-lookup",
"snark-verifier/mv-lookup",
"halo2_solidity_verifier/mv-lookup",
]
asm = ["halo2curves/asm", "halo2_proofs/asm"]
precompute-coset = ["halo2_proofs/precompute-coset"]
det-prove = []
icicle = ["halo2_proofs/icicle_gpu"]
empty-cmd = []
no-banner = []
metal = ["dep:metal", "dep:objc"]
no-update = []
# icicle patch to 0.1.0 if feature icicle is enabled
[patch.'https://github.com/ingonyama-zk/icicle']
icicle = { git = "https://github.com/ingonyama-zk/icicle?rev=45b00fb", package = "icicle", branch = "fix/vhnat/ezkl-build-fix" }
[patch.'https://github.com/zkonduit/halo2']
halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/optional-selector-poly#54f54453cf186aa5d89579c4e7663f9a27cfb89a", package = "halo2_proofs", branch = "ac/optional-selector-poly" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/cache-lookup-commitments#8b13a0d2a7a34d8daab010dadb2c47dfa47d37d0", package = "halo2_proofs", branch = "ac/cache-lookup-commitments" }
[patch.crates-io]
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }
[profile.release]
rustflags = ["-C", "relocation-model=pic"]
lto = "fat"
codegen-units = 1
# panic = "abort"
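
The dependency section above converts nearly every native dependency to `optional = true` and pulls each one in through the `ezkl` feature via the `dep:` syntax, so library consumers (wasm, iOS bindings) no longer compile the CLI stack. A generic sketch of the pattern, not ezkl's actual source:

```rust
// Cargo.toml (pattern used above):
//   [dependencies]
//   clap = { version = "4.5.3", features = ["derive"], optional = true }
//   [features]
//   ezkl = ["dep:clap"]

// The dependency only exists when the feature is on, so gate the code:
#[cfg(feature = "ezkl")]
pub fn parse_verbosity() -> bool {
    use clap::Parser;

    /// Hypothetical CLI, for illustration only.
    #[derive(Parser)]
    struct Cli {
        #[arg(long)]
        verbose: bool,
    }
    Cli::parse().verbose
}

#[cfg(not(feature = "ezkl"))]
pub fn parse_verbosity() -> bool {
    false // CLI support compiled out
}
```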


@@ -91,9 +91,9 @@ You can install the library from source
cargo install --locked --path .
```
You will need a functioning installation of `solc` in order to run `ezkl` properly.
[solc-select](https://github.com/crytic/solc-select) is recommended.
Follow the instructions on [solc-select](https://github.com/crytic/solc-select) to activate `solc` in your environment.
`ezkl` now auto-manages solc installation for you.
#### building python bindings


@@ -64,7 +64,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,
@@ -72,6 +72,7 @@ impl Circuit<Fr> for MyCircuit {
Box::new(PolyOp::Conv {
padding: vec![(0, 0)],
stride: vec![1; 2],
group: 1,
}),
)
.unwrap();
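
`PolyOp::Conv` now carries an explicit `group` count (see #828, "num groups for conv operations should be specified at load time"). `group: 1` is an ordinary convolution; `group` equal to the channel count would be depthwise. A quick shape sanity check in plain Rust (hypothetical helper, not ezkl API):

```rust
/// Weight shape [out_c, in_c / group, kh, kw] for a grouped 2D convolution:
/// each output channel only sees in_c / group input channels.
fn conv_weight_shape(out_c: usize, in_c: usize, k: usize, group: usize) -> [usize; 4] {
    assert!(in_c % group == 0 && out_c % group == 0, "channels must divide by group");
    [out_c, in_c / group, k, k]
}

fn main() {
    assert_eq!(conv_weight_shape(8, 4, 3, 1), [8, 4, 3, 3]); // standard conv (group = 1)
    assert_eq!(conv_weight_shape(4, 4, 3, 4), [4, 1, 3, 3]); // depthwise (group = in_c)
}
```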


@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,


@@ -57,7 +57,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,


@@ -57,7 +57,15 @@ impl Circuit<Fr> for MyCircuit {
// sets up a new relu table
base_config
.configure_lookup(cs, &b, &output, &a, BITS, K, &LookupOp::ReLU)
.configure_lookup(
cs,
&b,
&output,
&a,
BITS,
K,
&LookupOp::LeakyReLU { slope: 0.0.into() },
)
.unwrap();
MyConfig { base_config }
@@ -75,14 +83,18 @@ impl Circuit<Fr> for MyCircuit {
let op = PolyOp::Einsum {
equation: "ij,jk->ik".to_string(),
};
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let output = config
.base_config
.layout(&mut region, &self.inputs, Box::new(op))
.unwrap();
let _output = config
.base_config
.layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[output.unwrap()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.unwrap();
Ok(())
},


@@ -58,7 +58,15 @@ impl Circuit<Fr> for MyCircuit {
// sets up a new relu table
base_config
.configure_lookup(cs, &b, &output, &a, BITS, k, &LookupOp::ReLU)
.configure_lookup(
cs,
&b,
&output,
&a,
BITS,
k,
&LookupOp::LeakyReLU { slope: 0.0.into() },
)
.unwrap();
MyConfig { base_config }
@@ -76,14 +84,18 @@ impl Circuit<Fr> for MyCircuit {
let op = PolyOp::Einsum {
equation: "ij,jk->ik".to_string(),
};
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let output = config
.base_config
.layout(&mut region, &self.inputs, Box::new(op))
.unwrap();
let _output = config
.base_config
.layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[output.unwrap()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.unwrap();
Ok(())
},


@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,


@@ -59,7 +59,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(
&mut region,


@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1);
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(&mut region, &self.inputs, Box::new(PolyOp::Add))
.unwrap();


@@ -56,7 +56,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(&mut region, &self.inputs, Box::new(PolyOp::Pow(4)))
.unwrap();


@@ -2,6 +2,7 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Through
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::table::Range;
use ezkl::circuit::{ops::lookup::LookupOp, BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
@@ -41,7 +42,7 @@ impl Circuit<Fr> for NLCircuit {
.map(|_| VarTensor::new_advice(cs, K, 1, LEN))
.collect::<Vec<_>>();
let nl = LookupOp::ReLU;
let nl = LookupOp::LeakyReLU { slope: 0.0.into() };
let mut config = Config::default();
@@ -62,9 +63,13 @@ impl Circuit<Fr> for NLCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(&mut region, &[self.input.clone()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[self.input.clone()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.unwrap();
Ok(())
},
@@ -84,7 +89,7 @@ fn runrelu(c: &mut Criterion) {
};
let input: Tensor<Value<Fr>> =
Tensor::<i32>::from((0..len).map(|_| rng.gen_range(0..10))).into();
Tensor::<IntegerRep>::from((0..len).map(|_| rng.gen_range(0..10))).into();
let circuit = NLCircuit {
input: ValTensor::from(input.clone()),

benches/relu_lookupless.rs (new file): 143 lines

@@ -0,0 +1,143 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK};
use halo2_proofs::poly::kzg::strategy::SingleStrategy;
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use rand::Rng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
static mut LEN: usize = 4;
const K: usize = 16;
#[derive(Clone)]
struct NLCircuit {
pub input: ValTensor<Fr>,
}
impl Circuit<Fr> for NLCircuit {
type Config = Config<Fr>;
type FloorPlanner = SimpleFloorPlanner;
type Params = ();
fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<Fr>) -> Self::Config {
unsafe {
let advices = (0..3)
.map(|_| VarTensor::new_advice(cs, K, 1, LEN))
.collect::<Vec<_>>();
let mut config = Config::default();
config
.configure_range_check(cs, &advices[0], &advices[1], (-1, 1), K)
.unwrap();
config
.configure_range_check(cs, &advices[0], &advices[1], (0, 1023), K)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K, LEN, false);
config
}
}
fn synthesize(
&self,
mut config: Self::Config,
mut layouter: impl Layouter<Fr>, // layouter is our 'write buffer' for the circuit
) -> Result<(), Error> {
config.layout_range_checks(&mut layouter).unwrap();
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(&mut region, &[self.input.clone()], Box::new(PolyOp::ReLU))
.unwrap();
Ok(())
},
)?;
Ok(())
}
}
fn runrelu(c: &mut Criterion) {
let mut group = c.benchmark_group("relu");
let mut rng = rand::thread_rng();
let params = gen_srs::<KZGCommitmentScheme<_>>(17);
for &len in [4, 8].iter() {
unsafe {
LEN = len;
};
let input: Tensor<Value<Fr>> =
Tensor::<IntegerRep>::from((0..len).map(|_| rng.gen_range(0..10))).into();
let circuit = NLCircuit {
input: ValTensor::from(input.clone()),
};
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, NLCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, NLCircuit>(&circuit, &params, true).unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {
b.iter(|| {
let prover = create_proof_circuit::<
KZGCommitmentScheme<_>,
NLCircuit,
ProverSHPLONK<_>,
VerifierSHPLONK<_>,
SingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit.clone(),
vec![],
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);
prover.unwrap();
});
});
}
group.finish();
}
criterion_group! {
name = benches;
config = Criterion::default().with_plots();
targets = runrelu
}
criterion_main!(benches);
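
This new bench exercises the lookup-free ReLU from #845 ("refactor: lookup-less sign relu abs"): note that it configures two range checks, `(-1, 1)` and `(0, 1023)`, instead of a lookup table, and passes `1024, 2` as the extra `RegionCtx::new` arguments. Our reading, an assumption rather than anything stated in the diff, is that the magnitude is decomposed into 2 base-1024 limbs that are range-checked, with a separate sign term. A plain-Rust sketch of that arithmetic:

```rust
// Hypothetical illustration of a lookup-free ReLU: split |x| into `legs`
// base-`base` limbs (each range-checkable in [0, base)), recover the sign
// with the small (-1, 1) range check, then select x or 0.
fn relu_via_decomposition(x: i64, base: i64, legs: u32) -> i64 {
    let sign: i64 = if x >= 0 { 1 } else { -1 };
    let mut magnitude = x.abs();
    for _ in 0..legs {
        let limb = magnitude % base;
        assert!((0..base).contains(&limb)); // stands in for the (0, 1023) range check
        magnitude /= base;
    }
    assert_eq!(magnitude, 0, "|x| must fit in the given number of limbs");
    if sign == 1 { x } else { 0 }
}

fn main() {
    assert_eq!(relu_via_decomposition(5, 1024, 2), 5);
    assert_eq!(relu_via_decomposition(-70_000, 1024, 2), 0);
}
```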

build.rs (new file): 7 lines

@@ -0,0 +1,7 @@
fn main() {
if cfg!(feature = "ios-bindings-test") {
println!("cargo::rustc-env=UNIFFI_CARGO_BUILD_EXTRA_ARGS=--features=ios-bindings --no-default-features");
}
println!("cargo::rerun-if-changed=build.rs");
}
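
To our understanding, `UNIFFI_CARGO_BUILD_EXTRA_ARGS` is read by uniffi's bindgen test harness (enabled here through the `uniffi/bindgen-tests` feature of `ios-bindings-test`), so the cargo build it spawns for the fixture library uses the iOS feature set rather than the crate defaults.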


@@ -1,6 +1,167 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.20;
import './LoadInstances.sol';
contract LoadInstances {
/**
* @dev Parse the instances array from the Halo2Verifier encoded calldata.
* @notice must pass encoded bytes from memory
* @param encoded - verifier calldata
*/
function getInstancesMemory(
bytes memory encoded
) internal pure returns (uint256[] memory instances) {
bytes4 funcSig;
uint256 instances_offset;
uint256 instances_length;
assembly {
// fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
funcSig := mload(add(encoded, 0x20))
// Fetch instances offset which is 4 + 32 + 32 bytes away from
// start of encoded for `verifyProof(bytes,uint256[])`,
// and 4 + 32 + 32 +32 away for `verifyProof(address,bytes,uint256[])`
instances_offset := mload(
add(encoded, add(0x44, mul(0x20, eq(funcSig, 0xaf83a18d))))
)
instances_length := mload(add(add(encoded, 0x24), instances_offset))
}
instances = new uint256[](instances_length); // Allocate memory for the instances array.
assembly {
// Now instances points to the start of the array data
// (right after the length field).
for {
let i := 0x20
} lt(i, add(mul(instances_length, 0x20), 0x20)) {
i := add(i, 0x20)
} {
mstore(
add(instances, i),
mload(add(add(encoded, add(i, 0x24)), instances_offset))
)
}
}
}
/**
* @dev Parse the instances array from the Halo2Verifier encoded calldata.
* @notice must pass encoded bytes from calldata
* @param encoded - verifier calldata
*/
function getInstancesCalldata(
bytes calldata encoded
) internal pure returns (uint256[] memory instances) {
bytes4 funcSig;
uint256 instances_offset;
uint256 instances_length;
assembly {
// fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
funcSig := calldataload(encoded.offset)
// Fetch instances offset which is 4 + 32 + 32 bytes away from
// start of encoded for `verifyProof(bytes,uint256[])`,
// and 4 + 32 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`
instances_offset := calldataload(
add(
encoded.offset,
add(0x24, mul(0x20, eq(funcSig, 0xaf83a18d)))
)
)
instances_length := calldataload(
add(add(encoded.offset, 0x04), instances_offset)
)
}
instances = new uint256[](instances_length); // Allocate memory for the instances array.
assembly {
// Now instances points to the start of the array data
// (right after the length field).
for {
let i := 0x20
} lt(i, add(mul(instances_length, 0x20), 0x20)) {
i := add(i, 0x20)
} {
mstore(
add(instances, i),
calldataload(
add(add(encoded.offset, add(i, 0x04)), instances_offset)
)
)
}
}
}
}
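The assembly above is dense pointer arithmetic. As a reading aid, here is a minimal Python sketch of the same extraction, assuming well-formed ABI calldata and that (per the comments in the source) selector `0xaf83a18d` marks the `verifyProof(address,bytes,uint256[])` form — a sketch, not the on-chain code:

```python
def get_instances(encoded: bytes) -> list[int]:
    """Mirror of getInstancesCalldata's pointer math (sketch only)."""
    word = lambda off: int.from_bytes(encoded[off:off + 32], "big")
    # selector (4 bytes) + the bytes argument's head word (32 bytes),
    # plus one extra 32-byte word for the address in the 3-argument form
    head = 4 + 32 + (32 if encoded[:4] == bytes.fromhex("af83a18d") else 0)
    instances_offset = word(head)        # relative to the start of the args
    length = word(4 + instances_offset)  # the uint256[] length word
    data = 4 + instances_offset + 32     # elements follow the length word
    return [word(data + 32 * i) for i in range(length)]
```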
// The kzg commitments of a given model, all aggregated into a single bytes array.
// At solidity generation time, the commitments are hardcoded into the contract via the COMMITMENT_KZG constant.
// It will be used to check that the proof commitments match the expected commitments.
bytes constant COMMITMENT_KZG = hex"";
contract SwapProofCommitments {
/**
* @dev Swap the proof commitments
* @notice must pass encoded bytes from memory
* @param encoded - verifier calldata
*/
function checkKzgCommits(
bytes calldata encoded
) internal pure returns (bool equal) {
bytes4 funcSig;
uint256 proof_offset;
uint256 proof_length;
assembly {
// fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
funcSig := calldataload(encoded.offset)
// Fetch proof offset which is 4 + 32 bytes away from
// start of encoded for `verifyProof(bytes,uint256[])`,
// and 4 + 32 + 32 away for `verifyProof(address,bytes,uint256[])`
proof_offset := calldataload(
add(
encoded.offset,
add(0x04, mul(0x20, eq(funcSig, 0xaf83a18d)))
)
)
proof_length := calldataload(
add(add(encoded.offset, 0x04), proof_offset)
)
}
// Check the length of the commitment against the proof bytes
if (proof_length < COMMITMENT_KZG.length) {
return false;
}
// Load COMMITMENT_KZG into memory
bytes memory commitment = COMMITMENT_KZG;
// Compare the first N bytes of the proof with COMMITMENT_KZG
uint words = (commitment.length + 31) / 32; // Calculate the number of 32-byte words
assembly {
// Now we compare the commitment with the proof,
// ensuring that the commitments divided up into 32 byte words are all equal.
for {
let i := 0x20
} lt(i, add(mul(words, 0x20), 0x20)) {
i := add(i, 0x20)
} {
let wordProof := calldataload(
add(add(encoded.offset, add(i, 0x04)), proof_offset)
)
let wordCommitment := mload(add(commitment, i))
equal := eq(wordProof, wordCommitment)
if eq(equal, 0) {
return(0, 0)
}
}
}
return equal; // Return true if the commitment comparison passed
} /// end checkKzgCommits
}
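Put plainly, `checkKzgCommits` requires the proof bytes to begin with the hardcoded `COMMITMENT_KZG` blob, compared one 32-byte word at a time. A Python sketch of that prefix check, glossing over the assembly's edge cases (e.g. an empty commitment or a trailing partial word):

```python
def check_kzg_commits(proof: bytes, commitment: bytes) -> bool:
    """Does the proof start with the expected commitment blob? (sketch)"""
    if len(proof) < len(commitment):
        return False
    words = (len(commitment) + 31) // 32  # number of 32-byte words compared
    return all(proof[32 * i:32 * (i + 1)] == commitment[32 * i:32 * (i + 1)]
               for i in range(words))
```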
// This contract serves as a Data Attestation Verifier for the EZKL model.
// It is designed to read and attest to instances of proofs generated from a specified circuit.
@@ -13,9 +174,10 @@ import './LoadInstances.sol';
// 4. Field Element Conversion: The fixed-point representation is then converted into a field element modulo P using the `toFieldElement` method.
// 5. Data Attestation: The `attestData` method validates that the public instances match the data fetched and processed by the contract.
// 6. Proof Verification: The `verifyWithDataAttestation` method parses the instances out of the encoded calldata and calls the `attestData` method to validate the public instances,
// 6b. Optional KZG Commitment Verification: It also checks the KZG commitments in the proof against the expected commitments using the `checkKzgCommits` method.
// then calls the `verifyProof` method to verify the proof on the verifier.
contract DataAttestation is LoadInstances {
contract DataAttestation is LoadInstances, SwapProofCommitments {
/**
* @notice Struct used to make view only calls to accounts to fetch the data that EZKL reads from.
* @param the address of the account to make calls to
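The quantize-then-lift portion of the pipeline described above is where most confusion arises. The following runnable Python sketch chains quantization (cf. `quantizeData`), field-element conversion (`toFieldElement`), and the final comparison, with hypothetical scale/decimals values standing in for the contract's actual configuration:

```python
SCALE, DECIMALS = 1 << 7, 10 ** 7  # hypothetical fixed-point parameters
ORDER = 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001

def quantize(x: int) -> int:
    """Fixed-point quantization, rounding half away from zero."""
    neg, x = x < 0, abs(x)
    q = x * SCALE // DECIMALS
    if (x * SCALE) % DECIMALS * 2 >= DECIMALS:
        q += 1
    return -q if neg else q

def to_field_element(x: int) -> int:
    """Lift a signed value into the field modulo ORDER."""
    return (x + ORDER) % ORDER

raw = -12_345_678  # as if decoded from a staticcall return
assert to_field_element(quantize(raw)) == ORDER - 158  # compared to instance
```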
@@ -34,11 +196,14 @@ contract DataAttestation is LoadInstances {
address public admin;
/**
* @notice EZKL P value
* @notice EZKL P value
* @dev In order to prevent the verifier from accepting two versions of the same pubInput, n and the quantity (n + P), where n + P <= 2^256, we require that all instances are strictly less than P.
* @dev The reason for this is that the assembly code of the verifier performs all arithmetic operations modulo P and as a consequence can't distinguish between n and n + P.
*/
uint256 constant ORDER = uint256(0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001);
uint256 constant ORDER =
uint256(
0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
);
uint256 constant INPUT_CALLS = 0;
@@ -69,7 +234,7 @@ contract DataAttestation is LoadInstances {
function updateAdmin(address _admin) external {
require(msg.sender == admin, "Only admin can update admin");
if(_admin == address(0)) {
if (_admin == address(0)) {
revert();
}
admin = _admin;
@@ -80,7 +245,7 @@ contract DataAttestation is LoadInstances {
bytes[][] memory _callData,
uint256[][] memory _decimals
) external {
require(msg.sender == admin, "Only admin can update instanceOffset");
require(msg.sender == admin, "Only admin can update account calls");
populateAccountCalls(_contractAddresses, _callData, _decimals);
}
@@ -111,7 +276,10 @@ contract DataAttestation is LoadInstances {
// count the total number of storage reads across all of the accounts
counter += _callData[i].length;
}
require(counter == INPUT_CALLS + OUTPUT_CALLS, "Invalid number of calls");
require(
counter == INPUT_CALLS + OUTPUT_CALLS,
"Invalid number of calls"
);
}
function mulDiv(
@@ -167,7 +335,7 @@ contract DataAttestation is LoadInstances {
* @dev Quantize the data returned from the account calls to the scale used by the EZKL model.
* @param data - The data returned from the account calls.
* @param decimals - The number of decimals the data returned from the account calls has (for floating point representation).
* @param scale - The scale used to convert the floating point value into a fixed point value.
* @param scale - The scale used to convert the floating point value into a fixed point value.
*/
function quantizeData(
bytes memory data,
@@ -181,7 +349,7 @@ contract DataAttestation is LoadInstances {
if (mulmod(uint256(x), scale, decimals) * 2 >= decimals) {
output += 1;
}
quantized_data = neg ? -int256(output): int256(output);
quantized_data = neg ? -int256(output) : int256(output);
}
/**
* @dev Make a static call to the account to fetch the data that EZKL reads from.
@@ -211,7 +379,9 @@ contract DataAttestation is LoadInstances {
* @param x - The quantized data.
* @return field_element - The field element.
*/
function toFieldElement(int256 x) internal pure returns (uint256 field_element) {
function toFieldElement(
int256 x
) internal pure returns (uint256 field_element) {
// The casting down to uint256 is safe because the order is about 2^254, and the value
// of x ranges from -2^127 to 2^127, so x + int(ORDER) is always positive.
return uint256(x + int(ORDER)) % ORDER;
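A quick numeric check of that range argument — Python's `%` already returns the canonical non-negative residue, which is exactly what the `x + int(ORDER)` trick reproduces for int256 inputs:

```python
ORDER = 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
assert ORDER > 2 ** 253                      # "about 2^254"
for x in (-(1 << 127), -1, 0, 1, 1 << 127):
    assert 0 < x + ORDER < 2 ** 256          # the sum never wraps uint256
    assert (x + ORDER) % ORDER == x % ORDER  # so the lift is the true residue
```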
@@ -251,13 +421,18 @@ contract DataAttestation is LoadInstances {
}
}
/**
* @dev Verify the proof with the data attestation.
* @param verifier - The address of the verifier contract.
* @param encoded - The verifier calldata.
*/
function verifyWithDataAttestation(
address verifier,
bytes calldata encoded
) public view returns (bool) {
require(verifier.code.length > 0,"Address: call to non-contract");
require(verifier.code.length > 0, "Address: call to non-contract");
attestData(getInstancesCalldata(encoded));
require(checkKzgCommits(encoded), "Invalid KZG commitments");
// static call the verifier contract to verify the proof
(bool success, bytes memory returndata) = verifier.staticcall(encoded);

View File

@@ -1,92 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.20;
contract LoadInstances {
/**
* @dev Parse the instances array from the Halo2Verifier encoded calldata.
* @notice must pass encoded bytes from memory
* @param encoded - verifier calldata
*/
function getInstancesMemory(
bytes memory encoded
) internal pure returns (uint256[] memory instances) {
bytes4 funcSig;
uint256 instances_offset;
uint256 instances_length;
assembly {
// fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
funcSig := mload(add(encoded, 0x20))
// Fetch instances offset which is 4 + 32 + 32 bytes away from
// start of encoded for `verifyProof(bytes,uint256[])`,
// and 4 + 32 + 32 +32 away for `verifyProof(address,bytes,uint256[])`
instances_offset := mload(
add(encoded, add(0x44, mul(0x20, eq(funcSig, 0xaf83a18d))))
)
instances_length := mload(add(add(encoded, 0x24), instances_offset))
}
instances = new uint256[](instances_length); // Allocate memory for the instances array.
assembly {
// Now instances points to the start of the array data
// (right after the length field).
for {
let i := 0x20
} lt(i, add(mul(instances_length, 0x20), 0x20)) {
i := add(i, 0x20)
} {
mstore(
add(instances, i),
mload(add(add(encoded, add(i, 0x24)), instances_offset))
)
}
}
}
/**
* @dev Parse the instances array from the Halo2Verifier encoded calldata.
* @notice must pass encoded bytes from calldata
* @param encoded - verifier calldata
*/
function getInstancesCalldata(
bytes calldata encoded
) internal pure returns (uint256[] memory instances) {
bytes4 funcSig;
uint256 instances_offset;
uint256 instances_length;
assembly {
// fetch function sig. Either `verifyProof(bytes,uint256[])` or `verifyProof(address,bytes,uint256[])`
funcSig := calldataload(encoded.offset)
// Fetch instances offset which is 4 + 32 + 32 bytes away from
// start of encoded for `verifyProof(bytes,uint256[])`,
// and 4 + 32 + 32 +32 away for `verifyProof(address,bytes,uint256[])`
instances_offset := calldataload(
add(
encoded.offset,
add(0x24, mul(0x20, eq(funcSig, 0xaf83a18d)))
)
)
instances_length := calldataload(add(add(encoded.offset, 0x04), instances_offset))
}
instances = new uint256[](instances_length); // Allocate memory for the instances array.
assembly{
// Now instances points to the start of the array data
// (right after the length field).
for {
let i := 0x20
} lt(i, add(mul(instances_length, 0x20), 0x20)) {
i := add(i, 0x20)
} {
mstore(
add(instances, i),
calldataload(
add(add(encoded.offset, add(i, 0x04)), instances_offset)
)
)
}
}
}
}

View File

@@ -1,135 +0,0 @@
// SPDX-License-Identifier: GPL-3.0
pragma solidity ^0.8.17;
contract QuantizeData {
/**
* @notice EZKL P value
* @dev In order to prevent the verifier from accepting two versions of the same instance, n and the quantity (n + P), where n + P <= 2^256, we require that all instances are strictly less than P.
* @dev The reason for this is that the assembly code of the verifier performs all arithmetic operations modulo P and as a consequence can't distinguish between n and n + P.
*/
uint256 constant ORDER =
uint256(
0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
);
/**
* @notice Calculates floor(x * y / denominator) with full precision. Throws if result overflows a uint256 or denominator == 0
* @dev Original credit to Remco Bloemen under MIT license (https://xn--2-umb.com/21/muldiv)
* with further edits by Uniswap Labs also under MIT license.
*/
function mulDiv(
uint256 x,
uint256 y,
uint256 denominator
) internal pure returns (uint256 result) {
unchecked {
// 512-bit multiply [prod1 prod0] = x * y. Compute the product mod 2^256 and mod 2^256 - 1, then use
// use the Chinese Remainder Theorem to reconstruct the 512 bit result. The result is stored in two 256
// variables such that product = prod1 * 2^256 + prod0.
uint256 prod0; // Least significant 256 bits of the product
uint256 prod1; // Most significant 256 bits of the product
assembly {
let mm := mulmod(x, y, not(0))
prod0 := mul(x, y)
prod1 := sub(sub(mm, prod0), lt(mm, prod0))
}
// Handle non-overflow cases, 256 by 256 division.
if (prod1 == 0) {
// Solidity will revert if denominator == 0, unlike the div opcode on its own.
// The surrounding unchecked block does not change this fact.
// See https://docs.soliditylang.org/en/latest/control-structures.html#checked-or-unchecked-arithmetic.
return prod0 / denominator;
}
// Make sure the result is less than 2^256. Also prevents denominator == 0.
require(denominator > prod1, "Math: mulDiv overflow");
///////////////////////////////////////////////
// 512 by 256 division.
///////////////////////////////////////////////
// Make division exact by subtracting the remainder from [prod1 prod0].
uint256 remainder;
assembly {
// Compute remainder using mulmod.
remainder := mulmod(x, y, denominator)
// Subtract 256 bit number from 512 bit number.
prod1 := sub(prod1, gt(remainder, prod0))
prod0 := sub(prod0, remainder)
}
// Factor powers of two out of denominator and compute largest power of two divisor of denominator. Always >= 1.
// See https://cs.stackexchange.com/q/138556/92363.
// Does not overflow because the denominator cannot be zero at this stage in the function.
uint256 twos = denominator & (~denominator + 1);
assembly {
// Divide denominator by twos.
denominator := div(denominator, twos)
// Divide [prod1 prod0] by twos.
prod0 := div(prod0, twos)
// Flip twos such that it is 2^256 / twos. If twos is zero, then it becomes one.
twos := add(div(sub(0, twos), twos), 1)
}
// Shift in bits from prod1 into prod0.
prod0 |= prod1 * twos;
// Invert denominator mod 2^256. Now that denominator is an odd number, it has an inverse modulo 2^256 such
// that denominator * inv = 1 mod 2^256. Compute the inverse by starting with a seed that is correct for
// four bits. That is, denominator * inv = 1 mod 2^4.
uint256 inverse = (3 * denominator) ^ 2;
// Use the Newton-Raphson iteration to improve the precision. Thanks to Hensel's lifting lemma, this also works
// in modular arithmetic, doubling the correct bits in each step.
inverse *= 2 - denominator * inverse; // inverse mod 2^8
inverse *= 2 - denominator * inverse; // inverse mod 2^16
inverse *= 2 - denominator * inverse; // inverse mod 2^32
inverse *= 2 - denominator * inverse; // inverse mod 2^64
inverse *= 2 - denominator * inverse; // inverse mod 2^128
inverse *= 2 - denominator * inverse; // inverse mod 2^256
// Because the division is now exact we can divide by multiplying with the modular inverse of denominator.
// This will give us the correct result modulo 2^256. Since the preconditions guarantee that the outcome is
// less than 2^256, this is the final result. We don't need to compute the high bits of the result and prod1
// is no longer required.
result = prod0 * inverse;
return result;
}
}
function quantize_data(
bytes[] memory data,
uint256[] memory decimals,
uint256[] memory scales
) external pure returns (int256[] memory quantized_data) {
quantized_data = new int256[](data.length);
for (uint i; i < data.length; i++) {
int x = abi.decode(data[i], (int256));
bool neg = x < 0;
if (neg) x = -x;
uint denom = 10 ** decimals[i];
uint scale = 1 << scales[i];
uint output = mulDiv(uint256(x), scale, denom);
if (mulmod(uint256(x), scale, denom) * 2 >= denom) {
output += 1;
}
quantized_data[i] = neg ? -int256(output) : int256(output);
}
}
function to_field_element(
int64[] memory quantized_data
) public pure returns (uint256[] memory output) {
output = new uint256[](quantized_data.length);
for (uint i; i < quantized_data.length; i++) {
output[i] = uint256(quantized_data[i] + int(ORDER)) % ORDER;
}
}
}
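The deleted helper above is the standard full-precision mulDiv (512-bit intermediate product, Newton-Raphson modular inverse). In a language with big integers the specification collapses to one line, which is handy as a reference when auditing the assembly — a sketch, not the contract code:

```python
def muldiv(x: int, y: int, denominator: int) -> int:
    """floor(x * y / denominator) — a big-integer reference sketch."""
    assert denominator > 0, "division by zero"
    result = x * y // denominator
    assert result < 2 ** 256, "Math: mulDiv overflow"
    return result

# the intermediate product overflows uint256, but the result fits:
assert muldiv(2 ** 200, 2 ** 100, 2 ** 50) == 2 ** 250
```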

View File

@@ -1,12 +0,0 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.17;
contract TestReads {
int[] public arr;
constructor(int256[] memory _numbers) {
for (uint256 i = 0; i < _numbers.length; i++) {
arr.push(_numbers[i]);
}
}
}

View File

@@ -2,8 +2,7 @@ use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{
ops::lookup::LookupOp, ops::poly::PolyOp, BaseConfig as PolyConfig, CheckMode,
};
use ezkl::fieldutils;
use ezkl::fieldutils::i32_to_felt;
use ezkl::fieldutils::{self, integer_rep_to_felt, IntegerRep};
use ezkl::tensor::*;
use halo2_proofs::dev::MockProver;
use halo2_proofs::poly::commitment::Params;
@@ -42,8 +41,8 @@ const NUM_INNER_COLS: usize = 1;
struct Config<
const LEN: usize, //LEN = CHOUT x OH x OW flattened //not supported yet in rust stable
const CLASSES: usize,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -66,8 +65,8 @@ struct Config<
struct MyCircuit<
const LEN: usize, //LEN = CHOUT x OH x OW flattened
const CLASSES: usize,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -90,8 +89,8 @@ struct MyCircuit<
impl<
const LEN: usize,
const CLASSES: usize,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -164,7 +163,7 @@ where
&params,
(LOOKUP_MIN, LOOKUP_MAX),
K,
&LookupOp::ReLU,
&LookupOp::LeakyReLU { slope: 0.0.into() },
)
.unwrap();
@@ -200,11 +199,12 @@ where
.assign_region(
|| "mlp_4d",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 1024, 2);
let op = PolyOp::Conv {
padding: vec![(PADDING, PADDING); 2],
stride: vec![STRIDE; 2],
group: 1,
};
let x = config
.layer_config
@@ -221,7 +221,11 @@ where
let x = config
.layer_config
.layout(&mut region, &[x.unwrap()], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[x.unwrap()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.unwrap();
let mut x = config
@@ -281,7 +285,7 @@ where
}
pub fn runconv() {
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
const KERNEL_HEIGHT: usize = 5;
@@ -315,7 +319,11 @@ pub fn runconv() {
.test_set_length(10_000)
.finalize();
let mut train_data = Tensor::from(trn_img.iter().map(|x| i32_to_felt::<F>(*x as i32 / 16)));
let mut train_data = Tensor::from(
trn_img
.iter()
.map(|x| integer_rep_to_felt::<F>(*x as IntegerRep / 16)),
);
train_data.reshape(&[50_000, 28, 28]).unwrap();
let mut train_labels = Tensor::from(trn_lbl.iter().map(|x| *x as f32));
@@ -343,8 +351,8 @@ pub fn runconv() {
.map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
}),
);
@@ -355,7 +363,8 @@ pub fn runconv() {
let l0_kernels = l0_kernels.try_into().unwrap();
let mut l0_bias = Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::i32_to_felt(0)));
let mut l0_bias =
Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::integer_rep_to_felt(0)));
l0_bias.set_visibility(&ezkl::graph::Visibility::Private);
let l0_bias = l0_bias.try_into().unwrap();
@@ -363,8 +372,8 @@ pub fn runconv() {
let mut l2_biases = Tensor::<F>::from(myparams.biases.into_iter().map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
}));
l2_biases.set_visibility(&ezkl::graph::Visibility::Private);
l2_biases.reshape(&[l2_biases.len(), 1]).unwrap();
@@ -374,8 +383,8 @@ pub fn runconv() {
let mut l2_weights = Tensor::<F>::from(myparams.weights.into_iter().flatten().map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
}));
l2_weights.set_visibility(&ezkl::graph::Visibility::Private);
l2_weights.reshape(&[CLASSES, LEN]).unwrap();
@@ -401,13 +410,13 @@ pub fn runconv() {
l2_params: [l2_weights, l2_biases],
};
let public_input: Tensor<i32> = vec![
-25124i32, -19304, -16668, -4399, -6209, -4548, -2317, -8349, -6117, -23461,
let public_input: Tensor<IntegerRep> = vec![
-25124, -19304, -16668, -4399, -6209, -4548, -2317, -8349, -6117, -23461,
]
.into_iter()
.into();
let pi_inner: Tensor<F> = public_input.map(i32_to_felt::<F>);
let pi_inner: Tensor<F> = public_input.map(integer_rep_to_felt::<F>);
println!("MOCK PROVING");
let now = Instant::now();
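One change worth calling out in this hunk set (it recurs in the next file): every `LookupOp::ReLU` becomes `LookupOp::LeakyReLU { slope: 0.0.into() }`. A zero-slope leaky ReLU is pointwise identical to ReLU, so the benchmarked circuit computes the same function — a two-line Python check:

```python
def leaky_relu(x: float, slope: float) -> float:
    return x if x >= 0 else slope * x

assert all(leaky_relu(x, 0.0) == max(x, 0) for x in [-3, -1, 0, 2, 5])
```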

View File

@@ -2,7 +2,7 @@ use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{
ops::lookup::LookupOp, ops::poly::PolyOp, BaseConfig as PolyConfig, CheckMode,
};
use ezkl::fieldutils::i32_to_felt;
use ezkl::fieldutils::{integer_rep_to_felt, IntegerRep};
use ezkl::tensor::*;
use halo2_proofs::dev::MockProver;
use halo2_proofs::{
@@ -23,8 +23,8 @@ struct MyConfig {
#[derive(Clone)]
struct MyCircuit<
const LEN: usize, //LEN = CHOUT x OH x OW flattened
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
> {
// Given the stateless MyConfig type information, a DNN trace is determined by its input and the parameters of its layers.
// Computing the trace still requires a forward pass. The intermediate activations are stored only by the layouter.
@@ -34,7 +34,7 @@ struct MyCircuit<
_marker: PhantomData<F>,
}
impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRep> Circuit<F>
for MyCircuit<LEN, LOOKUP_MIN, LOOKUP_MAX>
{
type Config = MyConfig;
@@ -69,7 +69,7 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
&params,
(LOOKUP_MIN, LOOKUP_MAX),
K,
&LookupOp::ReLU,
&LookupOp::LeakyReLU { slope: 0.0.into() },
)
.unwrap();
@@ -108,7 +108,7 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
.assign_region(
|| "mlp_4d",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
let x = config
.layer_config
.layout(
@@ -141,7 +141,11 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
println!("x shape: {:?}", x.dims());
let mut x = config
.layer_config
.layout(&mut region, &[x], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[x],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.unwrap()
.unwrap();
println!("3");
@@ -177,7 +181,11 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
println!("x shape: {:?}", x.dims());
let x = config
.layer_config
.layout(&mut region, &[x], Box::new(LookupOp::ReLU))
.layout(
&mut region,
&[x],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.unwrap();
println!("6");
println!("offset: {}", region.row());
@@ -212,36 +220,36 @@ impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
}
pub fn runmlp() {
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
// parameters
let mut l0_kernel: Tensor<F> = Tensor::<i32>::new(
let mut l0_kernel: Tensor<F> = Tensor::<IntegerRep>::new(
Some(&[10, 0, 0, -1, 0, 10, 1, 0, 0, 1, 10, 0, 1, 0, 0, 10]),
&[4, 4],
)
.unwrap()
.map(i32_to_felt);
.map(integer_rep_to_felt);
l0_kernel.set_visibility(&ezkl::graph::Visibility::Private);
let mut l0_bias: Tensor<F> = Tensor::<i32>::new(Some(&[0, 0, 0, 1]), &[4, 1])
let mut l0_bias: Tensor<F> = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1]), &[4, 1])
.unwrap()
.map(i32_to_felt);
.map(integer_rep_to_felt);
l0_bias.set_visibility(&ezkl::graph::Visibility::Private);
let mut l2_kernel: Tensor<F> = Tensor::<i32>::new(
let mut l2_kernel: Tensor<F> = Tensor::<IntegerRep>::new(
Some(&[0, 3, 10, -1, 0, 10, 1, 0, 0, 1, 0, 12, 1, -2, 32, 0]),
&[4, 4],
)
.unwrap()
.map(i32_to_felt);
.map(integer_rep_to_felt);
l2_kernel.set_visibility(&ezkl::graph::Visibility::Private);
// input data, with 1 padding to allow for bias
let input: Tensor<Value<F>> = Tensor::<i32>::new(Some(&[-30, -21, 11, 40]), &[4, 1])
let input: Tensor<Value<F>> = Tensor::<IntegerRep>::new(Some(&[-30, -21, 11, 40]), &[4, 1])
.unwrap()
.into();
let mut l2_bias: Tensor<F> = Tensor::<i32>::new(Some(&[0, 0, 0, 1]), &[4, 1])
let mut l2_bias: Tensor<F> = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1]), &[4, 1])
.unwrap()
.map(i32_to_felt);
.map(integer_rep_to_felt);
l2_bias.set_visibility(&ezkl::graph::Visibility::Private);
let circuit = MyCircuit::<4, -8192, 8192> {
@@ -251,12 +259,12 @@ pub fn runmlp() {
_marker: PhantomData,
};
let public_input: Vec<i32> = unsafe {
let public_input: Vec<IntegerRep> = unsafe {
vec![
(531f32 / 128f32).round().to_int_unchecked::<i32>(),
(103f32 / 128f32).round().to_int_unchecked::<i32>(),
(4469f32 / 128f32).round().to_int_unchecked::<i32>(),
(2849f32 / 128f32).to_int_unchecked::<i32>(),
(531f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(103f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(4469f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(2849f32 / 128f32).to_int_unchecked::<IntegerRep>(),
]
};
@@ -265,7 +273,10 @@ pub fn runmlp() {
let prover = MockProver::run(
K as u32,
&circuit,
vec![public_input.iter().map(|x| i32_to_felt::<F>(*x)).collect()],
vec![public_input
.iter()
.map(|x| integer_rep_to_felt::<F>(*x))
.collect()],
)
.unwrap();
prover.assert_satisfied();
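For intuition about the magic numbers in `public_input` above: assuming the `/ 128f32` in the source is the circuit's fixed-point scale (128 = 2^7), the expected instances are just the raw outputs rescaled and rounded:

```python
# hypothetical reading of the constants in the hunk above
outputs = [531, 103, 4469, 2849]           # raw fixed-point model outputs
instances = [round(o / 128) for o in outputs]
assert instances == [4, 1, 35, 22]         # matches the Rust expressions
```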

View File

@@ -251,7 +251,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -307,7 +307,7 @@
"metadata": {},
"outputs": [],
"source": [
"ezkl.setup_test_evm_witness(\n",
"await ezkl.setup_test_evm_witness(\n",
" data_path,\n",
" compiled_model_path,\n",
" # we write the call data to the same file as the input data\n",
@@ -333,7 +333,7 @@
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.get_srs( settings_path)\n"
"res = await ezkl.get_srs( settings_path)\n"
]
},
{
@@ -354,7 +354,7 @@
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
]
},
{
@@ -462,7 +462,7 @@
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" \n",
" settings_path,\n",
@@ -482,7 +482,7 @@
"\n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"\n",
"res = ezkl.deploy_evm(\n",
"res = await ezkl.deploy_evm(\n",
" addr_path_verifier,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
@@ -510,7 +510,7 @@
"sol_code_path = 'test.sol'\n",
"input_path = 'input.json'\n",
"\n",
"res = ezkl.create_evm_data_attestation(\n",
"res = await ezkl.create_evm_data_attestation(\n",
" input_path,\n",
" settings_path,\n",
" sol_code_path,\n",
@@ -535,7 +535,7 @@
"source": [
"addr_path_da = \"addr_da.txt\"\n",
"\n",
"res = ezkl.deploy_da_evm(\n",
"res = await ezkl.deploy_da_evm(\n",
" addr_path_da,\n",
" input_path,\n",
" settings_path,\n",
@@ -567,7 +567,7 @@
"with open(addr_path_da, 'r') as f:\n",
" addr_da = f.read()\n",
"\n",
"res = ezkl.verify_evm(\n",
"res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" RPC_URL,\n",
@@ -592,7 +592,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.2"
},
"orig_nbformat": 4
},
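A pattern to note across this and the following notebook diffs: the ezkl Python bindings shown here (`calibrate_settings`, `get_srs`, `gen_witness`, `create_evm_verifier`, `deploy_evm`, `verify_evm`, ...) are now awaited. Notebooks support top-level `await`; a plain Python script needs an event loop. A minimal sketch, assuming the same file names as the cells above:

```python
import asyncio
import ezkl

async def pipeline():
    await ezkl.calibrate_settings("cal_data.json", "network.onnx",
                                  "settings.json", "resources")
    await ezkl.get_srs("settings.json")
    await ezkl.gen_witness("input.json", "network.compiled", "witness.json")

asyncio.run(pipeline())
```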

View File

@@ -249,7 +249,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -278,7 +278,7 @@
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.get_srs( settings_path)\n"
"res = await ezkl.get_srs( settings_path)\n"
]
},
{
@@ -299,7 +299,7 @@
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"\n"
]
},
@@ -518,7 +518,7 @@
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" \n",
" settings_path,\n",
@@ -538,7 +538,7 @@
"\n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"\n",
"res = ezkl.deploy_evm(\n",
"res = await ezkl.deploy_evm(\n",
" addr_path_verifier,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
@@ -566,7 +566,7 @@
"sol_code_path = 'test.sol'\n",
"input_path = 'input.json'\n",
"\n",
"res = ezkl.create_evm_data_attestation(\n",
"res = await ezkl.create_evm_data_attestation(\n",
" input_path,\n",
" settings_path,\n",
" sol_code_path,\n",
@@ -591,7 +591,7 @@
"source": [
"addr_path_da = \"addr_da.txt\"\n",
"\n",
"res = ezkl.deploy_da_evm(\n",
"res = await ezkl.deploy_da_evm(\n",
" addr_path_da,\n",
" input_path,\n",
" settings_path,\n",
@@ -623,7 +623,7 @@
"with open(addr_path_da, 'r') as f:\n",
" addr_da = f.read()\n",
"\n",
"res = ezkl.verify_evm(\n",
"res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" RPC_URL,\n",
@@ -654,4 +654,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}

View File

@@ -0,0 +1,604 @@
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# data-attest-kzg-vis\n",
"\n",
"Here's an example leveraging EZKL whereby the inputs to the model are read and attested to from an on-chain source and the params and outputs are committed to using kzg-commitments. \n",
"\n",
"In this setup:\n",
"- the inputs and outputs are publicly known to the prover and verifier\n",
"- the on chain inputs will be fetched and then fed directly into the circuit\n",
"- the quantization of the on-chain inputs happens within the evm and is replicated at proving time \n",
"- The kzg commitment to the params and inputs will be read from the proof and checked to make sure it matches the expected commitment stored on-chain.\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"First we import the necessary dependencies and set up logging to be as informative as possible. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# check if notebook is in colab\n",
"try:\n",
" # install ezkl\n",
" import google.colab\n",
" import subprocess\n",
" import sys\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
"\n",
"# rely on local installation of ezkl if the notebook is not in colab\n",
"except:\n",
" pass\n",
"\n",
"\n",
"from torch import nn\n",
"import ezkl\n",
"import os\n",
"import json\n",
"import logging\n",
"\n",
"# uncomment for more descriptive logging \n",
"FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
"logging.basicConfig(format=FORMAT)\n",
"logging.getLogger().setLevel(logging.DEBUG)\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we define our model. It is a very simple PyTorch model that has just one layer, an average pooling 2D layer. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"# Defines the model\n",
"\n",
"class MyModel(nn.Module):\n",
" def __init__(self):\n",
" super(MyModel, self).__init__()\n",
" self.layer = nn.AvgPool2d(2, 1, (1, 1))\n",
"\n",
" def forward(self, x):\n",
" return self.layer(x)[0]\n",
"\n",
"\n",
"circuit = MyModel()\n",
"\n",
"# this is where you'd train your model"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We omit training for purposes of this demonstration. We've marked where training would happen in the cell above. \n",
"Now we export the model to onnx and create a corresponding (randomly generated) input. This input data will eventually be stored on chain and read from according to the call_data field in the graph input.\n",
"\n",
"You can replace the random `x` with real data if you so wish. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"x = 0.1*torch.rand(1,*[3, 2, 2], requires_grad=True)\n",
"\n",
"# Flips the neural net into inference mode\n",
"circuit.eval()\n",
"\n",
" # Export the model\n",
"torch.onnx.export(circuit, # model being run\n",
" x, # model input (or a tuple for multiple inputs)\n",
" \"network.onnx\", # where to save the model (can be a file or file-like object)\n",
" export_params=True, # store the trained parameter weights inside the model file\n",
" opset_version=10, # the ONNX version to export the model to\n",
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
" input_names = ['input'], # the model's input names\n",
" output_names = ['output'], # the model's output names\n",
" dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes\n",
" 'output' : {0 : 'batch_size'}})\n",
"\n",
"data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_data = [data_array])\n",
"\n",
" # Serialize data into file:\n",
"json.dump(data, open(\"input.json\", 'w' ))\n",
"\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We now define a function that will create a new anvil instance which we will deploy our test contract too. This contract will contain in its storage the data that we will read from and attest to. In production you would not need to set up a local anvil instance. Instead you would replace RPC_URL with the actual RPC endpoint of the chain you are deploying your verifiers too, reading from the data on said chain."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import subprocess\n",
"import time\n",
"import threading\n",
"\n",
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"RPC_URL = \"http://localhost:3030\"\n",
"\n",
"# Save process globally\n",
"anvil_process = None\n",
"\n",
"def start_anvil():\n",
" global anvil_process\n",
" if anvil_process is None:\n",
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
" if anvil_process.returncode is not None:\n",
" raise Exception(\"failed to start anvil process\")\n",
" time.sleep(3)\n",
"\n",
"def stop_anvil():\n",
" global anvil_process\n",
" if anvil_process is not None:\n",
" anvil_process.terminate()\n",
" anvil_process = None\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We define our `PyRunArgs` objects which contains the visibility parameters for out model. \n",
"- `input_visibility` defines the visibility of the model inputs\n",
"- `param_visibility` defines the visibility of the model weights and constants and parameters \n",
"- `output_visibility` defines the visibility of the model outputs\n",
"\n",
"Here we create the following setup:\n",
"- `input_visibility`: \"public\"\n",
"- `param_visibility`: \"polycommitment\" \n",
"- `output_visibility`: \"polycommitment\"\n",
"\n",
"**Note**:\n",
"When we set this to polycommitment, we are saying that the model parameters are committed to using a polynomial commitment scheme. This commitment will be stored on chain as a constant stored in the DA contract, and the proof will contain the commitment to the parameters. The DA verification will then check that the commitment in the proof matches the commitment stored on chain. \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import ezkl\n",
"\n",
"model_path = os.path.join('network.onnx')\n",
"compiled_model_path = os.path.join('network.compiled')\n",
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
"settings_path = os.path.join('settings.json')\n",
"srs_path = os.path.join('kzg.srs')\n",
"data_path = os.path.join('input.json')\n",
"\n",
"run_args = ezkl.PyRunArgs()\n",
"run_args.input_visibility = \"public\"\n",
"run_args.param_visibility = \"polycommit\"\n",
"run_args.output_visibility = \"polycommit\"\n",
"run_args.num_inner_cols = 1\n",
"run_args.variables = [(\"batch_size\", 1)]\n",
"\n",
"\n",
"\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we generate a settings file. This file basically instantiates a bunch of parameters that determine their circuit shape, size etc... Because of the way we represent nonlinearities in the circuit (using Halo2's [lookup tables](https://zcash.github.io/halo2/design/proving-system/lookup.html)), it is often best to _calibrate_ this settings file as some data can fall out of range of these lookups.\n",
"\n",
"You can pass a dataset for calibration that will be representative of real inputs you might find if and when you deploy the prover. Here we create a dummy calibration dataset for demonstration purposes. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!RUST_LOG=trace\n",
"# TODO: Dictionary outputs\n",
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# generate a bunch of dummy calibration data\n",
"cal_data = {\n",
" \"input_data\": [(0.1*torch.rand(2, *[3, 2, 2])).flatten().tolist()],\n",
"}\n",
"\n",
"cal_path = os.path.join('val_data.json')\n",
"# save as json file\n",
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
"assert res == True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The graph input for on chain data sources is formatted completely differently compared to file based data sources.\n",
"\n",
"- For file data sources, the raw floating point values that eventually get quantized, converted into field elements and stored in `witness.json` to be consumed by the circuit are stored. The output data contains the expected floating point values returned as outputs from running your vanilla pytorch model on the given inputs.\n",
"- For on chain data sources, the input_data field contains all the data necessary to read and format the on chain data into something digestable by EZKL (aka field elements :-D). \n",
"Here is what the schema for an on-chain data source graph input file should look like:\n",
" \n",
"```json\n",
"{\n",
" \"input_data\": {\n",
" \"rpc\": \"http://localhost:3030\", // The rpc endpoint of the chain you are deploying your verifier to\n",
" \"calls\": [\n",
" {\n",
" \"call_data\": [\n",
" [\n",
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000000\", // The abi encoded call data to a view function that returns a single on-chain data point (we only support uint256 returns for now)\n",
" 7 // The number of decimal places of the large uint256 value. This is our way of representing large wei values as floating points on chain, since the evm only natively supports integer values.\n",
" ],\n",
" [\n",
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000001\",\n",
" 5\n",
" ],\n",
" [\n",
" \"71e5ee5f0000000000000000000000000000000000000000000000000000000000000002\",\n",
" 5\n",
" ]\n",
" ],\n",
" \"address\": \"5fbdb2315678afecb367f032d93f642f64180aa3\" // The address of the contract that we are calling to get the data. \n",
" }\n",
" ]\n",
" }\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"await ezkl.setup_test_evm_witness(\n",
" data_path,\n",
" compiled_model_path,\n",
" # we write the call data to the same file as the input data\n",
" data_path,\n",
" input_source=ezkl.PyTestDataSource.OnChain,\n",
" output_source=ezkl.PyTestDataSource.File,\n",
" rpc_url=RPC_URL)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"As we use Halo2 with KZG-commitments we need an SRS string from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs). \n",
"\n",
"These SRS were generated with [this](https://github.com/privacy-scaling-explorations/perpetualpowersoftau) ceremony. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"res = await ezkl.get_srs( settings_path)\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"We now need to generate the circuit witness. These are the model outputs (and any hashes) that are generated when feeding the previously generated `input.json` through the circuit / model. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!export RUST_BACKTRACE=1\n",
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
"# WE GOT KEYS\n",
"# WE GOT CIRCUIT PARAMETERS\n",
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
"res = ezkl.setup(\n",
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" )\n",
"\n",
"assert res == True\n",
"assert os.path.isfile(vk_path)\n",
"assert os.path.isfile(pk_path)\n",
"assert os.path.isfile(settings_path)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we generate a full proof. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# GENERATE A PROOF\n",
"\n",
"proof_path = os.path.join('test.pf')\n",
"\n",
"res = ezkl.prove(\n",
" witness_path,\n",
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
"assert os.path.isfile(proof_path)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"And verify it as a sanity check. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# VERIFY IT\n",
"\n",
"res = ezkl.verify(\n",
" proof_path,\n",
" settings_path,\n",
" vk_path,\n",
" \n",
" )\n",
"\n",
"assert res == True\n",
"print(\"verified\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can now create and then deploy a vanilla evm verifier."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" \n",
" settings_path,\n",
" sol_code_path,\n",
" abi_path,\n",
" )\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"\n",
"res = await ezkl.deploy_evm(\n",
" addr_path_verifier,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
")\n",
"\n",
"assert res == True"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"When deploying a DA with kzg commitments, we need to make sure to also pass a witness file that contains the commitments to the parameters and inputs. This is because the verifier will need to check that the commitments in the proof match the commitments stored on chain."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"input_path = 'input.json'\n",
"\n",
"res = await ezkl.create_evm_data_attestation(\n",
" input_path,\n",
" settings_path,\n",
" sol_code_path,\n",
" abi_path,\n",
" witness_path = witness_path,\n",
" )"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Now we can deploy the data attest verifier contract. For security reasons, this binding will only deploy to a local anvil instance, using accounts generated by anvil. \n",
"So should only be used for testing purposes."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"addr_path_da = \"addr_da.txt\"\n",
"\n",
"res = await ezkl.deploy_da_evm(\n",
" addr_path_da,\n",
" input_path,\n",
" settings_path,\n",
" sol_code_path,\n",
" RPC_URL,\n",
" )\n"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"Call the view only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production since you don't have to pass your private key."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# read the verifier address\n",
"addr_verifier = None\n",
"with open(addr_path_verifier, 'r') as f:\n",
" addr = f.read()\n",
"#read the data attestation address\n",
"addr_da = None\n",
"with open(addr_path_da, 'r') as f:\n",
" addr_da = f.read()\n",
"\n",
"res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" RPC_URL,\n",
" addr_da,\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "ezkl",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -150,7 +150,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True"
]
},
@@ -170,7 +170,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -192,7 +192,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -204,7 +204,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -303,4 +303,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -352,14 +352,8 @@
"# Specify all the files we need\n",
"\n",
"model_path = os.path.join('network.onnx')\n",
"compiled_model_path = os.path.join('network.ezkl')\n",
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
"settings_path = os.path.join('settings.json')\n",
"\n",
"witness_path = os.path.join('witness.json')\n",
"data_path = os.path.join('input.json')\n",
"cal_data_path = os.path.join('cal_data.json')"
"cal_data_path = os.path.join('calibration.json')"
]
},
{
@@ -424,7 +418,7 @@
"source": [
"!RUST_LOG=trace\n",
"# TODO: Dictionary outputs\n",
"res = ezkl.gen_settings(model_path, settings_path)\n",
"res = ezkl.gen_settings()\n",
"assert res == True\n",
"\n"
]
@@ -443,7 +437,7 @@
"\n",
"# Optimize for resources, we cap logrows at 12 to reduce setup and proving time, at the expense of accuracy\n",
"# You may want to increase the max logrows if accuracy is a concern\n",
"res = ezkl.calibrate_settings(cal_data_path, model_path, settings_path, \"resources\", max_logrows = 12, scales = [2])"
"res = await ezkl.calibrate_settings(target = \"resources\", max_logrows = 12, scales = [2])"
]
},
{
@@ -463,7 +457,7 @@
},
"outputs": [],
"source": [
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
"res = ezkl.compile_circuit()\n",
"assert res == True"
]
},
@@ -484,7 +478,7 @@
},
"outputs": [],
"source": [
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs()"
]
},
{
@@ -504,17 +498,10 @@
},
"outputs": [],
"source": [
"res = ezkl.setup(\n",
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" )\n",
"res = ezkl.setup()\n",
"\n",
"\n",
"assert res == True\n",
"assert os.path.isfile(vk_path)\n",
"assert os.path.isfile(pk_path)\n",
"assert os.path.isfile(settings_path)"
"assert res == True"
]
},
{
@@ -539,7 +526,7 @@
"# now generate the witness file\n",
"witness_path = os.path.join('witness.json')\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness()\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -559,13 +546,7 @@
"\n",
"proof_path = os.path.join('proof.json')\n",
"\n",
"proof = ezkl.prove(\n",
" witness_path,\n",
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \"single\",\n",
" )\n",
"proof = ezkl.prove(proof_type=\"single\", proof_path=proof_path)\n",
"\n",
"print(proof)\n",
"assert os.path.isfile(proof_path)"
@@ -585,11 +566,7 @@
"source": [
"# verify our proof\n",
"\n",
"res = ezkl.verify(\n",
" proof_path,\n",
" settings_path,\n",
" vk_path,\n",
" )\n",
"res = ezkl.verify()\n",
"\n",
"assert res == True\n",
"print(\"verified\")"
@@ -664,12 +641,9 @@
"sol_code_path = os.path.join('Verifier.sol')\n",
"abi_path = os.path.join('Verifier.abi')\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
" vk_path,\n",
" \n",
" settings_path,\n",
" sol_code_path,\n",
" abi_path\n",
"res = await ezkl.create_evm_verifier(\n",
" sol_code_path=sol_code_path,\n",
" abi_path=abi_path, \n",
" )\n",
"\n",
"assert res == True\n",
@@ -757,7 +731,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.2"
}
},
"nbformat": 4,

File diff suppressed because one or more lines are too long

View File

@@ -467,7 +467,7 @@
"outputs": [],
"source": [
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True"
]
},
@@ -494,7 +494,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -508,7 +508,7 @@
"source": [
"# now generate the witness file\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -625,4 +625,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -195,7 +195,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -222,7 +222,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -236,7 +236,7 @@
"source": [
"# now generate the witness file\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},

View File

@@ -179,7 +179,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -202,7 +202,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -214,7 +214,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -313,4 +313,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -241,7 +241,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -270,7 +270,7 @@
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.get_srs( settings_path)\n"
"res = await ezkl.get_srs( settings_path)\n"
]
},
{
@@ -291,7 +291,7 @@
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
]
},
{
@@ -420,7 +420,7 @@
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" \n",
" settings_path,\n",
@@ -451,7 +451,7 @@
"\n",
"address_path = os.path.join(\"address.json\")\n",
"\n",
"res = ezkl.deploy_evm(\n",
"res = await ezkl.deploy_evm(\n",
" address_path,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
@@ -472,7 +472,7 @@
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"res = ezkl.verify_evm(\n",
"res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" \"http://127.0.0.1:3030\"\n",

View File

@@ -152,7 +152,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -175,7 +175,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs(settings_path = settings_path)"
"res = await ezkl.get_srs(settings_path = settings_path)"
]
},
{
@@ -188,7 +188,7 @@
"# now generate the witness file \n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -284,4 +284,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -155,7 +155,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -178,7 +178,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -190,7 +190,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -289,4 +289,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -233,7 +233,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -262,7 +262,7 @@
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.get_srs( settings_path)\n"
"res = await ezkl.get_srs( settings_path)\n"
]
},
{
@@ -315,7 +315,7 @@
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n"
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n"
]
},
{
@@ -429,7 +429,7 @@
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" \n",
" settings_path,\n",
@@ -460,7 +460,7 @@
"\n",
"address_path = os.path.join(\"address.json\")\n",
"\n",
"res = ezkl.deploy_evm(\n",
"res = await ezkl.deploy_evm(\n",
" address_path,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
@@ -481,7 +481,7 @@
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"res = ezkl.verify_evm(\n",
"res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" \"http://127.0.0.1:3030\"\n",

View File

@@ -193,7 +193,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -216,7 +216,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -228,7 +228,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -347,4 +347,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -142,7 +142,7 @@
"# Serialize data into file:\n",
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -165,7 +165,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -177,7 +177,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -276,4 +276,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -347,7 +347,7 @@
"# Serialize data into file:\n",
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -370,7 +370,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -383,7 +383,7 @@
"# now generate the witness file \n",
"witness_path = \"gan_witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -490,4 +490,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -0,0 +1,279 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
"metadata": {},
"source": [
"## Logistic Regression\n",
"\n",
"\n",
"Sklearn based models are slightly finicky to get into a suitable onnx format. \n",
"This notebook showcases how to do so using the `hummingbird-ml` python package for a Logistic Regression model. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "95613ee9",
"metadata": {},
"outputs": [],
"source": [
"# check if notebook is in colab\n",
"try:\n",
" # install ezkl\n",
" import google.colab\n",
" import subprocess\n",
" import sys\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"hummingbird-ml\"])\n",
"\n",
"# rely on local installation of ezkl if the notebook is not in colab\n",
"except:\n",
" pass\n",
"\n",
"import os\n",
"import torch\n",
"import ezkl\n",
"import json\n",
"from hummingbird.ml import convert\n",
"\n",
"\n",
"# here we create and (potentially train a model)\n",
"\n",
"# make sure you have the dependencies required here already installed\n",
"import numpy as np\n",
"from sklearn.linear_model import LogisticRegression\n",
"X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n",
"# y = 1 * x_0 + 2 * x_1 + 3\n",
"y = np.dot(X, np.array([1, 2])) + 3\n",
"reg = LogisticRegression().fit(X, y)\n",
"reg.score(X, y)\n",
"\n",
"circuit = convert(reg, \"torch\", X[:1]).model\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b37637c4",
"metadata": {},
"outputs": [],
"source": [
"model_path = os.path.join('network.onnx')\n",
"compiled_model_path = os.path.join('network.compiled')\n",
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
"settings_path = os.path.join('settings.json')\n",
"\n",
"witness_path = os.path.join('witness.json')\n",
"data_path = os.path.join('input.json')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82db373a",
"metadata": {},
"outputs": [],
"source": [
"\n",
"\n",
"# export to onnx format\n",
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
"\n",
"# Input to the model\n",
"shape = X.shape[1:]\n",
"x = torch.rand(1, *shape, requires_grad=True)\n",
"torch_out = circuit(x)\n",
"# Export the model\n",
"torch.onnx.export(circuit, # model being run\n",
" # model input (or a tuple for multiple inputs)\n",
" x,\n",
" # where to save the model (can be a file or file-like object)\n",
" \"network.onnx\",\n",
" export_params=True, # store the trained parameter weights inside the model file\n",
" opset_version=10, # the ONNX version to export the model to\n",
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
" input_names=['input'], # the model's input names\n",
" output_names=['output'], # the model's output names\n",
" dynamic_axes={'input': {0: 'batch_size'}, # variable length axes\n",
" 'output': {0: 'batch_size'}})\n",
"\n",
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_shapes=[shape],\n",
" input_data=[d],\n",
" output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(\"input.json\", 'w'))\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d5e374a2",
"metadata": {},
"outputs": [],
"source": [
"!RUST_LOG=trace\n",
"# TODO: Dictionary outputs\n",
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cal_path = os.path.join(\"calibration.json\")\n",
"\n",
"data_array = (torch.randn(20, *shape).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_data = [data_array])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3aa4f090",
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
"assert res == True"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b74dcee",
"metadata": {},
"outputs": [],
"source": [
"# srs path\n",
"res = await ezkl.get_srs( settings_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "18c8b7c7",
"metadata": {},
"outputs": [],
"source": [
"# now generate the witness file \n",
"\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b1c561a8",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
"# WE GOT KEYS\n",
"# WE GOT CIRCUIT PARAMETERS\n",
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
"\n",
"\n",
"\n",
"res = ezkl.setup(\n",
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" \n",
" )\n",
"\n",
"assert res == True\n",
"assert os.path.isfile(vk_path)\n",
"assert os.path.isfile(pk_path)\n",
"assert os.path.isfile(settings_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c384cbc8",
"metadata": {},
"outputs": [],
"source": [
"# GENERATE A PROOF\n",
"\n",
"\n",
"proof_path = os.path.join('test.pf')\n",
"\n",
"res = ezkl.prove(\n",
" witness_path,\n",
" compiled_model_path,\n",
" pk_path,\n",
" proof_path,\n",
" \n",
" \"single\",\n",
" )\n",
"\n",
"print(res)\n",
"assert os.path.isfile(proof_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76f00d41",
"metadata": {},
"outputs": [],
"source": [
"# VERIFY IT\n",
"\n",
"res = ezkl.verify(\n",
" proof_path,\n",
" settings_path,\n",
" vk_path,\n",
" \n",
" )\n",
"\n",
"assert res == True\n",
"print(\"verified\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

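The notebook above leans on `hummingbird-ml` because `torch.onnx.export` can only trace torch modules; hummingbird rewrites a fitted sklearn estimator as one. A minimal self-contained sketch of just that hop (data and labels are toy values):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression
from hummingbird.ml import convert

# torch.onnx.export cannot trace a sklearn estimator directly,
# so we first rewrite it as a torch.nn.Module with hummingbird
X = np.array([[1.0, 1.0], [1.0, 2.0], [2.0, 2.0], [2.0, 3.0]])
y = np.array([0, 0, 1, 1])

clf = LogisticRegression().fit(X, y)
torch_model = convert(clf, "torch", X[:1]).model  # a traceable torch.nn.Module
```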
View File

@@ -139,7 +139,7 @@
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n"
]
},
@@ -180,7 +180,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -193,7 +193,7 @@
"# now generate the witness file \n",
"witness_path = \"lstmwitness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -300,4 +300,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -91,11 +91,14 @@
"os.system(\"echo shovel is now installed. starting:\")\n",
"\n",
"command = [\"./shovel\", \"-config\", \"config.json\"]\n",
"subprocess.Popen(command)\n",
"proc = subprocess.Popen(command)\n",
"\n",
"os.system(\"echo shovel started.\")\n",
"\n",
"time.sleep(5)\n",
"time.sleep(10)\n",
"\n",
"# after we've fetched some data -- kill the process\n",
"proc.terminate()\n",
"\n"
]
},
@@ -310,7 +313,7 @@
"\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(input_filename, onnx_filename, settings_filename, \"resources\")\n",
"res = await ezkl.calibrate_settings(input_filename, onnx_filename, settings_filename, \"resources\")\n",
"\n",
"assert res == True"
]
@@ -387,7 +390,7 @@
"witness_path = \"witness.json\"\n",
"\n",
"# generate the witness\n",
"res = ezkl.gen_witness(\n",
"res = await ezkl.gen_witness(\n",
" input_filename,\n",
" compiled_filename,\n",
" witness_path\n",
@@ -425,16 +428,6 @@
"assert os.path.isfile(proof_path)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# kill all shovel process \n",
"os.system(\"pkill -f shovel\")"
]
}
],
"metadata": {

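The hunk above keeps the `Popen` handle and terminates it once data has been fetched, replacing the removed `pkill -f shovel` cell; holding the handle stops the exact child that was started rather than pattern-matching on process names. A sketch of the same pattern (the binary and config file are the notebook's own):

```python
import subprocess
import time

# keep the handle so we can stop the exact child we started
proc = subprocess.Popen(["./shovel", "-config", "config.json"])
try:
    time.sleep(10)  # give the indexer time to fetch some data
finally:
    proc.terminate()      # ask the process to exit
    proc.wait(timeout=5)  # reap it so no zombie is left behind
```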
View File

@@ -323,7 +323,7 @@
"res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[2,7])\n",
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[2,7])\n",
"assert res == True"
]
},
@@ -348,7 +348,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs(settings_path)"
"res = await ezkl.get_srs(settings_path)"
]
},
{
@@ -362,7 +362,7 @@
"# now generate the witness file\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -469,7 +469,7 @@
"abi_path = 'test.abi'\n",
"sol_code_path = 'test_1.sol'\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" settings_path,\n",
" sol_code_path,\n",
@@ -502,7 +502,7 @@
"\n",
"address_path = os.path.join(\"address.json\")\n",
"\n",
"res = ezkl.deploy_evm(\n",
"res = await ezkl.deploy_evm(\n",
" address_path,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
@@ -525,7 +525,7 @@
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"res = ezkl.verify_evm(\n",
"res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" \"http://127.0.0.1:3030\"\n",
@@ -558,4 +558,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
}
}

View File

@@ -289,7 +289,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[0,6])"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales=[0,6])"
]
},
{
@@ -309,7 +309,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -321,7 +321,7 @@
"# now generate the witness file \n",
"witness_path = \"gan_witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},

File diff suppressed because one or more lines are too long

View File

@@ -215,7 +215,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -235,7 +235,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -247,7 +247,7 @@
"# now generate the witness file\n",
"witness_path = \"ae_witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -451,7 +451,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True\n",
"print(\"verified\")"
]
@@ -473,7 +473,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -485,7 +485,7 @@
"# now generate the witness file \n",
"witness_path = \"vae_witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},

View File

@@ -845,7 +845,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", max_logrows = 20, scales = [3])\n",
"assert res == True"
]
},
@@ -870,7 +870,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -881,7 +881,7 @@
},
"outputs": [],
"source": [
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -993,4 +993,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
}
}

View File

@@ -232,7 +232,7 @@
"run_args.param_visibility = \"fixed\"\n",
"run_args.output_visibility = \"public\"\n",
"run_args.input_scale = 2\n",
"run_args.logrows = 8\n",
"run_args.logrows = 15\n",
"\n",
"ezkl.get_srs(logrows=run_args.logrows, commitment=ezkl.PyCommitments.KZG)"
]
@@ -261,7 +261,7 @@
"source": [
"# iterate over each submodel gen-settings, compile circuit and setup zkSNARK\n",
"\n",
"def setup(i):\n",
"async def setup(i):\n",
" # file names\n",
" model_path = os.path.join('network_split_'+str(i)+'.onnx')\n",
" settings_path = os.path.join('settings_split_'+str(i)+'.json')\n",
@@ -282,7 +282,7 @@
"\n",
" # generate settings for the current model\n",
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=run_args)\n",
" res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
" res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\", scales=[run_args.input_scale], max_logrows=run_args.logrows)\n",
" assert res == True\n",
"\n",
" # load settings and print them to the console\n",
@@ -303,11 +303,11 @@
" assert os.path.isfile(vk_path)\n",
" assert os.path.isfile(pk_path)\n",
"\n",
" res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
" res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
" run_args.input_scale = settings[\"model_output_scales\"][0]\n",
"\n",
"for i in range(2):\n",
" setup(i)\n"
" await setup(i)\n"
]
},
{
@@ -404,7 +404,7 @@
"run_args.output_visibility = \"polycommit\"\n",
"run_args.variables = [(\"batch_size\", 1)]\n",
"run_args.input_scale = 2\n",
"run_args.logrows = 8\n"
"run_args.logrows = 15\n"
]
},
{
@@ -414,7 +414,7 @@
"outputs": [],
"source": [
"for i in range(2):\n",
" setup(i)"
" await setup(i)"
]
},
{
@@ -466,7 +466,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.5"
},
"orig_nbformat": 4
},

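In the hunk above, `setup` becomes a coroutine and each split circuit's input scale is read from the previous split's `model_output_scales`, so the awaited setups must run strictly in order. A self-contained sketch of that sequencing, with a stand-in for the notebook's `setup`:

```python
import asyncio

async def setup(i: int) -> None:
    # stand-in for the notebook's coroutine: the real one calibrates, compiles
    # and keys split i, then feeds its output scale to split i + 1
    print(f"setting up split {i}")

async def setup_all(n_splits: int) -> None:
    for i in range(n_splits):
        await setup(i)  # strictly sequential: split i + 1 depends on split i

asyncio.run(setup_all(2))
```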
View File

@@ -174,7 +174,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -196,7 +196,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -208,7 +208,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},

View File

@@ -0,0 +1,339 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Reusable Verifiers \n",
"\n",
"This notebook demonstrates how to create and reuse the same set of separated verifiers for different models. Specifically, we will use the same verifier for the following four models:\n",
"\n",
"- `1l_mlp sigmoid`\n",
"- `1l_mlp relu`\n",
"- `1l_conv sigmoid`\n",
"- `1l_conv relu`\n",
"\n",
"When deploying EZKL verifiers on the blockchain, each associated model typically requires its own unique verifier, leading to increased on-chain state usage. \n",
"However, with the reusable verifier, we can deploy a single verifier that can be used to verify proofs for any valid H2 circuit. This notebook shows how to do so. \n",
"\n",
"By reusing the same verifier across multiple models, we significantly reduce the amount of state bloat on the blockchain. Instead of deploying a unique verifier for each model, we deploy a unique and much smaller verifying key artifact (VKA) contract for each model while sharing a common separated verifier. The VKA contains the VK for the model as well circuit specific metadata that was otherwise hardcoded into the stack of the original non-reusable verifier."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.onnx\n",
"\n",
"# Define the models\n",
"class MLP_Sigmoid(nn.Module):\n",
" def __init__(self):\n",
" super(MLP_Sigmoid, self).__init__()\n",
" self.fc = nn.Linear(3, 3)\n",
" self.sigmoid = nn.Sigmoid()\n",
"\n",
" def forward(self, x):\n",
" x = self.fc(x)\n",
" x = self.sigmoid(x)\n",
" return x\n",
"\n",
"class MLP_Relu(nn.Module):\n",
" def __init__(self):\n",
" super(MLP_Relu, self).__init__()\n",
" self.fc = nn.Linear(3, 3)\n",
" self.relu = nn.ReLU()\n",
"\n",
" def forward(self, x):\n",
" x = self.fc(x)\n",
" x = self.relu(x)\n",
" return x\n",
"\n",
"class Conv_Sigmoid(nn.Module):\n",
" def __init__(self):\n",
" super(Conv_Sigmoid, self).__init__()\n",
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
" self.sigmoid = nn.Sigmoid()\n",
"\n",
" def forward(self, x):\n",
" x = self.conv(x)\n",
" x = self.sigmoid(x)\n",
" return x\n",
"\n",
"class Conv_Relu(nn.Module):\n",
" def __init__(self):\n",
" super(Conv_Relu, self).__init__()\n",
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
" self.relu = nn.ReLU()\n",
"\n",
" def forward(self, x):\n",
" x = self.conv(x)\n",
" x = self.relu(x)\n",
" return x\n",
"\n",
"# Instantiate the models\n",
"mlp_sigmoid = MLP_Sigmoid()\n",
"mlp_relu = MLP_Relu()\n",
"conv_sigmoid = Conv_Sigmoid()\n",
"conv_relu = Conv_Relu()\n",
"\n",
"# Dummy input tensor for mlp\n",
"dummy_input_mlp = torch.tensor([[-1.5737053155899048, -1.708398461341858, 0.19544155895709991]])\n",
"input_mlp_path = 'mlp_input.json'\n",
"\n",
"# Dummy input tensor for conv\n",
"dummy_input_conv = torch.tensor([[[1.4124163389205933, 0.6938204169273376, 1.0664031505584717]]])\n",
"input_conv_path = 'conv_input.json'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"names = ['mlp_sigmoid', 'mlp_relu', 'conv_sigmoid', 'conv_relu']\n",
"models = [mlp_sigmoid, mlp_relu, conv_sigmoid, conv_relu]\n",
"inputs = [dummy_input_mlp, dummy_input_mlp, dummy_input_conv, dummy_input_conv]\n",
"input_paths = [input_mlp_path, input_mlp_path, input_conv_path, input_conv_path]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"import torch\n",
"import ezkl\n",
"\n",
"for name, model, x, input_path in zip(names, models, inputs, input_paths):\n",
" # Create a new directory for the model if it doesn't exist\n",
" if not os.path.exists(name):\n",
" os.mkdir(name)\n",
" # Store the paths in each of their respective directories\n",
" model_path = os.path.join(name, \"network.onnx\")\n",
" compiled_model_path = os.path.join(name, \"network.compiled\")\n",
" pk_path = os.path.join(name, \"test.pk\")\n",
" vk_path = os.path.join(name, \"test.vk\")\n",
" settings_path = os.path.join(name, \"settings.json\")\n",
"\n",
" witness_path = os.path.join(name, \"witness.json\")\n",
" sol_code_path = os.path.join(name, 'test.sol')\n",
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
" abi_path = os.path.join(name, 'test.abi')\n",
" proof_path = os.path.join(name, \"proof.json\")\n",
"\n",
" # Flips the neural net into inference mode\n",
" model.eval()\n",
"\n",
" # Export the model\n",
" torch.onnx.export(model, x, model_path, export_params=True, opset_version=10,\n",
" do_constant_folding=True, input_names=['input'],\n",
" output_names=['output'], dynamic_axes={'input': {0: 'batch_size'},\n",
" 'output': {0: 'batch_size'}})\n",
"\n",
" data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
" data = dict(input_data=[data_array])\n",
" json.dump(data, open(input_path, 'w'))\n",
"\n",
" py_run_args = ezkl.PyRunArgs()\n",
" py_run_args.input_visibility = \"private\"\n",
" py_run_args.output_visibility = \"public\"\n",
" py_run_args.param_visibility = \"fixed\" # private by default\n",
"\n",
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)\n",
" assert res == True\n",
"\n",
" await ezkl.calibrate_settings(input_path, model_path, settings_path, \"resources\")\n",
"\n",
" res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
" assert res == True\n",
"\n",
" res = await ezkl.get_srs(settings_path)\n",
" assert res == True\n",
"\n",
" # now generate the witness file\n",
" res = await ezkl.gen_witness(input_path, compiled_model_path, witness_path)\n",
" assert os.path.isfile(witness_path) == True\n",
"\n",
" # SETUP \n",
" # We recommend disabling selector compression for the setup as it decreases the size of the VK artifact\n",
" res = ezkl.setup(compiled_model_path, vk_path, pk_path, disable_selector_compression=True)\n",
" assert res == True\n",
" assert os.path.isfile(vk_path)\n",
" assert os.path.isfile(pk_path)\n",
" assert os.path.isfile(settings_path)\n",
"\n",
" # GENERATE A PROOF\n",
" res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, \"single\")\n",
" assert os.path.isfile(proof_path)\n",
"\n",
" res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)\n",
" assert res == True\n",
"\n",
" res = await ezkl.create_evm_vka(vk_path, settings_path, sol_key_code_path, abi_path)\n",
" assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import subprocess\n",
"import time\n",
"\n",
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"RPC_URL = \"http://localhost:3030\"\n",
"\n",
"# Save process globally\n",
"anvil_process = None\n",
"\n",
"def start_anvil():\n",
" global anvil_process\n",
" if anvil_process is None:\n",
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
" if anvil_process.returncode is not None:\n",
" raise Exception(\"failed to start anvil process\")\n",
" time.sleep(3)\n",
"\n",
"def stop_anvil():\n",
" global anvil_process\n",
" if anvil_process is not None:\n",
" anvil_process.terminate()\n",
" anvil_process = None\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check that the generated verifiers are identical for all models."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"start_anvil()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import filecmp\n",
"\n",
"def compare_files(file1, file2):\n",
" return filecmp.cmp(file1, file2, shallow=False)\n",
"\n",
"sol_code_path_0 = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
"sol_code_path_1 = os.path.join(\"mlp_relu\", 'test.sol')\n",
"\n",
"sol_code_path_2 = os.path.join(\"conv_sigmoid\", 'test.sol')\n",
"sol_code_path_3 = os.path.join(\"conv_relu\", 'test.sol')\n",
"\n",
"\n",
"assert compare_files(sol_code_path_0, sol_code_path_1) == True\n",
"assert compare_files(sol_code_path_2, sol_code_path_3) == True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we deploy separate verifier that will be shared by the four models. We picked the `1l_mlp sigmoid` model as an example but you could have used any of the generated verifiers since they are all identical. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os \n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"sol_code_path = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
"\n",
"res = await ezkl.deploy_evm(\n",
" addr_path_verifier,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030',\n",
" \"verifier/reusable\"\n",
")\n",
"\n",
"assert res == True\n",
"\n",
"with open(addr_path_verifier, 'r') as file:\n",
" addr = file.read().rstrip()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally we deploy each of the unique VK-artifacts and verify them using the shared verifier deployed in the previous step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for name in names:\n",
" addr_path_vk = \"addr_vk.txt\"\n",
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
" res = await ezkl.deploy_evm(addr_path_vk, sol_key_code_path, 'http://127.0.0.1:3030', \"vka\")\n",
" assert res == True\n",
"\n",
" with open(addr_path_vk, 'r') as file:\n",
" addr_vk = file.read().rstrip()\n",
" \n",
" proof_path = os.path.join(name, \"proof.json\")\n",
" sol_code_path = os.path.join(name, 'vk.sol')\n",
" res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" \"http://127.0.0.1:3030\",\n",
" addr_vk = addr_vk\n",
" )\n",
" assert res == True"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".env",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -215,7 +215,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -229,7 +229,7 @@
"source": [
"# now generate the witness file\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -265,7 +265,7 @@
" # Serialize data into file:\n",
"json.dump( data, open(data_path_faulty, 'w' ))\n",
"\n",
"res = ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path_faulty)\n",
"res = await ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path_faulty)\n",
"assert os.path.isfile(witness_path_faulty)"
]
},
@@ -310,7 +310,7 @@
"# Serialize data into file:\n",
"json.dump( data, open(data_path_truthy, 'w' ))\n",
"\n",
"res = ezkl.gen_witness(data_path_truthy, compiled_model_path, witness_path_truthy)\n",
"res = await ezkl.gen_witness(data_path_truthy, compiled_model_path, witness_path_truthy)\n",
"assert os.path.isfile(witness_path_truthy)"
]
},
@@ -482,7 +482,7 @@
"source": [
"import pytest\n",
"def test_verification():\n",
" with pytest.raises(RuntimeError, match='Failed to run verify: The constraint system is not satisfied'):\n",
" with pytest.raises(RuntimeError, match='Failed to run verify: \\\\[halo2\\\\] The constraint system is not satisfied'):\n",
" ezkl.verify(\n",
" proof_path_faulty,\n",
" settings_path,\n",
@@ -514,7 +514,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.3"
}
},
"nbformat": 4,

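The `match` argument of `pytest.raises` is a regular expression (checked with `re.search`), which is why the new `[halo2]` prefix in the expected error message arrives with escaped brackets in the hunk above. A minimal sketch with a stand-in error:

```python
import pytest

def failing_verify():
    # stand-in for ezkl.verify raising on an unsatisfied constraint system
    raise RuntimeError("Failed to run verify: [halo2] The constraint system is not satisfied")

def test_verification_message():
    # match is a regex, so the literal brackets must be escaped
    with pytest.raises(RuntimeError, match=r"Failed to run verify: \[halo2\] The constraint system is not satisfied"):
        failing_verify()
```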
View File

@@ -193,7 +193,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -205,7 +205,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -290,7 +290,7 @@
"source": [
"# Generate a larger SRS. This is needed for the aggregated proof\n",
"\n",
"res = ezkl.get_srs(settings_path=None, logrows=21, commitment=ezkl.PyCommitments.KZG)"
"res = await ezkl.get_srs(settings_path=None, logrows=21, commitment=ezkl.PyCommitments.KZG)"
]
},
{
@@ -374,7 +374,7 @@
"sol_code_path = os.path.join(\"Verifier.sol\")\n",
"abi_path = os.path.join(\"Verifier_ABI.json\")\n",
"\n",
"res = ezkl.create_evm_verifier_aggr(\n",
"res = await ezkl.create_evm_verifier_aggr(\n",
" [settings_path],\n",
" aggregate_vk_path,\n",
" sol_code_path,\n",
@@ -404,4 +404,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

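The aggregation circuit wraps the inner proof in a second, larger circuit, so its SRS is sized explicitly (`logrows=21` in the hunk above) rather than derived from the model's settings file. The same call in a plain script, assuming the awaited signature shown in the diff:

```python
import asyncio

import ezkl

async def fetch_aggregation_srs():
    # no settings file: the aggregate circuit's size is given directly
    return await ezkl.get_srs(settings_path=None, logrows=21, commitment=ezkl.PyCommitments.KZG)

if __name__ == "__main__":
    asyncio.run(fetch_aggregation_srs())
```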
View File

@@ -157,6 +157,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "b78d3cbf",
"metadata": {},
"outputs": [],
"source": [
@@ -170,7 +171,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -204,7 +205,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -298,7 +299,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.3"
}
},
"nbformat": 4,

View File

@@ -169,7 +169,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -191,7 +191,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -203,7 +203,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},

View File

@@ -170,7 +170,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -192,7 +192,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -204,7 +204,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},

View File

@@ -149,7 +149,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -171,7 +171,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -183,7 +183,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -282,4 +282,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -250,7 +250,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -297,7 +297,7 @@
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path, vk_path)\n",
"assert os.path.isfile(witness_path)\n",
"\n",
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
@@ -411,7 +411,7 @@
"source": [
"# now generate the witness file\n",
"\n",
"res = ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path, vk_path)\n",
"res = await ezkl.gen_witness(data_path_faulty, compiled_model_path, witness_path, vk_path)\n",
"assert os.path.isfile(witness_path)\n",
"\n",
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
@@ -478,12 +478,11 @@
"import pytest\n",
"\n",
"def test_verification():\n",
" with pytest.raises(RuntimeError, match='Failed to run verify: The constraint system is not satisfied'):\n",
" with pytest.raises(RuntimeError, match='Failed to run verify: \\\\[halo2\\\\] The constraint system is not satisfied'):\n",
" ezkl.verify(\n",
" proof_path,\n",
" settings_path,\n",
" vk_path,\n",
" \n",
" )\n",
"\n",
"# Run the test function\n",
@@ -510,9 +509,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -167,7 +167,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True"
]
},
@@ -187,7 +187,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -209,7 +209,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -221,7 +221,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -320,4 +320,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -39,7 +39,7 @@
"import json\n",
"import numpy as np\n",
"from sklearn.svm import SVC\n",
"import sk2torch\n",
"from hummingbird.ml import convert\n",
"import torch\n",
"import ezkl\n",
"import os\n",
@@ -59,11 +59,11 @@
"# Train an SVM on the data and wrap it in PyTorch.\n",
"sk_model = SVC(probability=True)\n",
"sk_model.fit(xs, ys)\n",
"model = sk2torch.wrap(sk_model)\n",
"\n",
"model = convert(sk_model, \"torch\").model\n",
"\n",
"\n",
"\n",
"model\n",
"\n"
]
},
@@ -84,33 +84,6 @@
"data_path = os.path.join('input.json')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f0ca328",
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"# Create a coordinate grid to compute a vector field on.\n",
"spaced = np.linspace(-2, 2, num=25)\n",
"grid_xs = torch.tensor([[x, y] for x in spaced for y in spaced], requires_grad=True)\n",
"\n",
"\n",
"# Compute the gradients of the SVM output.\n",
"outputs = model.predict_proba(grid_xs)[:, 1]\n",
"(input_grads,) = torch.autograd.grad(outputs.sum(), (grid_xs,))\n",
"\n",
"\n",
"# Create a quiver plot of the vector field.\n",
"plt.quiver(\n",
" grid_xs[:, 0].detach().numpy(),\n",
" grid_xs[:, 1].detach().numpy(),\n",
" input_grads[:, 0].detach().numpy(),\n",
" input_grads[:, 1].detach().numpy(),\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -119,14 +92,14 @@
"outputs": [],
"source": [
"\n",
"\n",
"spaced = np.linspace(-2, 2, num=25)\n",
"grid_xs = torch.tensor([[x, y] for x in spaced for y in spaced], requires_grad=True)\n",
"# export to onnx format\n",
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
"\n",
"# Input to the model\n",
"shape = xs.shape[1:]\n",
"x = grid_xs[0:1]\n",
"torch_out = model.predict(x)\n",
"# Export the model\n",
"torch.onnx.export(model, # model being run\n",
" # model input (or a tuple for multiple inputs)\n",
@@ -143,9 +116,7 @@
"\n",
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_shapes=[shape],\n",
" input_data=[d],\n",
" output_data=[o.reshape([-1]).tolist() for o in torch_out])\n",
"data = dict(input_data=[d])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(\"input.json\", 'w'))\n"
@@ -167,6 +138,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "0bee4d7f",
"metadata": {},
"outputs": [],
"source": [
@@ -180,7 +152,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -202,7 +174,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -214,13 +186,13 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": null,
"id": "b1c561a8",
"metadata": {},
"outputs": [],
@@ -420,7 +392,7 @@
"res = ezkl.gen_settings(model_path, settings_path)\n",
"assert res == True\n",
"\n",
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"res = await ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
"assert res == True"
]
}
@@ -441,7 +413,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.3"
}
},
"nbformat": 4,

View File

@@ -13,7 +13,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -57,7 +57,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -119,7 +119,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -163,7 +163,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -217,7 +217,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -637,7 +637,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
]
},
{
@@ -646,7 +646,7 @@
"metadata": {},
"outputs": [],
"source": [
"ezkl.get_srs( settings_path)"
"await ezkl.get_srs( settings_path)"
]
},
{
@@ -683,7 +683,7 @@
" data = json.load(f)\n",
" print(len(data['input_data'][0]))\n",
"\n",
"ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
"await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
]
},
{

View File

@@ -525,7 +525,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
]
},
{
@@ -572,7 +572,7 @@
" data = json.load(f)\n",
" print(len(data['input_data'][0]))\n",
"\n",
"ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
"await ezkl.gen_witness(data_path, compiled_model_path, witness_path)"
]
},
{

View File

@@ -24,7 +24,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {
"id": "9Byiv2Nc2MsK"
},
@@ -49,7 +49,11 @@
"import pandas as pd\n",
"import requests\n",
"import json\n",
"import os"
"import os\n",
"\n",
"import logging\n",
"\n",
"logging.basicConfig(level=logging.INFO)"
]
},
{
@@ -63,7 +67,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 2,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -71,7 +75,15 @@
"id": "x1vl9ZXF3EEW",
"outputId": "bda21d02-fe5f-4fb2-8106-f51a8e2e67aa"
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"cpu\n"
]
}
],
"source": [
"from torch import nn\n",
"import torch\n",
@@ -133,7 +145,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -141,7 +153,18 @@
"id": "6RAMplxk5xPk",
"outputId": "bd2158fe-0c00-44fd-e632-6a3f70cdb7c9"
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1715422870\n",
"1714818070\n",
"https://api.coingecko.com/api/v3/coins/ethereum/market_chart/range?vs_currency=usd&from=1714818070&to=1715422870\n",
"<Response [200]>\n"
]
}
],
"source": [
"\n",
"def get_url(coin, currency, start, end):\n",
@@ -174,7 +197,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 4,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
@@ -183,7 +206,115 @@
"id": "WSj1Uxln65vf",
"outputId": "51422d71-9680-4b51-c4df-e400d20f988b"
},
"outputs": [],
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>time</th>\n",
" <th>prices</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>1714820485367</td>\n",
" <td>3146.785806</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1714824033868</td>\n",
" <td>3127.968728</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>1714828058243</td>\n",
" <td>3156.141681</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>1714831650751</td>\n",
" <td>3124.834064</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>1714834972229</td>\n",
" <td>3133.115333</td>\n",
" </tr>\n",
" <tr>\n",
" <th>...</th>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>163</th>\n",
" <td>1715407579346</td>\n",
" <td>2918.049749</td>\n",
" </tr>\n",
" <tr>\n",
" <th>164</th>\n",
" <td>1715411090715</td>\n",
" <td>2920.330834</td>\n",
" </tr>\n",
" <tr>\n",
" <th>165</th>\n",
" <td>1715414554830</td>\n",
" <td>2923.986611</td>\n",
" </tr>\n",
" <tr>\n",
" <th>166</th>\n",
" <td>1715418419843</td>\n",
" <td>2910.537671</td>\n",
" </tr>\n",
" <tr>\n",
" <th>167</th>\n",
" <td>1715421675338</td>\n",
" <td>2907.702307</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>168 rows × 2 columns</p>\n",
"</div>"
],
"text/plain": [
" time prices\n",
"0 1714820485367 3146.785806\n",
"1 1714824033868 3127.968728\n",
"2 1714828058243 3156.141681\n",
"3 1714831650751 3124.834064\n",
"4 1714834972229 3133.115333\n",
".. ... ...\n",
"163 1715407579346 2918.049749\n",
"164 1715411090715 2920.330834\n",
"165 1715414554830 2923.986611\n",
"166 1715418419843 2910.537671\n",
"167 1715421675338 2907.702307\n",
"\n",
"[168 rows x 2 columns]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"df = pd.DataFrame(new_data)\n",
"df\n"
@@ -200,7 +331,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -217,7 +348,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -225,7 +356,98 @@
"id": "4MmE9SX66_Il",
"outputId": "16403639-66a4-4280-ac7f-6966b75de5a3"
},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:ezkl.execute:SRS already exists at that path\n",
"INFO:ezkl.execute:num calibration batches: 1\n",
"INFO:ezkl.execute:read 16777476 bytes from file (vector of len = 16777476)\n",
"WARNING:ezkl.execute:\n",
"\n",
" <------------- Numerical Fidelity Report (input_scale: 4, param_scale: 4, scale_input_multiplier: 10) ------------->\n",
"\n",
"+------------+--------------+-----------+-----------+----------------+------------------+---------------+---------------+--------------------+--------------------+------------------------+\n",
"| mean_error | median_error | max_error | min_error | mean_abs_error | median_abs_error | max_abs_error | min_abs_error | mean_squared_error | mean_percent_error | mean_abs_percent_error |\n",
"+------------+--------------+-----------+-----------+----------------+------------------+---------------+---------------+--------------------+--------------------+------------------------+\n",
"| -727.9929 | -727.9929 | -727.9929 | -727.9929 | 727.9929 | 727.9929 | 727.9929 | 727.9929 | 529973.7 | -0.24999964 | 0.24999964 |\n",
"+------------+--------------+-----------+-----------+----------------+------------------+---------------+---------------+--------------------+--------------------+------------------------+\n",
"\n",
"\n",
"INFO:ezkl.execute:file hash: 41509f380362a8d14401c5ae92073154922fe23e45459ce6f696f58607655db7\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"run_args\": {\n",
" \"tolerance\": {\n",
" \"val\": 0.0,\n",
" \"scale\": 1.0\n",
" },\n",
" \"input_scale\": 4,\n",
" \"param_scale\": 4,\n",
" \"scale_rebase_multiplier\": 10,\n",
" \"lookup_range\": [\n",
" 0,\n",
" 0\n",
" ],\n",
" \"logrows\": 6,\n",
" \"num_inner_cols\": 2,\n",
" \"variables\": [\n",
" [\n",
" \"batch_size\",\n",
" 1\n",
" ]\n",
" ],\n",
" \"input_visibility\": \"Private\",\n",
" \"output_visibility\": \"Public\",\n",
" \"param_visibility\": \"Private\",\n",
" \"div_rebasing\": false,\n",
" \"rebase_frac_zero_constants\": false,\n",
" \"check_mode\": \"UNSAFE\",\n",
" \"commitment\": \"KZG\"\n",
" },\n",
" \"num_rows\": 21,\n",
" \"total_assignments\": 42,\n",
" \"total_const_size\": 0,\n",
" \"total_dynamic_col_size\": 0,\n",
" \"num_dynamic_lookups\": 0,\n",
" \"num_shuffles\": 0,\n",
" \"total_shuffle_col_size\": 0,\n",
" \"model_instance_shapes\": [\n",
" [\n",
" 1\n",
" ]\n",
" ],\n",
" \"model_output_scales\": [\n",
" 8\n",
" ],\n",
" \"model_input_scales\": [\n",
" 4\n",
" ],\n",
" \"module_sizes\": {\n",
" \"polycommit\": [],\n",
" \"poseidon\": [\n",
" 0,\n",
" [\n",
" 0\n",
" ]\n",
" ]\n",
" },\n",
" \"required_lookups\": [],\n",
" \"required_range_checks\": [],\n",
" \"check_mode\": \"UNSAFE\",\n",
" \"version\": \"0.0.0\",\n",
" \"num_blinding_factors\": null,\n",
" \"timestamp\": 1715422871248\n",
"}\n"
]
}
],
"source": [
"# generate settings\n",
"onnx_filename = os.path.join('lol.onnx')\n",
@@ -236,9 +458,9 @@
"\n",
"\n",
"ezkl.gen_settings(onnx_filename, settings_filename)\n",
"ezkl.calibrate_settings(\n",
"await ezkl.calibrate_settings(\n",
" input_filename, onnx_filename, settings_filename, \"resources\", scales = [4])\n",
"res = ezkl.get_srs(settings_filename)\n",
"res = await ezkl.get_srs(settings_filename)\n",
"ezkl.compile_circuit(onnx_filename, compiled_filename, settings_filename)\n",
"\n",
"# show the settings.json\n",
@@ -259,7 +481,24 @@
"metadata": {
"id": "fULvvnK7_CMb"
},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:ezkl.pfsys.srs:loading srs from \"/Users/dante/.ezkl/srs/kzg6.srs\"\n",
"INFO:ezkl.execute:downsizing params to 6 logrows\n",
"INFO:ezkl.graph.model:model layout...\n",
"INFO:ezkl.pfsys:VK took 0.8\n",
"INFO:ezkl.graph.model:model layout...\n",
"INFO:ezkl.pfsys:PK took 0.2\n",
"INFO:ezkl.pfsys:saving verification key 💾\n",
"INFO:ezkl.pfsys:done saving verification key ✅\n",
"INFO:ezkl.pfsys:saving proving key 💾\n",
"INFO:ezkl.pfsys:done saving proving key ✅\n"
]
}
],
"source": [
"pk_path = os.path.join('test.pk')\n",
"vk_path = os.path.join('test.vk')\n",
@@ -281,20 +520,20 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
"res = await ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 9,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
@@ -302,7 +541,34 @@
"id": "Oog3j6Kd-Wed",
"outputId": "5839d0c1-5b43-476e-c2f8-6707de562260"
},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:ezkl.pfsys:loading proving key from \"test.pk\"\n",
"INFO:ezkl.pfsys:done loading proving key ✅\n",
"INFO:ezkl.pfsys.srs:loading srs from \"/Users/dante/.ezkl/srs/kzg6.srs\"\n",
"INFO:ezkl.execute:downsizing params to 6 logrows\n",
"INFO:ezkl.pfsys:proof started...\n",
"INFO:ezkl.graph.model:model layout...\n",
"INFO:ezkl.pfsys:proof took 0.15\n",
"INFO:ezkl.pfsys.srs:loading srs from \"/Users/dante/.ezkl/srs/kzg6.srs\"\n",
"INFO:ezkl.execute:downsizing params to 6 logrows\n",
"INFO:ezkl.pfsys:loading verification key from \"test.vk\"\n",
"INFO:ezkl.pfsys:done loading verification key ✅\n",
"INFO:ezkl.execute:verify took 0.2\n",
"INFO:ezkl.execute:verified: true\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"verified\n"
]
}
],
"source": [
"# prove the zk circuit\n",
"# GENERATE A PROOF\n",
@@ -351,7 +617,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
@@ -360,7 +626,26 @@
"id": "fodkNgwS70FM",
"outputId": "827b5efd-f74f-44de-c114-861b3a86daf2"
},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:ezkl.pfsys.srs:loading srs from \"/Users/dante/.ezkl/srs/kzg6.srs\"\n",
"INFO:ezkl.execute:downsizing params to 6 logrows\n",
"INFO:ezkl.pfsys:loading verification key from \"test.vk\"\n",
"INFO:ezkl.pfsys:done loading verification key ✅\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"test.vk\n",
"settings.json\n"
]
}
],
"source": [
"# first we need to create evm verifier\n",
"print(vk_path)\n",
@@ -370,7 +655,7 @@
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" settings_filename,\n",
" sol_code_path,\n",
@@ -381,9 +666,18 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 11,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:ezkl.eth:using chain 31337\n",
"INFO:ezkl.execute:Contract deployed at: 0x998abeb3e57409262ae5b751f60747921b33613e\n"
]
}
],
"source": [
"# Make sure anvil is running locally first\n",
"# run with $ anvil -p 3030\n",
@@ -391,8 +685,9 @@
"import json\n",
"\n",
"address_path = os.path.join(\"address.json\")\n",
"\n",
"res = ezkl.deploy_evm(\n",
"sol_code_path = 'test.sol'\n",
"# await\n",
"res = await ezkl.deploy_evm(\n",
" address_path,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
@@ -406,16 +701,26 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:ezkl.eth:using chain 31337\n",
"INFO:ezkl.eth:estimated verify gas cost: 399775\n",
"INFO:ezkl.execute:Solidity verification result: true\n"
]
}
],
"source": [
"# read the address from addr_path\n",
"addr = None\n",
"with open(address_path, 'r') as f:\n",
" addr = f.read()\n",
"\n",
"res = ezkl.verify_evm(\n",
"res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" \"http://127.0.0.1:3030\"\n",
@@ -451,7 +756,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.2"
}
},
"nbformat": 4,

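The `logging.basicConfig(level=logging.INFO)` line added near the top of this notebook is what surfaces the `INFO:ezkl.*` records seen in the committed outputs: ezkl forwards its Rust-side log records through Python's `logging`, so they only appear once a level and handler are configured. Equivalently, in a plain script:

```python
import logging

# INFO shows the progress lines seen in the outputs above; DEBUG is noisier
logging.basicConfig(level=logging.INFO)
```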
View File

@@ -0,0 +1 @@
[{"type":"function","name":"verifyProof","inputs":[{"name":"proof","type":"bytes","internalType":"bytes"},{"name":"instances","type":"uint256[]","internalType":"uint256[]"}],"outputs":[{"name":"","type":"bool","internalType":"bool"}],"stateMutability":"nonpayable"}]

View File

@@ -629,7 +629,7 @@
"source": [
"\n",
"\n",
"res = ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
"res = await ezkl.calibrate_settings(val_data, model_path, settings_path, \"resources\", scales = [4])\n",
"assert res == True\n",
"print(\"verified\")\n"
]
@@ -660,7 +660,7 @@
"metadata": {},
"outputs": [],
"source": [
"res = ezkl.get_srs(settings_path)"
"res = await ezkl.get_srs(settings_path)"
]
},
{
@@ -680,7 +680,7 @@
"\n",
"witness_path = \"witness.json\"\n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -807,7 +807,7 @@
"settings_path = os.path.join('settings.json')\n",
"\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" \n",
" settings_path,\n",
@@ -847,7 +847,7 @@
"\n",
"address_path = os.path.join(\"address.json\")\n",
"\n",
"res = ezkl.deploy_evm(\n",
"res = await ezkl.deploy_evm(\n",
" address_path,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
@@ -868,7 +868,7 @@
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"res = ezkl.verify_evm(\n",
"res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" \"http://127.0.0.1:3030\"\n",
@@ -905,4 +905,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}
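
The recurring change across these notebook diffs is that the Python bindings are now awaitable. Jupyter supports top-level `await`; in a plain script you have to drive the coroutines yourself. A minimal sketch, with placeholder file names:

```python
# Minimal sketch of the now-awaitable ezkl API outside Jupyter,
# where top-level `await` is unavailable. Paths are placeholders.
import asyncio
import ezkl

async def pipeline():
    res = await ezkl.calibrate_settings(
        "input.json", "network.onnx", "settings.json", "resources")
    assert res == True
    await ezkl.get_srs("settings.json")
    await ezkl.gen_witness("input.json", "network.compiled", "witness.json")

asyncio.run(pipeline())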

View File

@@ -242,6 +242,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "2007dc77",
"metadata": {},
"outputs": [],
"source": [
@@ -257,6 +258,7 @@
},
{
"cell_type": "markdown",
"id": "ab993958",
"metadata": {},
"source": [
"As we use Halo2 with KZG-commitments we need an SRS string from (preferably) a multi-party trusted setup ceremony. For an overview of the procedures for such a ceremony check out [this page](https://blog.ethereum.org/2023/01/16/announcing-kzg-ceremony). The `get_srs` command retrieves a correctly sized SRS given the calibrated settings file from [here](https://github.com/han0110/halo2-kzg-srs). \n",
@@ -272,7 +274,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -284,12 +286,13 @@
"source": [
"# now generate the witness file \n",
"\n",
"witness = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"witness = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
{
"cell_type": "markdown",
"id": "ad58432e",
"metadata": {},
"source": [
"Here we setup verifying and proving keys for the circuit. As the name suggests the proving key is needed for ... proving and the verifying key is needed for ... verifying. "
@@ -317,6 +320,7 @@
},
{
"cell_type": "markdown",
"id": "1746c8d1",
"metadata": {},
"source": [
"We can now create an EVM verifier contract from our circuit. This contract will be deployed to the chain we are using. In this case we are using a local anvil instance."
@@ -325,15 +329,15 @@
{
"cell_type": "code",
"execution_count": null,
"id": "d1920c0f",
"metadata": {},
"outputs": [],
"source": [
"abi_path = 'test.abi'\n",
"sol_code_path = 'test.sol'\n",
"\n",
"res = ezkl.create_evm_verifier(\n",
"res = await ezkl.create_evm_verifier(\n",
" vk_path,\n",
" \n",
" settings_path,\n",
" sol_code_path,\n",
" abi_path,\n",
@@ -344,6 +348,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "0fd7f22b",
"metadata": {},
"outputs": [],
"source": [
@@ -351,7 +356,7 @@
"\n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"\n",
"res = ezkl.deploy_evm(\n",
"res = await ezkl.deploy_evm(\n",
" addr_path_verifier,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030'\n",
@@ -362,6 +367,7 @@
},
{
"cell_type": "markdown",
"id": "9c0dffab",
"metadata": {},
"source": [
"With the vanilla verifier deployed, we can now create the data attestation contract, which will read in the instances from the calldata to the verifier, attest to them, call the verifier and then return the result. \n",
@@ -371,6 +377,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "cc888848",
"metadata": {},
"outputs": [],
"source": []
@@ -378,6 +385,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "c2db14d7",
"metadata": {},
"outputs": [],
"source": [
@@ -385,7 +393,7 @@
"sol_code_path = 'test.sol'\n",
"input_path = 'input.json'\n",
"\n",
"res = ezkl.create_evm_data_attestation(\n",
"res = await ezkl.create_evm_data_attestation(\n",
" input_path,\n",
" settings_path,\n",
" sol_code_path,\n",
@@ -396,12 +404,13 @@
{
"cell_type": "code",
"execution_count": null,
"id": "5a018ba6",
"metadata": {},
"outputs": [],
"source": [
"addr_path_da = \"addr_da.txt\"\n",
"\n",
"res = ezkl.deploy_da_evm(\n",
"res = await ezkl.deploy_da_evm(\n",
" addr_path_da,\n",
" input_path,\n",
" settings_path,\n",
@@ -412,6 +421,7 @@
},
{
"cell_type": "markdown",
"id": "2adad845",
"metadata": {},
"source": [
"Now we can pull in the data from the contract and calculate a new set of coordinates. We then rotate the world by 1 transform and submit the proof to the contract. The contract could then update the world rotation (logic not inserted here). For demo purposes we do this repeatedly, rotating the world by 1 transform. "
@@ -444,6 +454,7 @@
},
{
"cell_type": "markdown",
"id": "90eda56e",
"metadata": {},
"source": [
"Call the view only verify method on the contract to verify the proof. Since it is a view function this is safe to use in production since you don't have to pass your private key."
@@ -528,7 +539,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.2"
}
},
"nbformat": 4,

View File

@@ -193,7 +193,7 @@
"with open(cal_path, \"w\") as f:\n",
" json.dump(cal_data, f)\n",
"\n",
"res = ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"res = await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -215,7 +215,7 @@
"outputs": [],
"source": [
"# srs path\n",
"res = ezkl.get_srs( settings_path)"
"res = await ezkl.get_srs( settings_path)"
]
},
{
@@ -227,7 +227,7 @@
"source": [
"# now generate the witness file \n",
"\n",
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"res = await ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
"assert os.path.isfile(witness_path)"
]
},
@@ -346,4 +346,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}

Binary file not shown.

View File

@@ -0,0 +1 @@
{"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":7,"param_scale":7,"scale_rebase_multiplier":10,"lookup_range":[0,0],"logrows":13,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private"},"num_constraints":5619,"total_const_size":513,"model_instance_shapes":[[1,3,10,10]],"model_output_scales":[14],"model_input_scales":[7],"module_sizes":{"kzg":[],"poseidon":[0,[0]],"elgamal":[0,[0]]},"required_lookups":[],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null}

View File

@@ -9,7 +9,9 @@ class MyModel(nn.Module):
super(MyModel, self).__init__()
def forward(self, w, x, y, z):
return [((x & y)) == (x & (y | (z ^ w)))]
a = (x & y)
b = (y & (z ^ w))
return [a & b]
circuit = MyModel()

View File

@@ -1 +1 @@
{"input_data": [[false, true, false], [true, false, false], [true, false, false], [false, false, false]]}
{"input_data": [[false, true, true], [false, true, true], [true, false, false], [false, true, true]]}

View File

@@ -1,21 +1,17 @@
(Binary ONNX protobuf diff, not meaningfully renderable as text: the model was re-exported with pytorch 2.2.2 (previously pytorch 1.12.1), replacing the old And/Xor/Or/Equal node graph with the And/Xor/And graph of the updated forward pass above.)

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -104,5 +104,5 @@ json.dump(data, open("input.json", 'w'))
# ezkl.gen_settings("network.onnx", "settings.json")
# !RUST_LOG = full
# res = ezkl.calibrate_settings(
# res = await ezkl.calibrate_settings(
# "input.json", "network.onnx", "settings.json", "resources")

View File

@@ -0,0 +1 @@
network.onnx filter=lfs diff=lfs merge=lfs -text

View File

@@ -0,0 +1,47 @@
## The worm
This is an ONNX file for a [WormVAE](https://github.com/TuragaLab/wormvae?tab=readme-ov-file) model, a VAE / latent-space representation of the C. elegans connectome.
The model "is a large-scale latent variable model with a very high-dimensional latent space
consisting of voltage dynamics of 300 neurons over 5 minutes of time at the simulation frequency
of 160 Hz. The generative model for these latent variables is described by stochastic differential
equations modeling the nonlinear dynamics of the network activity." (see [here](https://openreview.net/pdf?id=CJzi3dRlJE-)).
In effect, this is a generative model of a worm's voltage dynamics: given a previous connectome state, it can generate new worm-like voltage dynamics.
Using ezkl you can create a zk circuit equivalent to the WormVAE model, allowing you to "prove" execution of the worm model. If you're feeling particularly adventurous, you can also use the zk circuit to generate new worm state that can be verified on chain.
To do so, you'll first want to fetch the files using git-lfs (the ONNX file is too large to be stored directly in git):
```bash
git lfs fetch --all
```
You'll then want to use the usual ezkl loop to generate the zk circuit. We recommend using fixed visibility for the model parameters, as the model is quite large and this will prune the circuit significantly.
```bash
ezkl gen-settings --param-visibility=fixed
cp input.json calibration.json
ezkl calibrate-settings
ezkl compile-circuit
ezkl gen-witness
ezkl prove
```
You might also need to aggregate the proof to get it to fit on chain.
```bash
ezkl aggregate
```
You can then create a smart contract that verifies this aggregate proof
```bash
ezkl create-evm-verifier-aggr
```
This can then be deployed on the chain of your choice.
> Note: the model is large, so we recommend a machine with at least 512GB of RAM to run the above commands. If you're compute-constrained you can always use the lilith service to generate the zk circuit. Message us on Discord or Telegram for more details :)

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f88c5901d3768ec21e3cf2f2840d255e84fa13c364df86b24d960cca3333769
size 82095882

View File

@@ -0,0 +1 @@
{"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":0,"param_scale":6,"scale_rebase_multiplier":1,"lookup_range":[-32768,32768],"logrows":17,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Fixed"},"num_constraints":367422820,"total_const_size":365577160,"model_instance_shapes":[[1,300,1200]],"model_output_scales":[6],"model_input_scales":[0,0,0],"module_sizes":{"kzg":[],"poseidon":[0,[0]],"elgamal":[0,[0]]},"required_lookups":[{"Div":{"denom":64.0}},"ReLU",{"Ln":{"scale":64.0}},{"Exp":{"scale":64.0}}],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null}

View File

@@ -9,7 +9,6 @@ import { EVM } from '@ethereumjs/evm'
import { buildTransaction, encodeDeployment } from './utils/tx-builder'
import { getAccountNonce, insertAccount } from './utils/account-utils'
import { encodeVerifierCalldata } from '../nodejs/ezkl';
import { error } from 'console'
async function deployContract(
vm: VM,
@@ -66,7 +65,7 @@ async function verify(
vkAddress = new Uint8Array(uint8Array.buffer);
// convert Uint8Array of length
error('vkAddress', vkAddress)
console.error('vkAddress', vkAddress)
}
const data = encodeVerifierCalldata(proof, vkAddress)

View File

@@ -99,6 +99,10 @@ fi
echo "Removing old ezkl binary if it exists"
[ -e file ] && rm file
# echo platform and architecture
echo "Platform: $PLATFORM"
echo "Architecture: $ARCHITECTURE"
# download the release and unpack the right tarball
if [ "$PLATFORM" == "windows-msvc" ]; then
JSON_RESPONSE=$(curl -s "$RELEASE_URL")
@@ -126,7 +130,6 @@ elif [ "$PLATFORM" == "macos" ]; then
echo "Cleaning up"
rm "$EZKL_DIR/build-artifacts.ezkl-macos-aarch64.tar.gz"
else
JSON_RESPONSE=$(curl -s "$RELEASE_URL")
FILE_URL=$(echo "$JSON_RESPONSE" | grep -o 'https://github.com[^"]*' | grep "build-artifacts.ezkl-macos.tar.gz")
@@ -143,7 +146,7 @@ elif [ "$PLATFORM" == "macos" ]; then
fi
elif [ "$PLATFORM" == "linux" ]; then
if [ "${ARCHITECTURE}" = "amd64" ]; then
if [ "$ARCHITECTURE" == "amd64" ]; then
JSON_RESPONSE=$(curl -s "$RELEASE_URL")
FILE_URL=$(echo "$JSON_RESPONSE" | grep -o 'https://github.com[^"]*' | grep "build-artifacts.ezkl-linux-gnu.tar.gz")
@@ -155,9 +158,20 @@ elif [ "$PLATFORM" == "linux" ]; then
echo "Cleaning up"
rm "$EZKL_DIR/build-artifacts.ezkl-linux-gnu.tar.gz"
elif [ "$ARCHITECTURE" == "aarch64" ]; then
JSON_RESPONSE=$(curl -s "$RELEASE_URL")
FILE_URL=$(echo "$JSON_RESPONSE" | grep -o 'https://github.com[^"]*' | grep "build-artifacts.ezkl-linux-aarch64.tar.gz")
echo "Downloading package"
curl -L "$FILE_URL" -o "$EZKL_DIR/build-artifacts.ezkl-linux-aarch64.tar.gz"
echo "Unpacking package"
tar -xzf "$EZKL_DIR/build-artifacts.ezkl-linux-aarch64.tar.gz" -C "$EZKL_DIR"
echo "Cleaning up"
rm "$EZKL_DIR/build-artifacts.ezkl-linux-aarch64.tar.gz"
else
echo "ARM architectures are not supported for Linux at the moment. If you would need support for the ARM architectures on linux please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
echo "Non aarch ARM architectures are not supported for Linux at the moment. If you would need support for the ARM architectures on linux please submit an issue https://github.com/zkonduit/ezkl/issues/new/choose"
exit 1
fi
else

View File

@@ -8,6 +8,7 @@ addopts = "-rfEX -p pytester --strict-markers"
testpaths = [
"tests/python/*_tests.py",
]
asyncio_mode = "auto"
[project]
name = "ezkl"

View File

@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2024-02-06"
channel = "nightly-2024-07-18"
components = ["rustfmt", "clippy"]

View File

@@ -1,48 +1,65 @@
// ignore file if compiling for wasm
#[global_allocator]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
#[cfg(not(target_arch = "wasm32"))]
use clap::Parser;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use clap::{CommandFactory, Parser};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use colored_json::ToColoredJson;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use ezkl::commands::Cli;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use ezkl::execute::run;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use ezkl::logger::init_logger;
#[cfg(not(target_arch = "wasm32"))]
use log::{debug, error, info};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use log::{error, info};
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
use rand::prelude::SliceRandom;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
#[cfg(feature = "icicle")]
use std::env;
#[cfg(not(target_arch = "wasm32"))]
use std::error::Error;
#[tokio::main(flavor = "current_thread")]
#[cfg(not(target_arch = "wasm32"))]
pub async fn main() -> Result<(), Box<dyn Error>> {
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub async fn main() {
let args = Cli::parse();
init_logger();
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
banner();
#[cfg(feature = "icicle")]
if env::var("ENABLE_ICICLE_GPU").is_ok() {
info!("Running with ICICLE GPU");
if let Some(generator) = args.generator {
ezkl::commands::print_completions(generator, &mut Cli::command());
} else if let Some(command) = args.command {
init_logger();
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
banner();
#[cfg(feature = "icicle")]
if env::var("ENABLE_ICICLE_GPU").is_ok() {
info!("Running with ICICLE GPU");
} else {
info!("Running with CPU");
}
info!(
"command: \n {}",
&command.as_json().to_colored_json_auto().unwrap()
);
let res = run(command).await;
match &res {
Ok(_) => {
info!("succeeded");
}
Err(e) => {
error!("{}", e);
std::process::exit(1)
}
}
} else {
info!("Running with CPU");
init_logger();
error!("No command provided");
std::process::exit(1)
}
debug!("command: \n {}", &args.as_json()?.to_colored_json_auto()?);
let res = run(args.command).await;
match &res {
Ok(_) => info!("succeeded"),
Err(e) => error!("failed: {}", e),
};
res.map(|_| ())
}
#[cfg(target_arch = "wasm32")]
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
pub fn main() {}
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]

269
src/bin/ios_gen_bindings.rs Normal file
View File

@@ -0,0 +1,269 @@
use camino::Utf8Path;
use std::fs;
use std::fs::remove_dir_all;
use std::path::{Path, PathBuf};
use std::process::Command;
use uniffi_bindgen::bindings::SwiftBindingGenerator;
use uniffi_bindgen::library_mode::generate_bindings;
use uuid::Uuid;
fn main() {
let library_name = std::env::var("CARGO_PKG_NAME").expect("CARGO_PKG_NAME is not set");
let mode = determine_build_mode();
build_bindings(&library_name, mode);
}
/// Determines the build mode based on the CONFIGURATION environment variable.
/// Defaults to "release" if not set or unrecognized.
/// "release" mode takes longer to build but produces optimized code, which has smaller size and is faster.
fn determine_build_mode() -> &'static str {
match std::env::var("CONFIGURATION").map(|s| s.to_lowercase()) {
Ok(ref config) if config == "debug" => "debug",
_ => "release",
}
}
/// Builds the Swift bindings and XCFramework for the specified library and build mode.
fn build_bindings(library_name: &str, mode: &str) {
// Get the root directory of this Cargo project
let manifest_dir = std::env::var_os("CARGO_MANIFEST_DIR")
.map(PathBuf::from)
.unwrap_or_else(|| std::env::current_dir().unwrap());
// Define the build directory inside the manifest directory
let build_dir = manifest_dir.join("build");
// Create a temporary directory to store the bindings and combined library
let tmp_dir = mktemp_local(&build_dir);
// Define directories for Swift bindings and output bindings
let swift_bindings_dir = tmp_dir.join("SwiftBindings");
let bindings_out = create_bindings_out_dir(&tmp_dir);
let framework_out = bindings_out.join("EzklCore.xcframework");
// Define target architectures for building
// We currently only support iOS devices and simulators running on ARM Macs
// This keeps the library size under 100MB, GitHub's commit size limit
// To support older Macs (Intel), follow the instructions in the comments below
#[allow(clippy::useless_vec)]
let target_archs = vec![
vec!["aarch64-apple-ios"], // iOS device
vec!["aarch64-apple-ios-sim"], // iOS simulator ARM Mac
// vec!["aarch64-apple-ios-sim", "x86_64-apple-ios"], // TODO - replace the above line with this line to allow running on older Macs (Intel)
];
// Build the library for each architecture and combine them
let out_lib_paths: Vec<PathBuf> = target_archs
.iter()
.map(|archs| build_combined_archs(library_name, archs, &build_dir, mode))
.collect();
// Generate the path to the built dynamic library (.dylib)
let out_dylib_path = build_dir.join(format!(
"{}/{}/lib{}.dylib",
target_archs[0][0], mode, library_name
));
// Generate Swift bindings using uniffi_bindgen
generate_ios_bindings(&out_dylib_path, &swift_bindings_dir)
.expect("Failed to generate iOS bindings");
// Move the generated Swift file to the bindings output directory
fs::rename(
swift_bindings_dir.join(format!("{}.swift", library_name)),
bindings_out.join("EzklCore.swift"),
)
.expect("Failed to copy swift bindings file");
// Rename the `ios_ezklFFI.modulemap` file to `module.modulemap`
fs::rename(
swift_bindings_dir.join(format!("{}FFI.modulemap", library_name)),
swift_bindings_dir.join("module.modulemap"),
)
.expect("Failed to rename modulemap file");
// Create the XCFramework from the combined libraries and Swift bindings
create_xcframework(&out_lib_paths, &swift_bindings_dir, &framework_out);
// Define the destination directory for the bindings
let bindings_dest = build_dir.join("EzklCoreBindings");
if bindings_dest.exists() {
fs::remove_dir_all(&bindings_dest).expect("Failed to remove existing bindings directory");
}
// Move the bindings output to the destination directory
fs::rename(&bindings_out, &bindings_dest).expect("Failed to move framework into place");
// Clean up temporary directories
cleanup_temp_dirs(&build_dir);
}
/// Creates the output directory for the bindings.
/// Returns the path to the bindings output directory.
fn create_bindings_out_dir(base_dir: &Path) -> PathBuf {
let bindings_out = base_dir.join("EzklCoreBindings");
fs::create_dir_all(&bindings_out).expect("Failed to create bindings output directory");
bindings_out
}
/// Builds the library for each architecture and combines them into a single library using lipo.
/// Returns the path to the combined library.
fn build_combined_archs(
library_name: &str,
archs: &[&str],
build_dir: &Path,
mode: &str,
) -> PathBuf {
// Build the library for each architecture
let out_lib_paths: Vec<PathBuf> = archs
.iter()
.map(|&arch| {
build_for_arch(arch, build_dir, mode);
build_dir
.join(arch)
.join(mode)
.join(format!("lib{}.a", library_name))
})
.collect();
// Create a unique temporary directory for the combined library
let lib_out = mktemp_local(build_dir).join(format!("lib{}.a", library_name));
// Combine the libraries using lipo
let mut lipo_cmd = Command::new("lipo");
lipo_cmd
.arg("-create")
.arg("-output")
.arg(lib_out.to_str().unwrap());
for lib_path in &out_lib_paths {
lipo_cmd.arg(lib_path.to_str().unwrap());
}
let status = lipo_cmd.status().expect("Failed to run lipo command");
if !status.success() {
panic!("lipo command failed with status: {}", status);
}
lib_out
}
/// Builds the library for a specific architecture.
fn build_for_arch(arch: &str, build_dir: &Path, mode: &str) {
// Ensure the target architecture is installed
install_arch(arch);
// Run cargo build for the specified architecture and mode
let mut build_cmd = Command::new("cargo");
build_cmd
.arg("build")
.arg("--no-default-features")
.arg("--features")
.arg("ios-bindings");
if mode == "release" {
build_cmd.arg("--release");
}
build_cmd
.arg("--lib")
.env("CARGO_BUILD_TARGET_DIR", build_dir)
.env("CARGO_BUILD_TARGET", arch);
let status = build_cmd.status().expect("Failed to run cargo build");
if !status.success() {
panic!("cargo build failed for architecture: {}", arch);
}
}
/// Installs the specified target architecture using rustup.
fn install_arch(arch: &str) {
let status = Command::new("rustup")
.arg("target")
.arg("add")
.arg(arch)
.status()
.expect("Failed to run rustup command");
if !status.success() {
panic!("Failed to install target architecture: {}", arch);
}
}
/// Generates Swift bindings for the iOS library using uniffi_bindgen.
fn generate_ios_bindings(dylib_path: &Path, binding_dir: &Path) -> Result<(), std::io::Error> {
// Remove existing binding directory if it exists
if binding_dir.exists() {
remove_dir_all(binding_dir)?;
}
// Generate the Swift bindings using uniffi_bindgen
generate_bindings(
Utf8Path::from_path(dylib_path).ok_or_else(|| {
std::io::Error::new(std::io::ErrorKind::InvalidInput, "Invalid dylib path")
})?,
None,
&SwiftBindingGenerator,
None,
Utf8Path::from_path(binding_dir).ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Invalid Swift bindings directory",
)
})?,
true,
)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e.to_string()))?;
Ok(())
}
/// Creates an XCFramework from the combined libraries and Swift bindings.
fn create_xcframework(lib_paths: &[PathBuf], swift_bindings_dir: &Path, framework_out: &Path) {
let mut xcbuild_cmd = Command::new("xcodebuild");
xcbuild_cmd.arg("-create-xcframework");
// Add each library and its corresponding headers to the xcodebuild command
for lib_path in lib_paths {
println!("Including library: {:?}", lib_path);
xcbuild_cmd.arg("-library");
xcbuild_cmd.arg(lib_path.to_str().unwrap());
xcbuild_cmd.arg("-headers");
xcbuild_cmd.arg(swift_bindings_dir.to_str().unwrap());
}
xcbuild_cmd.arg("-output");
xcbuild_cmd.arg(framework_out.to_str().unwrap());
let status = xcbuild_cmd.status().expect("Failed to run xcodebuild");
if !status.success() {
panic!("xcodebuild failed with status: {}", status);
}
}
/// Creates a temporary directory inside the build path with a unique UUID.
/// This ensures unique build artifacts for concurrent builds.
fn mktemp_local(build_path: &Path) -> PathBuf {
let dir = tmp_local(build_path).join(Uuid::new_v4().to_string());
fs::create_dir(&dir).expect("Failed to create temporary directory");
dir
}
/// Gets the path to the local temporary directory inside the build path.
fn tmp_local(build_path: &Path) -> PathBuf {
let tmp_path = build_path.join("tmp");
if let Ok(metadata) = fs::metadata(&tmp_path) {
if !metadata.is_dir() {
panic!("Expected 'tmp' to be a directory");
}
} else {
fs::create_dir_all(&tmp_path).expect("Failed to create local temporary directory");
}
tmp_path
}
/// Cleans up temporary directories inside the build path.
fn cleanup_temp_dirs(build_dir: &Path) {
let tmp_dir = build_dir.join("tmp");
if tmp_dir.exists() {
fs::remove_dir_all(tmp_dir).expect("Failed to remove temporary directories");
}
}

12
src/bindings/mod.rs Normal file
View File

@@ -0,0 +1,12 @@
/// Python bindings
#[cfg(feature = "python-bindings")]
pub mod python;
/// Universal bindings for all platforms
#[cfg(any(
feature = "ios-bindings",
all(target_arch = "wasm32", target_os = "unknown")
))]
pub mod universal;
/// wasm prover and verifier
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
pub mod wasm;

View File

@@ -6,7 +6,7 @@ use crate::circuit::modules::poseidon::{
use crate::circuit::modules::Module;
use crate::circuit::{CheckMode, Tolerance};
use crate::commands::*;
use crate::fieldutils::{felt_to_i64, i64_to_felt};
use crate::fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep};
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::TestDataSource;
use crate::graph::{
@@ -29,7 +29,6 @@ use pyo3_log;
use snark_verifier::util::arithmetic::PrimeField;
use std::str::FromStr;
use std::{fs::File, path::PathBuf};
use tokio::runtime::Runtime;
type PyFelt = String;
@@ -192,6 +191,12 @@ struct PyRunArgs {
#[pyo3(get, set)]
/// str: commitment type, accepts `kzg`, `ipa`
pub commitment: PyCommitments,
/// int: The base used for decomposition
#[pyo3(get, set)]
pub decomp_base: usize,
/// int: The number of legs used for decomposition
#[pyo3(get, set)]
pub decomp_legs: usize,
}
/// default instantiation of PyRunArgs
@@ -222,6 +227,8 @@ impl From<PyRunArgs> for RunArgs {
rebase_frac_zero_constants: py_run_args.rebase_frac_zero_constants,
check_mode: py_run_args.check_mode,
commitment: Some(py_run_args.commitment.into()),
decomp_base: py_run_args.decomp_base,
decomp_legs: py_run_args.decomp_legs,
}
}
}
@@ -244,6 +251,8 @@ impl Into<PyRunArgs> for RunArgs {
rebase_frac_zero_constants: self.rebase_frac_zero_constants,
check_mode: self.check_mode,
commitment: self.commitment.into(),
decomp_base: self.decomp_base,
decomp_legs: self.decomp_legs,
}
}
}
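
For Python callers, the new decomposition knobs surface as plain attributes on `PyRunArgs`. A hedged sketch (the values shown are placeholders, not recommended defaults):

```python
# Sketch of the new decomposition fields on PyRunArgs; the numbers
# below are illustrative placeholders, not tuned defaults.
import ezkl

run_args = ezkl.PyRunArgs()
run_args.decomp_base = 16384   # base used for decomposition
run_args.decomp_legs = 2       # number of decomposition legs
ezkl.gen_settings("network.onnx", "settings.json", py_run_args=run_args)
```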
@@ -332,9 +341,9 @@ fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
#[pyfunction(signature = (
felt,
))]
fn felt_to_int(felt: PyFelt) -> PyResult<i64> {
fn felt_to_int(felt: PyFelt) -> PyResult<IntegerRep> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let int_rep = felt_to_i64(felt);
let int_rep = felt_to_integer_rep(felt);
Ok(int_rep)
}
@@ -358,7 +367,7 @@ fn felt_to_int(felt: PyFelt) -> PyResult<i64> {
))]
fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let int_rep = felt_to_i64(felt);
let int_rep = felt_to_integer_rep(felt);
let multiplier = scale_to_multiplier(scale);
let float_rep = int_rep as f64 / multiplier;
Ok(float_rep)
@@ -386,7 +395,7 @@ fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
let int_rep = quantize_float(&input, 0.0, scale)
.map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
let felt = i64_to_felt(int_rep);
let felt = integer_rep_to_felt(int_rep);
Ok(crate::pfsys::field_to_string::<Fr>(&felt))
}
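
A quick round-trip through these felt helpers, assuming scale 7 (a multiplier of 2^7 = 128); `felt_to_int` now returns the library's `IntegerRep` on the Rust side, which still arrives as a plain int in Python:

```python
# Round-trip sketch for the felt conversion helpers touched above.
import ezkl

felt = ezkl.float_to_felt(0.5, 7)     # quantize 0.5 at scale 2**7
print(ezkl.felt_to_int(felt))         # -> 64  (0.5 * 128)
print(ezkl.felt_to_float(felt, 7))    # -> 0.5
```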
@@ -467,7 +476,7 @@ fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
/// Arguments
/// -------
/// message: list[str]
/// List of field elements represnted as strings
/// List of field elements represented as strings
///
/// Returns
/// -------
@@ -779,29 +788,27 @@ fn gen_srs(srs_path: PathBuf, logrows: usize) -> PyResult<()> {
commitment=None,
))]
fn get_srs(
py: Python,
settings_path: Option<PathBuf>,
logrows: Option<u32>,
srs_path: Option<PathBuf>,
commitment: Option<PyCommitments>,
) -> PyResult<bool> {
) -> PyResult<Bound<'_, PyAny>> {
let commitment: Option<Commitments> = match commitment {
Some(c) => Some(c.into()),
None => None,
};
Runtime::new()
.unwrap()
.block_on(crate::execute::get_srs_cmd(
srs_path,
settings_path,
logrows,
commitment,
))
.map_err(|e| {
let err_str = format!("Failed to get srs: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::get_srs_cmd(srs_path, settings_path, logrows, commitment)
.await
.map_err(|e| {
let err_str = format!("Failed to get srs: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
})
}
/// Generates the circuit settings
@@ -885,33 +892,37 @@ fn gen_settings(
only_range_check_rebase = DEFAULT_ONLY_RANGE_CHECK_REBASE.parse().unwrap(),
))]
fn calibrate_settings(
py: Python,
data: PathBuf,
model: PathBuf,
settings: PathBuf,
target: CalibrationTarget,
lookup_safety_margin: i64,
lookup_safety_margin: f64,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
max_logrows: Option<u32>,
only_range_check_rebase: bool,
) -> Result<bool, PyErr> {
crate::execute::calibrate(
model,
data,
settings,
target,
lookup_safety_margin,
scales,
scale_rebase_multiplier,
only_range_check_rebase,
max_logrows,
)
.map_err(|e| {
let err_str = format!("Failed to calibrate settings: {}", e);
PyRuntimeError::new_err(err_str)
})?;
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::calibrate(
model,
data,
settings,
target,
lookup_safety_margin,
scales,
scale_rebase_multiplier,
only_range_check_rebase,
max_logrows,
)
.await
.map_err(|e| {
let err_str = format!("Failed to calibrate settings: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
Ok(true)
})
}
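
Since `lookup_safety_margin` is now a float, fractional headroom can be requested during calibration. A notebook-style sketch (the 1.5 margin is illustrative):

```python
# Notebook-style sketch; a 1.5x lookup safety margin is now expressible
# because the parameter changed from i64 to f64 in this diff.
res = await ezkl.calibrate_settings(
    "calibration.json", "network.onnx", "settings.json",
    "resources", lookup_safety_margin=1.5,
)
assert res == True
```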
/// Runs the forward pass operation to generate a witness
@@ -946,18 +957,22 @@ fn calibrate_settings(
srs_path=None,
))]
fn gen_witness(
py: Python,
data: PathBuf,
model: PathBuf,
output: Option<PathBuf>,
vk_path: Option<PathBuf>,
srs_path: Option<PathBuf>,
) -> PyResult<PyObject> {
let output =
crate::execute::gen_witness(model, data, output, vk_path, srs_path).map_err(|e| {
let err_str = format!("Failed to run generate witness: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Python::with_gil(|py| Ok(output.to_object(py)))
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
let output = crate::execute::gen_witness(model, data, output, vk_path, srs_path)
.await
.map_err(|e| {
let err_str = format!("Failed to generate witness: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Python::with_gil(|py| Ok(output.to_object(py)))
})
}
/// Mocks the prover
@@ -1425,6 +1440,47 @@ fn verify_aggr(
Ok(true)
}
/// Creates encoded evm calldata from a proof file
///
/// Arguments
/// ---------
/// proof: str
/// Path to the proof file
///
/// calldata: str
/// Path to the calldata file to save
///
/// addr_vk: str
/// The address of the verification key contract (if the verifier key is to be rendered as a separate contract)
///
/// Returns
/// -------
/// vec[u8]
/// The encoded calldata
///
#[pyfunction(signature = (
proof=PathBuf::from(DEFAULT_PROOF),
calldata=PathBuf::from(DEFAULT_CALLDATA),
addr_vk=None,
))]
fn encode_evm_calldata<'a>(
proof: PathBuf,
calldata: PathBuf,
addr_vk: Option<&'a str>,
) -> Result<Vec<u8>, PyErr> {
let addr_vk = if let Some(addr_vk) = addr_vk {
let addr_vk = H160Flag::from(addr_vk);
Some(addr_vk)
} else {
None
};
crate::execute::encode_evm_calldata(proof, calldata, addr_vk).map_err(|e| {
let err_str = format!("Failed to generate calldata: {}", e);
PyRuntimeError::new_err(err_str)
})
}
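
Unlike most entry points in this diff, `encode_evm_calldata` stays synchronous. A sketch of the Python call, with placeholder paths:

```python
# Synchronous helper: returns the encoded calldata bytes and writes
# them to the given output path. Paths are placeholders; pass addr_vk
# only when the VK lives in a separate contract.
calldata = ezkl.encode_evm_calldata(
    proof="proof.json",
    calldata="calldata.bytes",
    addr_vk=None,
)
```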
/// Creates an EVM compatible verifier, you will need solc installed in your environment to run this
///
/// Arguments
@@ -1444,8 +1500,8 @@ fn verify_aggr(
/// srs_path: str
/// The path to the SRS file
///
/// render_vk_separately: bool
/// Whether the verifier key should be rendered as a separate contract. We recommend disabling selector compression if this is enabled. To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command
/// reusable: bool
/// Whether the verifier should be rendered as a reusable contract. If so, you will need to separately deploy a VK artifact, which you can generate using the create_evm_vka command
///
/// Returns
/// -------
@@ -1457,30 +1513,85 @@ fn verify_aggr(
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
srs_path=None,
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
reusable = DEFAULT_RENDER_REUSABLE.parse().unwrap(),
))]
fn create_evm_verifier(
py: Python,
vk_path: PathBuf,
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
srs_path: Option<PathBuf>,
render_vk_seperately: bool,
) -> Result<bool, PyErr> {
crate::execute::create_evm_verifier(
vk_path,
srs_path,
settings_path,
sol_code_path,
abi_path,
render_vk_seperately,
)
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier: {}", e);
PyRuntimeError::new_err(err_str)
})?;
reusable: bool,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_verifier(
vk_path,
srs_path,
settings_path,
sol_code_path,
abi_path,
reusable,
)
.await
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
Ok(true)
})
}
/// Creates an EVM VK artifact. This command generates a VK with circuit-specific metadata encoded in memory, for use by the reusable H2 verifier.
/// This is useful for deploying verifiers that would otherwise be too big to fit on chain and would have required aggregation.
///
/// Arguments
/// ---------
/// vk_path: str
/// The path to the verification key file
///
/// settings_path: str
/// The path to the settings file
///
/// sol_code_path: str
/// The path to the create the solidity verifying key.
///
/// abi_path: str
/// The path to create the ABI for the solidity verifier
///
/// srs_path: str
/// The path to the SRS file
///
/// Returns
/// -------
/// bool
///
#[pyfunction(signature = (
vk_path=PathBuf::from(DEFAULT_VK),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
sol_code_path=PathBuf::from(DEFAULT_VK_SOL),
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
srs_path=None
))]
fn create_evm_vka(
py: Python,
vk_path: PathBuf,
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
srs_path: Option<PathBuf>,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_vka(vk_path, srs_path, settings_path, sol_code_path, abi_path)
.await
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
})
}
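
Putting the two new pieces together, a hedged sketch of the reusable-verifier flow (paths are placeholders):

```python
# Placeholder paths; reusable=True renders the verifier so that a
# separately generated, circuit-specific VK artifact can be swapped in.
await ezkl.create_evm_verifier(
    "test.vk", "settings.json", "verifier.sol", "verifier.abi",
    reusable=True,
)
await ezkl.create_evm_vka(
    "test.vk", "settings.json", "vk.sol", "vk.abi",
)
```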
/// Creates an EVM compatible data attestation verifier, you will need solc installed in your environment to run this
@@ -1508,20 +1619,32 @@ fn create_evm_verifier(
settings_path=PathBuf::from(DEFAULT_SETTINGS),
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE_DA),
abi_path=PathBuf::from(DEFAULT_VERIFIER_DA_ABI),
witness_path=None,
))]
fn create_evm_data_attestation(
py: Python,
input_data: PathBuf,
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
) -> Result<bool, PyErr> {
crate::execute::create_evm_data_attestation(settings_path, sol_code_path, abi_path, input_data)
witness_path: Option<PathBuf>,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_data_attestation(
settings_path,
sol_code_path,
abi_path,
input_data,
witness_path,
)
.await
.map_err(|e| {
let err_str = format!("Failed to run create_evm_data_attestation: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
Ok(true)
})
}
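
A sketch of the updated call with the new optional `witness_path` argument; its exact on-chain semantics are not spelled out in this diff, so treat it as illustrative:

```python
# Placeholder paths; witness_path is the optional argument added in
# this diff (illustrative only, semantics per the library docs).
await ezkl.create_evm_data_attestation(
    "input.json", "settings.json", "da.sol", "da.abi",
    witness_path="witness.json",
)
```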
/// Setup test evm witness
@@ -1559,29 +1682,31 @@ fn create_evm_data_attestation(
rpc_url=None,
))]
fn setup_test_evm_witness(
py: Python,
data_path: PathBuf,
compiled_circuit_path: PathBuf,
test_data: PathBuf,
input_source: PyTestDataSource,
output_source: PyTestDataSource,
rpc_url: Option<String>,
) -> Result<bool, PyErr> {
Runtime::new()
.unwrap()
.block_on(crate::execute::setup_test_evm_witness(
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::setup_test_evm_witness(
data_path,
compiled_circuit_path,
test_data,
rpc_url,
input_source.into(),
output_source.into(),
))
)
.await
.map_err(|e| {
let err_str = format!("Failed to run setup_test_evm_witness: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
Ok(true)
})
}
/// deploys the solidity verifier
@@ -1589,65 +1714,36 @@ fn setup_test_evm_witness(
addr_path,
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
rpc_url=None,
contract_type=ContractType::default(),
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
fn deploy_evm(
py: Python,
addr_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,
contract_type: ContractType,
optimizer_runs: usize,
private_key: Option<String>,
) -> Result<bool, PyErr> {
Runtime::new()
.unwrap()
.block_on(crate::execute::deploy_evm(
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::deploy_evm(
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
"Halo2Verifier",
))
contract_type,
)
.await
.map_err(|e| {
let err_str = format!("Failed to run deploy_evm: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
}
/// deploys the solidity vk verifier
#[pyfunction(signature = (
addr_path,
sol_code_path=PathBuf::from(DEFAULT_VK_SOL),
rpc_url=None,
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
fn deploy_vk_evm(
addr_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,
optimizer_runs: usize,
private_key: Option<String>,
) -> Result<bool, PyErr> {
Runtime::new()
.unwrap()
.block_on(crate::execute::deploy_evm(
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
"Halo2VerifyingKey",
))
.map_err(|e| {
let err_str = format!("Failed to run deploy_evm: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
Ok(true)
})
}
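
With `deploy_vk_evm` folded into `deploy_evm`, the common case is unchanged because `contract_type` has a default. A sketch with placeholder paths:

```python
# contract_type defaults to the plain verifier, so existing notebook
# calls keep working unchanged; paths and RPC URL are placeholders.
await ezkl.deploy_evm(
    "address.json",
    "verifier.sol",
    'http://127.0.0.1:3030',
)
```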
/// deploys the solidity da verifier
@@ -1661,6 +1757,7 @@ fn deploy_vk_evm(
private_key=None
))]
fn deploy_da_evm(
py: Python,
addr_path: PathBuf,
input_data: PathBuf,
settings_path: PathBuf,
@@ -1668,10 +1765,9 @@ fn deploy_da_evm(
rpc_url: Option<String>,
optimizer_runs: usize,
private_key: Option<String>,
) -> Result<bool, PyErr> {
Runtime::new()
.unwrap()
.block_on(crate::execute::deploy_da_evm(
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::deploy_da_evm(
input_data,
settings_path,
sol_code_path,
@@ -1679,20 +1775,22 @@ fn deploy_da_evm(
addr_path,
optimizer_runs,
private_key,
))
)
.await
.map_err(|e| {
let err_str = format!("Failed to run deploy_da_evm: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
Ok(true)
})
}
/// verifies an evm compatible proof, you will need solc installed in your environment to run this
///
/// Arguments
/// ---------
/// addr_verifier: str
/// The path to verifier contract's address
/// The verifier contract's address as a hex string
///
/// proof_path: str
/// The path to the proof file (generated using the prove command)
@@ -1704,7 +1802,7 @@ fn deploy_da_evm(
/// does the verifier use data attestation ?
///
/// addr_vk: str
///
/// The address of the separate VK contract (if the verifier key is rendered as a separate contract)
/// Returns
/// -------
/// bool
@@ -1716,13 +1814,14 @@ fn deploy_da_evm(
addr_da = None,
addr_vk = None,
))]
fn verify_evm(
addr_verifier: &str,
fn verify_evm<'a>(
py: Python<'a>,
addr_verifier: &'a str,
proof_path: PathBuf,
rpc_url: Option<String>,
addr_da: Option<&str>,
addr_vk: Option<&str>,
) -> Result<bool, PyErr> {
addr_da: Option<&'a str>,
addr_vk: Option<&'a str>,
) -> PyResult<Bound<'a, PyAny>> {
let addr_verifier = H160Flag::from(addr_verifier);
let addr_da = if let Some(addr_da) = addr_da {
let addr_da = H160Flag::from(addr_da);
@@ -1737,21 +1836,16 @@ fn verify_evm(
None
};
Runtime::new()
.unwrap()
.block_on(crate::execute::verify_evm(
proof_path,
addr_verifier,
rpc_url,
addr_da,
addr_vk,
))
.map_err(|e| {
let err_str = format!("Failed to run verify_evm: {}", e);
PyRuntimeError::new_err(err_str)
})?;
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::verify_evm(proof_path, addr_verifier, rpc_url, addr_da, addr_vk)
.await
.map_err(|e| {
let err_str = format!("Failed to run verify_evm: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
Ok(true)
})
}
/// Creates an evm compatible aggregate verifier, you will need solc installed in your environment to run this
@@ -1776,8 +1870,8 @@ fn verify_evm(
/// srs_path: str
/// The path to the SRS file
///
/// render_vk_separately: bool
/// Whether the verifier key should be rendered as a separate contract. We recommend disabling selector compression if this is enabled. To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command
/// reusable: bool
/// Whether the verifier should be rendered as a reusable contract. If so, you will need to separately deploy a VK artifact, which you can generate using the create_evm_vka command
///
/// Returns
/// -------
@@ -1790,31 +1884,36 @@ fn verify_evm(
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
srs_path=None,
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
reusable = DEFAULT_RENDER_REUSABLE.parse().unwrap(),
))]
fn create_evm_verifier_aggr(
py: Python,
aggregation_settings: Vec<PathBuf>,
vk_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
logrows: u32,
srs_path: Option<PathBuf>,
render_vk_seperately: bool,
) -> Result<bool, PyErr> {
crate::execute::create_evm_aggregate_verifier(
vk_path,
srs_path,
sol_code_path,
abi_path,
aggregation_settings,
logrows,
render_vk_seperately,
)
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier_aggr: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
reusable: bool,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_aggregate_verifier(
vk_path,
srs_path,
sol_code_path,
abi_path,
aggregation_settings,
logrows,
reusable,
)
.await
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier_aggr: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
})
}
// Python Module
@@ -1854,13 +1953,13 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(compile_circuit, m)?)?;
m.add_function(wrap_pyfunction!(verify_aggr, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_verifier, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_vka, m)?)?;
m.add_function(wrap_pyfunction!(deploy_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_vk_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
m.add_function(wrap_pyfunction!(setup_test_evm_witness, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_verifier_aggr, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_data_attestation, m)?)?;
m.add_function(wrap_pyfunction!(encode_evm_calldata, m)?)?;
Ok(())
}

579
src/bindings/universal.rs Normal file
View File

@@ -0,0 +1,579 @@
use halo2_proofs::{
plonk::*,
poly::{
commitment::{CommitmentScheme, ParamsProver},
ipa::{
commitment::{IPACommitmentScheme, ParamsIPA},
multiopen::{ProverIPA, VerifierIPA},
strategy::SingleStrategy as IPASingleStrategy,
},
kzg::{
commitment::{KZGCommitmentScheme, ParamsKZG},
multiopen::{ProverSHPLONK, VerifierSHPLONK},
strategy::SingleStrategy as KZGSingleStrategy,
},
VerificationStrategy,
},
};
use std::fmt::Display;
use std::io::BufReader;
use std::str::FromStr;
use crate::{
circuit::region::RegionSettings,
graph::GraphSettings,
pfsys::{
create_proof_circuit,
evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript},
verify_proof_circuit, TranscriptType,
},
tensor::TensorType,
CheckMode, Commitments, EZKLError as InnerEZKLError,
};
use crate::graph::{GraphCircuit, GraphWitness};
use halo2_solidity_verifier::encode_calldata;
use halo2curves::{
bn256::{Bn256, Fr, G1Affine},
ff::{FromUniformBytes, PrimeField},
};
use snark_verifier::{loader::native::NativeLoader, system::halo2::transcript::evm::EvmTranscript};
/// Wrapper around the Error Message
#[cfg_attr(feature = "ios-bindings", derive(uniffi::Error))]
#[derive(Debug)]
pub enum EZKLError {
/// Internal error carrying a message
InternalError(String),
}
impl Display for EZKLError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
EZKLError::InternalError(e) => write!(f, "Internal error: {}", e),
}
}
}
impl From<InnerEZKLError> for EZKLError {
fn from(e: InnerEZKLError) -> Self {
EZKLError::InternalError(e.to_string())
}
}
/// Encode verifier calldata from proof and ethereum vk_address
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn encode_verifier_calldata(
// TODO - should it be pub(crate), pub, or pub(super)?
proof: Vec<u8>,
vk_address: Option<Vec<u8>>,
) -> Result<Vec<u8>, EZKLError> {
let snark: crate::pfsys::Snark<Fr, G1Affine> =
serde_json::from_slice(&proof[..]).map_err(InnerEZKLError::from)?;
let vk_address: Option<[u8; 20]> = if let Some(vk_address) = vk_address {
let array: [u8; 20] =
serde_json::from_slice(&vk_address[..]).map_err(InnerEZKLError::from)?;
Some(array)
} else {
None
};
let flattened_instances = snark.instances.into_iter().flatten();
let encoded = encode_calldata(
vk_address,
&snark.proof,
&flattened_instances.collect::<Vec<_>>(),
);
Ok(encoded)
}
/// Generate witness from compiled circuit and input json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn gen_witness(compiled_circuit: Vec<u8>, input: Vec<u8>) -> Result<Vec<u8>, EZKLError> {
let mut circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| {
EZKLError::InternalError(format!("Failed to deserialize compiled model: {}", e))
})?;
let input: crate::graph::input::GraphData = serde_json::from_slice(&input[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize input: {}", e)))?;
let mut input = circuit
.load_graph_input(&input)
.map_err(|e| EZKLError::InternalError(format!("{}", e)))?;
let witness = circuit
.forward::<KZGCommitmentScheme<Bn256>>(
&mut input,
None,
None,
RegionSettings::all_true(
circuit.settings().run_args.decomp_base,
circuit.settings().run_args.decomp_legs,
),
)
.map_err(|e| EZKLError::InternalError(format!("{}", e)))?;
serde_json::to_vec(&witness)
.map_err(|e| EZKLError::InternalError(format!("Failed to serialize witness: {}", e)))
}
/// Generate verifying key from compiled circuit, and parameters srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn gen_vk(
compiled_circuit: Vec<u8>,
srs: Vec<u8>,
compress_selectors: bool,
) -> Result<Vec<u8>, EZKLError> {
let mut reader = BufReader::new(&srs[..]);
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
let circuit: GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize circuit: {}", e)))?;
let vk = create_vk_lean::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&params,
compress_selectors,
)
.map_err(|e| EZKLError::InternalError(format!("Failed to create verifying key: {}", e)))?;
let mut serialized_vk = Vec::new();
vk.write(&mut serialized_vk, halo2_proofs::SerdeFormat::RawBytes)
.map_err(|e| {
EZKLError::InternalError(format!("Failed to serialize verifying key: {}", e))
})?;
Ok(serialized_vk)
}
/// Generate proving key from vk, compiled circuit and parameters srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn gen_pk(
vk: Vec<u8>,
compiled_circuit: Vec<u8>,
srs: Vec<u8>,
) -> Result<Vec<u8>, EZKLError> {
let mut reader = BufReader::new(&srs[..]);
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
let circuit: GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize circuit: {}", e)))?;
let mut reader = BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit.settings().clone(),
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize verifying key: {}", e)))?;
let pk = create_pk_lean::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk, &circuit, &params)
.map_err(|e| EZKLError::InternalError(format!("Failed to create proving key: {}", e)))?;
let mut serialized_pk = Vec::new();
pk.write(&mut serialized_pk, halo2_proofs::SerdeFormat::RawBytes)
.map_err(|e| EZKLError::InternalError(format!("Failed to serialize proving key: {}", e)))?;
Ok(serialized_pk)
}
/// Verify proof with vk, proof json, circuit settings json and srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn verify(
proof: Vec<u8>,
vk: Vec<u8>,
settings: Vec<u8>,
srs: Vec<u8>,
) -> Result<bool, EZKLError> {
let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize settings: {}", e)))?;
let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proof: {}", e)))?;
let mut reader = BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings.clone(),
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize vk: {}", e)))?;
let orig_n = 1 << circuit_settings.run_args.logrows;
let commitment = circuit_settings.run_args.commitment.into();
let mut reader = BufReader::new(&srs[..]);
let result = match commitment {
Commitments::KZG => {
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
let strategy = KZGSingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
Commitments::IPA => {
let params: ParamsIPA<_> = get_params(&mut reader)?;
let strategy = IPASingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
};
match result {
Ok(_) => Ok(true),
Err(e) => Err(EZKLError::InternalError(format!(
"Verification failed: {}",
e
))),
}
}
/// Verify aggregate proof with vk, proof, circuit settings and srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn verify_aggr(
proof: Vec<u8>,
vk: Vec<u8>,
logrows: u64,
srs: Vec<u8>,
commitment: &str,
) -> Result<bool, EZKLError> {
let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proof: {}", e)))?;
let mut reader = BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, AggregationCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
(),
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize vk: {}", e)))?;
let commit = Commitments::from_str(commitment)
.map_err(|e| EZKLError::InternalError(format!("Invalid commitment: {}", e)))?;
let orig_n = 1 << logrows;
let mut reader = BufReader::new(&srs[..]);
let result = match commit {
Commitments::KZG => {
let params: ParamsKZG<Bn256> = get_params(&mut reader)?;
let strategy = KZGSingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierSHPLONK<'_, Bn256>,
KZGCommitmentScheme<Bn256>,
KZGSingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
Commitments::IPA => {
let params: ParamsIPA<_> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(
|e| EZKLError::InternalError(format!("Failed to deserialize params: {}", e)),
)?;
let strategy = IPASingleStrategy::new(params.verifier_params());
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, &params, &vk, strategy, orig_n),
TranscriptType::Poseidon => {
verify_proof_circuit::<
VerifierIPA<_>,
IPACommitmentScheme<G1Affine>,
IPASingleStrategy<_>,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, &params, &vk, strategy, orig_n)
}
}
}
};
result
.map(|_| true)
.map_err(|e| EZKLError::InternalError(format!("{}", e)))
}
/// Prove in browser with compiled circuit, witness json, proving key, and srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn prove(
witness: Vec<u8>,
pk: Vec<u8>,
compiled_circuit: Vec<u8>,
srs: Vec<u8>,
) -> Result<Vec<u8>, EZKLError> {
#[cfg(feature = "det-prove")]
log::set_max_level(log::LevelFilter::Debug);
#[cfg(not(feature = "det-prove"))]
log::set_max_level(log::LevelFilter::Info);
let mut circuit: GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize circuit: {}", e)))?;
let data: GraphWitness = serde_json::from_slice(&witness[..]).map_err(InnerEZKLError::from)?;
let mut reader = BufReader::new(&pk[..]);
let pk = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit.settings().clone(),
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proving key: {}", e)))?;
circuit
.load_graph_witness(&data)
.map_err(InnerEZKLError::from)?;
let public_inputs = circuit
.prepare_public_inputs(&data)
.map_err(InnerEZKLError::from)?;
let proof_split_commits: Option<crate::pfsys::ProofSplitCommit> = data.into();
let mut reader = BufReader::new(&srs[..]);
let commitment = circuit.settings().run_args.commitment.into();
let proof = match commitment {
Commitments::KZG => {
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(
|e| EZKLError::InternalError(format!("Failed to deserialize srs: {}", e)),
)?;
create_proof_circuit::<
KZGCommitmentScheme<Bn256>,
_,
ProverSHPLONK<_>,
VerifierSHPLONK<_>,
KZGSingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit,
vec![public_inputs],
&params,
&pk,
CheckMode::UNSAFE,
Commitments::KZG,
TranscriptType::EVM,
proof_split_commits,
None,
)
}
Commitments::IPA => {
let params: ParamsIPA<_> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(
|e| EZKLError::InternalError(format!("Failed to deserialize srs: {}", e)),
)?;
create_proof_circuit::<
IPACommitmentScheme<G1Affine>,
_,
ProverIPA<_>,
VerifierIPA<_>,
IPASingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit,
vec![public_inputs],
&params,
&pk,
CheckMode::UNSAFE,
Commitments::IPA,
TranscriptType::EVM,
proof_split_commits,
None,
)
}
}
.map_err(InnerEZKLError::from)?;
Ok(serde_json::to_vec(&proof).map_err(InnerEZKLError::from)?)
}
/// Validate the witness json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn witness_validation(witness: Vec<u8>) -> Result<bool, EZKLError> {
let _: GraphWitness = serde_json::from_slice(&witness[..]).map_err(InnerEZKLError::from)?;
Ok(true)
}
/// Validate the compiled circuit
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn compiled_circuit_validation(compiled_circuit: Vec<u8>) -> Result<bool, EZKLError> {
let _: GraphCircuit = bincode::deserialize(&compiled_circuit[..]).map_err(|e| {
EZKLError::InternalError(format!("Failed to deserialize compiled circuit: {}", e))
})?;
Ok(true)
}
/// Validate the input json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn input_validation(input: Vec<u8>) -> Result<bool, EZKLError> {
let _: crate::graph::input::GraphData =
serde_json::from_slice(&input[..]).map_err(InnerEZKLError::from)?;
Ok(true)
}
/// Validate the proof json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn proof_validation(proof: Vec<u8>) -> Result<bool, EZKLError> {
let _: crate::pfsys::Snark<Fr, G1Affine> =
serde_json::from_slice(&proof[..]).map_err(InnerEZKLError::from)?;
Ok(true)
}
/// Validate the verifying key given the settings json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn vk_validation(vk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKLError> {
let circuit_settings: GraphSettings =
serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
let mut reader = BufReader::new(&vk[..]);
let _ = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize verifying key: {}", e)))?;
Ok(true)
}
/// Validate the proving key given the settings json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn pk_validation(pk: Vec<u8>, settings: Vec<u8>) -> Result<bool, EZKLError> {
let circuit_settings: GraphSettings =
serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
let mut reader = BufReader::new(&pk[..]);
let _ = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize proving key: {}", e)))?;
Ok(true)
}
/// Validate the settings json
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn settings_validation(settings: Vec<u8>) -> Result<bool, EZKLError> {
let _: GraphSettings = serde_json::from_slice(&settings[..]).map_err(InnerEZKLError::from)?;
Ok(true)
}
/// Validate the srs
#[cfg_attr(feature = "ios-bindings", uniffi::export)]
pub(crate) fn srs_validation(srs: Vec<u8>) -> Result<bool, EZKLError> {
let mut reader = BufReader::new(&srs[..]);
let _: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).map_err(|e| {
EZKLError::InternalError(format!("Failed to deserialize params: {}", e))
})?;
Ok(true)
}
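Each validator above just attempts a deserialization and returns `Ok(true)` on success, so a host can cheaply pre-flight every artifact before starting an expensive prove; a sketch under that assumption:
// Sketch: fail fast on malformed artifacts before proving.
fn preflight(
    witness: &[u8],
    compiled_circuit: &[u8],
    settings: &[u8],
    srs: &[u8],
) -> Result<(), EZKLError> {
    witness_validation(witness.to_vec())?;
    compiled_circuit_validation(compiled_circuit.to_vec())?;
    settings_validation(settings.to_vec())?;
    srs_validation(srs.to_vec())?;
    Ok(())
}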
// HELPER FUNCTIONS
fn get_params<
Scheme: for<'a> halo2_proofs::poly::commitment::Params<'a, halo2curves::bn256::G1Affine>,
>(
reader: &mut BufReader<&[u8]>,
) -> Result<Scheme, EZKLError> {
halo2_proofs::poly::commitment::Params::<G1Affine>::read(reader)
.map_err(|e| EZKLError::InternalError(format!("Failed to deserialize params: {}", e)))
}
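The caller picks the concrete params type via the turbofish, mirroring the KZG/IPA match in `prove`; a hedged sketch assuming `ParamsKZG<Bn256>` satisfies the bound (as it does elsewhere in this file):
// Sketch: read KZG params through the generic helper.
fn load_kzg_params(srs: &[u8]) -> Result<ParamsKZG<Bn256>, EZKLError> {
    let mut reader = BufReader::new(srs);
    get_params::<ParamsKZG<Bn256>>(&mut reader)
}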
/// Creates a [VerifyingKey] for a [GraphCircuit] (`circuit`) with specific [CommitmentScheme] parameters (`params`) for the WASM target
pub fn create_vk_lean<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
circuit: &C,
params: &'_ Scheme::ParamsProver,
compress_selectors: bool,
) -> Result<VerifyingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
<Scheme as CommitmentScheme>::Scalar: FromUniformBytes<64>,
{
// Witness-free instance of the circuit for key generation
let empty_circuit = <C as Circuit<F>>::without_witnesses(circuit);
// Initialize the verifying key
let vk = keygen_vk_custom(params, &empty_circuit, compress_selectors)?;
Ok(vk)
}
/// Creates a [ProvingKey] from a [VerifyingKey] for a [GraphCircuit] (`circuit`) with specific [CommitmentScheme] parameters (`params`) for the WASM target
pub fn create_pk_lean<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
vk: VerifyingKey<Scheme::Curve>,
circuit: &C,
params: &'_ Scheme::ParamsProver,
) -> Result<ProvingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
<Scheme as CommitmentScheme>::Scalar: FromUniformBytes<64>,
{
// Witness-free instance of the circuit for key generation
let empty_circuit = <C as Circuit<F>>::without_witnesses(circuit);
// Initialize the proving key
let pk = keygen_pk(params, vk, &empty_circuit)?;
Ok(pk)
}
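Taken together, the two lean helpers give a full keygen path; a sketch assuming a deserialized `GraphCircuit` and KZG params are in scope, with `compress_selectors` chosen by the caller:
// Sketch: vk first (cheaper), then extend it into a pk for proving.
fn keygen_lean(
    circuit: &GraphCircuit,
    params: &ParamsKZG<Bn256>,
) -> Result<ProvingKey<G1Affine>, halo2_proofs::plonk::Error> {
    let vk = create_vk_lean::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
        circuit, params, /* compress_selectors */ true,
    )?;
    create_pk_lean::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk, circuit, params)
}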

src/bindings/wasm.rs Normal file

@@ -0,0 +1,372 @@
use crate::{
circuit::modules::{
polycommit::PolyCommitChip,
poseidon::{
spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH},
PoseidonChip,
},
Module,
},
fieldutils::{felt_to_integer_rep, integer_rep_to_felt},
graph::{
modules::POSEIDON_LEN_GRAPH, quantize_float, scale_to_multiplier, GraphCircuit,
GraphSettings,
},
};
use console_error_panic_hook;
use halo2_proofs::{
plonk::*,
poly::kzg::commitment::{KZGCommitmentScheme, ParamsKZG},
};
use halo2curves::{
bn256::{Bn256, Fr, G1Affine},
ff::PrimeField,
};
use wasm_bindgen::prelude::*;
use wasm_bindgen_console_logger::DEFAULT_LOGGER;
use crate::bindings::universal::{
compiled_circuit_validation, encode_verifier_calldata, gen_pk, gen_vk, gen_witness,
input_validation, pk_validation, proof_validation, settings_validation, srs_validation,
verify_aggr, vk_validation, witness_validation, EZKLError as ExternalEZKLError,
};
#[cfg(feature = "web")]
pub use wasm_bindgen_rayon::init_thread_pool;
impl From<ExternalEZKLError> for JsError {
fn from(e: ExternalEZKLError) -> Self {
JsError::new(&format!("{}", e))
}
}
#[wasm_bindgen]
/// Initialize logger for wasm
pub fn init_logger() {
log::set_logger(&DEFAULT_LOGGER).unwrap();
}
#[wasm_bindgen]
/// Initialize panic hook for wasm
pub fn init_panic_hook() {
console_error_panic_hook::set_once();
}
/// Wrapper around the halo2 encode calldata method
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn encodeVerifierCalldata(
proof: wasm_bindgen::Clamped<Vec<u8>>,
vk_address: Option<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
encode_verifier_calldata(proof.0, vk_address).map_err(JsError::from)
}
/// Converts a felt to a big-endian hex string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToBigEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(format!("{:?}", felt))
}
/// Converts a felt to a little endian string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToLittleEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
let repr = serde_json::to_string(&felt).unwrap();
let b: String = serde_json::from_str(&repr).unwrap();
Ok(b)
}
/// Converts a felt to its signed integer representation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToInt(
array: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&felt_to_integer_rep(felt))
.map_err(|e| JsError::new(&format!("Failed to serialize integer: {}", e)))?,
))
}
/// Converts a felt to a floating point number given a fixed-point scale
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToFloat(
array: wasm_bindgen::Clamped<Vec<u8>>,
scale: crate::Scale,
) -> Result<f64, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
let int_rep = felt_to_integer_rep(felt);
let multiplier = scale_to_multiplier(scale);
Ok(int_rep as f64 / multiplier)
}
/// Converts a floating point number to a hex string representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn floatToFelt(
input: f64,
scale: crate::Scale,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let int_rep =
quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
let felt = integer_rep_to_felt(int_rep);
let vec = crate::pfsys::field_to_string::<halo2curves::bn256::Fr>(&felt);
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&vec).map_err(
|e| JsError::new(&format!("Failed to serialize a float to felt{}", e)),
)?))
}
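`floatToFelt` and `feltToFloat` are inverse fixed-point conversions; assuming `scale_to_multiplier(scale)` is `2^scale` (consistent with its use above), the arithmetic of the round trip is:
// Worked example of the fixed-point encoding, assuming multiplier = 2^scale.
fn fixed_point_roundtrip() {
    let scale = 7u32;                              // multiplier = 2^7 = 128
    let multiplier = f64::from(2u32.pow(scale));
    let x = 0.53125_f64;                           // 68 / 128, exactly representable
    let int_rep = (x * multiplier).round() as i64; // quantize: 68
    let recovered = int_rep as f64 / multiplier;   // dequantize: 0.53125
    assert_eq!(recovered, x);
}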
/// Generate a KZG commitment.
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn kzgCommit(
message: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let message: Vec<Fr> = serde_json::from_slice(&message[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
let mut reader = std::io::BufReader::new(&params_ser[..]);
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
let mut reader = std::io::BufReader::new(&vk[..]);
let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
)
.map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
let output = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
message,
(vk.cs().blinding_factors() + 1) as u32,
&params,
);
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&output).map_err(|e| JsError::new(&format!("{}", e)))?,
))
}
/// Converts a byte buffer to a vector of field elements, packing each 16-byte little-endian chunk into a felt
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn bufferToVecOfFelt(
buffer: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
// Convert the buffer to a slice
let buffer: &[u8] = &buffer;
// Divide the buffer into chunks of 16 bytes
let chunks = buffer.chunks_exact(16);
// Get the remainder
let remainder = chunks.remainder();
// Zero-pad the remainder to 16 bytes
let mut remainder = remainder.to_vec();
// Collect chunks into a Vec<[u8; 16]>.
let chunks: Result<Vec<[u8; 16]>, JsError> = chunks
.map(|slice| {
let array: [u8; 16] = slice
.try_into()
.map_err(|_| JsError::new("failed to slice input chunks"))?;
Ok(array)
})
.collect();
let mut chunks = chunks?;
if !remainder.is_empty() {
remainder.resize(16, 0);
// Convert the Vec<u8> to [u8; 16]
let remainder_array: [u8; 16] = remainder
.try_into()
.map_err(|_| JsError::new("failed to slice remainder"))?;
// append the remainder to the chunks
chunks.push(remainder_array);
}
// Convert each chunk to a field element
let field_elements: Vec<Fr> = chunks
.iter()
.map(|x| PrimeField::from_u128(u8_array_to_u128_le(*x)))
.collect();
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&field_elements)
.map_err(|e| JsError::new(&format!("Failed to serialize field elements: {}", e)))?,
))
}
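Concretely, a 20-byte buffer yields two felts: one from the exact 16-byte chunk and one from the zero-padded 4-byte remainder. A standalone sketch of the same packing, using the helper defined at the bottom of this file:
// Sketch: pack arbitrary bytes into 16-byte little-endian limbs.
fn pack_bytes_example() {
    let buffer = vec![0xAAu8; 20];
    let limbs: Vec<u128> = buffer
        .chunks(16)
        .map(|c| {
            let mut block = [0u8; 16];     // zero-pad a short final chunk
            block[..c.len()].copy_from_slice(c);
            u8_array_to_u128_le(block)
        })
        .collect();
    assert_eq!(limbs.len(), 2);            // one full chunk + padded remainder
}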
/// Generate a Poseidon hash in the browser from a message of serialized felts
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn poseidonHash(
message: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let message: Vec<Fr> = serde_json::from_slice(&message[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
let output =
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
message.clone(),
)
.map_err(|e| JsError::new(&format!("{}", e)))?;
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&output).map_err(
|e| JsError::new(&format!("Failed to serialize poseidon hash output: {}", e)),
)?))
}
/// Generate a witness from an input.json file and a compiled circuit.
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genWitness(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
input: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
gen_witness(compiled_circuit.0, input.0).map_err(JsError::from)
}
/// Generate verifying key in browser
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genVk(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
compress_selectors: bool,
) -> Result<Vec<u8>, JsError> {
gen_vk(compiled_circuit.0, params_ser.0, compress_selectors).map_err(JsError::from)
}
/// Generate proving key in browser
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn genPk(
vk: wasm_bindgen::Clamped<Vec<u8>>,
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
gen_pk(vk.0, compiled_circuit.0, params_ser.0).map_err(JsError::from)
}
/// Verify proof in browser using wasm
#[wasm_bindgen]
pub fn verify(
proof_js: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
srs: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
super::universal::verify(proof_js.0, vk.0, settings.0, srs.0).map_err(JsError::from)
}
/// Verify aggregate proof in browser using wasm
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn verifyAggr(
proof_js: wasm_bindgen::Clamped<Vec<u8>>,
vk: wasm_bindgen::Clamped<Vec<u8>>,
logrows: u64,
srs: wasm_bindgen::Clamped<Vec<u8>>,
commitment: &str,
) -> Result<bool, JsError> {
verify_aggr(proof_js.0, vk.0, logrows, srs.0, commitment).map_err(JsError::from)
}
/// Prove in browser using wasm
#[wasm_bindgen]
pub fn prove(
witness: wasm_bindgen::Clamped<Vec<u8>>,
pk: wasm_bindgen::Clamped<Vec<u8>>,
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
srs: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
super::universal::prove(witness.0, pk.0, compiled_circuit.0, srs.0).map_err(JsError::from)
}
// VALIDATION FUNCTIONS
/// Witness file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn witnessValidation(witness: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
witness_validation(witness.0).map_err(JsError::from)
}
/// Compiled circuit validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn compiledCircuitValidation(
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
compiled_circuit_validation(compiled_circuit.0).map_err(JsError::from)
}
/// Input file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn inputValidation(input: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
input_validation(input.0).map_err(JsError::from)
}
/// Proof file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn proofValidation(proof: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
proof_validation(proof.0).map_err(JsError::from)
}
/// Vk file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn vkValidation(
vk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
vk_validation(vk.0, settings.0).map_err(JsError::from)
}
/// Pk file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn pkValidation(
pk: wasm_bindgen::Clamped<Vec<u8>>,
settings: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
pk_validation(pk.0, settings.0).map_err(JsError::from)
}
/// Settings file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn settingsValidation(settings: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
settings_validation(settings.0).map_err(JsError::from)
}
/// Srs file validation
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn srsValidation(srs: wasm_bindgen::Clamped<Vec<u8>>) -> Result<bool, JsError> {
srs_validation(srs.0).map_err(JsError::from)
}
// HELPER FUNCTIONS
pub fn u8_array_to_u128_le(arr: [u8; 16]) -> u128 {
let mut n: u128 = 0;
for &b in arr.iter().rev() {
n <<= 8;
n |= b as u128;
}
n
}
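A quick check of the little-endian convention (byte 0 is least significant), which doubles as a usage example:
#[test]
fn u8_array_to_u128_le_is_little_endian() {
    let mut arr = [0u8; 16];
    arr[0] = 0x01; // least-significant byte
    arr[1] = 0x02;
    assert_eq!(u8_array_to_u128_le(arr), 0x0201);
    assert_eq!(u8_array_to_u128_le([0xFF; 16]), u128::MAX);
}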


@@ -0,0 +1,25 @@
use halo2_proofs::plonk::Error as PlonkError;
use thiserror::Error;
/// Error type for the circuit module
#[derive(Error, Debug)]
pub enum ModuleError {
/// Halo 2 error
#[error("[halo2] {0}")]
Halo2Error(#[from] PlonkError),
/// Wrong input type for a module
#[error("wrong input type {0} must be {1}")]
WrongInputType(String, String),
/// A constant was not previously assigned
#[error("constant was not previously assigned")]
ConstantNotAssigned,
/// Input length is wrong
#[error("input length is wrong {0}")]
InputWrongLength(usize),
}
impl From<ModuleError> for PlonkError {
fn from(_e: ModuleError) -> PlonkError {
PlonkError::Synthesis
}
}
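Because every `ModuleError` collapses to `PlonkError::Synthesis`, module code can use `?` inside halo2 synthesis bodies while keeping the richer error at the module boundary; a minimal sketch using the `From` impl above:
// Sketch: `?` converts ModuleError into PlonkError::Synthesis automatically.
fn synthesize_step(cell: Result<u64, ModuleError>) -> Result<u64, PlonkError> {
    let value = cell?; // ModuleError -> PlonkError via the From impl
    Ok(value)
}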


@@ -6,10 +6,11 @@ pub mod polycommit;
///
pub mod planner;
use halo2_proofs::{
circuit::Layouter,
plonk::{ConstraintSystem, Error},
};
///
pub mod errors;
use halo2_proofs::{circuit::Layouter, plonk::ConstraintSystem};
use halo2curves::ff::PrimeField;
pub use planner::*;
@@ -35,14 +36,14 @@ pub trait Module<F: PrimeField + TensorType + PartialOrd> {
/// Name
fn name(&self) -> &'static str;
/// Run the operation the module represents
fn run(input: Self::RunInputs) -> Result<Vec<Vec<F>>, Box<dyn std::error::Error>>;
fn run(input: Self::RunInputs) -> Result<Vec<Vec<F>>, errors::ModuleError>;
/// Layout inputs
fn layout_inputs(
&self,
layouter: &mut impl Layouter<F>,
input: &[ValTensor<F>],
constants: &mut ConstantsMap<F>,
) -> Result<Self::InputAssignments, Error>;
) -> Result<Self::InputAssignments, errors::ModuleError>;
/// Layout
fn layout(
&self,
@@ -50,7 +51,7 @@ pub trait Module<F: PrimeField + TensorType + PartialOrd> {
input: &[ValTensor<F>],
row_offset: usize,
constants: &mut ConstantsMap<F>,
) -> Result<ValTensor<F>, Error>;
) -> Result<ValTensor<F>, errors::ModuleError>;
/// Number of instance values the module uses every time it is applied
fn instance_increment_input(&self) -> Vec<usize>;
/// Number of rows used by the module


@@ -18,6 +18,7 @@ use halo2curves::CurveAffine;
use crate::circuit::region::ConstantsMap;
use crate::tensor::{Tensor, ValTensor, ValType, VarTensor};
use super::errors::ModuleError;
use super::Module;
/// The number of instance columns used by the PolyCommit hash function
@@ -110,7 +111,7 @@ impl Module<Fp> for PolyCommitChip {
_: &mut impl Layouter<Fp>,
_: &[ValTensor<Fp>],
_: &mut ConstantsMap<Fp>,
) -> Result<Self::InputAssignments, Error> {
) -> Result<Self::InputAssignments, ModuleError> {
Ok(())
}
@@ -123,28 +124,30 @@ impl Module<Fp> for PolyCommitChip {
input: &[ValTensor<Fp>],
_: usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<ValTensor<Fp>, Error> {
) -> Result<ValTensor<Fp>, ModuleError> {
assert_eq!(input.len(), 1);
let local_constants = constants.clone();
layouter.assign_region(
|| "PolyCommit",
|mut region| {
let mut local_inner_constants = local_constants.clone();
let res = self.config.inputs.assign(
&mut region,
0,
&input[0],
&mut local_inner_constants,
)?;
*constants = local_inner_constants;
Ok(res)
},
)
layouter
.assign_region(
|| "PolyCommit",
|mut region| {
let mut local_inner_constants = local_constants.clone();
let res = self.config.inputs.assign(
&mut region,
0,
&input[0],
&mut local_inner_constants,
)?;
*constants = local_inner_constants;
Ok(res)
},
)
.map_err(|e| e.into())
}
///
fn run(message: Vec<Fp>) -> Result<Vec<Vec<Fp>>, Box<dyn std::error::Error>> {
fn run(message: Vec<Fp>) -> Result<Vec<Vec<Fp>>, ModuleError> {
Ok(vec![message])
}
@@ -216,7 +219,7 @@ mod tests {
fn polycommit_chip_for_a_range_of_input_sizes() {
let rng = rand::rngs::OsRng;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
{
@@ -244,7 +247,7 @@ mod tests {
#[test]
#[ignore]
fn polycommit_chip_much_longer_input() {
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
let rng = rand::rngs::OsRng;


@@ -21,6 +21,7 @@ use std::marker::PhantomData;
use crate::circuit::region::ConstantsMap;
use crate::tensor::{Tensor, ValTensor, ValType};
use super::errors::ModuleError;
use super::Module;
/// The number of instance columns used by the Poseidon hash function
@@ -174,7 +175,7 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
layouter: &mut impl Layouter<Fp>,
message: &[ValTensor<Fp>],
constants: &mut ConstantsMap<Fp>,
) -> Result<Self::InputAssignments, Error> {
) -> Result<Self::InputAssignments, ModuleError> {
assert_eq!(message.len(), 1);
let message = message[0].clone();
@@ -185,78 +186,82 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
let res = layouter.assign_region(
|| "load message",
|mut region| {
let assigned_message: Result<Vec<AssignedCell<Fp, Fp>>, Error> = match &message {
ValTensor::Value { inner: v, .. } => v
.iter()
.enumerate()
.map(|(i, value)| {
let x = i % WIDTH;
let y = i / WIDTH;
let assigned_message: Result<Vec<AssignedCell<Fp, Fp>>, ModuleError> =
match &message {
ValTensor::Value { inner: v, .. } => {
v.iter()
.enumerate()
.map(|(i, value)| {
let x = i % WIDTH;
let y = i / WIDTH;
match value {
ValType::Value(v) => region.assign_advice(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
|| *v,
),
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
Ok(v.clone())
}
ValType::Constant(f) => {
if local_constants.contains_key(f) {
Ok(constants.get(f).unwrap().assigned_cell().ok_or({
log::error!("constant not previously assigned");
Error::Synthesis
})?)
} else {
let res = region.assign_advice_from_constant(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
*f,
)?;
match value {
ValType::Value(v) => region
.assign_advice(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
|| *v,
)
.map_err(|e| e.into()),
ValType::PrevAssigned(v)
| ValType::AssignedConstant(v, ..) => Ok(v.clone()),
ValType::Constant(f) => {
if local_constants.contains_key(f) {
Ok(constants
.get(f)
.unwrap()
.assigned_cell()
.ok_or(ModuleError::ConstantNotAssigned)?)
} else {
let res = region.assign_advice_from_constant(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
*f,
)?;
constants
.insert(*f, ValType::AssignedConstant(res.clone(), *f));
constants.insert(
*f,
ValType::AssignedConstant(res.clone(), *f),
);
Ok(res)
Ok(res)
}
}
e => Err(ModuleError::WrongInputType(
format!("{:?}", e),
"PrevAssigned".to_string(),
)),
}
}
e => {
log::error!(
"wrong input type {:?}, must be previously assigned",
e
);
Err(Error::Synthesis)
}
}
})
.collect(),
ValTensor::Instance {
dims,
inner: col,
idx,
initial_offset,
..
} => {
// this should never ever fail
let num_elems = dims[*idx].iter().product::<usize>();
(0..num_elems)
.map(|i| {
let x = i % WIDTH;
let y = i / WIDTH;
region.assign_advice_from_instance(
|| "pub input anchor",
*col,
initial_offset + i,
self.config.hash_inputs[x],
y,
)
})
.collect()
}
};
})
.collect()
}
ValTensor::Instance {
dims,
inner: col,
idx,
initial_offset,
..
} => {
// this should never ever fail
let num_elems = dims[*idx].iter().product::<usize>();
(0..num_elems)
.map(|i| {
let x = i % WIDTH;
let y = i / WIDTH;
region.assign_advice_from_instance(
|| "pub input anchor",
*col,
initial_offset + i,
self.config.hash_inputs[x],
y,
)
})
.collect::<Result<Vec<_>, _>>()
.map_err(|e| e.into())
}
};
let offset = message.len() / WIDTH + 1;
@@ -277,7 +282,7 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
message.len(),
start_time.elapsed()
);
res
res.map_err(|e| e.into())
}
/// L is the number of inputs to the hash function
@@ -289,7 +294,7 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
input: &[ValTensor<Fp>],
row_offset: usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<ValTensor<Fp>, Error> {
) -> Result<ValTensor<Fp>, ModuleError> {
let (mut input_cells, zero_val) = self.layout_inputs(layouter, input, constants)?;
// extract the values from the input cells
let mut assigned_input: Tensor<ValType<Fp>> =
@@ -301,7 +306,7 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
let mut one_iter = false;
// do the Tree dance baby
while input_cells.len() > 1 || !one_iter {
let hashes: Result<Vec<AssignedCell<Fp, Fp>>, Error> = input_cells
let hashes: Result<Vec<AssignedCell<Fp, Fp>>, ModuleError> = input_cells
.chunks(L)
.enumerate()
.map(|(i, block)| {
@@ -332,7 +337,8 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
hash
})
.collect();
.collect::<Result<Vec<_>, _>>()
.map_err(|e| e.into());
log::trace!("hashes (N={:?}) took: {:?}", len, start_time.elapsed());
one_iter = true;
@@ -348,7 +354,7 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
ValType::PrevAssigned(v) => v,
_ => {
log::error!("wrong input type, must be previously assigned");
return Err(Error::Synthesis);
return Err(Error::Synthesis.into());
}
};
@@ -380,7 +386,7 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
}
///
fn run(message: Vec<Fp>) -> Result<Vec<Vec<Fp>>, Box<dyn std::error::Error>> {
fn run(message: Vec<Fp>) -> Result<Vec<Vec<Fp>>, ModuleError> {
let mut hash_inputs = message;
let len = hash_inputs.len();
@@ -400,7 +406,11 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
block.extend(vec![Fp::ZERO; L - remainder].iter());
}
let message = block.try_into().map_err(|_| Error::Synthesis)?;
let block_len = block.len();
let message = block
.try_into()
.map_err(|_| ModuleError::InputWrongLength(block_len))?;
Ok(halo2_gadgets::poseidon::primitives::Hash::<
_,
@@ -411,7 +421,7 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
>::init()
.hash(message))
})
.collect::<Result<Vec<_>, Error>>()?;
.collect::<Result<Vec<_>, ModuleError>>()?;
one_iter = true;
hash_inputs = hashes;
}
@@ -550,7 +560,7 @@ mod tests {
fn hash_for_a_range_of_input_sizes() {
let rng = rand::rngs::OsRng;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
env_logger::init();
{

Some files were not shown because too many files have changed in this diff.