mirror of https://github.com/zkonduit/ezkl.git (synced 2026-01-13 00:08:12 -05:00)

Compare commits (15 commits)
Commits in this comparison:

- c45f04ee5a
- 5c574adc31
- 749e0ba652
- d464ddf6b6
- 8f6c0aced5
- 860e9700a8
- 32dd4a854f
- 924f7c0420
- ae03b6515b
- bae2e9e22b
- 4a93d31869
- 88dd83dbe5
- f05f83481e
- 8aaf518b5e
- 1b7b43e073
@@ -1,4 +1,4 @@
-name: Build and Publish EZKL npm packages (wasm bindings and in-browser evm verifier)
+name: Build and Publish EZKL Engine npm package

 on:
   workflow_dispatch:
@@ -62,7 +62,7 @@ jobs:
             "web/ezkl_bg.wasm",
             "web/ezkl.js",
             "web/ezkl.d.ts",
-            "web/snippets/wasm-bindgen-rayon-7afa899f36665473/src/workerHelpers.js",
+            "web/snippets/**/*",
             "web/package.json",
             "web/utils.js",
             "ezkl.d.ts"
@@ -79,6 +79,10 @@ jobs:
        run: |
          sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|" pkg/nodejs/ezkl.js

+      - name: Replace `import.meta.url` with `import.meta.resolve` definition in workerHelpers.js
+        run: |
+          find ./pkg/web/snippets -type f -name "*.js" -exec sed -i "s|import.meta.url|import.meta.resolve|" {} +
+
       - name: Add serialize and deserialize methods to nodejs bundle
         run: |
           echo '
@@ -174,40 +178,3 @@ jobs:
           npm publish
         env:
           NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
-
-  in-browser-evm-ver-publish:
-    name: publish-in-browser-evm-verifier-package
-    needs: ["publish-wasm-bindings"]
-    runs-on: ubuntu-latest
-    if: startsWith(github.ref, 'refs/tags/')
-    steps:
-      - uses: actions/checkout@v4
-      - name: Update version in package.json
-        shell: bash
-        env:
-          RELEASE_TAG: ${{ github.ref_name }}
-        run: |
-          sed -i "s|\"version\": \".*\"|\"version\": \"${{ github.ref_name }}\"|" in-browser-evm-verifier/package.json
-      - name: Update @ezkljs/engine version in package.json
-        shell: bash
-        env:
-          RELEASE_TAG: ${{ github.ref_name }}
-        run: |
-          sed -i "s|\"@ezkljs/engine\": \".*\"|\"@ezkljs/engine\": \"${{ github.ref_name }}\"|" in-browser-evm-verifier/package.json
-      - name: Update the engine import in in-browser-evm-verifier to use @ezkljs/engine package instead of the local one;
-        run: |
-          sed -i "s|import { encodeVerifierCalldata } from '../nodejs/ezkl';|import { encodeVerifierCalldata } from '@ezkljs/engine';|" in-browser-evm-verifier/src/index.ts
-      - name: Set up Node.js
-        uses: actions/setup-node@v3
-        with:
-          node-version: "18.12.1"
-          registry-url: "https://registry.npmjs.org"
-      - name: Publish to npm
-        run: |
-          cd in-browser-evm-verifier
-          npm install
-          npm run build
-          npm ci
-          npm publish
-        env:
-          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
.github/workflows/rust.yml (vendored): 42 lines changed
@@ -307,8 +307,8 @@ jobs:
        run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
      - name: Install dependencies for js tests and in-browser-evm-verifier package
        run: |
-          pnpm install --no-frozen-lockfile
-          pnpm install --dir ./in-browser-evm-verifier --no-frozen-lockfile
+          pnpm install --frozen-lockfile
+          pnpm install --dir ./in-browser-evm-verifier --frozen-lockfile
        env:
          CI: false
          NODE_ENV: development
@@ -354,7 +354,7 @@ jobs:

  prove-and-verify-tests:
    runs-on: non-gpu
-    needs: [build, library-tests, docs, python-tests, python-integration-tests]
+    needs: [build, library-tests, docs]
    steps:
      - uses: actions/checkout@v4
      - uses: actions-rs/toolchain@v1
@@ -380,7 +380,7 @@ jobs:
          cache: "pnpm"
      - name: Install dependencies for js tests
        run: |
-          pnpm install --no-frozen-lockfile
+          pnpm install --frozen-lockfile
        env:
          CI: false
          NODE_ENV: development
@@ -394,14 +394,18 @@ jobs:
      - name: Replace memory definition in nodejs
        run: |
          sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|" tests/wasm/nodejs/ezkl.js
      - name: KZG prove and verify tests (public outputs + column overflow)
        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w
+      - name: KZG prove and verify tests (public outputs + fixed params + column overflow)
+        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_
+      - name: KZG prove and verify tests (hashed inputs + column overflow)
+        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_hashed_inputs_
+      - name: KZG prove and verify tests (public outputs)
+        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_tight_lookup_::t
      - name: IPA prove and verify tests
        run: cargo nextest run --release --verbose tests::ipa_prove_and_verify_::t --test-threads 1
      - name: IPA prove and verify tests (ipa outputs)
        run: cargo nextest run --release --verbose tests::ipa_prove_and_verify_ipa_output
-      - name: KZG prove and verify tests (public outputs + column overflow)
-        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w
      - name: KZG prove and verify tests single inner col
        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_single_col
      - name: KZG prove and verify tests triple inner col
@@ -412,8 +416,6 @@ jobs:
        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_octuple_col --test-threads 8
      - name: KZG prove and verify tests (kzg outputs)
        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output
-      - name: KZG prove and verify tests (public outputs + fixed params + column overflow)
-        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_
      - name: KZG prove and verify tests (public outputs)
        run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t
      - name: KZG prove and verify tests (public inputs)
@@ -545,8 +547,6 @@ jobs:
        with:
          crate: cargo-nextest
          locked: true
-      - name: Download MNIST
-        run: sh data.sh
      - name: Examples
        run: cargo nextest run --release tests_examples

@@ -610,6 +610,24 @@ jobs:

  python-integration-tests:
    runs-on: large-self-hosted
+    services:
+      # Label used to access the service container
+      postgres:
+        # Docker Hub image
+        image: postgres
+        env:
+          POSTGRES_USER: ubuntu
+          POSTGRES_HOST_AUTH_METHOD: trust
+        # Set health checks to wait until postgres has started
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
+          -v /var/run/postgresql:/var/run/postgresql
+        ports:
+          # Maps tcp port 5432 on service container to the host
+          - 5432:5432
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v4
@@ -634,6 +652,8 @@ jobs:
        run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt; python -m ensurepip --upgrade
      - name: Build python ezkl
        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
+      - name: Postgres tutorials
+        run: source .env/bin/activate; cargo nextest run py_tests::tests::postgres_ --no-capture
      - name: Tictactoe tutorials
        run: source .env/bin/activate; cargo nextest run py_tests::tests::tictactoe_ --test-threads 1
      # - name: authenticate-kaggle-cli
@@ -651,5 +671,3 @@ jobs:
        run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
      - name: NBEATS tutorial
        run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
-      # - name: Postgres tutorials
-      #   run: source .env/bin/activate; cargo nextest run py_tests::tests::postgres_ --test-threads 1
.github/workflows/verify.yml (vendored, new file): 65 lines
@@ -0,0 +1,65 @@
name: Build and Publish EZKL npm packages (wasm bindings and in-browser evm verifier)

on:
  workflow_dispatch:
    inputs:
      tag:
        description: "The tag to release"
        required: true
  push:
    tags:
      - "*"

defaults:
  run:
    working-directory: .
jobs:
  in-browser-evm-ver-publish:
    name: publish-in-browser-evm-verifier-package
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/')
    steps:
      - uses: actions/checkout@v4
      - name: Update version in package.json
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          sed -i "s|\"version\": \".*\"|\"version\": \"${{ github.ref_name }}\"|" in-browser-evm-verifier/package.json
      - name: Update @ezkljs/engine version in package.json
        shell: bash
        env:
          RELEASE_TAG: ${{ github.ref_name }}
        run: |
          sed -i "s|\"@ezkljs/engine\": \".*\"|\"@ezkljs/engine\": \"${{ github.ref_name#v }}\"|" in-browser-evm-verifier/package.json
      - name: Update the engine import in in-browser-evm-verifier to use @ezkljs/engine package instead of the local one;
        run: |
          sed -i "s|import { encodeVerifierCalldata } from '../nodejs/ezkl';|import { encodeVerifierCalldata } from '@ezkljs/engine';|" in-browser-evm-verifier/src/index.ts
      - name: Fetch integrity
        run: |
          ENGINE_INTEGRITY=$(npm view @ezkljs/engine@${{ github.ref_name#v }} dist.integrity)
          echo "ENGINE_INTEGRITY=$ENGINE_INTEGRITY" >> $GITHUB_ENV
      - name: Update pnpm-lock.yaml versions and integrity
        run: |
          awk -v integrity="$ENGINE_INTEGRITY" -v tag="${{ github.ref_name#v }}" '
          NR==30{$0=" specifier: \"" tag "\""}
          NR==31{$0=" version: \"" tag "\""}
          NR==400{$0=" /@ezkljs/engine@" tag ":"}
          NR==401{$0=" resolution: {integrity: \"" integrity "\"}"} 1' in-browser-evm-verifier/pnpm-lock.yaml > temp.yaml && mv temp.yaml in-browser-evm-verifier/pnpm-lock.yaml
      - name: Use pnpm 8
        uses: pnpm/action-setup@v2
        with:
          version: 8
      - name: Set up Node.js
        uses: actions/setup-node@v3
        with:
          node-version: "18.12.1"
          registry-url: "https://registry.npmjs.org"
      - name: Publish to npm
        run: |
          cd in-browser-evm-verifier
          pnpm install --frozen-lockfile
          pnpm run build
          pnpm publish
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
.gitignore (vendored): 3 lines changed
@@ -1,6 +1,5 @@
 target
 pkg
-data
 *.csv
 !examples/notebooks/eth_price.csv
 *.ipynb_checkpoints
@@ -50,4 +49,4 @@ timingData.json
 !tests/wasm/pk.key
 !tests/wasm/vk.key
 docs/python/build
-!tests/wasm/vk_aggr.key
+!tests/wasm/vk_aggr.key
Cargo.lock (generated): 121 lines changed
@@ -655,6 +655,12 @@ dependencies = [
|
||||
"constant_time_eq",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "block"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
|
||||
|
||||
[[package]]
|
||||
name = "block-buffer"
|
||||
version = "0.9.0"
|
||||
@@ -1011,6 +1017,17 @@ version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
|
||||
|
||||
[[package]]
|
||||
name = "core-graphics-types"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "45390e6114f68f718cc7a830514a96f903cccd70d02a8f6d9f643ac4ba45afaf"
|
||||
dependencies = [
|
||||
"bitflags 1.3.2",
|
||||
"core-foundation",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cpufeatures"
|
||||
version = "0.2.12"
|
||||
@@ -1307,6 +1324,12 @@ version = "1.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125"
|
||||
|
||||
[[package]]
|
||||
name = "dyn-hash"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a650a461c6a8ff1ef205ed9a2ad56579309853fecefc2423f73dced342f92258"
|
||||
|
||||
[[package]]
|
||||
name = "ecc"
|
||||
version = "0.1.0"
|
||||
@@ -1793,8 +1816,10 @@ dependencies = [
|
||||
"lazy_static",
|
||||
"log",
|
||||
"maybe-rayon",
|
||||
"metal",
|
||||
"mnist",
|
||||
"num",
|
||||
"objc",
|
||||
"openssl",
|
||||
"pg_bigdecimal",
|
||||
"plotters",
|
||||
@@ -1932,7 +1957,28 @@ version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
|
||||
dependencies = [
|
||||
"foreign-types-shared",
|
||||
"foreign-types-shared 0.1.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965"
|
||||
dependencies = [
|
||||
"foreign-types-macros",
|
||||
"foreign-types-shared 0.3.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types-macros"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1941,6 +1987,12 @@ version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
|
||||
|
||||
[[package]]
|
||||
name = "foreign-types-shared"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b"
|
||||
|
||||
[[package]]
|
||||
name = "form_urlencoded"
|
||||
version = "1.2.1"
|
||||
@@ -2224,7 +2276,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2_solidity_verifier"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/alexander-camuto/halo2-solidity-verifier?branch=main#eb04be1f7d005e5b9dd3ff41efa30aeb5e0c34a3"
|
||||
source = "git+https://github.com/alexander-camuto/halo2-solidity-verifier?branch=main#fd74f1da2ce51664e2d4349965987ee606551060"
|
||||
dependencies = [
|
||||
"askama",
|
||||
"blake2b_simd",
|
||||
@@ -2933,6 +2985,15 @@ dependencies = [
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "malloc_buf"
|
||||
version = "0.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "62bb907fe88d54d8d9ce32a3cceab4218ed2f6b7d35617cafe9adf84e43919cb"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "maplit"
|
||||
version = "1.0.2"
|
||||
@@ -2993,6 +3054,20 @@ dependencies = [
|
||||
"autocfg",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "metal"
|
||||
version = "0.27.0"
|
||||
source = "git+https://github.com/gfx-rs/metal-rs#ff8fd3d6dc7792852f8a015458d7e6d42d7fb352"
|
||||
dependencies = [
|
||||
"bitflags 2.5.0",
|
||||
"block",
|
||||
"core-graphics-types",
|
||||
"foreign-types 0.5.0",
|
||||
"log",
|
||||
"objc",
|
||||
"paste",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mime"
|
||||
version = "0.3.17"
|
||||
@@ -3205,6 +3280,15 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
|
||||
|
||||
[[package]]
|
||||
name = "objc"
|
||||
version = "0.2.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "915b1b472bc21c53464d6c8461c9d3af805ba1ef837e1cac254428f4a77177b1"
|
||||
dependencies = [
|
||||
"malloc_buf",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.32.2"
|
||||
@@ -3265,7 +3349,7 @@ checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f"
|
||||
dependencies = [
|
||||
"bitflags 2.5.0",
|
||||
"cfg-if",
|
||||
"foreign-types",
|
||||
"foreign-types 0.3.2",
|
||||
"libc",
|
||||
"once_cell",
|
||||
"openssl-macros",
|
||||
@@ -5398,8 +5482,8 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tract-core"
|
||||
version = "0.21.3"
|
||||
source = "git+https://github.com/sonos/tract/?rev=681a096f02c9d7d363102d9fb0e446d1710ac2c8#681a096f02c9d7d363102d9fb0e446d1710ac2c8"
|
||||
version = "0.21.5-pre"
|
||||
source = "git+https://github.com/sonos/tract/?rev=05ebf550aa9922b221af4635c21a67a8d2af12a9#05ebf550aa9922b221af4635c21a67a8d2af12a9"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bit-set",
|
||||
@@ -5422,10 +5506,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tract-data"
|
||||
version = "0.21.3"
|
||||
source = "git+https://github.com/sonos/tract/?rev=681a096f02c9d7d363102d9fb0e446d1710ac2c8#681a096f02c9d7d363102d9fb0e446d1710ac2c8"
|
||||
version = "0.21.5-pre"
|
||||
source = "git+https://github.com/sonos/tract/?rev=05ebf550aa9922b221af4635c21a67a8d2af12a9#05ebf550aa9922b221af4635c21a67a8d2af12a9"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"downcast-rs",
|
||||
"dyn-hash",
|
||||
"half 2.2.1",
|
||||
"itertools 0.12.1",
|
||||
"lazy_static",
|
||||
@@ -5441,8 +5527,8 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tract-hir"
|
||||
version = "0.21.3"
|
||||
source = "git+https://github.com/sonos/tract/?rev=681a096f02c9d7d363102d9fb0e446d1710ac2c8#681a096f02c9d7d363102d9fb0e446d1710ac2c8"
|
||||
version = "0.21.5-pre"
|
||||
source = "git+https://github.com/sonos/tract/?rev=05ebf550aa9922b221af4635c21a67a8d2af12a9#05ebf550aa9922b221af4635c21a67a8d2af12a9"
|
||||
dependencies = [
|
||||
"derive-new",
|
||||
"log",
|
||||
@@ -5451,13 +5537,14 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tract-linalg"
|
||||
version = "0.21.3"
|
||||
source = "git+https://github.com/sonos/tract/?rev=681a096f02c9d7d363102d9fb0e446d1710ac2c8#681a096f02c9d7d363102d9fb0e446d1710ac2c8"
|
||||
version = "0.21.5-pre"
|
||||
source = "git+https://github.com/sonos/tract/?rev=05ebf550aa9922b221af4635c21a67a8d2af12a9#05ebf550aa9922b221af4635c21a67a8d2af12a9"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"derive-new",
|
||||
"downcast-rs",
|
||||
"dyn-clone",
|
||||
"dyn-hash",
|
||||
"half 2.2.1",
|
||||
"lazy_static",
|
||||
"liquid",
|
||||
@@ -5475,8 +5562,8 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tract-nnef"
|
||||
version = "0.21.3"
|
||||
source = "git+https://github.com/sonos/tract/?rev=681a096f02c9d7d363102d9fb0e446d1710ac2c8#681a096f02c9d7d363102d9fb0e446d1710ac2c8"
|
||||
version = "0.21.5-pre"
|
||||
source = "git+https://github.com/sonos/tract/?rev=05ebf550aa9922b221af4635c21a67a8d2af12a9#05ebf550aa9922b221af4635c21a67a8d2af12a9"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"flate2",
|
||||
@@ -5489,8 +5576,8 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tract-onnx"
|
||||
version = "0.21.3"
|
||||
source = "git+https://github.com/sonos/tract/?rev=681a096f02c9d7d363102d9fb0e446d1710ac2c8#681a096f02c9d7d363102d9fb0e446d1710ac2c8"
|
||||
version = "0.21.5-pre"
|
||||
source = "git+https://github.com/sonos/tract/?rev=05ebf550aa9922b221af4635c21a67a8d2af12a9#05ebf550aa9922b221af4635c21a67a8d2af12a9"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"derive-new",
|
||||
@@ -5506,8 +5593,8 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tract-onnx-opl"
|
||||
version = "0.21.3"
|
||||
source = "git+https://github.com/sonos/tract/?rev=681a096f02c9d7d363102d9fb0e446d1710ac2c8#681a096f02c9d7d363102d9fb0e446d1710ac2c8"
|
||||
version = "0.21.5-pre"
|
||||
source = "git+https://github.com/sonos/tract/?rev=05ebf550aa9922b221af4635c21a67a8d2af12a9#05ebf550aa9922b221af4635c21a67a8d2af12a9"
|
||||
dependencies = [
|
||||
"getrandom",
|
||||
"log",
|
||||
|
||||
@@ -43,6 +43,7 @@ unzip-n = "0.1.2"
 num = "0.4.1"
 portable-atomic = "1.6.0"
 tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand" }
+metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }


 # evm related deps
@@ -80,9 +81,11 @@ pyo3-asyncio = { version = "0.20.0", features = [
     "tokio-runtime",
 ], default_features = false, optional = true }
 pyo3-log = { version = "0.9.0", default_features = false, optional = true }
-tract-onnx = { git = "https://github.com/sonos/tract/", rev = "681a096f02c9d7d363102d9fb0e446d1710ac2c8", default_features = false, optional = true }
+tract-onnx = { git = "https://github.com/sonos/tract/", rev = "05ebf550aa9922b221af4635c21a67a8d2af12a9", default_features = false, optional = true }
 tabled = { version = "0.12.0", optional = true }

+objc = { version = "0.2.4", optional = true }
+

 [target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies]
 colored = { version = "2.0.0", default_features = false, optional = true }
@@ -198,6 +201,7 @@ det-prove = []
 icicle = ["halo2_proofs/icicle_gpu"]
 empty-cmd = []
 no-banner = []
+metal = ["dep:metal", "dep:objc"]

 # icicle patch to 0.1.0 if feature icicle is enabled
 [patch.'https://github.com/ingonyama-zk/icicle']
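Note: the new optional `metal`/`objc` dependencies are wired to a `metal` cargo feature. As a hedged illustration only (this is not code from the repository), such a feature is normally consumed behind a `cfg` gate:

```rust
// Hypothetical sketch: assumes a crate whose Cargo.toml declares
// `metal = ["dep:metal", "dep:objc"]`, as in the diff above.
#[cfg(feature = "metal")]
fn msm_backend() -> &'static str {
    "metal" // GPU-accelerated path compiled only when the feature is enabled
}

#[cfg(not(feature = "metal"))]
fn msm_backend() -> &'static str {
    "cpu" // default path when the optional deps are not pulled in
}

fn main() {
    println!("msm backend: {}", msm_backend());
}
```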
@@ -20,9 +20,9 @@
       "name": "quantize_data",
       "outputs": [
         {
-          "internalType": "int128[]",
+          "internalType": "int64[]",
           "name": "quantized_data",
-          "type": "int128[]"
+          "type": "int64[]"
         }
       ],
       "stateMutability": "pure",
@@ -31,9 +31,9 @@
   {
     "inputs": [
       {
-        "internalType": "int128[]",
+        "internalType": "int64[]",
         "name": "quantized_data",
-        "type": "int128[]"
+        "type": "int64[]"
       }
     ],
     "name": "to_field_element",

@@ -125,7 +125,7 @@ contract QuantizeData {
     }

     function to_field_element(
-        int128[] memory quantized_data
+        int64[] memory quantized_data
     ) public pure returns (uint256[] memory output) {
         output = new uint256[](quantized_data.length);
         for (uint i; i < quantized_data.length; i++) {
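Note: the ABI and the `QuantizeData` contract move from `int128[]` to `int64[]`, and `to_field_element` maps each signed value into a `uint256` field element. A minimal standalone sketch of that mapping (an assumption for illustration, not the crate's actual `i64_to_felt`):

```rust
// Map a signed i64 into a prime field of order `p` by wrapping negatives,
// mirroring what `to_field_element` does with uint256 arithmetic on-chain.
// Assumes |x| < p, which holds for any realistic field modulus.
fn to_field_element(x: i64, p: u128) -> u128 {
    if x >= 0 {
        (x as u128) % p
    } else {
        p - (x.unsigned_abs() as u128) % p
    }
}

fn main() {
    // Example modulus chosen for the sketch only.
    let p: u128 = (1u128 << 64) - (1u128 << 32) + 1;
    assert_eq!(to_field_element(5, p), 5);
    assert_eq!(to_field_element(-1, p), p - 1);
}
```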
data.sh: 11 lines (file deleted)

@@ -1,11 +0,0 @@
-#! /bin/bash
-
-mkdir data
-cd data
-
-wget http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
-wget http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
-wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
-wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
-
-gzip -d *.gz
@@ -1,4 +1,4 @@
-ezkl==0.0.0
+ezkl==11.0.4
 sphinx
 sphinx-rtd-theme
 sphinxcontrib-napoleon

@@ -1,7 +1,7 @@
 import ezkl

 project = 'ezkl'
-release = '0.0.0'
+release = '11.0.4'
 version = release
@@ -42,8 +42,8 @@ const NUM_INNER_COLS: usize = 1;
 struct Config<
     const LEN: usize, //LEN = CHOUT x OH x OW flattened //not supported yet in rust stable
     const CLASSES: usize,
-    const LOOKUP_MIN: i128,
-    const LOOKUP_MAX: i128,
+    const LOOKUP_MIN: i64,
+    const LOOKUP_MAX: i64,
     // Convolution
     const KERNEL_HEIGHT: usize,
     const KERNEL_WIDTH: usize,
@@ -66,8 +66,8 @@ struct Config<
 struct MyCircuit<
     const LEN: usize, //LEN = CHOUT x OH x OW flattened
     const CLASSES: usize,
-    const LOOKUP_MIN: i128,
-    const LOOKUP_MAX: i128,
+    const LOOKUP_MIN: i64,
+    const LOOKUP_MAX: i64,
     // Convolution
     const KERNEL_HEIGHT: usize,
     const KERNEL_WIDTH: usize,
@@ -90,8 +90,8 @@ struct MyCircuit<
 impl<
     const LEN: usize,
     const CLASSES: usize,
-    const LOOKUP_MIN: i128,
-    const LOOKUP_MAX: i128,
+    const LOOKUP_MIN: i64,
+    const LOOKUP_MAX: i64,
     // Convolution
     const KERNEL_HEIGHT: usize,
     const KERNEL_WIDTH: usize,
@@ -308,6 +308,7 @@ pub fn runconv() {
     tst_lbl: _,
     ..
 } = MnistBuilder::new()
+    .base_path("examples/data")
     .label_format_digit()
     .training_set_length(50_000)
     .validation_set_length(10_000)
New binary files (not shown):
- examples/data/t10k-images-idx3-ubyte
- examples/data/t10k-labels-idx1-ubyte
- examples/data/train-images-idx3-ubyte
- examples/data/train-labels-idx1-ubyte
@@ -23,8 +23,8 @@ struct MyConfig {
 #[derive(Clone)]
 struct MyCircuit<
     const LEN: usize, //LEN = CHOUT x OH x OW flattened
-    const LOOKUP_MIN: i128,
-    const LOOKUP_MAX: i128,
+    const LOOKUP_MIN: i64,
+    const LOOKUP_MAX: i64,
 > {
     // Given the stateless MyConfig type information, a DNN trace is determined by its input and the parameters of its layers.
     // Computing the trace still requires a forward pass. The intermediate activations are stored only by the layouter.
@@ -34,7 +34,7 @@ struct MyCircuit<
     _marker: PhantomData<F>,
 }

-impl<const LEN: usize, const LOOKUP_MIN: i128, const LOOKUP_MAX: i128> Circuit<F>
+impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
     for MyCircuit<LEN, LOOKUP_MIN, LOOKUP_MAX>
 {
     type Config = MyConfig;
examples/notebooks/logistic_regression.ipynb (new file): 279 lines
@@ -0,0 +1,279 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "cf69bb3f-94e6-4dba-92cd-ce08df117d67",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Logistic Regression\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"Sklearn based models are slightly finicky to get into a suitable onnx format. \n",
|
||||
"This notebook showcases how to do so using the `hummingbird-ml` python package for a Logistic Regression model. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "95613ee9",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# check if notebook is in colab\n",
|
||||
"try:\n",
|
||||
" # install ezkl\n",
|
||||
" import google.colab\n",
|
||||
" import subprocess\n",
|
||||
" import sys\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"ezkl\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"onnx\"])\n",
|
||||
" subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"hummingbird-ml\"])\n",
|
||||
"\n",
|
||||
"# rely on local installation of ezkl if the notebook is not in colab\n",
|
||||
"except:\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"import os\n",
|
||||
"import torch\n",
|
||||
"import ezkl\n",
|
||||
"import json\n",
|
||||
"from hummingbird.ml import convert\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# here we create and (potentially train a model)\n",
|
||||
"\n",
|
||||
"# make sure you have the dependencies required here already installed\n",
|
||||
"import numpy as np\n",
|
||||
"from sklearn.linear_model import LogisticRegression\n",
|
||||
"X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n",
|
||||
"# y = 1 * x_0 + 2 * x_1 + 3\n",
|
||||
"y = np.dot(X, np.array([1, 2])) + 3\n",
|
||||
"reg = LogisticRegression().fit(X, y)\n",
|
||||
"reg.score(X, y)\n",
|
||||
"\n",
|
||||
"circuit = convert(reg, \"torch\", X[:1]).model\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b37637c4",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model_path = os.path.join('network.onnx')\n",
|
||||
"compiled_model_path = os.path.join('network.compiled')\n",
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"settings_path = os.path.join('settings.json')\n",
|
||||
"\n",
|
||||
"witness_path = os.path.join('witness.json')\n",
|
||||
"data_path = os.path.join('input.json')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "82db373a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"\n",
|
||||
"# export to onnx format\n",
|
||||
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
|
||||
"\n",
|
||||
"# Input to the model\n",
|
||||
"shape = X.shape[1:]\n",
|
||||
"x = torch.rand(1, *shape, requires_grad=True)\n",
|
||||
"torch_out = circuit(x)\n",
|
||||
"# Export the model\n",
|
||||
"torch.onnx.export(circuit, # model being run\n",
|
||||
" # model input (or a tuple for multiple inputs)\n",
|
||||
" x,\n",
|
||||
" # where to save the model (can be a file or file-like object)\n",
|
||||
" \"network.onnx\",\n",
|
||||
" export_params=True, # store the trained parameter weights inside the model file\n",
|
||||
" opset_version=10, # the ONNX version to export the model to\n",
|
||||
" do_constant_folding=True, # whether to execute constant folding for optimization\n",
|
||||
" input_names=['input'], # the model's input names\n",
|
||||
" output_names=['output'], # the model's output names\n",
|
||||
" dynamic_axes={'input': {0: 'batch_size'}, # variable length axes\n",
|
||||
" 'output': {0: 'batch_size'}})\n",
|
||||
"\n",
|
||||
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_shapes=[shape],\n",
|
||||
" input_data=[d],\n",
|
||||
" output_data=[((o).detach().numpy()).reshape([-1]).tolist() for o in torch_out])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(\"input.json\", 'w'))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "d5e374a2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"!RUST_LOG=trace\n",
|
||||
"# TODO: Dictionary outputs\n",
|
||||
"res = ezkl.gen_settings(model_path, settings_path)\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"cal_path = os.path.join(\"calibration.json\")\n",
|
||||
"\n",
|
||||
"data_array = (torch.randn(20, *shape).detach().numpy()).reshape([-1]).tolist()\n",
|
||||
"\n",
|
||||
"data = dict(input_data = [data_array])\n",
|
||||
"\n",
|
||||
"# Serialize data into file:\n",
|
||||
"json.dump(data, open(cal_path, 'w'))\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(data_path, model_path, settings_path, \"resources\")\n",
|
||||
"assert res == True\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "3aa4f090",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "8b74dcee",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# srs path\n",
|
||||
"res = ezkl.get_srs( settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "18c8b7c7",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# now generate the witness file \n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(data_path, compiled_model_path, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b1c561a8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"# HERE WE SETUP THE CIRCUIT PARAMS\n",
|
||||
"# WE GOT KEYS\n",
|
||||
"# WE GOT CIRCUIT PARAMETERS\n",
|
||||
"# EVERYTHING ANYONE HAS EVER NEEDED FOR ZK\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_model_path,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"assert os.path.isfile(vk_path)\n",
|
||||
"assert os.path.isfile(pk_path)\n",
|
||||
"assert os.path.isfile(settings_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "c384cbc8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# GENERATE A PROOF\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"proof_path = os.path.join('test.pf')\n",
|
||||
"\n",
|
||||
"res = ezkl.prove(\n",
|
||||
" witness_path,\n",
|
||||
" compiled_model_path,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" \n",
|
||||
" \"single\",\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"print(res)\n",
|
||||
"assert os.path.isfile(proof_path)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "76f00d41",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# VERIFY IT\n",
|
||||
"\n",
|
||||
"res = ezkl.verify(\n",
|
||||
" proof_path,\n",
|
||||
" settings_path,\n",
|
||||
" vk_path,\n",
|
||||
" \n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.12.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@@ -7,9 +7,9 @@
|
||||
"## Mean of ERC20 transfer amounts\n",
|
||||
"\n",
|
||||
"This notebook shows how to calculate the mean of ERC20 transfer amounts, pulling data in from a Postgres database. First we install and get the necessary libraries running. \n",
|
||||
"The first of which is [e2pg](https://github.com/indexsupply/x/tree/main/docs/e2pg), which is a library that allows us to pull data from the Ethereum blockchain into a Postgres database.\n",
|
||||
"The first of which is [shovel](https://indexsupply.com/shovel/docs/#getting-started), which is a library that allows us to pull data from the Ethereum blockchain into a Postgres database.\n",
|
||||
"\n",
|
||||
"Make sure you install postgres if needed https://postgresapp.com/. \n",
|
||||
"Make sure you install postgres if needed https://indexsupply.com/shovel/docs/#getting-started. \n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
@@ -21,23 +21,81 @@
|
||||
"source": [
|
||||
"import os\n",
|
||||
"import getpass\n",
|
||||
"\n",
|
||||
"import json\n",
|
||||
"import time\n",
|
||||
"import subprocess\n",
|
||||
"\n",
|
||||
"# swap out for the relevant linux/amd64, darwin/arm64, darwin/amd64, windows/amd64\n",
|
||||
"os.system(\"curl -LO https://indexsupply.net/bin/main/linux/amd64/e2pg\")\n",
|
||||
"os.system(\"chmod +x e2pg\")\n",
|
||||
"os.system(\"curl -LO https://indexsupply.net/bin/1.0/linux/amd64/shovel\")\n",
|
||||
"os.system(\"chmod +x shovel\")\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"os.environ[\"PG_URL\"] = \"postgresql://\" + getpass.getuser() + \":@localhost:5432/e2pg\"\n",
|
||||
"os.environ[\"RLPS_URL\"] = \"https://1.rlps.indexsupply.net\"\n",
|
||||
"os.environ[\"PG_URL\"] = \"postgres://\" + getpass.getuser() + \":@localhost:5432/shovel\"\n",
|
||||
"\n",
|
||||
"# create a config.json file with the following contents\n",
|
||||
"config = {\n",
|
||||
" \"pg_url\": \"$PG_URL\",\n",
|
||||
" \"eth_sources\": [\n",
|
||||
" {\"name\": \"mainnet\", \"chain_id\": 1, \"url\": \"https://ethereum-rpc.publicnode.com\"},\n",
|
||||
" {\"name\": \"base\", \"chain_id\": 8453, \"url\": \"https://base-rpc.publicnode.com\"}\n",
|
||||
" ],\n",
|
||||
" \"integrations\": [{\n",
|
||||
" \"name\": \"usdc_transfer\",\n",
|
||||
" \"enabled\": True,\n",
|
||||
" \"sources\": [{\"name\": \"mainnet\"}, {\"name\": \"base\"}],\n",
|
||||
" \"table\": {\n",
|
||||
" \"name\": \"usdc\",\n",
|
||||
" \"columns\": [\n",
|
||||
" {\"name\": \"log_addr\", \"type\": \"bytea\"},\n",
|
||||
" {\"name\": \"block_num\", \"type\": \"numeric\"},\n",
|
||||
" {\"name\": \"f\", \"type\": \"bytea\"},\n",
|
||||
" {\"name\": \"t\", \"type\": \"bytea\"},\n",
|
||||
" {\"name\": \"v\", \"type\": \"numeric\"}\n",
|
||||
" ]\n",
|
||||
" },\n",
|
||||
" \"block\": [\n",
|
||||
" {\"name\": \"block_num\", \"column\": \"block_num\"},\n",
|
||||
" {\n",
|
||||
" \"name\": \"log_addr\",\n",
|
||||
" \"column\": \"log_addr\",\n",
|
||||
" \"filter_op\": \"contains\",\n",
|
||||
" \"filter_arg\": [\n",
|
||||
" \"a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48\",\n",
|
||||
" \"833589fCD6eDb6E08f4c7C32D4f71b54bdA02913\"\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
" ],\n",
|
||||
" \"event\": {\n",
|
||||
" \"name\": \"Transfer\",\n",
|
||||
" \"type\": \"event\",\n",
|
||||
" \"anonymous\": False,\n",
|
||||
" \"inputs\": [\n",
|
||||
" {\"indexed\": True, \"name\": \"from\", \"type\": \"address\", \"column\": \"f\"},\n",
|
||||
" {\"indexed\": True, \"name\": \"to\", \"type\": \"address\", \"column\": \"t\"},\n",
|
||||
" {\"indexed\": False, \"name\": \"value\", \"type\": \"uint256\", \"column\": \"v\"}\n",
|
||||
" ]\n",
|
||||
" }\n",
|
||||
" }]\n",
|
||||
"}\n",
|
||||
"\n",
|
||||
"# write the config to a file\n",
|
||||
"with open(\"config.json\", \"w\") as f:\n",
|
||||
" f.write(json.dumps(config))\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# print the two env variables\n",
|
||||
"os.system(\"echo $PG_URL\")\n",
|
||||
"os.system(\"echo $RLPS_URL\")\n",
|
||||
"\n",
|
||||
"os.system(\"createdb -h localhost -p 5432 e2pg\")\n",
|
||||
"# equivalent of nohup ./e2pg -reset -e $RLPS_URL -pg $PG_URL &\n",
|
||||
"e2pg_process = os.system(\"nohup ./e2pg -e $RLPS_URL -pg $PG_URL &\")\n",
|
||||
"os.system(\"createdb -h localhost -p 5432 shovel\")\n",
|
||||
"\n",
|
||||
"os.system(\"echo shovel is now installed. starting:\")\n",
|
||||
"\n",
|
||||
"command = [\"./shovel\", \"-config\", \"config.json\"]\n",
|
||||
"subprocess.Popen(command)\n",
|
||||
"\n",
|
||||
"os.system(\"echo shovel started.\")\n",
|
||||
"\n",
|
||||
"time.sleep(5)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
@@ -79,11 +137,13 @@
|
||||
"import json\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"# import logging\n",
|
||||
"# # # uncomment for more descriptive logging \n",
|
||||
"# FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
|
||||
"# logging.basicConfig(format=FORMAT)\n",
|
||||
"# logging.getLogger().setLevel(logging.DEBUG)"
|
||||
"import logging\n",
|
||||
"# # uncomment for more descriptive logging \n",
|
||||
"FORMAT = '%(levelname)s %(name)s %(asctime)-15s %(filename)s:%(lineno)d %(message)s'\n",
|
||||
"logging.basicConfig(format=FORMAT)\n",
|
||||
"logging.getLogger().setLevel(logging.DEBUG)\n",
|
||||
"\n",
|
||||
"print(\"ezkl version: \", ezkl.__version__)"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -176,6 +236,7 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import getpass\n",
|
||||
"# make an input.json file from the df above\n",
|
||||
"input_filename = os.path.join('input.json')\n",
|
||||
"\n",
|
||||
@@ -183,9 +244,9 @@
|
||||
" \"host\": \"localhost\",\n",
|
||||
" # make sure you replace this with your own username\n",
|
||||
" \"user\": getpass.getuser(),\n",
|
||||
" \"dbname\": \"e2pg\",\n",
|
||||
" \"dbname\": \"shovel\",\n",
|
||||
" \"password\": \"\",\n",
|
||||
" \"query\": \"SELECT value FROM erc20_transfers ORDER BY block_number DESC LIMIT 5\",\n",
|
||||
" \"query\": \"SELECT v FROM usdc ORDER BY block_num DESC LIMIT 5\",\n",
|
||||
" \"port\": \"5432\",\n",
|
||||
"})\n",
|
||||
"\n",
|
||||
@@ -194,7 +255,7 @@
|
||||
"\n",
|
||||
"\n",
|
||||
" # Serialize data into file:\n",
|
||||
"json.dump( pg_input_file, open(input_filename, 'w' ))\n"
|
||||
"json.dump(pg_input_file, open(input_filename, 'w' ))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -210,9 +271,9 @@
|
||||
" \"host\": \"localhost\",\n",
|
||||
" # make sure you replace this with your own username\n",
|
||||
" \"user\": getpass.getuser(),\n",
|
||||
" \"dbname\": \"e2pg\",\n",
|
||||
" \"dbname\": \"shovel\",\n",
|
||||
" \"password\": \"\",\n",
|
||||
" \"query\": \"SELECT value FROM erc20_transfers ORDER BY block_number DESC LIMIT 20\",\n",
|
||||
" \"query\": \"SELECT v FROM usdc ORDER BY block_num DESC LIMIT 20\",\n",
|
||||
" \"port\": \"5432\",\n",
|
||||
"})\n",
|
||||
"\n",
|
||||
@@ -229,22 +290,6 @@
|
||||
"**EZKL Workflow**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"onnx_filename = os.path.join('lol.onnx')\n",
|
||||
"compiled_filename = os.path.join('lol.compiled')\n",
|
||||
"settings_filename = os.path.join('settings.json')\n",
|
||||
"\n",
|
||||
"ezkl.gen_settings(onnx_filename, settings_filename)\n",
|
||||
"\n",
|
||||
"ezkl.calibrate_settings(\n",
|
||||
" input_filename, onnx_filename, settings_filename, \"resources\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
@@ -253,10 +298,21 @@
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# setup kzg params\n",
|
||||
"params_path = os.path.join('kzg.params')\n",
|
||||
"import subprocess\n",
|
||||
"import os\n",
|
||||
"\n",
|
||||
"res = ezkl.get_srs(params_path, settings_filename)"
|
||||
"onnx_filename = os.path.join('lol.onnx')\n",
|
||||
"compiled_filename = os.path.join('lol.compiled')\n",
|
||||
"settings_filename = os.path.join('settings.json')\n",
|
||||
"\n",
|
||||
"# Generate settings using ezkl\n",
|
||||
"res = ezkl.gen_settings(onnx_filename, settings_filename)\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"res = ezkl.calibrate_settings(input_filename, onnx_filename, settings_filename, \"resources\")\n",
|
||||
"\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -306,16 +362,13 @@
|
||||
"source": [
|
||||
"pk_path = os.path.join('test.pk')\n",
|
||||
"vk_path = os.path.join('test.vk')\n",
|
||||
"params_path = os.path.join('kzg.params')\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"# setup the proof\n",
|
||||
"res = ezkl.setup(\n",
|
||||
" compiled_filename,\n",
|
||||
" vk_path,\n",
|
||||
" pk_path,\n",
|
||||
" params_path,\n",
|
||||
" settings_filename,\n",
|
||||
" pk_path\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
@@ -331,11 +384,14 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"\n",
|
||||
"witness_path = \"witness.json\"\n",
|
||||
"\n",
|
||||
"res = ezkl.gen_witness(input_filename, compiled_filename, witness_path)\n",
|
||||
"assert os.path.isfile(witness_path)"
|
||||
"# generate the witness\n",
|
||||
"res = ezkl.gen_witness(\n",
|
||||
" input_filename,\n",
|
||||
" compiled_filename,\n",
|
||||
" witness_path\n",
|
||||
" )\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -360,73 +416,14 @@
|
||||
" compiled_filename,\n",
|
||||
" pk_path,\n",
|
||||
" proof_path,\n",
|
||||
" params_path,\n",
|
||||
" \"single\",\n",
|
||||
" \"single\"\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"print(\"proved\")\n",
|
||||
"\n",
|
||||
"assert os.path.isfile(proof_path)\n",
|
||||
"\n",
|
||||
"# verify\n",
|
||||
"res = ezkl.verify(\n",
|
||||
" proof_path,\n",
|
||||
" settings_filename,\n",
|
||||
" vk_path,\n",
|
||||
" params_path,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"print(\"verified\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "W7tAa-DFAtvS"
|
||||
},
|
||||
"source": [
|
||||
"# Part 2 (Using the ZK Computational Graph Onchain!)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"id": "8Ym91kaVAIB6"
|
||||
},
|
||||
"source": [
|
||||
"**Now How Do We Do It Onchain?????**"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
"height": 339
|
||||
},
|
||||
"id": "fodkNgwS70FM",
|
||||
"outputId": "827b5efd-f74f-44de-c114-861b3a86daf2"
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# first we need to create evm verifier\n",
|
||||
"print(vk_path)\n",
|
||||
"print(params_path)\n",
|
||||
"print(settings_filename)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"abi_path = 'test.abi'\n",
|
||||
"sol_code_path = 'test.sol'\n",
|
||||
"\n",
|
||||
"res = ezkl.create_evm_verifier(\n",
|
||||
" vk_path,\n",
|
||||
" params_path,\n",
|
||||
" settings_filename,\n",
|
||||
" sol_code_path,\n",
|
||||
" abi_path,\n",
|
||||
" )\n",
|
||||
"assert res == True"
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -435,51 +432,8 @@
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Make sure anvil is running locally first\n",
|
||||
"# run with $ anvil -p 3030\n",
|
||||
"# we use the default anvil node here\n",
|
||||
"import json\n",
|
||||
"\n",
|
||||
"address_path = os.path.join(\"address.json\")\n",
|
||||
"\n",
|
||||
"res = ezkl.deploy_evm(\n",
|
||||
" address_path,\n",
|
||||
" sol_code_path,\n",
|
||||
" 'http://127.0.0.1:3030'\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"assert res == True\n",
|
||||
"\n",
|
||||
"with open(address_path, 'r') as file:\n",
|
||||
" addr = file.read().rstrip()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# read the address from addr_path\n",
|
||||
"addr = None\n",
|
||||
"with open(address_path, 'r') as f:\n",
|
||||
" addr = f.read()\n",
|
||||
"\n",
|
||||
"res = ezkl.verify_evm(\n",
|
||||
" addr,\n",
|
||||
" proof_path,\n",
|
||||
" \"http://127.0.0.1:3030\"\n",
|
||||
")\n",
|
||||
"assert res == True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"os.system(\"killall -9 e2pg\");"
|
||||
"# kill all shovel process \n",
|
||||
"os.system(\"pkill -f shovel\")"
|
||||
]
|
||||
}
|
||||
],
|
||||
@@ -501,7 +455,7 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
"version": "3.12.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
|
||||
examples/onnx/lstm_medium/input.json (new file): 9 lines

@@ -0,0 +1,9 @@
{
  "input_data": [
    [
      1.514470100402832, 1.519423007965088, 1.5182757377624512,
      1.5262789726257324, 1.5298409461975098
    ]
  ],
  "output_data": [[-0.1862019]]
}

New binary file (not shown): examples/onnx/lstm_medium/network.onnx
@@ -1,6 +1,6 @@
 {
   "name": "@ezkljs/verify",
-  "version": "0.0.0",
+  "version": "v10.4.2",
   "publishConfig": {
     "access": "public"
   },
@@ -20,16 +20,16 @@
     "build": "npm run clean && npm run build:commonjs && npm run build:esm"
   },
   "dependencies": {
-    "@ethereumjs/common": "^4.0.0",
-    "@ethereumjs/evm": "^2.0.0",
-    "@ethereumjs/statemanager": "^2.0.0",
-    "@ethereumjs/tx": "^5.0.0",
-    "@ethereumjs/util": "^9.0.0",
-    "@ethereumjs/vm": "^7.0.0",
-    "@ethersproject/abi": "^5.7.0",
-    "@ezkljs/engine": "^9.4.4",
-    "ethers": "^6.7.1",
-    "json-bigint": "^1.0.0"
+    "@ethereumjs/common": "4.0.0",
+    "@ethereumjs/evm": "2.0.0",
+    "@ethereumjs/statemanager": "2.0.0",
+    "@ethereumjs/tx": "5.0.0",
+    "@ethereumjs/util": "9.0.0",
+    "@ethereumjs/vm": "7.0.0",
+    "@ethersproject/abi": "5.7.0",
+    "@ezkljs/engine": "10.4.2",
+    "ethers": "6.7.1",
+    "json-bigint": "1.0.0"
   },
   "devDependencies": {
     "@types/node": "^20.8.3",
in-browser-evm-verifier/pnpm-lock.yaml (generated): 26 lines changed

@@ -6,34 +6,34 @@ settings:

 dependencies:
   '@ethereumjs/common':
-    specifier: ^4.0.0
+    specifier: 4.0.0
     version: 4.0.0
   '@ethereumjs/evm':
-    specifier: ^2.0.0
+    specifier: 2.0.0
     version: 2.0.0
   '@ethereumjs/statemanager':
-    specifier: ^2.0.0
+    specifier: 2.0.0
     version: 2.0.0
   '@ethereumjs/tx':
-    specifier: ^5.0.0
+    specifier: 5.0.0
     version: 5.0.0
   '@ethereumjs/util':
-    specifier: ^9.0.0
+    specifier: 9.0.0
     version: 9.0.0
   '@ethereumjs/vm':
-    specifier: ^7.0.0
+    specifier: 7.0.0
     version: 7.0.0
   '@ethersproject/abi':
-    specifier: ^5.7.0
+    specifier: 5.7.0
     version: 5.7.0
   '@ezkljs/engine':
-    specifier: ^9.4.4
-    version: 9.4.4
+    specifier: "10.4.2"
+    version: "10.4.2"
   ethers:
-    specifier: ^6.7.1
+    specifier: 6.7.1
     version: 6.7.1
   json-bigint:
-    specifier: ^1.0.0
+    specifier: 1.0.0
     version: 1.0.0

 devDependencies:
@@ -397,8 +397,8 @@ packages:
     '@ethersproject/strings': 5.7.0
     dev: false

-  /@ezkljs/engine@9.4.4:
-    resolution: {integrity: sha512-kNsTmDQa8mIiQ6yjJmBMwVgAAxh4nfs4NCtnewJifonyA8Mfhs+teXwwW8WhERRDoQPUofKO2pT8BPvV/XGIDA==}
+  /@ezkljs/engine@10.4.2:
+    resolution: {integrity: "sha512-1GNB4vChbaQ1ALcYbEbM/AFoh4QWtswpzGCO/g9wL8Ep6NegM2gQP/uWICU7Utl0Lj1DncXomD7PUhFSXhtx8A=="}
     dependencies:
       '@types/json-bigint': 1.0.2
       json-bigint: 1.0.0
@@ -36,7 +36,7 @@ if [ "$(which ezkl)s" != "s" ] && [ "$(which ezkl)" != "$EZKL_DIR/ezkl" ] ; then
     exit 1
 fi

-if [[ ":$PATH:" != *":${EZKl_DIR}:"* ]]; then
+if [[ ":$PATH:" != *":${EZKL_DIR}:"* ]]; then
     # Add the ezkl directory to the path and ensure the old PATH variables remain.
     echo >> $PROFILE && echo "export PATH=\"\$PATH:$EZKL_DIR\"" >> $PROFILE
 fi
@@ -44,12 +44,11 @@ impl PolyCommitChip {
     /// Commit to the message using the KZG commitment scheme
     pub fn commit<Scheme: CommitmentScheme<Scalar = Fp, Curve = G1Affine>>(
         message: Vec<Scheme::Scalar>,
-        degree: u32,
         num_unusable_rows: u32,
         params: &Scheme::ParamsProver,
     ) -> Vec<G1Affine> {
         let k = params.k();
-        let domain = halo2_proofs::poly::EvaluationDomain::new(degree, k);
+        let domain = halo2_proofs::poly::EvaluationDomain::new(2, k);
         let n = 2_u64.pow(k) - num_unusable_rows as u64;
         let num_poly = (message.len() / n as usize) + 1;
         let mut poly = vec![domain.empty_lagrange(); num_poly];
@@ -24,7 +24,7 @@ use crate::{
         table::{Range, RangeCheck, Table},
         utils,
     },
-    tensor::{Tensor, TensorType, ValTensor, VarTensor},
+    tensor::{IntoI64, Tensor, TensorType, ValTensor, VarTensor},
 };
 use std::{collections::BTreeMap, error::Error, marker::PhantomData};

@@ -345,7 +345,7 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
     _marker: PhantomData<F>,
 }

-impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
+impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> BaseConfig<F> {
     /// Returns a new [BaseConfig] with no inputs, no selectors, and no tables.
     pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
         Self {
|
||||
use super::*;
|
||||
use crate::{
|
||||
circuit::{layouts, utils, Tolerance},
|
||||
fieldutils::i128_to_felt,
|
||||
fieldutils::i64_to_felt,
|
||||
graph::multiplier_to_scale,
|
||||
tensor::{self, Tensor, TensorType, ValTensor},
|
||||
};
|
||||
@@ -71,7 +71,7 @@ pub enum HybridOp {
|
||||
},
|
||||
}
|
||||
|
||||
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for HybridOp {
|
||||
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for HybridOp {
|
||||
///
|
||||
fn requires_homogenous_input_scales(&self) -> Vec<usize> {
|
||||
match self {
|
||||
@@ -184,8 +184,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
config,
|
||||
region,
|
||||
values[..].try_into()?,
|
||||
i128_to_felt(input_scale.0 as i128),
|
||||
i128_to_felt(output_scale.0 as i128),
|
||||
i64_to_felt(input_scale.0 as i64),
|
||||
i64_to_felt(output_scale.0 as i64),
|
||||
)?
|
||||
} else {
|
||||
layouts::nonlinearity(
|
||||
@@ -209,7 +209,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
|
||||
config,
|
||||
region,
|
||||
values[..].try_into()?,
|
||||
i128_to_felt(denom.0 as i128),
|
||||
i64_to_felt(denom.0 as i64),
|
||||
)?
|
||||
} else {
|
||||
layouts::nonlinearity(
|
||||
|
||||
(File diff suppressed because it is too large.)
@@ -4,9 +4,9 @@ use std::error::Error;

 use crate::{
     circuit::{layouts, table::Range, utils},
-    fieldutils::{felt_to_i128, i128_to_felt},
+    fieldutils::{felt_to_i64, i64_to_felt},
     graph::multiplier_to_scale,
-    tensor::{self, Tensor, TensorError, TensorType},
+    tensor::{self, IntoI64, Tensor, TensorError, TensorType},
 };

 use super::Op;
@@ -132,16 +132,16 @@ impl LookupOp {
     /// Returns the range of values that can be represented by the table
     pub fn bit_range(max_len: usize) -> Range {
         let range = (max_len - 1) as f64 / 2_f64;
-        let range = range as i128;
+        let range = range as i64;
         (-range, range)
     }

     /// Matches a [Op] to an operation in the `tensor::ops` module.
-    pub(crate) fn f<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
+    pub(crate) fn f<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64>(
         &self,
         x: &[Tensor<F>],
     ) -> Result<ForwardResult<F>, TensorError> {
-        let x = x[0].clone().map(|x| felt_to_i128(x));
+        let x = x[0].clone().map(|x| felt_to_i64(x));
         let res = match &self {
             LookupOp::Abs => Ok(tensor::ops::abs(&x)?),
             LookupOp::Ceil { scale } => Ok(tensor::ops::nonlinearities::ceil(&x, scale.into())),
@@ -228,13 +228,13 @@ impl LookupOp {
             }
         }?;

-        let output = res.map(|x| i128_to_felt(x));
+        let output = res.map(|x| i64_to_felt(x));

         Ok(ForwardResult { output })
     }
 }

-impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for LookupOp {
+impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for LookupOp {
     /// Returns a reference to the Any trait.
     fn as_any(&self) -> &dyn Any {
         self
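Note: `bit_range` now works in `i64`. A standalone sketch of the same arithmetic, assuming (as the diff suggests) that `Range` is simply an `(i64, i64)` pair:

```rust
type Range = (i64, i64);

// A lookup table with `max_len` rows can represent values symmetrically
// around zero; this mirrors the bit_range arithmetic shown in the diff.
fn bit_range(max_len: usize) -> Range {
    let range = (max_len - 1) as f64 / 2_f64;
    let range = range as i64;
    (-range, range)
}

fn main() {
    // A 2^17-row table covers roughly +/- 65535.
    assert_eq!(bit_range(1 << 17), (-65535, 65535));
}
```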
@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};

use crate::{
graph::quantize_tensor,
tensor::{self, Tensor, TensorType, ValTensor},
tensor::{self, IntoI64, Tensor, TensorType, ValTensor},
};
use halo2curves::ff::PrimeField;

@@ -27,12 +27,12 @@ pub mod region;

/// A struct representing the result of a forward pass.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> {
pub(crate) output: Tensor<F>,
}

/// A trait representing operations that can be represented as constraints in a circuit.
pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>:
pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64>:
std::fmt::Debug + Send + Sync + Any
{
/// Returns a string representation of the operation.
@@ -71,7 +71,7 @@ pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>:
fn as_any(&self) -> &dyn Any;
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Clone for Box<dyn Op<F>> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Clone for Box<dyn Op<F>> {
fn clone(&self) -> Self {
self.clone_dyn()
}
@@ -122,8 +122,8 @@ impl InputType {
*input = T::from_f64(f64_input).unwrap();
}
InputType::Int | InputType::TDim => {
let int_input = input.clone().to_i128().unwrap();
*input = T::from_i128(int_input).unwrap();
let int_input = input.clone().to_i64().unwrap();
*input = T::from_i64(int_input).unwrap();
}
}
}
@@ -138,7 +138,7 @@ pub struct Input {
pub datum_type: InputType,
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for Input {
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
Ok(self.scale)
}
@@ -193,7 +193,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Unknown;

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Unknown {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for Unknown {
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
Ok(0)
}
@@ -220,7 +220,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Unknow

///
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> {
///
pub quantized_values: Tensor<F>,
///
@@ -230,7 +230,7 @@ pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
pub pre_assigned_val: Option<ValTensor<F>>,
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Constant<F> {
///
pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>) -> Self {
Self {
@@ -263,7 +263,8 @@ impl<
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>,
+ for<'de> Deserialize<'de>
+ IntoI64,
> Op<F> for Constant<F>
{
fn as_any(&self) -> &dyn Any {
@@ -97,7 +97,8 @@ impl<
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>,
+ for<'de> Deserialize<'de>
+ IntoI64,
> Op<F> for PolyOp
{
/// Returns a reference to the Any trait.
@@ -9,7 +9,7 @@ use halo2_proofs::{
plonk::{Error, Selector},
};
use halo2curves::ff::PrimeField;
use portable_atomic::AtomicI128 as AtomicInt;
use portable_atomic::AtomicI64 as AtomicInt;
use std::{
cell::RefCell,
collections::{HashMap, HashSet},
@@ -133,10 +133,11 @@ pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
shuffle_index: ShuffleIndex,
used_lookups: HashSet<LookupOp>,
used_range_checks: HashSet<Range>,
max_lookup_inputs: i128,
min_lookup_inputs: i128,
max_range_size: i128,
max_lookup_inputs: i64,
min_lookup_inputs: i64,
max_range_size: i64,
witness_gen: bool,
check_lookup_range: bool,
assigned_constants: ConstantsMap<F>,
}

@@ -191,6 +192,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
self.witness_gen
}

///
pub fn check_lookup_range(&self) -> bool {
self.check_lookup_range
}

/// Create a new region context
pub fn new(region: Region<'a, F>, row: usize, num_inner_cols: usize) -> RegionCtx<'a, F> {
let region = Some(RefCell::new(region));
@@ -209,6 +215,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen: true,
check_lookup_range: true,
assigned_constants: HashMap::new(),
}
}
@@ -246,12 +253,18 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen: false,
check_lookup_range: false,
assigned_constants: HashMap::new(),
}
}

/// Create a new region context
pub fn new_dummy(row: usize, num_inner_cols: usize, witness_gen: bool) -> RegionCtx<'a, F> {
pub fn new_dummy(
row: usize,
num_inner_cols: usize,
witness_gen: bool,
check_lookup_range: bool,
) -> RegionCtx<'a, F> {
let region = None;
let linear_coord = row * num_inner_cols;

@@ -268,6 +281,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen,
check_lookup_range,
assigned_constants: HashMap::new(),
}
}
@@ -278,6 +292,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
linear_coord: usize,
num_inner_cols: usize,
witness_gen: bool,
check_lookup_range: bool,
) -> RegionCtx<'a, F> {
let region = None;
RegionCtx {
@@ -293,6 +308,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen,
check_lookup_range,
assigned_constants: HashMap::new(),
}
}
@@ -364,6 +380,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
starting_linear_coord,
self.num_inner_cols,
self.witness_gen,
self.check_lookup_range,
);
let res = inner_loop_function(idx, &mut local_reg);
// we update the offset and constants
@@ -546,17 +563,17 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
}

/// max lookup inputs
pub fn max_lookup_inputs(&self) -> i128 {
pub fn max_lookup_inputs(&self) -> i64 {
self.max_lookup_inputs
}

/// min lookup inputs
pub fn min_lookup_inputs(&self) -> i128 {
pub fn min_lookup_inputs(&self) -> i64 {
self.min_lookup_inputs
}

/// max range check
pub fn max_range_size(&self) -> i128 {
pub fn max_range_size(&self) -> i64 {
self.max_range_size
}

@@ -11,17 +11,17 @@ use maybe_rayon::prelude::{IntoParallelIterator, ParallelIterator};

use crate::{
circuit::CircuitError,
fieldutils::i128_to_felt,
tensor::{Tensor, TensorType},
fieldutils::i64_to_felt,
tensor::{IntoI64, Tensor, TensorType},
};

use crate::circuit::lookup::LookupOp;

/// The range of the lookup table.
pub type Range = (i128, i128);
pub type Range = (i64, i64);

/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: i128 = 2;
pub const RANGE_MULTIPLIER: i64 = 2;
/// The safety factor offset for the number of rows in the lookup table.
pub const RESERVED_BLINDING_ROWS_PAD: usize = 3;

@@ -96,21 +96,21 @@ pub struct Table<F: PrimeField> {
_marker: PhantomData<F>,
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<F> {
/// get column index given input
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
let chunk =
(crate::fieldutils::felt_to_i128(input) - self.range.0).abs() / (self.col_size as i128);
(crate::fieldutils::felt_to_i64(input) - self.range.0).abs() / (self.col_size as i64);

i128_to_felt(chunk)
i64_to_felt(chunk)
}

/// get first_element of column
pub fn get_first_element(&self, chunk: usize) -> (F, F) {
let chunk = chunk as i128;
let chunk = chunk as i64;
// we index from 1 to prevent soundness issues
let first_element = i128_to_felt(chunk * (self.col_size as i128) + self.range.0);
let first_element = i64_to_felt(chunk * (self.col_size as i64) + self.range.0);
let op_f = self
.nonlinearity
.f(&[Tensor::from(vec![first_element].into_iter())])
@@ -130,12 +130,12 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
}

///
pub fn num_cols_required(range_len: i128, col_size: usize) -> usize {
pub fn num_cols_required(range_len: i64, col_size: usize) -> usize {
// number of cols needed to store the range
(range_len / (col_size as i128)) as usize + 1
(range_len / (col_size as i64)) as usize + 1
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<F> {
/// Configures the table.
pub fn configure(
cs: &mut ConstraintSystem<F>,
@@ -202,7 +202,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
let smallest = self.range.0;
let largest = self.range.1;

let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i128_to_felt(x));
let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i64_to_felt(x));
let evals = self.nonlinearity.f(&[inputs.clone()])?;
let chunked_inputs = inputs.chunks(self.col_size);

@@ -272,12 +272,12 @@ pub struct RangeCheck<F: PrimeField> {
_marker: PhantomData<F>,
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> RangeCheck<F> {
/// get first_element of column
pub fn get_first_element(&self, chunk: usize) -> F {
let chunk = chunk as i128;
let chunk = chunk as i64;
// we index from 1 to prevent soundness issues
i128_to_felt(chunk * (self.col_size as i128) + self.range.0)
i64_to_felt(chunk * (self.col_size as i64) + self.range.0)
}

///
@@ -294,13 +294,13 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
let chunk =
(crate::fieldutils::felt_to_i128(input) - self.range.0).abs() / (self.col_size as i128);
(crate::fieldutils::felt_to_i64(input) - self.range.0).abs() / (self.col_size as i64);

i128_to_felt(chunk)
i64_to_felt(chunk)
}
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> RangeCheck<F> {
/// Configures the table.
pub fn configure(cs: &mut ConstraintSystem<F>, range: Range, logrows: usize) -> RangeCheck<F> {
log::debug!("range check range: {:?}", range);
@@ -350,7 +350,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
let smallest = self.range.0;
let largest = self.range.1;

let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i128_to_felt(x));
let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i64_to_felt(x));
let chunked_inputs = inputs.chunks(self.col_size);

self.is_assigned = true;

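As an illustrative aside (not part of the patch): the column-chunking arithmetic above assigns a lookup input to a table column by measuring its offset from the start of the range and dividing by the column size. A minimal standalone sketch of that index calculation, using plain i64 values rather than the field-element wrappers the crate works with (function name and inputs here are hypothetical):

    // Hypothetical sketch of the chunk-index math mirrored by Table::get_col_index.
    fn col_index(input: i64, range: (i64, i64), col_size: usize) -> i64 {
        // distance from the start of the table range, split into columns of `col_size` rows
        (input - range.0).abs() / (col_size as i64)
    }

    fn main() {
        // with a range of (-8, 8) and columns of 4 rows, input 5 lands in column 3
        assert_eq!(col_index(5, (-8, 8), 4), 3);
    }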
@@ -345,7 +345,7 @@ pub enum Commands {
target: CalibrationTarget,
/// the lookup safety margin to use for calibration. if the max lookup is 2^k, then the max lookup will be 2^k * lookup_safety_margin. larger = safer but slower
#[arg(long, default_value = DEFAULT_LOOKUP_SAFETY_MARGIN)]
lookup_safety_margin: i128,
lookup_safety_margin: i64,
/// Optional scales to specifically try for calibration. Example, --scales 0,4
#[arg(long, value_delimiter = ',', allow_hyphen_values = true)]
scales: Option<Vec<crate::Scale>>,

@@ -424,7 +424,7 @@ pub async fn setup_test_contract<M: 'static + Middleware>(
let input = input.to_float() as f32;
let decimal_places = count_decimal_places(input) as u8;
let scaled_by_decimals = input * f32::powf(10., decimal_places.into());
scaled_by_decimals_data.push(I256::from(scaled_by_decimals as i128));
scaled_by_decimals_data.push(I256::from(scaled_by_decimals as i64));
decimals.push(decimal_places);
} else if input.is_field() {
let input = input.to_field(0);

@@ -196,7 +196,6 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
vk_path,
srs_path,
} => gen_witness(compiled_circuit, data, Some(output), vk_path, srs_path)
.await
.map(|e| serde_json::to_string(&e).unwrap()),
Commands::Mock { model, witness } => mock(model, witness),
#[cfg(not(target_arch = "wasm32"))]
@@ -637,7 +636,7 @@ pub(crate) fn table(model: PathBuf, run_args: RunArgs) -> Result<String, Box<dyn
Ok(String::new())
}

pub(crate) async fn gen_witness(
pub(crate) fn gen_witness(
compiled_circuit_path: PathBuf,
data: PathBuf,
output: Option<PathBuf>,
@@ -660,7 +659,7 @@ pub(crate) async fn gen_witness(
};

#[cfg(not(target_arch = "wasm32"))]
let mut input = circuit.load_graph_input(&data).await?;
let mut input = circuit.load_graph_input(&data)?;
#[cfg(target_arch = "wasm32")]
let mut input = circuit.load_graph_input(&data)?;

@@ -683,6 +682,7 @@ pub(crate) async fn gen_witness(
vk.as_ref(),
Some(&srs),
true,
true,
)?
}
Commitments::IPA => {
@@ -697,15 +697,22 @@ pub(crate) async fn gen_witness(
vk.as_ref(),
Some(&srs),
true,
true,
)?
}
}
} else {
warn!("SRS for poly commit does not exist (will be ignored)");
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, true)?
circuit.forward::<KZGCommitmentScheme<Bn256>>(
&mut input,
vk.as_ref(),
None,
true,
true,
)?
}
} else {
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, true)?
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, true, true)?
};

// print each variable tuple (symbol, value) as symbol=value
@@ -888,7 +895,7 @@ pub(crate) fn calibrate(
data: PathBuf,
settings_path: PathBuf,
target: CalibrationTarget,
lookup_safety_margin: i128,
lookup_safety_margin: i64,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
only_range_check_rebase: bool,
@@ -1004,6 +1011,7 @@ pub(crate) fn calibrate(
param_scale,
scale_rebase_multiplier,
div_rebasing,
lookup_range: (i64::MIN, i64::MAX),
..settings.run_args.clone()
};

@@ -1039,7 +1047,13 @@ pub(crate) fn calibrate(
.map_err(|e| format!("failed to load circuit inputs: {}", e))?;

let forward_res = circuit
.forward::<KZGCommitmentScheme<Bn256>>(&mut data.clone(), None, None, true)
.forward::<KZGCommitmentScheme<Bn256>>(
&mut data.clone(),
None,
None,
true,
false,
)
.map_err(|e| format!("failed to forward: {}", e))?;

// push result to the hashmap
@@ -1054,7 +1068,7 @@ pub(crate) fn calibrate(

match forward_res {
Ok(_) => (),
// typically errors will be due to the circuit overflowing the i128 limit
// typically errors will be due to the circuit overflowing the i64 limit
Err(e) => {
error!("forward pass failed: {:?}", e);
pb.inc(1);
@@ -1229,22 +1243,14 @@ pub(crate) fn calibrate(
);

if matches!(target, CalibrationTarget::Resources { col_overflow: true }) {
let lookup_log_rows = ((best_params.run_args.lookup_range.1
- best_params.run_args.lookup_range.0) as f32)
.log2()
.ceil() as u32
+ 1;
let mut reduction = std::cmp::max(
(best_params
.model_instance_shapes
.iter()
.map(|x| x.iter().product::<usize>())
.sum::<usize>() as f32)
.log2()
.ceil() as u32
+ 1,
lookup_log_rows,
);
let lookup_log_rows = best_params.lookup_log_rows_with_blinding();
let module_log_row = best_params.module_constraint_logrows_with_blinding();
let instance_logrows = best_params.log2_total_instances_with_blinding();
let dynamic_lookup_logrows = best_params.dynamic_lookup_and_shuffle_logrows_with_blinding();

let mut reduction = std::cmp::max(lookup_log_rows, module_log_row);
reduction = std::cmp::max(reduction, instance_logrows);
reduction = std::cmp::max(reduction, dynamic_lookup_logrows);
reduction = std::cmp::max(reduction, crate::graph::MIN_LOGROWS);

info!(

@@ -11,8 +11,8 @@ pub fn i32_to_felt<F: PrimeField>(x: i32) -> F {
}
}

/// Converts an i128 to a PrimeField element.
pub fn i128_to_felt<F: PrimeField>(x: i128) -> F {
/// Converts an i64 to a PrimeField element.
pub fn i64_to_felt<F: PrimeField>(x: i64) -> F {
if x >= 0 {
F::from_u128(x as u128)
} else {
@@ -37,7 +37,7 @@ pub fn felt_to_i32<F: PrimeField + PartialOrd + Field>(x: F) -> i32 {

/// Converts a PrimeField element to an f64.
pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
if x > F::from_u128(i128::MAX as u128) {
if x > F::from_u128(i64::MAX as u128) {
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(negtmp[..16].try_into().unwrap());
@@ -50,18 +50,18 @@ pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
}
}

/// Converts a PrimeField element to an i128.
pub fn felt_to_i128<F: PrimeField + PartialOrd + Field>(x: F) -> i128 {
if x > F::from_u128(i128::MAX as u128) {
/// Converts a PrimeField element to an i64.
pub fn felt_to_i64<F: PrimeField + PartialOrd + Field>(x: F) -> i64 {
if x > F::from_u128(i64::MAX as u128) {
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(negtmp[..16].try_into().unwrap());
-(lower_128 as i128)
-(lower_128 as i64)
} else {
let rep = (x).to_repr();
let tmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(tmp[..16].try_into().unwrap());
lower_128 as i128
lower_128 as i64
}
}

@@ -79,10 +79,10 @@ mod test {
let res: F = i32_to_felt(2_i32.pow(17));
assert_eq!(res, F::from(131072));

let res: F = i128_to_felt(-15i128);
let res: F = i64_to_felt(-15i64);
assert_eq!(res, -F::from(15));

let res: F = i128_to_felt(2_i128.pow(17));
let res: F = i64_to_felt(2_i64.pow(17));
assert_eq!(res, F::from(131072));
}

@@ -96,10 +96,10 @@ mod test {
}

#[test]
fn felttoi128() {
for x in -(2i128.pow(20))..(2i128.pow(20)) {
let fieldx: F = i128_to_felt::<F>(x);
let xf: i128 = felt_to_i128::<F>(fieldx);
fn felttoi64() {
for x in -(2i64.pow(20))..(2i64.pow(20)) {
let fieldx: F = i64_to_felt::<F>(x);
let xf: i64 = felt_to_i64::<F>(fieldx);
assert_eq!(x, xf);
}
}

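For context (an illustrative sketch, not part of the patch): the conversion pair above is expected to round-trip for values whose magnitude fits in the low limb of the field element, which is exactly what the renamed felttoi64 test exercises. A hedged usage sketch, assuming the fieldutils helpers are exported from the crate as shown and using the bn256 scalar field:

    // Hypothetical round-trip check mirroring the felttoi64 test above.
    use halo2curves::bn256::Fr;

    fn main() {
        for x in [-15i64, 0, 131072] {
            // negative values wrap to p - |x| in the field ...
            let f: Fr = ezkl::fieldutils::i64_to_felt(x);
            // ... and map back as long as |x| stays below i64::MAX
            assert_eq!(ezkl::fieldutils::felt_to_i64::<Fr>(f), x);
        }
    }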
@@ -1,7 +1,7 @@
use super::quantize_float;
use super::GraphError;
use crate::circuit::InputType;
use crate::fieldutils::i128_to_felt;
use crate::fieldutils::i64_to_felt;
#[cfg(not(target_arch = "wasm32"))]
use crate::tensor::Tensor;
use crate::EZKL_BUF_CAPACITY;
@@ -21,8 +21,6 @@ use std::io::BufWriter;
use std::io::Read;
use std::panic::UnwindSafe;
#[cfg(not(target_arch = "wasm32"))]
use std::thread;
#[cfg(not(target_arch = "wasm32"))]
use tract_onnx::tract_core::{
tract_data::{prelude::Tensor as TractTensor, TVec},
value::TValue,
@@ -130,7 +128,7 @@ impl FileSourceInner {
/// Convert to a field element
pub fn to_field(&self, scale: crate::Scale) -> Fp {
match self {
FileSourceInner::Float(f) => i128_to_felt(quantize_float(f, 0.0, scale).unwrap()),
FileSourceInner::Float(f) => i64_to_felt(quantize_float(f, 0.0, scale).unwrap()),
FileSourceInner::Bool(f) => {
if *f {
Fp::one()
@@ -152,7 +150,7 @@ impl FileSourceInner {
0.0
}
}
FileSourceInner::Field(f) => crate::fieldutils::felt_to_i128(*f) as f64,
FileSourceInner::Field(f) => crate::fieldutils::felt_to_i64(*f) as f64,
}
}
}
@@ -234,21 +232,15 @@ impl PostgresSource {
)
};

let res: Vec<pg_bigdecimal::PgNumeric> = thread::spawn(move || {
let mut client = Client::connect(&config, NoTls).unwrap();
let mut res: Vec<pg_bigdecimal::PgNumeric> = Vec::new();
// extract rows from query
for row in client.query(&query, &[]).unwrap() {
// extract features from row
for i in 0..row.len() {
res.push(row.get(i));
}
let mut client = Client::connect(&config, NoTls)?;
let mut res: Vec<pg_bigdecimal::PgNumeric> = Vec::new();
// extract rows from query
for row in client.query(&query, &[])? {
// extract features from row
for i in 0..row.len() {
res.push(row.get(i));
}
res
})
.join()
.map_err(|_| "failed to fetch data from postgres")?;

}
Ok(vec![res])
}

@@ -62,13 +62,13 @@ pub use vars::*;
use crate::pfsys::field_to_string;

/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: i128 = 2;
pub const RANGE_MULTIPLIER: i64 = 2;

/// The maximum number of columns in a lookup table.
pub const MAX_NUM_LOOKUP_COLS: usize = 12;

/// Max representation of a lookup table input
pub const MAX_LOOKUP_ABS: i128 = (MAX_NUM_LOOKUP_COLS as i128) * 2_i128.pow(MAX_PUBLIC_SRS);
pub const MAX_LOOKUP_ABS: i64 = (MAX_NUM_LOOKUP_COLS as i64) * 2_i64.pow(MAX_PUBLIC_SRS);

#[cfg(not(target_arch = "wasm32"))]
lazy_static! {
@@ -175,11 +175,11 @@ pub struct GraphWitness {
/// Any hashes of outputs generated during the forward pass
pub processed_outputs: Option<ModuleForwardResult>,
/// max lookup input
pub max_lookup_inputs: i128,
pub max_lookup_inputs: i64,
/// max lookup input
pub min_lookup_inputs: i128,
pub min_lookup_inputs: i64,
/// max range check size
pub max_range_size: i128,
pub max_range_size: i64,
}

impl GraphWitness {
@@ -483,7 +483,22 @@ pub struct GraphSettings {
}

impl GraphSettings {
fn model_constraint_logrows(&self) -> u32 {
/// Calc the number of rows required for lookup tables
pub fn lookup_log_rows(&self) -> u32 {
((self.run_args.lookup_range.1 - self.run_args.lookup_range.0) as f32)
.log2()
.ceil() as u32
}

/// Calc the number of rows required for lookup tables
pub fn lookup_log_rows_with_blinding(&self) -> u32 {
((self.run_args.lookup_range.1 - self.run_args.lookup_range.0) as f32
+ RESERVED_BLINDING_ROWS as f32)
.log2()
.ceil() as u32
}

fn model_constraint_logrows_with_blinding(&self) -> u32 {
(self.num_rows as f64 + RESERVED_BLINDING_ROWS as f64)
.log2()
.ceil() as u32
@@ -495,14 +510,31 @@ impl GraphSettings {
.ceil() as u32
}

/// calculate the number of rows required for the dynamic lookup and shuffle
pub fn dynamic_lookup_and_shuffle_logrows_with_blinding(&self) -> u32 {
(self.total_dynamic_col_size as f64
+ self.total_shuffle_col_size as f64
+ RESERVED_BLINDING_ROWS as f64)
.log2()
.ceil() as u32
}

fn dynamic_lookup_and_shuffle_col_size(&self) -> usize {
self.total_dynamic_col_size + self.total_shuffle_col_size
}

fn module_constraint_logrows(&self) -> u32 {
/// calculate the number of rows required for the module constraints
pub fn module_constraint_logrows(&self) -> u32 {
(self.module_sizes.max_constraints() as f64).log2().ceil() as u32
}

/// calculate the number of rows required for the module constraints
pub fn module_constraint_logrows_with_blinding(&self) -> u32 {
(self.module_sizes.max_constraints() as f64 + RESERVED_BLINDING_ROWS as f64)
.log2()
.ceil() as u32
}

fn constants_logrows(&self) -> u32 {
(self.total_const_size as f64 / self.run_args.num_inner_cols as f64)
.log2()
@@ -529,6 +561,14 @@ impl GraphSettings {
std::cmp::max((sum as f64).log2().ceil() as u32, 1)
}

/// calculate the log2 of the total number of instances
pub fn log2_total_instances_with_blinding(&self) -> u32 {
let sum = self.total_instances().iter().sum::<usize>() + RESERVED_BLINDING_ROWS;

// max between 1 and the log2 of the sums
std::cmp::max((sum as f64).log2().ceil() as u32, 1)
}

/// save params to file
pub fn save(&self, path: &std::path::PathBuf) -> Result<(), std::io::Error> {
// buf writer
@@ -918,7 +958,7 @@ impl GraphCircuit {

///
#[cfg(not(target_arch = "wasm32"))]
pub async fn load_graph_input(
pub fn load_graph_input(
&mut self,
data: &GraphData,
) -> Result<Vec<Tensor<Fp>>, Box<dyn std::error::Error>> {
@@ -928,7 +968,6 @@ impl GraphCircuit {
debug!("input scales: {:?}", scales);

self.process_data_source(&data.input_data, shapes, scales, input_types)
.await
}

#[cfg(target_arch = "wasm32")]
@@ -952,7 +991,7 @@ impl GraphCircuit {

#[cfg(not(target_arch = "wasm32"))]
/// Process the data source for the model
async fn process_data_source(
fn process_data_source(
&mut self,
data: &DataSource,
shapes: Vec<Vec<usize>>,
@@ -965,8 +1004,16 @@ impl GraphCircuit {
for (i, shape) in shapes.iter().enumerate() {
per_item_scale.extend(vec![scales[i]; shape.iter().product::<usize>()]);
}
self.load_on_chain_data(source.clone(), &shapes, per_item_scale)
.await

// start runtime and fetch data
let runtime = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()?;

runtime.block_on(async {
self.load_on_chain_data(source.clone(), &shapes, per_item_scale)
.await
})
}
DataSource::File(file_data) => {
self.load_file_data(file_data, &shapes, scales, input_types)
@@ -1051,14 +1098,14 @@ impl GraphCircuit {
Ok(data)
}

fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: i128) -> Range {
fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: i64) -> Range {
(
lookup_safety_margin * min_max_lookup.0,
lookup_safety_margin * min_max_lookup.1,
)
}

fn calc_num_cols(range_len: i128, max_logrows: u32) -> usize {
fn calc_num_cols(range_len: i64, max_logrows: u32) -> usize {
let max_col_size = Table::<Fp>::cal_col_size(max_logrows as usize, RESERVED_BLINDING_ROWS);
num_cols_required(range_len, max_col_size)
}
@@ -1066,7 +1113,7 @@ impl GraphCircuit {
fn table_size_logrows(
&self,
safe_lookup_range: Range,
max_range_size: i128,
max_range_size: i64,
) -> Result<u32, Box<dyn std::error::Error>> {
// pick the range with the largest absolute size safe_lookup_range or max_range_size
let safe_range = std::cmp::max(
@@ -1085,9 +1132,9 @@ impl GraphCircuit {
pub fn calc_min_logrows(
&mut self,
min_max_lookup: Range,
max_range_size: i128,
max_range_size: i64,
max_logrows: Option<u32>,
lookup_safety_margin: i128,
lookup_safety_margin: i64,
) -> Result<(), Box<dyn std::error::Error>> {
// load the max logrows
let max_logrows = max_logrows.unwrap_or(MAX_PUBLIC_SRS);
@@ -1126,7 +1173,7 @@ impl GraphCircuit {
);

// These are upper limits, going above these is wasteful, but they are not hard limits
let model_constraint_logrows = self.settings().model_constraint_logrows();
let model_constraint_logrows = self.settings().model_constraint_logrows_with_blinding();
let min_bits = self.table_size_logrows(safe_lookup_range, max_range_size)?;
let constants_logrows = self.settings().constants_logrows();
max_logrows = std::cmp::min(
@@ -1181,7 +1228,7 @@ impl GraphCircuit {
&self,
k: u32,
safe_lookup_range: Range,
max_range_size: i128,
max_range_size: i64,
) -> bool {
// if num cols is too large then the extended k is too large
if Self::calc_num_cols(safe_lookup_range.1 - safe_lookup_range.0, k) > MAX_NUM_LOOKUP_COLS
@@ -1240,6 +1287,7 @@ impl GraphCircuit {
vk: Option<&VerifyingKey<G1Affine>>,
srs: Option<&Scheme::ParamsProver>,
witness_gen: bool,
check_lookup: bool,
) -> Result<GraphWitness, Box<dyn std::error::Error>> {
let original_inputs = inputs.to_vec();

@@ -1288,7 +1336,7 @@ impl GraphCircuit {

let mut model_results =
self.model()
.forward(inputs, &self.settings().run_args, witness_gen)?;
.forward(inputs, &self.settings().run_args, witness_gen, check_lookup)?;

if visibility.output.requires_processing() {
let module_outlets = visibility.output.overwrites_inputs();

@@ -65,11 +65,11 @@ pub struct ForwardResult {
/// The outputs of the forward pass.
pub outputs: Vec<Tensor<Fp>>,
/// The maximum value of any input to a lookup operation.
pub max_lookup_inputs: i128,
pub max_lookup_inputs: i64,
/// The minimum value of any input to a lookup operation.
pub min_lookup_inputs: i128,
pub min_lookup_inputs: i64,
/// The max range check size
pub max_range_size: i128,
pub max_range_size: i64,
}

impl From<DummyPassRes> for ForwardResult {
@@ -117,11 +117,11 @@ pub struct DummyPassRes {
/// range checks
pub range_checks: HashSet<Range>,
/// max lookup inputs
pub max_lookup_inputs: i128,
pub max_lookup_inputs: i64,
/// min lookup inputs
pub min_lookup_inputs: i128,
pub min_lookup_inputs: i64,
/// min range check
pub max_range_size: i128,
pub max_range_size: i64,
/// outputs
pub outputs: Vec<Tensor<Fp>>,
}
@@ -538,7 +538,7 @@ impl Model {
})
.collect::<Result<Vec<_>, Box<dyn Error>>>()?;

let res = self.dummy_layout(run_args, &inputs, false)?;
let res = self.dummy_layout(run_args, &inputs, false, false)?;

// if we're using percentage tolerance, we need to add the necessary range check ops for it.

@@ -582,12 +582,13 @@ impl Model {
model_inputs: &[Tensor<Fp>],
run_args: &RunArgs,
witness_gen: bool,
check_lookup: bool,
) -> Result<ForwardResult, Box<dyn Error>> {
let valtensor_inputs: Vec<ValTensor<Fp>> = model_inputs
.iter()
.map(|x| x.map(|elem| ValType::Value(Value::known(elem))).into())
.collect();
let res = self.dummy_layout(run_args, &valtensor_inputs, witness_gen)?;
let res = self.dummy_layout(run_args, &valtensor_inputs, witness_gen, check_lookup)?;
Ok(res.into())
}

@@ -1392,6 +1393,7 @@ impl Model {
run_args: &RunArgs,
inputs: &[ValTensor<Fp>],
witness_gen: bool,
check_lookup: bool,
) -> Result<DummyPassRes, Box<dyn Error>> {
debug!("calculating num of constraints using dummy model layout...");

@@ -1410,7 +1412,8 @@ impl Model {
vars: ModelVars::new_dummy(),
};

let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols, witness_gen);
let mut region =
RegionCtx::new_dummy(0, run_args.num_inner_cols, witness_gen, check_lookup);

let outputs = self.layout_nodes(&mut model_config, &mut region, &mut results)?;

@@ -314,7 +314,6 @@ impl GraphModules {
let commitments = inputs.iter().fold(vec![], |mut acc, x| {
let res = PolyCommitChip::commit::<Scheme>(
x.to_vec(),
vk.cs().degree() as u32,
(vk.cs().blinding_factors() + 1) as u32,
srs,
);

@@ -52,16 +52,16 @@ use tract_onnx::tract_hir::{
/// * `dims` - the dimensionality of the resulting [Tensor].
/// * `shift` - offset used in the fixed point representation.
/// * `scale` - `2^scale` used in the fixed point representation.
pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i128, TensorError> {
pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i64, TensorError> {
let mult = scale_to_multiplier(scale);
let max_value = ((i128::MAX as f64 - shift) / mult).round(); // the maximum value that can be represented w/o sig bit truncation
let max_value = ((i64::MAX as f64 - shift) / mult).round(); // the maximum value that can be represented w/o sig bit truncation

if *elem > max_value {
return Err(TensorError::SigBitTruncationError);
}

// we parallelize the quantization process as it seems to be quite slow at times
let scaled = (mult * *elem + shift).round() as i128;
let scaled = (mult * *elem + shift).round() as i64;

Ok(scaled)
}
@@ -72,7 +72,7 @@ pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i12
/// * `scale` - `2^scale` used in the fixed point representation.
/// * `shift` - offset used in the fixed point representation.
pub fn dequantize(felt: Fp, scale: crate::Scale, shift: f64) -> f64 {
let int_rep = crate::fieldutils::felt_to_i128(felt);
let int_rep = crate::fieldutils::felt_to_i64(felt);
let multiplier = scale_to_multiplier(scale);
int_rep as f64 / multiplier - shift
}
@@ -1475,7 +1475,7 @@ pub fn quantize_tensor<F: PrimeField + TensorType + PartialOrd>(
visibility: &Visibility,
) -> Result<Tensor<F>, Box<dyn std::error::Error>> {
let mut value: Tensor<F> = const_value.par_enum_map(|_, x| {
Ok::<_, TensorError>(crate::fieldutils::i128_to_felt::<F>(quantize_float(
Ok::<_, TensorError>(crate::fieldutils::i64_to_felt::<F>(quantize_float(
&(x).into(),
0.0,
scale,

15 src/lib.rs
@@ -23,6 +23,7 @@
)]
// we allow this for our dynamic range based indexing scheme
#![allow(clippy::single_range_in_vec_init)]
#![feature(stmt_expr_attributes)]

//! A library for turning computational graphs, such as neural networks, into ZK-circuits.
//!
@@ -189,7 +190,7 @@ pub struct RunArgs {
#[arg(long, default_value = "1")]
pub scale_rebase_multiplier: u32,
/// The min and max elements in the lookup table input column
#[arg(short = 'B', long, value_parser = parse_key_val::<i128, i128>, default_value = "-32768->32768")]
#[arg(short = 'B', long, value_parser = parse_key_val::<i64, i64>, default_value = "-32768->32768")]
pub lookup_range: Range,
/// The log_2 number of rows
#[arg(short = 'K', long, default_value = "17")]
@@ -200,13 +201,13 @@ pub struct RunArgs {
/// Hand-written parser for graph variables, eg. batch_size=1
#[arg(short = 'V', long, value_parser = parse_key_val::<String, usize>, default_value = "batch_size->1", value_delimiter = ',')]
pub variables: Vec<(String, usize)>,
/// Flags whether inputs are public, private, hashed
/// Flags whether inputs are public, private, hashed, fixed, kzgcommit
#[arg(long, default_value = "private")]
pub input_visibility: Visibility,
/// Flags whether outputs are public, private, hashed
/// Flags whether outputs are public, private, fixed, hashed, kzgcommit
#[arg(long, default_value = "public")]
pub output_visibility: Visibility,
/// Flags whether params are public, private, hashed
/// Flags whether params are fixed, private, hashed, kzgcommit
#[arg(long, default_value = "private")]
pub param_visibility: Visibility,
#[arg(long, default_value = "false")]
@@ -248,6 +249,12 @@ impl Default for RunArgs {
impl RunArgs {
///
pub fn validate(&self) -> Result<(), Box<dyn std::error::Error>> {
if self.param_visibility == Visibility::Public {
return Err(
"params cannot be public instances, you are probably trying to use `fixed` or `kzgcommit`"
.into(),
);
}
if self.scale_rebase_multiplier < 1 {
return Err("scale_rebase_multiplier must be >= 1".into());
}

@@ -6,7 +6,7 @@ use crate::circuit::modules::poseidon::{
use crate::circuit::modules::Module;
use crate::circuit::{CheckMode, Tolerance};
use crate::commands::*;
use crate::fieldutils::{felt_to_i128, i128_to_felt};
use crate::fieldutils::{felt_to_i64, i64_to_felt};
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::TestDataSource;
use crate::graph::{
@@ -332,9 +332,9 @@ fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
#[pyfunction(signature = (
felt,
))]
fn felt_to_int(felt: PyFelt) -> PyResult<i128> {
fn felt_to_int(felt: PyFelt) -> PyResult<i64> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let int_rep = felt_to_i128(felt);
let int_rep = felt_to_i64(felt);
Ok(int_rep)
}

@@ -358,7 +358,7 @@ fn felt_to_int(felt: PyFelt) -> PyResult<i128> {
))]
fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let int_rep = felt_to_i128(felt);
let int_rep = felt_to_i64(felt);
let multiplier = scale_to_multiplier(scale);
let float_rep = int_rep as f64 / multiplier;
Ok(float_rep)
@@ -386,7 +386,7 @@ fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
let int_rep = quantize_float(&input, 0.0, scale)
.map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
let felt = i128_to_felt(int_rep);
let felt = i64_to_felt(int_rep);
Ok(crate::pfsys::field_to_string::<Fr>(&felt))
}

@@ -547,7 +547,6 @@ fn kzg_commit(

let output = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
message,
vk.cs().degree() as u32,
(vk.cs().blinding_factors() + 1) as u32,
&srs,
);
@@ -606,7 +605,6 @@ fn ipa_commit(

let output = PolyCommitChip::commit::<IPACommitmentScheme<G1Affine>>(
message,
vk.cs().degree() as u32,
(vk.cs().blinding_factors() + 1) as u32,
&srs,
);
@@ -891,7 +889,7 @@ fn calibrate_settings(
model: PathBuf,
settings: PathBuf,
target: CalibrationTarget,
lookup_safety_margin: i128,
lookup_safety_margin: i64,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
max_logrows: Option<u32>,
@@ -942,7 +940,7 @@ fn calibrate_settings(
///
#[pyfunction(signature = (
data=PathBuf::from(DEFAULT_DATA),
model=PathBuf::from(DEFAULT_MODEL),
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
output=PathBuf::from(DEFAULT_WITNESS),
vk_path=None,
srs_path=None,
@@ -954,12 +952,8 @@ fn gen_witness(
vk_path: Option<PathBuf>,
srs_path: Option<PathBuf>,
) -> PyResult<PyObject> {
let output = Runtime::new()
.unwrap()
.block_on(crate::execute::gen_witness(
model, data, output, vk_path, srs_path,
))
.map_err(|e| {
let output =
crate::execute::gen_witness(model, data, output, vk_path, srs_path).map_err(|e| {
let err_str = format!("Failed to run generate witness: {}", e);
PyRuntimeError::new_err(err_str)
})?;
@@ -982,7 +976,7 @@ fn gen_witness(
///
#[pyfunction(signature = (
witness=PathBuf::from(DEFAULT_WITNESS),
model=PathBuf::from(DEFAULT_MODEL),
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
))]
fn mock(witness: PathBuf, model: PathBuf) -> PyResult<bool> {
crate::execute::mock(model, witness).map_err(|e| {
@@ -1054,7 +1048,7 @@ fn mock_aggregate(
/// bool
///
#[pyfunction(signature = (
model=PathBuf::from(DEFAULT_MODEL),
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
vk_path=PathBuf::from(DEFAULT_VK),
pk_path=PathBuf::from(DEFAULT_PK),
srs_path=None,
@@ -1113,7 +1107,7 @@ fn setup(
///
#[pyfunction(signature = (
witness=PathBuf::from(DEFAULT_WITNESS),
model=PathBuf::from(DEFAULT_MODEL),
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
pk_path=PathBuf::from(DEFAULT_PK),
proof_path=None,
proof_type=ProofType::default(),
@@ -1172,26 +1166,21 @@ fn prove(
settings_path=PathBuf::from(DEFAULT_SETTINGS),
vk_path=PathBuf::from(DEFAULT_VK),
srs_path=None,
non_reduced_srs=DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION.parse::<bool>().unwrap(),
reduced_srs=DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION.parse::<bool>().unwrap(),
))]
fn verify(
proof_path: PathBuf,
settings_path: PathBuf,
vk_path: PathBuf,
srs_path: Option<PathBuf>,
non_reduced_srs: bool,
reduced_srs: bool,
) -> Result<bool, PyErr> {
crate::execute::verify(
proof_path,
settings_path,
vk_path,
srs_path,
non_reduced_srs,
)
.map_err(|e| {
let err_str = format!("Failed to run verify: {}", e);
PyRuntimeError::new_err(err_str)
})?;
crate::execute::verify(proof_path, settings_path, vk_path, srs_path, reduced_srs).map_err(
|e| {
let err_str = format!("Failed to run verify: {}", e);
PyRuntimeError::new_err(err_str)
},
)?;

Ok(true)
}

BIN src/tensor/metal/tensor_ops.air Normal file
Binary file not shown.
31 src/tensor/metal/tensor_ops.metal Normal file
@@ -0,0 +1,31 @@
[[kernel]]
void add(
constant long *inA [[buffer(0)]],
constant long *inB [[buffer(1)]],
device long *result [[buffer(2)]],
uint index [[thread_position_in_grid]])
{
result[index] = inA[index] + inB[index];
}

[[kernel]]
void sub(
constant long *inA [[buffer(0)]],
constant long *inB [[buffer(1)]],
device long *result [[buffer(2)]],
uint index [[thread_position_in_grid]])
{
result[index] = inA[index] - inB[index];
}

[[kernel]]
void mul(
constant long *inA [[buffer(0)]],
constant long *inB [[buffer(1)]],
device long *result [[buffer(2)]],
uint index [[thread_position_in_grid]])
{
result[index] = inA[index] * inB[index];
}

BIN src/tensor/metal/tensor_ops.metallib Normal file
Binary file not shown.
@@ -5,7 +5,7 @@ pub mod val;
|
||||
/// A wrapper around a tensor of Halo2 Value types.
|
||||
pub mod var;
|
||||
|
||||
use halo2curves::ff::PrimeField;
|
||||
use halo2curves::{bn256::Fr, ff::PrimeField};
|
||||
use maybe_rayon::{
|
||||
prelude::{
|
||||
IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator,
|
||||
@@ -17,9 +17,12 @@ use serde::{Deserialize, Serialize};
|
||||
pub use val::*;
|
||||
pub use var::*;
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
use instant::Instant;
|
||||
|
||||
use crate::{
|
||||
circuit::utils,
|
||||
fieldutils::{felt_to_i32, i128_to_felt, i32_to_felt},
|
||||
fieldutils::{felt_to_i32, felt_to_i64, i32_to_felt, i64_to_felt},
|
||||
graph::Visibility,
|
||||
};
|
||||
|
||||
@@ -30,12 +33,18 @@ use halo2_proofs::{
|
||||
poly::Rotation,
|
||||
};
|
||||
use itertools::Itertools;
|
||||
#[cfg(feature = "metal")]
|
||||
use metal::{Device, MTLResourceOptions, MTLSize};
|
||||
use std::error::Error;
|
||||
use std::fmt::Debug;
|
||||
use std::iter::Iterator;
|
||||
use std::ops::{Add, Deref, DerefMut, Div, Mul, Neg, Range, Sub};
|
||||
use std::{cmp::max, ops::Rem};
|
||||
use thiserror::Error;
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// A wrapper for tensor related errors.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum TensorError {
|
||||
@@ -65,6 +74,28 @@ pub enum TensorError {
|
||||
Overflow(String),
|
||||
}
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
const LIB_DATA: &[u8] = include_bytes!("metal/tensor_ops.metallib");
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
lazy_static::lazy_static! {
|
||||
static ref DEVICE: Device = Device::system_default().expect("no device found");
|
||||
|
||||
static ref LIB: metal::Library = DEVICE.new_library_with_data(LIB_DATA).unwrap();
|
||||
|
||||
static ref QUEUE: metal::CommandQueue = DEVICE.new_command_queue();
|
||||
|
||||
static ref PIPELINES: HashMap<String, metal::ComputePipelineState> = {
|
||||
let mut map = HashMap::new();
|
||||
for name in ["add", "sub", "mul"] {
|
||||
let function = LIB.get_function(name, None).unwrap();
|
||||
let pipeline = DEVICE.new_compute_pipeline_state_with_function(&function).unwrap();
|
||||
map.insert(name.to_string(), pipeline);
|
||||
}
|
||||
map
|
||||
};
|
||||
}
|
||||
|
||||
/// The (inner) type of tensor elements.
|
||||
pub trait TensorType: Clone + Debug + 'static {
|
||||
/// Returns the zero value.
|
||||
@@ -145,7 +176,7 @@ impl TensorType for f64 {
|
||||
}
|
||||
|
||||
tensor_type!(bool, Bool, false, true);
|
||||
tensor_type!(i128, Int128, 0, 1);
|
||||
tensor_type!(i64, Int64, 0, 1);
|
||||
tensor_type!(i32, Int32, 0, 1);
|
||||
tensor_type!(usize, USize, 0, 1);
|
||||
tensor_type!((), Empty, (), ());
|
||||
@@ -311,6 +342,94 @@ impl<T: TensorType> DerefMut for Tensor<T> {
|
||||
self.inner.deref_mut()
|
||||
}
|
||||
}
|
||||
/// Convert to i64 trait
|
||||
pub trait IntoI64 {
|
||||
/// Convert to i64
|
||||
fn into_i64(self) -> i64;
|
||||
|
||||
/// From i64
|
||||
fn from_i64(i: i64) -> Self;
|
||||
}
|
||||
|
||||
impl IntoI64 for i64 {
|
||||
fn into_i64(self) -> i64 {
|
||||
self
|
||||
}
|
||||
fn from_i64(i: i64) -> i64 {
|
||||
i
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoI64 for i32 {
|
||||
fn into_i64(self) -> i64 {
|
||||
self as i64
|
||||
}
|
||||
fn from_i64(i: i64) -> Self {
|
||||
i as i32
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoI64 for usize {
|
||||
fn into_i64(self) -> i64 {
|
||||
self as i64
|
||||
}
|
||||
fn from_i64(i: i64) -> Self {
|
||||
i as usize
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoI64 for f32 {
|
||||
fn into_i64(self) -> i64 {
|
||||
self as i64
|
||||
}
|
||||
fn from_i64(i: i64) -> Self {
|
||||
i as f32
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoI64 for f64 {
|
||||
fn into_i64(self) -> i64 {
|
||||
self as i64
|
||||
}
|
||||
fn from_i64(i: i64) -> Self {
|
||||
i as f64
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoI64 for () {
|
||||
fn into_i64(self) -> i64 {
|
||||
0
|
||||
}
|
||||
fn from_i64(_: i64) -> Self {
|
||||
()
|
||||
}
|
||||
}
|
||||
|
||||
impl IntoI64 for Fr {
|
||||
fn into_i64(self) -> i64 {
|
||||
felt_to_i64(self)
|
||||
}
|
||||
fn from_i64(i: i64) -> Self {
|
||||
i64_to_felt::<Fr>(i)
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: PrimeField + IntoI64> IntoI64 for Value<F> {
|
||||
fn into_i64(self) -> i64 {
|
||||
let mut res = vec![];
|
||||
self.map(|x| res.push(x.into_i64()));
|
||||
|
||||
if res.len() == 0 {
|
||||
0
|
||||
} else {
|
||||
res[0]
|
||||
}
|
||||
}
|
||||
|
||||
fn from_i64(i: i64) -> Self {
|
||||
Value::known(F::from_i64(i))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: PartialEq + TensorType> PartialEq for Tensor<T> {
|
||||
fn eq(&self, other: &Tensor<T>) -> bool {
|
||||
@@ -427,10 +546,10 @@ impl<F: PrimeField + TensorType + Clone> From<Tensor<i32>> for Tensor<Value<F>>
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: PrimeField + TensorType + Clone> From<Tensor<i128>> for Tensor<Value<F>> {
|
||||
fn from(t: Tensor<i128>) -> Tensor<Value<F>> {
|
||||
impl<F: PrimeField + TensorType + Clone> From<Tensor<i64>> for Tensor<Value<F>> {
|
||||
fn from(t: Tensor<i64>) -> Tensor<Value<F>> {
|
||||
let mut ta: Tensor<Value<F>> =
|
||||
Tensor::from((0..t.len()).map(|i| Value::known(i128_to_felt::<F>(t[i]))));
|
||||
Tensor::from((0..t.len()).map(|i| Value::known(i64_to_felt::<F>(t[i]))));
|
||||
// safe to unwrap as we know the dims are correct
|
||||
ta.reshape(t.dims()).unwrap();
|
||||
ta
|
||||
@@ -1217,6 +1336,97 @@ impl<T: Clone + TensorType> Tensor<T> {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "metal")]
|
||||
#[allow(unsafe_code)]
|
||||
/// Perform a tensor operation on the GPU using Metal.
|
||||
pub fn metal_tensor_op<T: Clone + TensorType + IntoI64 + Send + Sync>(
|
||||
v: &Tensor<T>,
|
||||
w: &Tensor<T>,
|
||||
op: &str,
|
||||
) -> Tensor<T> {
|
||||
assert_eq!(v.dims(), w.dims());
|
||||
|
||||
log::trace!("------------------------------------------------");
|
||||
|
||||
let start = Instant::now();
|
||||
let v = v
|
||||
.par_enum_map(|_, x| Ok::<_, TensorError>(x.into_i64()))
|
||||
.unwrap();
|
||||
let w = w
|
||||
.par_enum_map(|_, x| Ok::<_, TensorError>(x.into_i64()))
|
||||
.unwrap();
|
||||
log::trace!("Time to map tensors: {:?}", start.elapsed());
|
||||
|
||||
objc::rc::autoreleasepool(|| {
|
||||
// create function pipeline.
|
||||
// this compiles the function, so a pipline can't be created in performance sensitive code.
|
||||
|
||||
let pipeline = &PIPELINES[op];
|
||||
|
||||
let length = v.len() as u64;
|
||||
let size = length * core::mem::size_of::<i64>() as u64;
|
||||
assert_eq!(v.len(), w.len());
|
||||
|
||||
let start = Instant::now();
|
||||
|
||||
let buffer_a = DEVICE.new_buffer_with_data(
|
||||
unsafe { std::mem::transmute(v.as_ptr()) },
|
||||
size,
|
||||
MTLResourceOptions::StorageModeShared,
|
||||
);
|
||||
let buffer_b = DEVICE.new_buffer_with_data(
|
||||
unsafe { std::mem::transmute(w.as_ptr()) },
|
||||
size,
|
||||
MTLResourceOptions::StorageModeShared,
|
||||
);
|
||||
let buffer_result = DEVICE.new_buffer(
|
||||
size, // the operation will return an array with the same size.
|
||||
MTLResourceOptions::StorageModeShared,
|
||||
);
|
||||
|
||||
log::trace!("Time to load buffers: {:?}", start.elapsed());
|
||||
|
||||
// for sending commands, a command buffer is needed.
|
||||
let start = Instant::now();
|
||||
let command_buffer = QUEUE.new_command_buffer();
|
||||
log::trace!("Time to load command buffer: {:?}", start.elapsed());
|
||||
// to write commands into a buffer an encoder is needed, in our case a compute encoder.
|
||||
let start = Instant::now();
|
||||
let compute_encoder = command_buffer.new_compute_command_encoder();
|
||||
compute_encoder.set_compute_pipeline_state(&pipeline);
|
||||
compute_encoder.set_buffers(
|
||||
0,
|
||||
&[Some(&buffer_a), Some(&buffer_b), Some(&buffer_result)],
|
||||
&[0; 3],
|
||||
);
|
||||
log::trace!("Time to load compute encoder: {:?}", start.elapsed());
|
||||
|
||||
// specify thread count and organization
|
||||
let start = Instant::now();
|
||||
let grid_size = MTLSize::new(length, 1, 1);
|
||||
let threadgroup_size = MTLSize::new(length, 1, 1);
|
||||
compute_encoder.dispatch_threads(grid_size, threadgroup_size);
|
||||
log::trace!("Time to dispatch threads: {:?}", start.elapsed());
|
||||
|
||||
// end encoding and execute commands
|
||||
let start = Instant::now();
|
||||
compute_encoder.end_encoding();
|
||||
command_buffer.commit();
|
||||
|
||||
command_buffer.wait_until_completed();
|
||||
log::trace!("Time to commit: {:?}", start.elapsed());
|
||||
|
||||
let start = Instant::now();
|
||||
let ptr = buffer_result.contents() as *const i64;
|
||||
let len = buffer_result.length() as usize / std::mem::size_of::<i64>();
|
||||
let slice = unsafe { core::slice::from_raw_parts(ptr, len) };
|
||||
let res = Tensor::new(Some(&slice.to_vec()), &v.dims()).unwrap();
|
||||
log::trace!("Time to get result: {:?}", start.elapsed());
|
||||
|
||||
res.map(|x| T::from_i64(x))
|
||||
})
|
||||
}

impl<T: Clone + TensorType> Tensor<Tensor<T>> {
    /// Flattens a tensor of tensors
    /// ```
@@ -1238,7 +1448,9 @@ impl<T: Clone + TensorType> Tensor<Tensor<T>> {
    }
}

impl<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sync> Add for Tensor<T> {
impl<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sync + IntoI64> Add
    for Tensor<T>
{
    type Output = Result<Tensor<T>, TensorError>;
    /// Adds tensors.
    /// # Arguments
@@ -1288,14 +1500,24 @@ impl<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sync> Ad
    /// ```
    fn add(self, rhs: Self) -> Self::Output {
        let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
        let mut lhs = self.expand(&broadcasted_shape).unwrap();
        let lhs = self.expand(&broadcasted_shape).unwrap();
        let rhs = rhs.expand(&broadcasted_shape).unwrap();

        lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
            *o = o.clone() + r;
        });
        #[cfg(feature = "metal")]
        let res = metal_tensor_op(&lhs, &rhs, "add");

        Ok(lhs)
        #[cfg(not(feature = "metal"))]
        let res = {
            let mut res: Tensor<T> = lhs
                .par_iter()
                .zip(rhs)
                .map(|(o, r)| o.clone() + r)
                .collect();
            res.reshape(&broadcasted_shape).unwrap();
            res
        };

        Ok(res)
    }
}
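
As a reading aid, a hedged sketch of the broadcasting path the `add` implementation above goes through; the shapes and values are invented for illustration, and it assumes `i64` satisfies the new `IntoI64` bound and that `expand` handles the [2, 1] -> [2, 2] broadcast that `get_broadcasted_shape` produces.

// Hedged sketch, not taken from the crate's doctests.
let a = Tensor::<i64>::new(Some(&[1, 2, 3, 4]), &[2, 2]).unwrap();
let b = Tensor::<i64>::new(Some(&[10, 20]), &[2, 1]).unwrap();
// rhs is expanded to the broadcasted shape [2, 2] before the elementwise sum
let c = (a + b).unwrap();
assert_eq!(c, Tensor::new(Some(&[11, 12, 23, 24]), &[2, 2]).unwrap());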

@@ -1318,6 +1540,7 @@ impl<T: TensorType + Neg<Output = T> + std::marker::Send + std::marker::Sync> Ne
    /// ```
    fn neg(self) -> Self {
        let mut output = self;

        output.par_iter_mut().for_each(|x| {
            *x = x.clone().neg();
        });
@@ -1325,7 +1548,9 @@ impl<T: TensorType + Neg<Output = T> + std::marker::Send + std::marker::Sync> Ne
    }
}

impl<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sync> Sub for Tensor<T> {
impl<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sync + IntoI64> Sub
    for Tensor<T>
{
    type Output = Result<Tensor<T>, TensorError>;
    /// Subtracts tensors.
    /// # Arguments
@@ -1376,18 +1601,30 @@ impl<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sync> Su
    /// ```
    fn sub(self, rhs: Self) -> Self::Output {
        let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
        let mut lhs = self.expand(&broadcasted_shape).unwrap();
        let lhs = self.expand(&broadcasted_shape).unwrap();
        let rhs = rhs.expand(&broadcasted_shape).unwrap();

        lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
            *o = o.clone() - r;
        });
        #[cfg(feature = "metal")]
        let res = metal_tensor_op(&lhs, &rhs, "sub");

        Ok(lhs)
        #[cfg(not(feature = "metal"))]
        let res = {
            let mut res: Tensor<T> = lhs
                .par_iter()
                .zip(rhs)
                .map(|(o, r)| o.clone() - r)
                .collect();
            res.reshape(&broadcasted_shape).unwrap();
            res
        };

        Ok(res)
    }
}

impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync> Mul for Tensor<T> {
impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync + IntoI64> Mul
    for Tensor<T>
{
    type Output = Result<Tensor<T>, TensorError>;
    /// Elementwise multiplies tensors.
    /// # Arguments
@@ -1436,18 +1673,28 @@ impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync> Mu
    /// ```
    fn mul(self, rhs: Self) -> Self::Output {
        let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
        let mut lhs = self.expand(&broadcasted_shape).unwrap();
        let lhs = self.expand(&broadcasted_shape).unwrap();
        let rhs = rhs.expand(&broadcasted_shape).unwrap();

        lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
            *o = o.clone() * r;
        });
        #[cfg(feature = "metal")]
        let res = metal_tensor_op(&lhs, &rhs, "mul");

        Ok(lhs)
        #[cfg(not(feature = "metal"))]
        let res = {
            let mut res: Tensor<T> = lhs
                .par_iter()
                .zip(rhs)
                .map(|(o, r)| o.clone() * r)
                .collect();
            res.reshape(&broadcasted_shape).unwrap();
            res
        };

        Ok(res)
    }
}

impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync> Tensor<T> {
impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync + IntoI64> Tensor<T> {
    /// Elementwise raise a tensor to the nth power.
    /// # Arguments
    ///
@@ -1661,4 +1908,66 @@ mod tests {
        let b = Tensor::<i32>::new(Some(&[1, 4]), &[2, 1]).unwrap();
        assert_eq!(a.get_slice(&[0..2, 0..1]).unwrap(), b);
    }

    #[test]
    #[cfg(feature = "metal")]
    fn tensor_metal_int() {
        let a = Tensor::<i64>::new(Some(&[1, 2, 3, 4]), &[2, 2]).unwrap();
        let b = Tensor::<i64>::new(Some(&[1, 2, 3, 4]), &[2, 2]).unwrap();
        let c = metal_tensor_op(&a, &b, "add");
        assert_eq!(c, Tensor::new(Some(&[2, 4, 6, 8]), &[2, 2]).unwrap());

        let c = metal_tensor_op(&a, &b, "sub");
        assert_eq!(c, Tensor::new(Some(&[0, 0, 0, 0]), &[2, 2]).unwrap());

        let c = metal_tensor_op(&a, &b, "mul");
        assert_eq!(c, Tensor::new(Some(&[1, 4, 9, 16]), &[2, 2]).unwrap());
    }

    #[test]
    #[cfg(feature = "metal")]
    fn tensor_metal_felt() {
        use halo2curves::bn256::Fr;

        let a = Tensor::<Fr>::new(
            Some(&[Fr::from(1), Fr::from(2), Fr::from(3), Fr::from(4)]),
            &[2, 2],
        )
        .unwrap();
        let b = Tensor::<Fr>::new(
            Some(&[Fr::from(1), Fr::from(2), Fr::from(3), Fr::from(4)]),
            &[2, 2],
        )
        .unwrap();

        let c = metal_tensor_op(&a, &b, "add");
        assert_eq!(
            c,
            Tensor::<Fr>::new(
                Some(&[Fr::from(2), Fr::from(4), Fr::from(6), Fr::from(8)]),
                &[2, 2],
            )
            .unwrap()
        );

        let c = metal_tensor_op(&a, &b, "sub");
        assert_eq!(
            c,
            Tensor::<Fr>::new(
                Some(&[Fr::from(0), Fr::from(0), Fr::from(0), Fr::from(0)]),
                &[2, 2],
            )
            .unwrap()
        );

        let c = metal_tensor_op(&a, &b, "mul");
        assert_eq!(
            c,
            Tensor::<Fr>::new(
                Some(&[Fr::from(1), Fr::from(4), Fr::from(9), Fr::from(16)]),
                &[2, 2],
            )
            .unwrap()
        );
    }
}
File diff suppressed because it is too large
@@ -316,9 +316,9 @@ impl<F: PrimeField + TensorType + PartialOrd> From<Tensor<AssignedCell<F, F>>> f
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
    /// Allocate a new [ValTensor::Value] from the given [Tensor] of [i128].
    pub fn from_i128_tensor(t: Tensor<i128>) -> ValTensor<F> {
        let inner = t.map(|x| ValType::Value(Value::known(i128_to_felt(x))));
    /// Allocate a new [ValTensor::Value] from the given [Tensor] of [i64].
    pub fn from_i64_tensor(t: Tensor<i64>) -> ValTensor<F> {
        let inner = t.map(|x| ValType::Value(Value::known(i64_to_felt(x))));
        inner.into()
    }
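
A minimal sketch of the renamed constructor, assuming `Fr` (bn256) for the field parameter and illustrative values:

// Hedged sketch: `use halo2curves::bn256::Fr;` is assumed to be in scope.
let ints = Tensor::<i64>::new(Some(&[1, -2, 3]), &[3]).unwrap();
let vals: ValTensor<Fr> = ValTensor::from_i64_tensor(ints);
assert_eq!(vals.dims(), &[3]);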

@@ -521,9 +521,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
    }

    /// Calls `int_evals` on the inner tensor.
    pub fn get_int_evals(&self) -> Result<Tensor<i128>, Box<dyn Error>> {
    pub fn get_int_evals(&self) -> Result<Tensor<i64>, Box<dyn Error>> {
        // finally convert to vector of integers
        let mut integer_evals: Vec<i128> = vec![];
        let mut integer_evals: Vec<i64> = vec![];
        match self {
            ValTensor::Value {
                inner: v, dims: _, ..
@@ -531,25 +531,25 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
                // we have to push to an externally created vector or else vaf.map() returns an evaluation wrapped in Value<> (which we don't want)
                let _ = v.map(|vaf| match vaf {
                    ValType::Value(v) => v.map(|f| {
                        integer_evals.push(crate::fieldutils::felt_to_i128(f));
                        integer_evals.push(crate::fieldutils::felt_to_i64(f));
                    }),
                    ValType::AssignedValue(v) => v.map(|f| {
                        integer_evals.push(crate::fieldutils::felt_to_i128(f.evaluate()));
                        integer_evals.push(crate::fieldutils::felt_to_i64(f.evaluate()));
                    }),
                    ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
                        v.value_field().map(|f| {
                            integer_evals.push(crate::fieldutils::felt_to_i128(f.evaluate()));
                            integer_evals.push(crate::fieldutils::felt_to_i64(f.evaluate()));
                        })
                    }
                    ValType::Constant(v) => {
                        integer_evals.push(crate::fieldutils::felt_to_i128(v));
                        integer_evals.push(crate::fieldutils::felt_to_i64(v));
                        Value::unknown()
                    }
                });
            }
            _ => return Err(Box::new(TensorError::WrongMethod)),
        };
        let mut tensor: Tensor<i128> = integer_evals.into_iter().into();
        let mut tensor: Tensor<i64> = integer_evals.into_iter().into();
        match tensor.reshape(self.dims()) {
            _ => {}
        };
51
src/wasm.rs
@@ -1,8 +1,9 @@
use crate::circuit::modules::polycommit::PolyCommitChip;
use crate::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use crate::circuit::modules::poseidon::PoseidonChip;
use crate::circuit::modules::Module;
use crate::fieldutils::felt_to_i128;
use crate::fieldutils::i128_to_felt;
use crate::fieldutils::felt_to_i64;
use crate::fieldutils::i64_to_felt;
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::quantize_float;
use crate::graph::scale_to_multiplier;
@@ -112,7 +113,7 @@ pub fn feltToInt(
    let felt: Fr = serde_json::from_slice(&array[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
    Ok(wasm_bindgen::Clamped(
        serde_json::to_vec(&felt_to_i128(felt))
        serde_json::to_vec(&felt_to_i64(felt))
            .map_err(|e| JsError::new(&format!("Failed to serialize integer: {}", e)))?,
    ))
}
@@ -126,7 +127,7 @@ pub fn feltToFloat(
) -> Result<f64, JsError> {
    let felt: Fr = serde_json::from_slice(&array[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
    let int_rep = felt_to_i128(felt);
    let int_rep = felt_to_i64(felt);
    let multiplier = scale_to_multiplier(scale);
    Ok(int_rep as f64 / multiplier)
}
@@ -140,13 +141,51 @@ pub fn floatToFelt(
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    let int_rep =
        quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
    let felt = i128_to_felt(int_rep);
    let felt = i64_to_felt(int_rep);
    let vec = crate::pfsys::field_to_string::<halo2curves::bn256::Fr>(&felt);
    Ok(wasm_bindgen::Clamped(serde_json::to_vec(&vec).map_err(
        |e| JsError::new(&format!("Failed to serialize a float to felt{}", e)),
    )?))
}
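
To make the fixed-point conversions above concrete, a small worked sketch; it assumes `scale_to_multiplier(s)` evaluates to 2^s as a float and that `quantize_float` now returns the `i64` representation this diff migrates to.

// Hedged sketch of the scale arithmetic behind feltToFloat / floatToFelt above;
// the exact Scale type and rounding behaviour of quantize_float are assumptions.
let scale = 2;
let multiplier = scale_to_multiplier(scale); // assumed to be 2^2 = 4.0
let int_rep = quantize_float(&1.25, 0.0, scale).unwrap(); // 1.25 * 4 = 5
assert_eq!(int_rep, 5);
assert_eq!(int_rep as f64 / multiplier, 1.25); // the feltToFloat direction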

/// Generate a kzg commitment.
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn kzgCommit(
    message: wasm_bindgen::Clamped<Vec<u8>>,
    vk: wasm_bindgen::Clamped<Vec<u8>>,
    settings: wasm_bindgen::Clamped<Vec<u8>>,
    params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    let message: Vec<Fr> = serde_json::from_slice(&message[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;

    let mut reader = std::io::BufReader::new(&params_ser[..]);
    let params: ParamsKZG<Bn256> =
        halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
            .map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;

    let mut reader = std::io::BufReader::new(&vk[..]);
    let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
    let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        circuit_settings,
    )
    .map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;

    let output = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
        message,
        (vk.cs().blinding_factors() + 1) as u32,
        &params,
    );

    Ok(wasm_bindgen::Clamped(
        serde_json::to_vec(&output).map_err(|e| JsError::new(&format!("{}", e)))?,
    ))
}

/// Converts a buffer to vector of 4 u64s representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
@@ -236,7 +275,7 @@ pub fn genWitness(
        .map_err(|e| JsError::new(&format!("{}", e)))?;

    let witness = circuit
        .forward::<KZGCommitmentScheme<Bn256>>(&mut input, None, None, false)
        .forward::<KZGCommitmentScheme<Bn256>>(&mut input, None, None, false, false)
        .map_err(|e| JsError::new(&format!("{}", e)))?;

    serde_json::to_vec(&witness)

@@ -3,7 +3,7 @@
mod native_tests {

    use ezkl::circuit::Tolerance;
    use ezkl::fieldutils::{felt_to_i128, i128_to_felt};
    use ezkl::fieldutils::{felt_to_i64, i64_to_felt};
    // use ezkl::circuit::table::RESERVED_BLINDING_ROWS_PAD;
    use ezkl::graph::input::{FileSource, FileSourceInner, GraphData};
    use ezkl::graph::{DataSource, GraphSettings, GraphWitness};
@@ -200,7 +200,7 @@ mod native_tests {
        "1l_tiny_div",
    ];

    const TESTS: [&str; 92] = [
    const TESTS: [&str; 93] = [
        "1l_mlp", //0
        "1l_slice",
        "1l_concat",
@@ -296,7 +296,8 @@ mod native_tests {
        "reducel1",
        "reducel2", // 89
        "1l_lppool",
        "lstm_large", // 91
        "lstm_large", // 91
        "lstm_medium", // 92
    ];

    const WASM_TESTS: [&str; 46] = [
@@ -535,7 +536,7 @@ mod native_tests {
            }
        });

    seq!(N in 0..=91 {
    seq!(N in 0..=92 {

        #(#[test_case(TESTS[N])])*
        #[ignore]
@@ -623,7 +624,7 @@ mod native_tests {
        #(#[test_case(TESTS[N])])*
        fn mock_large_batch_public_outputs_(test: &str) {
            // currently variable output rank is not supported in ONNX
            if test != "gather_nd" && test != "lstm_large" {
            if test != "gather_nd" && test != "lstm_large" && test != "lstm_medium" {
                crate::native_tests::init_binary();
                let test_dir = TempDir::new(test).unwrap();
                let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
@@ -899,7 +900,7 @@ mod native_tests {
    seq!(N in 0..=45 {

        #(#[test_case(WASM_TESTS[N])])*
        fn prove_and_verify_with_overflow_(test: &str) {
        fn kzg_prove_and_verify_with_overflow_(test: &str) {
            crate::native_tests::init_binary();
            // crate::native_tests::init_wasm();
            let test_dir = TempDir::new(test).unwrap();
@@ -908,11 +909,24 @@ mod native_tests {
            prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, true, "single", Commitments::KZG, 2);
            #[cfg(not(feature = "icicle"))]
            run_js_tests(path, test.to_string(), "testWasm", false);
            // test_dir.close().unwrap();
            test_dir.close().unwrap();
        }

        #(#[test_case(WASM_TESTS[N])])*
        fn prove_and_verify_with_overflow_fixed_params_(test: &str) {
        fn kzg_prove_and_verify_with_overflow_hashed_inputs_(test: &str) {
            crate::native_tests::init_binary();
            // crate::native_tests::init_wasm();
            let test_dir = TempDir::new(test).unwrap();
            env_logger::init();
            let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
            prove_and_verify(path, test.to_string(), "safe", "hashed", "private", "public", 1, None, true, "single", Commitments::KZG, 2);
            #[cfg(not(feature = "icicle"))]
            run_js_tests(path, test.to_string(), "testWasm", false);
            test_dir.close().unwrap();
        }

        #(#[test_case(WASM_TESTS[N])])*
        fn kzg_prove_and_verify_with_overflow_fixed_params_(test: &str) {
            crate::native_tests::init_binary();
            // crate::native_tests::init_wasm();
            let test_dir = TempDir::new(test).unwrap();
@@ -1360,7 +1374,7 @@ mod native_tests {
        let witness = witness.clone();
        let outputs = witness.outputs.clone();

        // get values as i128
        // get values as i64
        let output_perturbed_safe: Vec<Vec<halo2curves::bn256::Fr>> = outputs
            .iter()
            .map(|sv| {
@@ -1370,10 +1384,10 @@ mod native_tests {
                    let perturbation = if v == &halo2curves::bn256::Fr::zero() {
                        halo2curves::bn256::Fr::zero()
                    } else {
                        i128_to_felt(
                            (felt_to_i128(*v) as f32
                        i64_to_felt(
                            (felt_to_i64(*v) as f32
                                * (rand::thread_rng().gen_range(-0.01..0.01) * tolerance))
                                as i128,
                                as i64,
                        )
                    };

@@ -1383,7 +1397,7 @@ mod native_tests {
            })
            .collect::<Vec<_>>();

        // get values as i128
        // get values as i64
        let output_perturbed_bad: Vec<Vec<halo2curves::bn256::Fr>> = outputs
            .iter()
            .map(|sv| {
@@ -1393,10 +1407,10 @@ mod native_tests {
                    let perturbation = if v == &halo2curves::bn256::Fr::zero() {
                        halo2curves::bn256::Fr::from(2)
                    } else {
                        i128_to_felt(
                            (felt_to_i128(*v) as f32
                        i64_to_felt(
                            (felt_to_i64(*v) as f32
                                * (rand::thread_rng().gen_range(0.02..0.1) * tolerance))
                                as i128,
                                as i64,
                        )
                    };
                    *v + perturbation

@@ -123,7 +123,7 @@ mod py_tests {
        }
    }

    const TESTS: [&str; 32] = [
    const TESTS: [&str; 33] = [
        "proof_splitting.ipynb", // 0
        "variance.ipynb",
        "mnist_gan.ipynb",
@@ -157,6 +157,7 @@ mod py_tests {
        "generalized_inverse.ipynb",
        "mnist_classifier.ipynb", // 30
        "world_rotation.ipynb",
        "logistic_regression.ipynb",
    ];

    macro_rules! test_func {
@@ -169,7 +170,7 @@ mod py_tests {
            use super::*;

            seq!(N in 0..=31 {
            seq!(N in 0..=32 {

                #(#[test_case(TESTS[N])])*
                fn run_notebook_(test: &str) {
@@ -1,21 +1,29 @@
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
#[cfg(test)]
mod wasm32 {
    use ezkl::circuit::modules::polycommit::PolyCommitChip;
    use ezkl::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
    use ezkl::circuit::modules::poseidon::PoseidonChip;
    use ezkl::circuit::modules::Module;
    use ezkl::graph::modules::POSEIDON_LEN_GRAPH;
    use ezkl::graph::GraphWitness;
    use ezkl::graph::GraphCircuit;
    use ezkl::graph::{GraphSettings, GraphWitness};
    use ezkl::pfsys;
    use ezkl::wasm::{
        bufferToVecOfFelt, compiledCircuitValidation, encodeVerifierCalldata, feltToBigEndian,
        feltToFloat, feltToInt, feltToLittleEndian, genPk, genVk, genWitness, inputValidation,
        pkValidation, poseidonHash, proofValidation, prove, settingsValidation, srsValidation,
        u8_array_to_u128_le, verify, verifyAggr, vkValidation, witnessValidation,
        kzgCommit, pkValidation, poseidonHash, proofValidation, prove, settingsValidation,
        srsValidation, u8_array_to_u128_le, verify, verifyAggr, vkValidation, witnessValidation,
    };
    use halo2_proofs::plonk::VerifyingKey;
    use halo2_proofs::poly::commitment::CommitmentScheme;
    use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
    use halo2_proofs::poly::kzg::commitment::ParamsKZG;
    use halo2_solidity_verifier::encode_calldata;
    use halo2curves::bn256::Bn256;
    use halo2curves::bn256::{Fr, G1Affine};
    use snark_verifier::util::arithmetic::PrimeField;
    use wasm_bindgen::JsError;
    #[cfg(feature = "web")]
    pub use wasm_bindgen_rayon::init_thread_pool;
    use wasm_bindgen_test::*;
@@ -90,6 +98,45 @@ mod wasm32 {
        assert_eq!(calldata, reference_calldata);
    }

    #[wasm_bindgen_test]
    fn verify_kzg_commit() {
        // create a vector of field elements Vec<Fr> and assign it to the message variable
        let mut message: Vec<Fr> = vec![];
        for i in 0..32 {
            message.push(Fr::from(i as u64));
        }
        let message_ser = serde_json::to_vec(&message).unwrap();

        let settings: GraphSettings = serde_json::from_slice(&SETTINGS).unwrap();
        let mut reader = std::io::BufReader::new(SRS);
        let params: ParamsKZG<Bn256> =
            halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).unwrap();
        let mut reader = std::io::BufReader::new(VK);
        let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
            &mut reader,
            halo2_proofs::SerdeFormat::RawBytes,
            settings.clone(),
        )
        .unwrap();
        let commitment_ser = kzgCommit(
            wasm_bindgen::Clamped(message_ser),
            wasm_bindgen::Clamped(VK.to_vec()),
            wasm_bindgen::Clamped(SETTINGS.to_vec()),
            wasm_bindgen::Clamped(SRS.to_vec()),
        )
        .map_err(|_| "failed")
        .unwrap();
        let commitment: Vec<halo2curves::bn256::G1Affine> =
            serde_json::from_slice(&commitment_ser[..]).unwrap();
        let reference_commitment = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
            message,
            (vk.cs().blinding_factors() + 1) as u32,
            &params,
        );

        assert_eq!(commitment, reference_commitment);
    }

    #[wasm_bindgen_test]
    async fn verify_field_serialization_roundtrip() {
        for i in 0..32 {
@@ -103,10 +150,10 @@ mod wasm32 {
            .unwrap();
            assert_eq!(floating_point, (i as f64) / 4.0);

            let integer: i128 =
            let integer: i64 =
                serde_json::from_slice(&feltToInt(clamped.clone()).map_err(|_| "failed").unwrap())
                    .unwrap();
            assert_eq!(integer, i as i128);
            assert_eq!(integer, i as i64);

            let hex_string = format!("{:?}", field_element.clone());
            let returned_string: String = feltToBigEndian(clamped.clone())
Binary file not shown.