Mirror of https://github.com/zkonduit/ezkl.git (synced 2026-01-13 08:17:57 -05:00)

Compare commits (23 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | fe535c1cac |  |
|  | 3e8dcb001a |  |
|  | 14786acb95 |  |
|  | 80a3c44cb4 |  |
|  | 1656846d1a |  |
|  | 88098b8190 |  |
|  | 6c0c17c9be |  |
|  | bf69b16fc1 |  |
|  | 74feb829da |  |
|  | d429e7edab |  |
|  | f0e5b82787 |  |
|  | 3f7261f50b |  |
|  | 678a249dcb |  |
|  | 0291eb2d0f |  |
|  | 1b637a70b0 |  |
|  | abcd5380db |  |
|  | 076b737108 |  |
|  | 97d9832591 |  |
|  | e0771683a6 |  |
|  | 319c222307 |  |
|  | 85ee6e7f9d |  |
|  | 4c8daf773c |  |
|  | 80041ac523 |  |
.github/workflows/large-tests.yml (vendored, 2 changes)

@@ -11,7 +11,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - name: nanoGPT Mock
.github/workflows/release.yml (vendored, 2 changes)

@@ -45,7 +45,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - name: Checkout repo
.github/workflows/rust.yml (vendored, 79 changes)

@@ -26,7 +26,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - name: Build
@@ -38,7 +38,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - name: Docs
@@ -50,7 +50,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -73,7 +73,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -106,7 +106,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -139,7 +139,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -172,7 +172,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -189,7 +189,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: jetli/wasm-pack-action@v0.4.0
@@ -198,10 +198,8 @@ jobs:
           # chromedriver-version: "115.0.5790.102"
       - name: Install wasm32-unknown-unknown
         run: rustup target add wasm32-unknown-unknown
-      - name: Install wasm runner
-        run: cargo install wasm-server-runner
       - name: Add rust-src
-        run: rustup component add rust-src --toolchain nightly-2023-08-24-x86_64-unknown-linux-gnu
+        run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
       - name: Run wasm verifier tests
       # on mac:
       # AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
@@ -214,7 +212,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -231,13 +229,15 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
         with:
           crate: cargo-nextest
           locked: true
       - name: public outputs and tolerance > 0
         run: cargo nextest run --release --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
+      - name: public outputs + batch size == 10
+        run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 32
       - name: kzg inputs
@@ -286,7 +286,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -312,7 +312,7 @@ jobs:
       - name: Install solc
         run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
       - name: Install Anvil
-        run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
+        run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
       - name: KZG prove and verify tests (EVM + VK rendered seperately)
         run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_render_seperately_ --test-threads 1
       - name: KZG prove and verify tests (EVM + kzg all)
@@ -345,18 +345,15 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: jetli/wasm-pack-action@v0.4.0
       - name: Add wasm32-unknown-unknown target
         run: rustup target add wasm32-unknown-unknown
-
-      - name: Install wasm-server-runner
-        run: cargo install wasm-server-runner

       - name: Add rust-src
-        run: rustup component add rust-src --toolchain nightly-2023-08-24-x86_64-unknown-linux-gnu
+        run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
       - uses: actions/checkout@v3
       - name: Use pnpm 8
         uses: pnpm/action-setup@v2
@@ -416,11 +413,11 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - name: Add rust-src
-        run: rustup component add rust-src --toolchain nightly-2023-08-24-x86_64-unknown-linux-gnu
+        run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
       - uses: actions/checkout@v3
       - uses: baptiste0928/cargo-install@v1
         with:
@@ -450,7 +447,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -460,7 +457,7 @@ jobs:
       - name: Install solc
         run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
       - name: Install Anvil
-        run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
+        run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
       - name: fuzz tests (EVM)
         run: cargo nextest run --release --verbose tests_evm::kzg_evm_fuzz_ --test-threads 2
       # - name: fuzz tests
@@ -473,7 +470,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -491,7 +488,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -503,12 +500,12 @@ jobs:

   prove-and-verify-aggr-tests:
     runs-on: large-self-hosted
-    needs: [build, library-tests, python-tests]
+    needs: [build, library-tests]
     steps:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -516,16 +513,16 @@ jobs:
           crate: cargo-nextest
           locked: true
       - name: KZG tests
-        run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 8 -- --include-ignored
+        run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 4 -- --include-ignored

   prove-and-verify-aggr-evm-tests:
     runs-on: large-self-hosted
-    needs: [build, library-tests, python-tests]
+    needs: [build, library-tests]
     steps:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -535,7 +532,7 @@ jobs:
       - name: Install solc
         run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
       - name: Install Anvil
-        run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
+        run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
       - name: KZG prove and verify aggr tests
         run: cargo nextest run --release --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored

@@ -546,7 +543,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -568,7 +565,7 @@ jobs:
           python-version: "3.7"
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - name: Install solc
@@ -576,9 +573,9 @@ jobs:
       - name: Setup Virtual Env and Install python dependencies
         run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
       - name: Install Anvil
-        run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
+        run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
       - name: Build python ezkl
-        run: source .env/bin/activate; maturin develop --features python-bindings --release
+        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
       - name: Run pytest
         run: source .env/bin/activate; pytest -vv

@@ -592,7 +589,7 @@ jobs:
           python-version: "3.7"
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -602,7 +599,7 @@ jobs:
       - name: Setup Virtual Env and Install python dependencies
         run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
       - name: Build python ezkl
-        run: source .env/bin/activate; maturin develop --features python-bindings --release
+        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
       - name: Div rebase
         run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_div_rebase_
       - name: Public inputs
@@ -623,7 +620,7 @@ jobs:
           python-version: "3.9"
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: baptiste0928/cargo-install@v1
@@ -633,11 +630,11 @@ jobs:
       - name: Install solc
         run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
       - name: Install Anvil
-        run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
+        run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
       - name: Setup Virtual Env and Install python dependencies
         run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
       - name: Build python ezkl
-        run: source .env/bin/activate; maturin develop --features python-bindings --release
+        run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
       # - name: authenticate-kaggle-cli
       #   shell: bash
       #   env:
.github/workflows/wasm.yml (vendored, 7 changes)

@@ -22,18 +22,15 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2023-08-24
+          toolchain: nightly-2024-01-04
           override: true
           components: rustfmt, clippy
       - uses: jetli/wasm-pack-action@v0.4.0
       - name: Add wasm32-unknown-unknown target
         run: rustup target add wasm32-unknown-unknown
-
-      - name: Install wasm-server-runner
-        run: cargo install wasm-server-runner

       - name: Add rust-src
-        run: rustup component add rust-src --toolchain nightly-2023-08-24-x86_64-unknown-linux-gnu
+        run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
       - name: Install binaryen
         run: |
           set -e
Cargo.lock (generated, 558 changes)

File diff suppressed because it is too large.
Cargo.toml (file name inferred from the hunk context)

@@ -17,7 +17,7 @@ crate-type = ["cdylib", "rlib"]
 [dependencies]
 halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch= "main" }
 halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch= "main" }
-halo2curves = { version = "0.6.0", features = ["derive_serde"] }
+halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev="9fff22c", features=["derive_serde"] }
 rand = { version = "0.8", default_features = false }
 itertools = { version = "0.10.3", default_features = false }
 clap = { version = "4.3.3", features = ["derive"]}
@@ -34,10 +34,13 @@ bincode = { version = "1.3.3", default_features = false }
 ark-std = { version = "^0.3.0", default-features = false }
 unzip-n = "0.1.2"
 num = "0.4.1"
+portable-atomic = "1.6.0"
+tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand" }
+

 # evm related deps
 [target.'cfg(not(target_arch = "wasm32"))'.dependencies]
-ethers = { version = "2.0.7", default_features = false, features = ["ethers-solc"] }
+ethers = { version = "2.0.11", default_features = false, features = ["ethers-solc"] }
 indicatif = {version = "0.17.5", features = ["rayon"]}
 gag = { version = "1.0.0", default_features = false}
 instant = { version = "0.1" }
@@ -158,6 +161,7 @@ mv-lookup = ["halo2_proofs/mv-lookup", "snark-verifier/mv-lookup", "halo2_solidi
 det-prove = []
 icicle = ["halo2_proofs/icicle_gpu"]
 empty-cmd = []
+no-banner = []

 # icicle patch to 0.1.0 if feature icicle is enabled
 [patch.'https://github.com/ingonyama-zk/icicle']
@@ -6,6 +6,7 @@ use ezkl::fieldutils;
 use ezkl::fieldutils::i32_to_felt;
 use ezkl::tensor::*;
 use halo2_proofs::dev::MockProver;
+use halo2_proofs::poly::commitment::Params;
 use halo2_proofs::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK};
 use halo2_proofs::{
     circuit::{Layouter, SimpleFloorPlanner, Value},
@@ -489,6 +490,7 @@ pub fn runconv() {
         strategy,
         pi_for_real_prover,
         &mut transcript,
+        params.n(),
     );
     assert!(verify.is_ok());
@@ -309,7 +309,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "print(ezkl.string_to_felt(res['processed_outputs']['poseidon_hash'][0]))"
+    "print(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]))"
    ]
   },
   {
@@ -325,7 +325,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from web3 import Web3, HTTPProvider, utils\n",
+    "from web3 import Web3, HTTPProvider\n",
     "from solcx import compile_standard\n",
     "from decimal import Decimal\n",
     "import json\n",
@@ -338,7 +338,7 @@
     "\n",
     "def test_on_chain_data(res):\n",
     "    # Step 0: Convert the tensor to a flat list\n",
-    "    data = [int(ezkl.string_to_felt(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
+    "    data = [int(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
     "\n",
     "    # Step 1: Prepare the data\n",
     "    # Step 2: Prepare and compile the contract.\n",
@@ -648,7 +648,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.9.13"
+  "version": "3.9.15"
  },
 "orig_nbformat": 4
 },
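The notebook changes above track a rename in the ezkl Python bindings: the string-based conversion helpers give way to felt-based ones. A minimal sketch of the mapping, assuming the bindings at this revision; the 0.5 value and scale of 7 are illustrative:

```python
import ezkl

# Rename map (old helper -> new helper), as reflected in the notebook diffs:
#   ezkl.string_to_felt(s)          -> ezkl.felt_to_big_endian(felt)
#   ezkl.float_to_string(x, scale)  -> ezkl.float_to_felt(x, scale)
#   ezkl.string_to_float(s, scale)  -> ezkl.felt_to_float(felt, scale)

felt = ezkl.float_to_felt(0.5, 7)        # quantize 0.5 at scale 2**7
digest = ezkl.poseidon_hash([felt])[0]   # Poseidon-hash the field element
print(ezkl.felt_to_big_endian(digest))   # big-endian form for on-chain use
print(ezkl.felt_to_float(felt, 7))       # dequantize back to ~0.5
```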
@@ -695,7 +695,7 @@
    "formatted_output = \"[\"\n",
    "for i, value in enumerate(proof[\"instances\"]):\n",
    "    for j, field_element in enumerate(value):\n",
-    "        onchain_input_array.append(ezkl.string_to_felt(field_element))\n",
+    "        onchain_input_array.append(ezkl.felt_to_big_endian(field_element))\n",
    "        formatted_output += str(onchain_input_array[-1])\n",
    "        if j != len(value) - 1:\n",
    "            formatted_output += \", \"\n",
@@ -705,7 +705,7 @@
    "# copy them over to remix and see if they verify\n",
    "# What happens when you change a value?\n",
    "print(\"pubInputs: \", formatted_output)\n",
-    "print(\"proof: \", \"0x\" + proof[\"proof\"])"
+    "print(\"proof: \", proof[\"proof\"])"
    ]
   },
   {
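This notebook also stops prepending "0x" to proof["proof"], which suggests the serialized proof string now carries the prefix itself. A defensive sketch (assumption: proof["proof"] is a hex string that may or may not already be prefixed):

```python
def as_prefixed_hex(proof_hex: str) -> str:
    """Normalize a hex string to exactly one "0x" prefix."""
    return proof_hex if proof_hex.startswith("0x") else "0x" + proof_hex

proof = {"proof": "0a1b2c"}  # illustrative stand-in for the loaded proof JSON
print("proof: ", as_prefixed_hex(proof["proof"]))  # -> proof:  0x0a1b2c
```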
@@ -122,8 +122,8 @@
    "# Loop through each element in the y tensor\n",
    "for e in y_input:\n",
    "    # Apply the custom function and append the result to the list\n",
-    "    print(ezkl.float_to_string(e,7))\n",
-    "    result.append(ezkl.poseidon_hash([ezkl.float_to_string(e, 7)])[0])\n",
+    "    print(ezkl.float_to_felt(e,7))\n",
+    "    result.append(ezkl.poseidon_hash([ezkl.float_to_felt(e, 7)])[0])\n",
    "\n",
    "y = y.unsqueeze(0)\n",
    "y = y.reshape(1, 9)\n",
@@ -126,7 +126,7 @@
    "# Loop through each element in the y tensor\n",
    "for e in user_preimages:\n",
    "    # Apply the custom function and append the result to the list\n",
-    "    users.append(ezkl.poseidon_hash([ezkl.float_to_string(e, 0)])[0])\n",
+    "    users.append(ezkl.poseidon_hash([ezkl.float_to_felt(e, 0)])[0])\n",
    "\n",
    "users_t = torch.tensor(user_preimages)\n",
    "users_t = users_t.reshape(1, 6)\n",
@@ -303,7 +303,7 @@
    "# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
    "# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
    "witness = json.load(open(witness_path, \"r\"))\n",
-    "witness[\"outputs\"][0] = [ezkl.float_to_string(1.0, 0)]\n",
+    "witness[\"outputs\"][0] = [ezkl.float_to_felt(1.0, 0)]\n",
    "json.dump(witness, open(witness_path, \"w\"))"
    ]
   },
@@ -417,7 +417,7 @@
    "# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
    "# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
    "witness = json.load(open(witness_path, \"r\"))\n",
-    "witness[\"outputs\"][0] = [ezkl.float_to_string(1.0, 0)]\n",
+    "witness[\"outputs\"][0] = [ezkl.float_to_felt(1.0, 0)]\n",
    "json.dump(witness, open(witness_path, \"w\"))\n"
    ]
   },
@@ -510,7 +510,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.9.13"
+  "version": "3.9.15"
  }
 },
 "nbformat": 4,
@@ -633,7 +633,7 @@
    "json.dump(data, open(cal_path, 'w'))\n",
    "\n",
    "\n",
-    "ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
+    "ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
    ]
   },
   {
@@ -664,7 +664,6 @@
    "    compiled_model_path,\n",
    "    vk_path,\n",
    "    pk_path,\n",
-    "    \n",
    ")"
    ]
   },
@@ -503,11 +503,11 @@
    "pyplot.arrow(0, 0, 1, 0, width=0.02, alpha=0.5)\n",
    "pyplot.arrow(0, 0, 0, 1, width=0.02, alpha=0.5)\n",
    "\n",
-    "arrow_x = ezkl.string_to_float(witness['outputs'][0][0], out_scale)\n",
-    "arrow_y = ezkl.string_to_float(witness['outputs'][0][1], out_scale)\n",
+    "arrow_x = ezkl.felt_to_float(witness['outputs'][0][0], out_scale)\n",
+    "arrow_y = ezkl.felt_to_float(witness['outputs'][0][1], out_scale)\n",
    "pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)\n",
-    "arrow_x = ezkl.string_to_float(witness['outputs'][0][2], out_scale)\n",
-    "arrow_y = ezkl.string_to_float(witness['outputs'][0][3], out_scale)\n",
+    "arrow_x = ezkl.felt_to_float(witness['outputs'][0][2], out_scale)\n",
+    "arrow_y = ezkl.felt_to_float(witness['outputs'][0][3], out_scale)\n",
    "pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)"
    ]
   }
@@ -11,8 +11,8 @@ use ezkl::execute::run;
 #[cfg(not(target_arch = "wasm32"))]
 use ezkl::logger::init_logger;
 #[cfg(not(target_arch = "wasm32"))]
-use log::{error, info};
-#[cfg(not(target_arch = "wasm32"))]
+use log::{debug, error, info};
+#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
 use rand::prelude::SliceRandom;
 #[cfg(not(target_arch = "wasm32"))]
 #[cfg(feature = "icicle")]
@@ -25,6 +25,7 @@ use std::error::Error;
 pub async fn main() -> Result<(), Box<dyn Error>> {
     let args = Cli::parse();
     init_logger();
+    #[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
     banner();
     #[cfg(feature = "icicle")]
     if env::var("ENABLE_ICICLE_GPU").is_ok() {
@@ -32,7 +33,7 @@ pub async fn main() -> Result<(), Box<dyn Error>> {
     } else {
         info!("Running with CPU");
     }
-    info!("command: \n {}", &args.as_json()?.to_colored_json_auto()?);
+    debug!("command: \n {}", &args.as_json()?.to_colored_json_auto()?);
     let res = run(args.command).await;
     match &res {
         Ok(_) => info!("succeeded"),
@@ -44,7 +45,7 @@ pub async fn main() -> Result<(), Box<dyn Error>> {
 #[cfg(target_arch = "wasm32")]
 pub fn main() {}

-#[cfg(not(target_arch = "wasm32"))]
+#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
 fn banner() {
     let ell: Vec<&str> = vec![
         "for Neural Networks",
@@ -41,7 +41,7 @@ pub struct KZGChip {
 }

 impl KZGChip {
-    /// Returns the number of inputs to the hash function
+    /// Commit to the message using the KZG commitment scheme
     pub fn commit(
         message: Vec<Fp>,
         degree: u32,
@@ -15,7 +15,7 @@ use halo2_proofs::{
         Instance, Selector, TableColumn,
     },
 };
-use log::{trace, warn};
+use log::{debug, trace};

 /// A simple [`FloorPlanner`] that performs minimal optimizations.
 #[derive(Debug)]
@@ -119,7 +119,7 @@ impl<'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> Layouter<F> for ModuleLayo
             Error::Synthesis
         })?;
         if !self.regions.contains_key(&index) {
-            warn!("spawning module {}", index)
+            debug!("spawning module {}", index)
         };
         self.current_module = index;
     }
@@ -125,8 +125,8 @@ impl BaseOp {
             BaseOp::Sum => 1,
             BaseOp::SumInit => 1,
             BaseOp::Range { .. } => 1,
-            BaseOp::IsZero => 1,
-            BaseOp::IsBoolean => 1,
+            BaseOp::IsZero => 0,
+            BaseOp::IsBoolean => 0,
         }
     }
@@ -16,10 +16,11 @@ use pyo3::{
     types::PyString,
 };
 use serde::{Deserialize, Serialize};
+use tosubcommand::ToFlags;

 use crate::{
-    circuit::ops::base::BaseOp,
     circuit::{
+        ops::base::BaseOp,
         table::{Range, RangeCheck, Table},
         utils,
     },
@@ -61,6 +62,22 @@ pub enum CheckMode {
     UNSAFE,
 }

+impl std::fmt::Display for CheckMode {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            CheckMode::SAFE => write!(f, "safe"),
+            CheckMode::UNSAFE => write!(f, "unsafe"),
+        }
+    }
+}
+
+impl ToFlags for CheckMode {
+    /// Convert the struct to a subcommand string
+    fn to_flags(&self) -> Vec<String> {
+        vec![format!("{}", self)]
+    }
+}
+
 impl From<String> for CheckMode {
     fn from(value: String) -> Self {
         match value.to_lowercase().as_str() {
@@ -83,6 +100,19 @@ pub struct Tolerance {
     pub scale: utils::F32,
 }

+impl std::fmt::Display for Tolerance {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{:.2}", self.val)
+    }
+}
+
+impl ToFlags for Tolerance {
+    /// Convert the struct to a subcommand string
+    fn to_flags(&self) -> Vec<String> {
+        vec![format!("{}", self)]
+    }
+}
+
 impl FromStr for Tolerance {
     type Err = String;

@@ -168,6 +198,9 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
     pub lookup_input: VarTensor,
     /// the (currently singular) output of the accumulated operations.
     pub output: VarTensor,
+    /// The VarTensor reserved for dynamic lookup operations (could be an element of inputs or the same as output)
+    /// Note that you should be careful to ensure that the lookup_output is not simultaneously assigned to by other non-lookup operations eg. in the case of composite ops.
+    pub dynamic_lookup_tables: Vec<VarTensor>,
     /// the VarTensor reserved for lookup operations (could be an element of inputs or the same as output)
     /// Note that you should be careful to ensure that the lookup_output is not simultaneously assigned to by other non-lookup operations eg. in the case of composite ops.
     pub lookup_output: VarTensor,
@@ -177,6 +210,10 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
     pub selectors: BTreeMap<(BaseOp, usize, usize), Selector>,
     /// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many lookup ops.
     pub lookup_selectors: BTreeMap<(LookupOp, usize, usize), Selector>,
+    /// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
+    pub dynamic_lookup_selectors: BTreeMap<(usize, usize), Vec<Selector>>,
+    ///
+    pub dynamic_table_selectors: Vec<Selector>,
     ///
     pub tables: BTreeMap<LookupOp, Table<F>>,
     ///
@@ -198,9 +235,12 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
             lookup_input: dummy_var.clone(),
             output: dummy_var.clone(),
             lookup_output: dummy_var.clone(),
+            dynamic_lookup_tables: vec![VarTensor::dummy(col_size, 2), dummy_var.clone()],
             lookup_index: dummy_var,
             selectors: BTreeMap::new(),
             lookup_selectors: BTreeMap::new(),
+            dynamic_lookup_selectors: BTreeMap::new(),
+            dynamic_table_selectors: vec![],
             range_check_selectors: BTreeMap::new(),
             tables: BTreeMap::new(),
             range_checks: BTreeMap::new(),
@@ -276,9 +316,20 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {

             let constraints = match base_op {
                 BaseOp::IsBoolean => {
-                    vec![(qis[1].clone()) * (qis[1].clone() - Expression::Constant(F::from(1)))]
+                    let expected_output: Tensor<Expression<F>> = output
+                        .query_rng(meta, *block_idx, *inner_col_idx, 0, 1)
+                        .expect("non accum: output query failed");
+
+                    let output = expected_output[base_op.constraint_idx()].clone();
+
+                    vec![(output.clone()) * (output.clone() - Expression::Constant(F::from(1)))]
                 }
-                BaseOp::IsZero => vec![qis[1].clone()],
+                BaseOp::IsZero => {
+                    let expected_output: Tensor<Expression<F>> = output
+                        .query_rng(meta, *block_idx, *inner_col_idx, 0, 1)
+                        .expect("non accum: output query failed");
+                    vec![expected_output[base_op.constraint_idx()].clone()]
+                }
                 _ => {
                     let expected_output: Tensor<Expression<F>> = output
                         .query_rng(meta, *block_idx, *inner_col_idx, rotation_offset, rng)
@@ -335,10 +386,13 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
             selectors,
             lookup_selectors: BTreeMap::new(),
             range_check_selectors: BTreeMap::new(),
+            dynamic_lookup_selectors: BTreeMap::new(),
             inputs: inputs.to_vec(),
             lookup_input: VarTensor::Empty,
             lookup_output: VarTensor::Empty,
             lookup_index: VarTensor::Empty,
+            dynamic_table_selectors: vec![],
+            dynamic_lookup_tables: vec![],
             tables: BTreeMap::new(),
             range_checks: BTreeMap::new(),
             output: output.clone(),
@@ -362,8 +416,6 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
     where
         F: Field,
     {
-        let mut selectors = BTreeMap::new();
-
         if !index.is_advice() {
             return Err("wrong input type for lookup index".into());
         }
@@ -473,10 +525,10 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
                     res
                 });
-                selectors.insert((nl.clone(), x, y), multi_col_selector);
+                self.lookup_selectors
+                    .insert((nl.clone(), x, y), multi_col_selector);
             }
         }
-        self.lookup_selectors.extend(selectors);
         // if we haven't previously initialized the input/output, do so now
         if let VarTensor::Empty = self.lookup_input {
             debug!("assigning lookup input");
@@ -495,69 +547,182 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {

     /// Configures and creates lookup selectors
     #[allow(clippy::too_many_arguments)]
-    pub fn configure_range_check(
+    pub fn configure_dynamic_lookup(
         &mut self,
         cs: &mut ConstraintSystem<F>,
-        input: &VarTensor,
-        range: Range,
+        lookups: &[VarTensor; 2],
+        tables: &[VarTensor; 2],
     ) -> Result<(), Box<dyn Error>>
     where
        F: Field,
     {
-        let mut selectors = BTreeMap::new();
+        for l in lookups.iter() {
+            if !l.is_advice() {
+                return Err("wrong input type for dynamic lookup".into());
+            }
+        }
+
+        for t in tables.iter() {
+            if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
+                return Err("wrong table type for dynamic lookup".into());
+            }
+        }
+
+        let one = Expression::Constant(F::ONE);
+
+        let s_ltable = cs.complex_selector();
+
+        for x in 0..lookups[0].num_blocks() {
+            for y in 0..lookups[0].num_inner_cols() {
+                let s_lookup = cs.complex_selector();
+
+                cs.lookup_any("lookup", |cs| {
+                    let s_lookupq = cs.query_selector(s_lookup);
+                    let mut expression = vec![];
+                    let s_ltableq = cs.query_selector(s_ltable);
+                    let mut lookup_queries = vec![one.clone()];
+
+                    for lookup in lookups {
+                        lookup_queries.push(match lookup {
+                            VarTensor::Advice { inner: advices, .. } => {
+                                cs.query_advice(advices[x][y], Rotation(0))
+                            }
+                            _ => unreachable!(),
+                        });
+                    }
+
+                    let mut table_queries = vec![one.clone()];
+                    for table in tables {
+                        table_queries.push(match table {
+                            VarTensor::Advice { inner: advices, .. } => {
+                                cs.query_advice(advices[0][0], Rotation(0))
+                            }
+                            _ => unreachable!(),
+                        });
+                    }
+
+                    let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone());
+                    let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone());
+                    expression.extend(lhs.zip(rhs));
+
+                    expression
+                });
+                self.dynamic_lookup_selectors
+                    .entry((x, y))
+                    .or_default()
+                    .push(s_lookup);
+            }
+        }
+        self.dynamic_table_selectors.push(s_ltable);
+
+        // if we haven't previously initialized the input/output, do so now
+        if self.dynamic_lookup_tables.is_empty() {
+            debug!("assigning dynamic lookup table");
+            self.dynamic_lookup_tables = tables.to_vec();
+        }
+
+        Ok(())
+    }
+
+    /// Configures and creates lookup selectors
+    #[allow(clippy::too_many_arguments)]
+    pub fn configure_range_check(
+        &mut self,
+        cs: &mut ConstraintSystem<F>,
+        input: &VarTensor,
+        index: &VarTensor,
+        range: Range,
+        logrows: usize,
+    ) -> Result<(), Box<dyn Error>>
+    where
+        F: Field,
+    {
         if !input.is_advice() {
             return Err("wrong input type for lookup input".into());
         }

         // we borrow mutably twice so we need to do this dance

-        let range_check = if !self.range_checks.contains_key(&range) {
-            // as all tables have the same input we see if there's another table who's input we can reuse
-            let range_check = RangeCheck::<F>::configure(cs, range);
-            self.range_checks.insert(range, range_check.clone());
-            range_check
-        } else {
-            return Ok(());
-        };
+        let range_check =
+            if let std::collections::btree_map::Entry::Vacant(e) = self.range_checks.entry(range) {
+                // as all tables have the same input we see if there's another table who's input we can reuse
+                let range_check = RangeCheck::<F>::configure(cs, range, logrows);
+                e.insert(range_check.clone());
+                range_check
+            } else {
+                return Ok(());
+            };

         for x in 0..input.num_blocks() {
             for y in 0..input.num_inner_cols() {
-                let single_col_sel = cs.complex_selector();
+                let len = range_check.selector_constructor.degree;
+                let multi_col_selector = cs.complex_selector();

-                cs.lookup("", |cs| {
-                    let mut res = vec![];
-                    let sel = cs.query_selector(single_col_sel);
+                for (col_idx, input_col) in range_check.inputs.iter().enumerate() {
+                    cs.lookup("", |cs| {
+                        let mut res = vec![];
+                        let sel = cs.query_selector(multi_col_selector);

-                    let input_query = match &input {
-                        VarTensor::Advice { inner: advices, .. } => {
-                            cs.query_advice(advices[x][y], Rotation(0))
-                        }
-                        _ => unreachable!(),
-                    };
+                        let synthetic_sel = match len {
+                            1 => Expression::Constant(F::from(1)),
+                            _ => match index {
+                                VarTensor::Advice { inner: advices, .. } => {
+                                    cs.query_advice(advices[x][y], Rotation(0))
+                                }
+                                _ => unreachable!(),
+                            },
+                        };

-                    let default_x = range_check.get_first_element();
+                        let input_query = match &input {
+                            VarTensor::Advice { inner: advices, .. } => {
+                                cs.query_advice(advices[x][y], Rotation(0))
+                            }
+                            _ => unreachable!(),
+                        };

-                    let not_sel = Expression::Constant(F::ONE) - sel.clone();
+                        let default_x = range_check.get_first_element(col_idx);

-                    res.extend([(
-                        sel.clone() * input_query.clone()
-                            + not_sel.clone() * Expression::Constant(default_x),
-                        range_check.input,
-                    )]);
+                        let col_expr = sel.clone()
+                            * range_check
+                                .selector_constructor
+                                .get_expr_at_idx(col_idx, synthetic_sel);

-                    res
-                });
-                selectors.insert((range, x, y), single_col_sel);
+                        let multiplier = range_check
+                            .selector_constructor
+                            .get_selector_val_at_idx(col_idx);
+
+                        let not_expr = Expression::Constant(multiplier) - col_expr.clone();
+
+                        res.extend([(
+                            col_expr.clone() * input_query.clone()
+                                + not_expr.clone() * Expression::Constant(default_x),
+                            *input_col,
+                        )]);
+
+                        log::trace!("---------------- col {:?} ------------------", col_idx,);
+                        log::trace!("expr: {:?}", col_expr,);
+                        log::trace!("multiplier: {:?}", multiplier);
+                        log::trace!("not_expr: {:?}", not_expr);
+                        log::trace!("default x: {:?}", default_x);
+
+                        res
+                    });
+                }
+                self.range_check_selectors
+                    .insert((range, x, y), multi_col_selector);
             }
         }
-        self.range_check_selectors.extend(selectors);
         // if we haven't previously initialized the input/output, do so now
         if let VarTensor::Empty = self.lookup_input {
             debug!("assigning lookup input");
             self.lookup_input = input.clone();
         }
+
+        if let VarTensor::Empty = self.lookup_index {
+            debug!("assigning lookup index");
+            self.lookup_index = index.clone();
+        }

         Ok(())
     }
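The new Display and ToFlags impls in the hunks above render CheckMode as "safe"/"unsafe" and Tolerance as its value with two decimals, so both can round-trip through CLI flags (the flag plumbing itself lives in the tosubcommand crate). A small Python sketch of the same formatting, for reference only; exact Rust float formatting may differ at the margins:

```python
def check_mode_flag(mode: str) -> str:
    # Mirrors `impl Display for CheckMode`: SAFE -> "safe", UNSAFE -> "unsafe".
    return {"SAFE": "safe", "UNSAFE": "unsafe"}[mode]

def tolerance_flag(val: float) -> str:
    # Mirrors `impl Display for Tolerance`: the value formatted as "{:.2}".
    return f"{val:.2f}"

assert check_mode_flag("SAFE") == "safe"
assert tolerance_flag(1.0) == "1.00"
```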
@@ -6,7 +6,6 @@ use crate::{
     tensor::{self, Tensor, TensorError, TensorType, ValTensor},
 };
 use halo2curves::ff::PrimeField;
-use itertools::Itertools;
 use serde::{Deserialize, Serialize};
 // import run args from model

@@ -69,14 +68,6 @@ pub enum HybridOp {
         dim: usize,
         num_classes: usize,
     },
-    GatherElements {
-        dim: usize,
-        constant_idx: Option<Tensor<usize>>,
-    },
-    ScatterElements {
-        dim: usize,
-        constant_idx: Option<Tensor<usize>>,
-    },
 }

 impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
@@ -84,7 +75,6 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
     fn requires_homogenous_input_scales(&self) -> Vec<usize> {
         match self {
             HybridOp::Greater | HybridOp::Less | HybridOp::Equals => vec![0, 1],
-            HybridOp::ScatterElements { .. } => vec![0, 2],
             HybridOp::GreaterEqual | HybridOp::LessEqual => vec![0, 1],
             _ => vec![],
         }
@@ -98,176 +88,42 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
     fn f(&self, inputs: &[Tensor<F>]) -> Result<ForwardResult<F>, TensorError> {
         let x = inputs[0].clone().map(|x| felt_to_i128(x));

-        let (res, intermediate_lookups) = match &self {
-            HybridOp::ReduceMax { axes, .. } => {
-                let res = tensor::ops::max_axes(&x, axes)?;
-                let max_minus_one =
-                    Tensor::from(vec![x.clone().into_iter().max().unwrap() - 1].into_iter());
-                let unit = Tensor::from(vec![1].into_iter());
-                // relu(x - max(x - 1)
-                let inter_1 = (x.clone() - max_minus_one)?;
-                // relu(1 - sum(relu(inter_1)))
-                let inter_2 = (unit
-                    - tensor::ops::sum(&tensor::ops::nonlinearities::leakyrelu(&inter_1, 0.0))?)?;
-
-                (res.clone(), vec![inter_1, inter_2])
-            }
-            HybridOp::ReduceMin { axes, .. } => {
-                let res = tensor::ops::min_axes(&x, axes)?;
-                let min_plus_one =
-                    Tensor::from(vec![x.clone().into_iter().min().unwrap() + 1].into_iter());
-                let unit = Tensor::from(vec![1].into_iter());
-                // relu(min(x + 1) - x)
-                let inter_1 = (min_plus_one - x.clone())?;
-                // relu(1 - sum(relu(inter_1)))
-                let inter_2 = (unit
-                    - tensor::ops::sum(&tensor::ops::nonlinearities::leakyrelu(&inter_1, 0.0))?)?;
-                (res.clone(), vec![inter_1, inter_2])
-            }
-            HybridOp::Div {
-                denom,
-                use_range_check_for_int,
-                ..
-            } => {
-                let res = crate::tensor::ops::nonlinearities::const_div(&x, denom.0 as f64);
-                // if denom is a round number and use_range_check_for_int is true, use range check check
-                if denom.0.fract() == 0.0 && *use_range_check_for_int {
-                    let divisor = Tensor::from(vec![denom.0 as i128 / 2].into_iter());
-                    (res, vec![-divisor.clone(), divisor])
-                } else {
-                    (res, vec![x])
-                }
+        let res = match &self {
+            HybridOp::ReduceMax { axes, .. } => tensor::ops::max_axes(&x, axes)?,
+            HybridOp::ReduceMin { axes, .. } => tensor::ops::min_axes(&x, axes)?,
+            HybridOp::Div { denom, .. } => {
+                crate::tensor::ops::nonlinearities::const_div(&x, denom.0 as f64)
             }
             HybridOp::Recip {
                 input_scale,
                 output_scale,
-                use_range_check_for_int,
-            } => {
-                let res = crate::tensor::ops::nonlinearities::recip(
-                    &x,
-                    input_scale.0 as f64,
-                    output_scale.0 as f64,
-                );
-                // if scale is a round number and use_range_check_for_int is true, use range check check
-                if input_scale.0.fract() == 0.0 && *use_range_check_for_int {
-                    let err_tol = Tensor::from(
-                        vec![(output_scale.0 * input_scale.0) as i128 / 2].into_iter(),
-                    );
-                    (res, vec![-err_tol.clone(), err_tol])
-                } else {
-                    (res, vec![x])
-                }
-            }
-            HybridOp::ReduceArgMax { dim } => {
-                let res = tensor::ops::argmax_axes(&x, *dim)?;
-                let indices = Tensor::from(0..x.dims()[*dim] as i128);
-                let mut inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
-                let inter =
-                    Op::f(&HybridOp::ReduceMax { axes: vec![*dim] }, inputs)?.intermediate_lookups;
-                inter_equals.extend(inter);
-
-                (res.clone(), inter_equals)
-            }
-            HybridOp::ReduceArgMin { dim } => {
-                let res = tensor::ops::argmin_axes(&x, *dim)?;
-                let indices = Tensor::from(0..x.dims()[*dim] as i128);
-                let mut inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
-                let inter =
-                    Op::f(&HybridOp::ReduceMin { axes: vec![*dim] }, inputs)?.intermediate_lookups;
-                inter_equals.extend(inter);
-
-                (res.clone(), inter_equals)
-            }
+                ..
+            } => crate::tensor::ops::nonlinearities::recip(
+                &x,
+                input_scale.0 as f64,
+                output_scale.0 as f64,
+            ),
+            HybridOp::ReduceArgMax { dim } => tensor::ops::argmax_axes(&x, *dim)?,
+            HybridOp::ReduceArgMin { dim } => tensor::ops::argmin_axes(&x, *dim)?,
             HybridOp::Gather { dim, constant_idx } => {
                 if let Some(idx) = constant_idx {
                     log::debug!("idx: {}", idx.show());
-                    let res = tensor::ops::gather(&x, idx, *dim)?;
-                    (res.clone(), vec![])
+                    tensor::ops::gather(&x, idx, *dim)?
                 } else {
                     let y = inputs[1].clone().map(|x| felt_to_i128(x));
-                    let indices = Tensor::from(0..x.dims()[*dim] as i128);
-                    let inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
-                    let res = tensor::ops::gather(&x, &y.map(|x| x as usize), *dim)?;
-                    (res.clone(), inter_equals)
+                    tensor::ops::gather(&x, &y.map(|x| x as usize), *dim)?
                 }
             }
             HybridOp::OneHot { dim, num_classes } => {
-                let indices = Tensor::from(0..x.dims()[*dim] as i128);
-                let inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
-                let res = tensor::ops::one_hot(&x, *num_classes, *dim)?;
-                (res.clone(), inter_equals)
+                tensor::ops::one_hot(&x, *num_classes, *dim)?.clone()
             }
-            HybridOp::TopK { dim, k, largest } => {
-                let res = tensor::ops::topk_axes(&x, *k, *dim, *largest)?;
-
-                let mut inter_equals = x
-                    .clone()
-                    .into_iter()
-                    .flat_map(|elem| {
-                        tensor::ops::equals(&res, &vec![elem].into_iter().into())
-                            .unwrap()
-                            .1
-                    })
-                    .collect::<Vec<_>>();
-
-                // sort in descending order and take pairwise differences
-                inter_equals.push(
-                    x.into_iter()
-                        .sorted()
-                        .tuple_windows()
-                        .map(|(a, b)| b - a)
-                        .into(),
-                );
-
-                (res.clone(), inter_equals)
-            }
-            HybridOp::GatherElements { dim, constant_idx } => {
-                if let Some(idx) = constant_idx {
-                    log::debug!("idx: {}", idx.show());
-                    let res = tensor::ops::gather_elements(&x, idx, *dim)?;
-                    (res.clone(), vec![])
-                } else {
-                    let y = inputs[1].clone().map(|x| felt_to_i128(x));
-                    let indices = Tensor::from(0..x.dims()[*dim] as i128);
-                    let inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
-                    let res = tensor::ops::gather_elements(&x, &y.map(|x| x as usize), *dim)?;
-                    (res.clone(), inter_equals)
-                }
-            }
-            HybridOp::ScatterElements { dim, constant_idx } => {
-                if let Some(idx) = constant_idx {
-                    log::debug!("idx: {}", idx.show());
-                    let src = inputs[1].clone().map(|x| felt_to_i128(x));
-                    let res = tensor::ops::scatter(&x, idx, &src, *dim)?;
-                    (res.clone(), vec![])
-                } else {
-                    let idx = inputs[1].clone().map(|x| felt_to_i128(x) as usize);
-                    let src = inputs[2].clone().map(|x| felt_to_i128(x));
-                    let indices = Tensor::from(0..x.dims()[*dim] as i128);
-                    let inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
-                    let res = tensor::ops::scatter(&x, &idx, &src, *dim)?;
-                    (res.clone(), inter_equals)
-                }
-            }
+            HybridOp::TopK { dim, k, largest } => tensor::ops::topk_axes(&x, *k, *dim, *largest)?,
             HybridOp::MaxPool2d {
                 padding,
                 stride,
                 pool_dims,
                 ..
-            } => {
-                let max_minus_one =
-                    Tensor::from(vec![x.clone().into_iter().max().unwrap() - 1].into_iter());
-                let unit = Tensor::from(vec![1].into_iter());
-                // relu(x - max(x - 1)
-                let inter_1 = (x.clone() - max_minus_one)?;
-                // relu(1 - sum(relu(inter_1)))
-                let inter_2 = (unit
-                    - tensor::ops::sum(&tensor::ops::nonlinearities::leakyrelu(&inter_1, 0.0))?)?;
-                (
-                    tensor::ops::max_pool2d(&x, padding, stride, pool_dims)?,
-                    vec![inter_1, inter_2],
-                )
-            }
+            } => tensor::ops::max_pool2d(&x, padding, stride, pool_dims)?,
             HybridOp::SumPool {
                 padding,
                 stride,
@@ -279,10 +135,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
             }
             HybridOp::RangeCheck(tol) => {
                 let y = inputs[1].clone().map(|x| felt_to_i128(x));
-                (
-                    tensor::ops::nonlinearities::range_check_percent(&[x, y], 128, 128, tol.val),
-                    vec![],
-                )
+                tensor::ops::nonlinearities::range_check_percent(&[x, y], 128, 128, tol.val)
             }
             HybridOp::Greater => {
                 let y = inputs[1].clone().map(|x| felt_to_i128(x));
@@ -309,10 +162,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
         // convert back to felt
         let output = res.map(|x| i128_to_felt(x));

-        Ok(ForwardResult {
-            output,
-            intermediate_lookups,
-        })
+        Ok(ForwardResult { output })
     }

     fn as_string(&self) -> String {
@@ -366,8 +216,6 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
             HybridOp::TopK { k, dim, largest } => {
                 format!("TOPK (k={}, dim={}, largest={})", k, dim, largest)
             }
-            HybridOp::GatherElements { dim, .. } => format!("GATHERELEMENTS (dim={})", dim),
-            HybridOp::ScatterElements { dim, .. } => format!("SCATTERELEMENTS (dim={})", dim),
             HybridOp::OneHot { dim, num_classes } => {
                 format!("ONEHOT (dim={}, num_classes={})", dim, num_classes)
             }
@@ -429,7 +277,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
                 ..
             } => {
                 if denom.0.fract() == 0.0 && *use_range_check_for_int {
-                    layouts::div(
+                    layouts::loop_div(
                         config,
                         region,
                         values[..].try_into()?,
@@ -440,9 +288,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
                         config,
                         region,
                         values.try_into()?,
-                        &LookupOp::Div {
-                            denom: denom.clone(),
-                        },
+                        &LookupOp::Div { denom: *denom },
                     )?
                 }
             }
@@ -453,26 +299,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
                     layouts::gather(config, region, values[..].try_into()?, *dim)?
                 }
             }
-            HybridOp::GatherElements { dim, constant_idx } => {
-                if let Some(idx) = constant_idx {
-                    tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into()
-                } else {
-                    layouts::gather_elements(config, region, values[..].try_into()?, *dim)?
-                }
-            }
-            HybridOp::ScatterElements { dim, constant_idx } => {
-                if let Some(idx) = constant_idx {
-                    tensor::ops::scatter(
-                        values[0].get_inner_tensor()?,
-                        idx,
-                        values[1].get_inner_tensor()?,
-                        *dim,
-                    )?
-                    .into()
-                } else {
-                    layouts::scatter_elements(config, region, values[..].try_into()?, *dim)?
-                }
-            }
-
             HybridOp::MaxPool2d {
                 padding,
                 stride,
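After this refactor HybridOp::f returns only the output tensor; the intermediate lookup tensors that Div, Recip, ReduceMax and friends used to emit are gone, since those ops are now constrained with range checks instead. A plain-integer sketch of the two quantized forwards involved, assuming the usual fixed-point convention value = int / scale (the circuit's special-casing of x == 0 via zero_recip is only gestured at here):

```python
def const_div(xs, denom):
    # HybridOp::Div forward: divide each fixed-point integer by a constant.
    return [round(x / denom) for x in xs]

def recip(xs, input_scale, output_scale):
    # HybridOp::Recip forward: 1 / (x / input_scale), re-quantized at output_scale.
    return [round(input_scale * output_scale / x) if x != 0 else 0 for x in xs]

print(const_div([128, 64], 2.0))  # [64, 32]
print(recip([128], 128, 128))     # [128], i.e. 1/1.0 at scale 128
```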
@@ -18,10 +18,7 @@ use super::{
|
||||
region::RegionCtx,
|
||||
};
|
||||
use crate::{
|
||||
circuit::{
|
||||
ops::base::BaseOp,
|
||||
utils::{self},
|
||||
},
|
||||
circuit::{ops::base::BaseOp, utils},
|
||||
fieldutils::{felt_to_i128, i128_to_felt},
|
||||
tensor::{
|
||||
get_broadcasted_shape,
|
||||
@@ -54,6 +51,41 @@ pub fn overflowed_len(starting_idx: usize, mut total_len: usize, column_len: usi
|
||||
total_len
|
||||
}
|
||||
|
||||
/// Same as div but splits the division into N parts
|
||||
pub fn loop_div<F: PrimeField + TensorType + PartialOrd>(
|
||||
config: &BaseConfig<F>,
|
||||
region: &mut RegionCtx<F>,
|
||||
value: &[ValTensor<F>; 1],
|
||||
divisor: F,
|
||||
) -> Result<ValTensor<F>, Box<dyn Error>> {
|
||||
if divisor == F::ONE {
|
||||
return Ok(value[0].clone());
|
||||
}
|
||||
|
||||
// if integer val is divisible by 2, we can use a faster method and div > F::S
|
||||
let mut divisor = divisor;
|
||||
let mut num_parts = 1;
|
||||
|
||||
while felt_to_i128(divisor) % 2 == 0 && felt_to_i128(divisor) > (2_i128.pow(F::S - 4)) {
|
||||
divisor = i128_to_felt(felt_to_i128(divisor) / 2);
|
||||
num_parts += 1;
|
||||
}
|
||||
|
||||
let output = div(config, region, value, divisor)?;
|
||||
if num_parts == 1 {
|
||||
return Ok(output);
|
||||
}
|
||||
|
||||
let divisor_int = 2_i128.pow(num_parts - 1);
|
||||
let divisor_felt = i128_to_felt(divisor_int);
|
||||
if divisor_int <= 2_i128.pow(F::S - 3) {
|
||||
div(config, region, &[output], divisor_felt)
|
||||
} else {
|
||||
// keep splitting the divisor until it satisfies the condition
|
||||
loop_div(config, region, &[output], divisor_felt)
|
||||
}
|
||||
}
|
||||
|
||||
/// Div accumulated layout
|
||||
pub fn div<F: PrimeField + TensorType + PartialOrd>(
|
||||
config: &BaseConfig<F>,
|
||||
@@ -61,6 +93,10 @@ pub fn div<F: PrimeField + TensorType + PartialOrd>(
|
||||
value: &[ValTensor<F>; 1],
|
||||
div: F,
|
||||
) -> Result<ValTensor<F>, Box<dyn Error>> {
|
||||
if div == F::ONE {
|
||||
return Ok(value[0].clone());
|
||||
}
|
||||
|
||||
let input = value[0].clone();
|
||||
let input_dims = input.dims();
|
||||
|
||||
@@ -88,6 +124,8 @@ pub fn div<F: PrimeField + TensorType + PartialOrd>(
|
||||
.into()
|
||||
};
|
||||
claimed_output.reshape(input_dims)?;
|
||||
region.assign(&config.output, &claimed_output)?;
|
||||
region.increment(claimed_output.len());
|
||||
|
||||
let product = pairwise(
|
||||
config,
|
||||
@@ -96,8 +134,6 @@ pub fn div<F: PrimeField + TensorType + PartialOrd>(
|
||||
BaseOp::Mult,
|
||||
)?;
|
||||
|
||||
log::debug!("product: {:?}", product.get_int_evals()?);
|
||||
|
||||
let diff_with_input = pairwise(
|
||||
config,
|
||||
region,
|
||||
@@ -115,6 +151,46 @@ pub fn div<F: PrimeField + TensorType + PartialOrd>(
|
||||
Ok(claimed_output)
|
||||
}
|
||||
|
||||
fn recip_int<F: PrimeField + TensorType + PartialOrd>(
|
||||
config: &BaseConfig<F>,
|
||||
region: &mut RegionCtx<F>,
|
||||
input: &[ValTensor<F>; 1],
|
||||
) -> Result<ValTensor<F>, Box<dyn Error>> {
|
||||
// assert is boolean
|
||||
let zero_inverse_val = tensor::ops::nonlinearities::zero_recip(1.0)[0];
|
||||
// get values where input is 0
|
||||
let zero_mask = equals_zero(config, region, input)?;
|
||||
|
||||
let one_minus_zero_mask = pairwise(
|
||||
config,
|
||||
region,
|
||||
&[
|
||||
zero_mask.clone(),
|
||||
ValTensor::from(Tensor::from([ValType::Constant(F::ONE)].into_iter())),
|
||||
],
|
||||
BaseOp::Sub,
|
||||
)?;
|
||||
|
||||
let zero_inverse_val = pairwise(
|
||||
config,
|
||||
region,
|
||||
&[
|
||||
zero_mask,
|
||||
ValTensor::from(Tensor::from(
|
||||
[ValType::Constant(i128_to_felt(zero_inverse_val))].into_iter(),
|
||||
)),
|
||||
],
|
||||
BaseOp::Mult,
|
||||
)?;
|
||||
|
||||
pairwise(
|
||||
config,
|
||||
region,
|
||||
&[one_minus_zero_mask, zero_inverse_val],
|
||||
BaseOp::Add,
|
||||
)
|
||||
}

/// Recip accumulated layout
pub fn recip<F: PrimeField + TensorType + PartialOrd>(
    config: &BaseConfig<F>,
@@ -123,18 +199,25 @@ pub fn recip<F: PrimeField + TensorType + PartialOrd>(
    input_scale: F,
    output_scale: F,
) -> Result<ValTensor<F>, Box<dyn Error>> {
    if output_scale == F::ONE || output_scale == F::ZERO {
        return recip_int(config, region, value);
    }

    let input = value[0].clone();
    let input_dims = input.dims();

    let range_check_bracket = felt_to_i128(output_scale * input_scale) / 2;
    let integer_input_scale = felt_to_i128(input_scale);
    let integer_output_scale = felt_to_i128(output_scale);

    let mut scaled_unit =
        Tensor::from(vec![ValType::Constant(output_scale * input_scale)].into_iter());
    scaled_unit.set_visibility(&crate::graph::Visibility::Fixed);
    let scaled_unit = region.assign(&config.inputs[1], &scaled_unit.into())?;
    region.increment(scaled_unit.len());
    // range_check_len is the min of the output scale and 2^(F::S - 4)
    let range_check_len = std::cmp::min(integer_output_scale, 2_i128.pow(F::S - 4));

    let is_assigned = !input.any_unknowns()? && !scaled_unit.any_unknowns()?;
    let input_scale_ratio =
        i128_to_felt(integer_input_scale * integer_output_scale / range_check_len);

    let range_check_bracket = range_check_len / 2;

    let is_assigned = !input.any_unknowns()?;

    let mut claimed_output: ValTensor<F> = if is_assigned {
        let input_evals = input.get_int_evals()?;
@@ -155,6 +238,8 @@ pub fn recip<F: PrimeField + TensorType + PartialOrd>(
        .into()
    };
    claimed_output.reshape(input_dims)?;
    let claimed_output = region.assign(&config.output, &claimed_output)?;
    region.increment(claimed_output.len());

    // this is now of scale 2 * scale
    let product = pairwise(
@@ -164,29 +249,47 @@ pub fn recip<F: PrimeField + TensorType + PartialOrd>(
        BaseOp::Mult,
    )?;

    log::debug!("product: {:?}", product.get_int_evals()?);
    // divide by input_scale
    let rebased_div = loop_div(config, region, &[product], input_scale_ratio)?;

    // this is now of scale 2 * scale hence why we rescaled the unit scale
    let diff_with_input = pairwise(
    let zero_inverse_val =
        tensor::ops::nonlinearities::zero_recip(felt_to_i128(output_scale) as f64)[0];
    let zero_inverse =
        Tensor::from([ValType::Constant(i128_to_felt::<F>(zero_inverse_val))].into_iter());

    let equal_zero_mask = equals_zero(config, region, &[input.clone()])?;

    let equal_inverse_mask = equals(
        config,
        region,
        &[product.clone(), scaled_unit.clone()],
        BaseOp::Sub,
        &[claimed_output.clone(), zero_inverse.into()],
    )?;

    log::debug!("scaled_unit: {:?}", scaled_unit.get_int_evals()?);
    // assert the two masks are equal
    enforce_equality(
        config,
        region,
        &[equal_zero_mask.clone(), equal_inverse_mask],
    )?;

    // debug print the diff
    log::debug!("diff_with_input: {:?}", diff_with_input.get_int_evals()?);
    let unit_scale = Tensor::from([ValType::Constant(i128_to_felt(range_check_len))].into_iter());

    log::debug!("range_check_bracket: {:?}", range_check_bracket);
    let unit_mask = pairwise(
        config,
        region,
        &[equal_zero_mask, unit_scale.into()],
        BaseOp::Mult,
    )?;

    // now add the unit mask to the rebased_div
    let rebased_offset_div = pairwise(config, region, &[rebased_div, unit_mask], BaseOp::Add)?;

    // at most the error should be in the original unit scale's range
    range_check(
        config,
        region,
        &[diff_with_input],
        &(-range_check_bracket, range_check_bracket),
        &[rebased_offset_div],
        &(range_check_bracket, 3 * range_check_bracket),
    )?;

    Ok(claimed_output)
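The shape of the argument is claim-and-verify: the prover supplies the reciprocal out-of-circuit, and the circuit only checks that the product with the input reproduces the scaled unit up to a bounded rounding error. A simplified integer model of that check (the bracket and rebasing here are stated assumptions, not the exact in-circuit quantities):

// The prover claims out ≈ (in_scale * out_scale) / x; the verifier checks
// that x * out stays within `bracket` of the scaled unit. Zero inputs take
// the separate masked path shown above. Sketch only.
fn recip_claim_ok(x: i128, claimed: i128, in_scale: i128, out_scale: i128) -> bool {
    let unit = in_scale * out_scale;
    let bracket = unit / 2;
    (x * claimed - unit).abs() <= bracket
}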
@@ -233,7 +336,7 @@ pub fn dot<F: PrimeField + TensorType + PartialOrd>(

    let mut assigned_len = 0;
    for (i, input) in values.iter_mut().enumerate() {
        input.pad_to_zero_rem(block_width)?;
        input.pad_to_zero_rem(block_width, ValType::Constant(F::ZERO))?;
        let inp = {
            let (res, len) = region.assign_with_duplication(
                &config.inputs[i],
@@ -789,14 +892,7 @@ fn one_hot<F: PrimeField + TensorType + PartialOrd>(
    let assigned_input = region.assign(&config.inputs[0], &input)?;

    // now assert all elems are 0 or 1
    let assigned_output = region.assign(&config.inputs[1], &output)?;
    if !region.is_dummy() {
        for i in 0..assigned_output.len() {
            let (x, y, z) = config.output.cartesian_coord(region.linear_coord() + i);
            let selector = config.selectors.get(&(BaseOp::IsBoolean, x, y));
            region.enable(selector, z)?;
        }
    }
    let assigned_output = boolean_identity(config, region, &[output.clone()], true)?;
    region.increment(std::cmp::max(assigned_output.len(), assigned_input.len()));

    let sum = sum(config, region, &[assigned_output.clone()])?;
@@ -819,6 +915,78 @@ fn one_hot<F: PrimeField + TensorType + PartialOrd>(
    Ok(assigned_output)
}

/// Dynamic lookup
pub fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    lookups: &[ValTensor<F>; 2],
    tables: &[ValTensor<F>; 2],
) -> Result<(ValTensor<F>, ValTensor<F>), Box<dyn Error>> {
    // if the lookups are not all the same length, error
    if lookups[0].len() != lookups[1].len() {
        return Err("lookups must be same length".into());
    }

    // if the tables are not all the same length, error
    if tables[0].len() != tables[1].len() {
        return Err("tables must be same length".into());
    }

    // now assert the lookups are no longer than the tables
    if lookups[0].len() > tables[0].len() {
        return Err("lookups must be smaller length than dynamic tables".into());
    }

    let (lookup_0, lookup_1) = (lookups[0].clone(), lookups[1].clone());
    let (table_0, table_1) = (tables[0].clone(), tables[1].clone());

    let table_0 = region.assign_dynamic_lookup(&config.dynamic_lookup_tables[0], &table_0)?;
    let _table_1 = region.assign_dynamic_lookup(&config.dynamic_lookup_tables[1], &table_1)?;
    let table_len = table_0.len();

    let lookup_0 = region.assign(&config.inputs[0], &lookup_0)?;
    let lookup_1 = region.assign(&config.inputs[1], &lookup_1)?;

    let lookup_len = lookup_0.len();

    if !region.is_dummy() {
        (0..table_len)
            .map(|i| {
                let dynamic_lookup_index = region.dynamic_lookup_index();
                let table_selector = config.dynamic_table_selectors[dynamic_lookup_index];
                let (_, _, z) = config.dynamic_lookup_tables[0]
                    .cartesian_coord(region.dynamic_lookup_col_coord() + i);
                region.enable(Some(&table_selector), z)?;
                Ok(())
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
    }

    if !region.is_dummy() {
        // Enable the selectors
        (0..lookup_len)
            .map(|i| {
                let (x, y, z) = config.inputs[0].cartesian_coord(region.linear_coord() + i);
                let dynamic_lookup_index = region.dynamic_lookup_index();
                let lookup_selector = config
                    .dynamic_lookup_selectors
                    .get(&(x, y))
                    .ok_or("missing selectors")?[dynamic_lookup_index];

                region.enable(Some(&lookup_selector), z)?;

                Ok(())
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
    }

    region.increment_dynamic_lookup_col_coord(table_len);
    region.increment_dynamic_lookup_index(1);
    region.increment(lookup_len);

    Ok((lookup_0, lookup_1))
}
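In plain terms, the argument above proves membership: every row of the lookup pair must occur somewhere in the table pair. A direct, non-circuit model of the property being enforced:

// Each (lookup_0[i], lookup_1[i]) row must appear among the
// (table_0[j], table_1[j]) rows; the circuit proves this with selectors
// instead of scanning. Sketch only.
fn dynamic_lookup_holds(lookups: &[(u64, u64)], tables: &[(u64, u64)]) -> bool {
    lookups.iter().all(|row| tables.contains(row))
}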

/// One hot accumulated layout
pub fn one_hot_axis<F: PrimeField + TensorType + PartialOrd>(
    config: &BaseConfig<F>,
@@ -1168,7 +1336,7 @@ pub fn sum<F: PrimeField + TensorType + PartialOrd>(
    let assigned_len: usize;
    let input = {
        let mut input = values[0].clone();
        input.pad_to_zero_rem(block_width)?;
        input.pad_to_zero_rem(block_width, ValType::Constant(F::ZERO))?;
        let (res, len) =
            region.assign_with_duplication(&config.inputs[1], &input, &config.check_mode, false)?;
        assigned_len = len;
@@ -1237,7 +1405,7 @@ pub fn prod<F: PrimeField + TensorType + PartialOrd>(
    let assigned_len: usize;
    let input = {
        let mut input = values[0].clone();
        input.pad_to_zero_rem(block_width)?;
        input.pad_to_zero_rem(block_width, ValType::Constant(F::ONE))?;
        let (res, len) =
            region.assign_with_duplication(&config.inputs[1], &input, &config.check_mode, false)?;
        assigned_len = len;
@@ -1701,10 +1869,42 @@ pub fn equals<F: PrimeField + TensorType + PartialOrd>(
    values: &[ValTensor<F>; 2],
) -> Result<ValTensor<F>, Box<dyn Error>> {
    let diff = pairwise(config, region, values, BaseOp::Sub)?;
    equals_zero(config, region, &[diff])
}

    let res = nonlinearity(config, region, &[diff], &LookupOp::KroneckerDelta)?;
/// Equality boolean operation
pub fn equals_zero<F: PrimeField + TensorType + PartialOrd>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    values: &[ValTensor<F>; 1],
) -> Result<ValTensor<F>, Box<dyn Error>> {
    let values = values[0].clone();
    let values_inverse = values.inverse()?;
    let product_values_and_invert = pairwise(
        config,
        region,
        &[values.clone(), values_inverse],
        BaseOp::Mult,
    )?;

    Ok(res)
    // constant of 1
    let mut ones = Tensor::from(vec![ValType::Constant(F::from(1))].into_iter());
    ones.set_visibility(&crate::graph::Visibility::Fixed);

    // subtract
    let output = pairwise(
        config,
        region,
        &[ones.into(), product_values_and_invert],
        BaseOp::Sub,
    )?;

    // take the product of diff and output
    let prod_check = pairwise(config, region, &[values, output.clone()], BaseOp::Mult)?;

    is_zero_identity(config, region, &[prod_check], false)?;

    Ok(output)
}
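The new `equals_zero` is the classic field-inverse zero test: the prover supplies x⁻¹ (any value when x = 0), the circuit forms z = 1 − x·x⁻¹ and enforces x·z = 0, which forces z = 1 exactly when x = 0. Demonstrated below over a toy prime field; the modulus 97 is an illustrative assumption:

// Zero test via Fermat inversion over Z_97; mirrors the constraint shape
// z = 1 - x * x_inv with the side condition x * z == 0.
const P: u64 = 97;

fn modpow(mut b: u64, mut e: u64) -> u64 {
    let mut acc = 1;
    while e > 0 {
        if e & 1 == 1 {
            acc = acc * b % P;
        }
        b = b * b % P;
        e >>= 1;
    }
    acc
}

fn is_zero_flag(x: u64) -> u64 {
    let x = x % P;
    let x_inv = if x == 0 { 0 } else { modpow(x, P - 2) }; // Fermat inverse
    let z = (1 + P - x * x_inv % P) % P; // z = 1 - x * x_inv (mod P)
    assert_eq!(x * z % P, 0); // the product check the circuit enforces
    z // 1 iff x == 0
}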

/// Xor boolean operation
@@ -1768,21 +1968,7 @@ pub fn iff<F: PrimeField + TensorType + PartialOrd>(
    .into();

    // make sure mask is boolean
    let assigned_mask = region.assign(&config.inputs[1], mask)?;

    // Enable the selectors
    if !region.is_dummy() {
        (0..assigned_mask.len())
            .map(|i| {
                let (x, y, z) = config.inputs[1].cartesian_coord(region.linear_coord() + i);
                let selector = config.selectors.get(&(BaseOp::IsBoolean, x, y));
                region.enable(selector, z)?;
                Ok(())
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
    }

    region.increment(assigned_mask.len());
    let assigned_mask = boolean_identity(config, region, &[mask.clone()], true)?;

    let one_minus_mask = pairwise(config, region, &[unit, assigned_mask.clone()], BaseOp::Sub)?;

@@ -1876,17 +2062,15 @@ pub fn sumpool<F: PrimeField + TensorType + PartialOrd>(
    let shape = &res[0].dims()[2..];
    let mut last_elem = res[1..]
        .iter()
        .fold(Ok(res[0].clone()), |acc, elem| acc?.concat(elem.clone()))?;
        .try_fold(res[0].clone(), |acc, elem| acc.concat(elem.clone()))?;
    last_elem.reshape(&[&[batch_size, image_channels], shape].concat())?;

    if normalized {
        last_elem = nonlinearity(
        last_elem = loop_div(
            config,
            region,
            &[last_elem],
            &LookupOp::Div {
                denom: utils::F32((kernel_shape.0 * kernel_shape.1) as f32),
            },
            F::from((kernel_shape.0 * kernel_shape.1) as u64),
        )?;
    }
    Ok(last_elem)
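Normalization here just turns the sum pool into an average pool: each pooled sum is divided by the kernel area, which `loop_div` now does arithmetically instead of through a Div lookup table. A plain-integer model (the truncating division is a simplifying assumption about rounding):

// avg_pool = sum_pool / (kernel_h * kernel_w), elementwise.
fn normalize_sumpool(sums: &[i64], kernel_h: i64, kernel_w: i64) -> Vec<i64> {
    let area = kernel_h * kernel_w;
    sums.iter().map(|s| s / area).collect()
}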
@@ -2383,18 +2567,60 @@ pub fn identity<F: PrimeField + TensorType + PartialOrd>(
    Ok(output)
}

/// Is-zero identity constraint. Usually used to constrain an instance column to an advice so the returned cells / values can be operated upon.
pub fn is_zero_identity<F: PrimeField + TensorType + PartialOrd>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    values: &[ValTensor<F>; 1],
    assign: bool,
) -> Result<ValTensor<F>, Box<dyn Error>> {
    let output = if assign || !values[0].get_const_indices()?.is_empty() {
        let output = region.assign(&config.output, &values[0])?;
        region.increment(output.len());
        output
    } else {
        values[0].clone()
    };
    // Enable the selectors
    if !region.is_dummy() {
        (0..output.len())
            .map(|j| {
                let index = region.linear_coord() - j - 1;

                let (x, y, z) = config.output.cartesian_coord(index);
                let selector = config.selectors.get(&(BaseOp::IsZero, x, y));

                region.enable(selector, z)?;
                Ok(())
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
    }

    Ok(output)
}

/// Boolean identity constraint. Usually used to constrain an instance column to an advice so the returned cells / values can be operated upon.
pub fn boolean_identity<F: PrimeField + TensorType + PartialOrd>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    values: &[ValTensor<F>; 1],
    assign: bool,
) -> Result<ValTensor<F>, Box<dyn Error>> {
    let output = region.assign(&config.inputs[1], &values[0])?;
    let output = if assign || !values[0].get_const_indices()?.is_empty() {
        // get zero constants indices
        let output = region.assign(&config.output, &values[0])?;
        region.increment(output.len());
        output
    } else {
        values[0].clone()
    };
    // Enable the selectors
    if !region.is_dummy() {
        (0..output.len())
            .map(|j| {
                let (x, y, z) = config.inputs[1].cartesian_coord(region.linear_coord() + j);
                let index = region.linear_coord() - j - 1;

                let (x, y, z) = config.output.cartesian_coord(index);
                let selector = config.selectors.get(&(BaseOp::IsBoolean, x, y));

                region.enable(selector, z)?;
@@ -2402,7 +2628,6 @@ pub fn boolean_identity<F: PrimeField + TensorType + PartialOrd>(
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
    }
    region.increment(output.len());

    Ok(output)
}
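Both identities reduce to single-cell polynomial gates: IsZero enforces y = 0 and IsBoolean enforces y·(y − 1) = 0 on each selected cell. A direct integer model of the boolean gate:

// IsBoolean gate model: every value must satisfy y * (y - 1) == 0,
// i.e. be exactly 0 or 1.
fn assert_boolean(ys: &[i64]) -> Result<(), String> {
    for &y in ys {
        if y * (y - 1) != 0 {
            return Err(format!("{y} is not boolean"));
        }
    }
    Ok(())
}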

@@ -2452,7 +2677,7 @@ pub fn range_check<F: PrimeField + TensorType + PartialOrd>(
    values: &[ValTensor<F>; 1],
    range: &crate::circuit::table::Range,
) -> Result<ValTensor<F>, Box<dyn Error>> {
    region.add_used_range_check(*range);
    region.add_used_range_check(*range)?;

    // time the entire operation
    let timer = instant::Instant::now();
@@ -2465,19 +2690,52 @@ pub fn range_check<F: PrimeField + TensorType + PartialOrd>(

    let is_dummy = region.is_dummy();

    let table_index: ValTensor<F> = w
        .get_inner_tensor()?
        .par_enum_map(|_, e| {
            Ok::<ValType<F>, TensorError>(if let Some(f) = e.get_felt_eval() {
                let col_idx = if !is_dummy {
                    let table = config
                        .range_checks
                        .get(range)
                        .ok_or(TensorError::TableLookupError)?;
                    table.get_col_index(f)
                } else {
                    F::ZERO
                };
                Value::known(col_idx).into()
            } else {
                Value::<F>::unknown().into()
            })
        })?
        .into();

    region.assign(&config.lookup_index, &table_index)?;

    if !is_dummy {
        (0..assigned_len)
            .map(|i| {
                let (x, y, z) = config
                    .lookup_input
                    .cartesian_coord(region.linear_coord() + i);
                let selector = config.range_check_selectors.get(&(range.clone(), x, y));
                let selector = config.range_check_selectors.get(&(*range, x, y));
                region.enable(selector, z)?;
                Ok(())
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
    }

    if region.throw_range_check_error() {
        // assert is within range
        let int_values = w.get_int_evals()?;
        for v in int_values {
            if v < range.0 || v > range.1 {
                log::debug!("Value ({:?}) out of range: {:?}", v, range);
                return Err(Box::new(TensorError::TableLookupError));
            }
        }
    }

    region.increment(assigned_len);

    let elapsed = timer.elapsed();
@@ -2498,7 +2756,7 @@ pub fn nonlinearity<F: PrimeField + TensorType + PartialOrd>(
    values: &[ValTensor<F>; 1],
    nl: &LookupOp,
) -> Result<ValTensor<F>, Box<dyn Error>> {
    region.add_used_lookup(nl.clone());
    region.add_used_lookup(nl.clone(), values)?;

    // time the entire operation
    let timer = instant::Instant::now();
@@ -2581,22 +2839,6 @@ pub fn nonlinearity<F: PrimeField + TensorType + PartialOrd>(
    Ok(output)
}

/// mean function layout
pub fn mean<F: PrimeField + TensorType + PartialOrd>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    values: &[ValTensor<F>; 1],
    scale: usize,
) -> Result<ValTensor<F>, Box<dyn Error>> {
    let x = &values[0];

    let sum_x = sum(config, region, &[x.clone()])?;
    let nl = LookupOp::Div {
        denom: utils::F32((scale * x.len()) as f32),
    };
    nonlinearity(config, region, &[sum_x], &nl)
}

/// Argmax
pub fn argmax<F: PrimeField + TensorType + PartialOrd>(
    config: &BaseConfig<F>,
@@ -2709,24 +2951,8 @@ pub fn max<F: PrimeField + TensorType + PartialOrd>(
    )?;
    // relu(x - max(x - 1))
    let relu = nonlinearity(config, region, &[diff], &LookupOp::ReLU)?;

    let len = relu.dims().iter().product();

    // y_i * (1 - y_i) = 0 // assert the values are either 0 or 1
    region.assign(&config.inputs[1], &relu)?;

    if !region.is_dummy() {
        (0..len)
            .map(|i| {
                let (x, y, z) = config.inputs[1].cartesian_coord(region.linear_coord() + i);
                let selector = config.selectors.get(&(BaseOp::IsBoolean, x, y));
                region.enable(selector, z)?;
                Ok(())
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
    }

    region.increment(len);
    // constraining relu(x - max(x - 1)) = 0/1
    boolean_identity(config, region, &[relu.clone()], false)?;

    // sum(relu(x - max(x - 1)))
    let sum_relu = sum(config, region, &[relu])?;
@@ -2737,13 +2963,7 @@ pub fn max<F: PrimeField + TensorType + PartialOrd>(
    nonlinearity(config, region, &[one_minus_sum_relu], &LookupOp::ReLU)?;

    // constraining 1 - sum(relu(x - max(x - 1))) = 0
    region.assign(&config.inputs[1], &relu_one_minus_sum_relu)?;

    let (x, y, z) = config.output.cartesian_coord(region.linear_coord());
    let selector = config.selectors.get(&(BaseOp::IsZero, x, y));
    region.enable(selector, z)?;

    region.increment(relu_one_minus_sum_relu.len());
    is_zero_identity(config, region, &[relu_one_minus_sum_relu], false)?;

    Ok(assigned_max_val)
}
@@ -2788,23 +3008,8 @@ pub fn min<F: PrimeField + TensorType + PartialOrd>(

    // relu(min(x + 1) - x)
    let relu = nonlinearity(config, region, &[diff], &LookupOp::ReLU)?;

    let len = relu.dims().iter().product();

    region.assign(&config.inputs[1], &relu)?;
    // y_i * (1 - y_i) = 0 // assert the values are either 0 or 1
    if !region.is_dummy() {
        (0..len)
            .map(|i| {
                let (x, y, z) = config.inputs[1].cartesian_coord(region.linear_coord() + i);
                let selector = config.selectors.get(&(BaseOp::IsBoolean, x, y));
                region.enable(selector, z)?;
                Ok(())
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
    }

    region.increment(len);
    // constraining relu(min(x + 1) - x) = 0/1
    boolean_identity(config, region, &[relu.clone()], false)?;

    // sum(relu(min(x + 1) - x))
    let sum_relu = sum(config, region, &[relu])?;
@@ -2815,14 +3020,8 @@ pub fn min<F: PrimeField + TensorType + PartialOrd>(
    let relu_one_minus_sum_relu =
        nonlinearity(config, region, &[one_minus_sum_relu], &LookupOp::ReLU)?;

    region.assign(&config.inputs[1], &relu_one_minus_sum_relu)?;

    // constraining product to 0
    let (x, y, z) = config.output.cartesian_coord(region.linear_coord());
    let selector = config.selectors.get(&(BaseOp::IsZero, x, y));
    region.enable(selector, z)?;

    region.increment(relu_one_minus_sum_relu.len());
    is_zero_identity(config, region, &[relu_one_minus_sum_relu], false)?;

    Ok(assigned_min_val)
}
@@ -2963,16 +3162,8 @@ pub fn softmax<F: PrimeField + TensorType + PartialOrd>(
    let denom = sum(config, region, &[ex.clone()])?;
    // get the inverse

    let inv_denom = nonlinearity(
        config,
        region,
        &[denom],
        // we set to input_scale + output_scale so the output scale is output_scale
        &LookupOp::Recip {
            input_scale: scale,
            output_scale: scale,
        },
    )?;
    let felt_scale = F::from(scale.0 as u64);
    let inv_denom = recip(config, region, &[denom], felt_scale, felt_scale)?;

    // product of num * (1 / denom) = 2*output_scale
    let softmax = pairwise(config, region, &[ex, inv_denom], BaseOp::Mult)?;
@@ -2995,61 +3186,56 @@ pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd>(
        return enforce_equality(config, region, values);
    }

    // Calculate the difference between the expected output and actual output
    let diff = pairwise(config, region, values, BaseOp::Sub)?;
    let mut values = [values[0].clone(), values[1].clone()];

    // Calculate the reciprocal of the expected output tensor, scaling by double the scaling factor
    let recip = nonlinearity(
    values[0] = region.assign(&config.inputs[0], &values[0])?;
    values[1] = region.assign(&config.inputs[1], &values[1])?;
    let total_assigned_0 = values[0].len();
    let total_assigned_1 = values[1].len();
    let total_assigned = std::cmp::max(total_assigned_0, total_assigned_1);
    region.increment(total_assigned);

    // Calculate the difference between the expected output and actual output
    let diff = pairwise(config, region, &values, BaseOp::Sub)?;

    // integer scale
    let int_scale = scale.0 as i128;
    // felt scale
    let felt_scale = i128_to_felt(int_scale);
    // range check length capped at 2^(F::S - 5), and made divisible by 2
    let range_check_bracket = std::cmp::min(
        utils::F32(scale.0),
        utils::F32(2_f32.powf((F::S - 5) as f32)),
    )
    .0;

    let range_check_bracket_int = range_check_bracket as i128;

    // input scale ratio, multiplied by tol, such that in the new scale range_check_bracket represents tol percent
    let input_scale_ratio = ((scale.0.powf(2.0) / range_check_bracket) * tol) as i128 / 2 * 2;

    let recip = recip(
        config,
        region,
        &[values[0].clone()],
        &LookupOp::Recip {
            input_scale: scale,
            output_scale: scale,
        },
        felt_scale,
        felt_scale * F::from(100),
    )?;

    log::debug!("recip: {}", recip.show());

    // Multiply the difference by the recip
    let product = pairwise(config, region, &[diff, recip], BaseOp::Mult)?;

    let scale_squared = scale.0 * scale.0;
    log::debug!("product: {}", product.show());
    let rebased_product = loop_div(config, region, &[product], i128_to_felt(input_scale_ratio))?;
    log::debug!("rebased_product: {}", rebased_product.show());

    // Use the greater-than lookup table to check if the percent error is within the tolerance for the upper bound
    let tol = tol / 100.0;
    let upper_bound = nonlinearity(
    // check that it is within the tolerance range
    range_check(
        config,
        region,
        &[product.clone()],
        &LookupOp::GreaterThan {
            a: utils::F32(tol * scale_squared),
        },
    )?;

    // Negate the product
    let neg_product = neg(config, region, &[product])?;

    // Use the greater-than lookup table to check if the percent error is within the tolerance for the lower bound
    let lower_bound = nonlinearity(
        config,
        region,
        &[neg_product],
        &LookupOp::GreaterThan {
            a: utils::F32(tol * scale_squared),
        },
    )?;

    // Add the lower_bound and upper_bound
    let sum = pairwise(config, region, &[lower_bound, upper_bound], BaseOp::Add)?;

    // Assign the sum tensor to the inputs
    region.assign(&config.inputs[1], &sum)?;

    // Constrain the sum to be all zeros
    let (x, y, z) = config.output.cartesian_coord(region.linear_coord());
    let selector = config.selectors.get(&(BaseOp::IsZero, x, y));
    region.enable(selector, z)?;

    region.increment(sum.len());

    Ok(sum)
        &[rebased_product],
        &(-range_check_bracket_int, range_check_bracket_int),
    )
}
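The new tolerance check replaces the two GreaterThan lookups with a single range check on a rebased product. As a sanity model (the concrete numbers are illustrative assumptions, not values from the commit), the quantity being bounded is simply the relative error, scaled so that the bracket corresponds to `tol` percent:

// Float model of the property range_check_percent now enforces:
// |actual - expected| / |expected| <= tol percent. In-circuit this is
// diff * recip(expected) rebased by input_scale_ratio, then range-checked.
fn within_percent_tolerance(expected: f64, actual: f64, tol_percent: f64) -> bool {
    let percent_err = 100.0 * (actual - expected).abs() / expected.abs();
    percent_err <= tol_percent
}

#[test]
fn tolerance_example() {
    // 101 vs 100 is a 1% error, right at a 1.0% tolerance boundary
    assert!(within_percent_tolerance(100.0, 101.0, 1.0));
    assert!(!within_percent_tolerance(100.0, 103.0, 1.0));
}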

@@ -227,10 +227,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {

        let output = res.map(|x| i128_to_felt(x));

        Ok(ForwardResult {
            output,
            intermediate_lookups: vec![],
        })
        Ok(ForwardResult { output })
    }

    /// Returns the name of the operation
@@ -246,10 +243,10 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
            LookupOp::Max { scale, a } => format!("MAX(scale={}, a={})", scale, a),
            LookupOp::Min { scale, a } => format!("MIN(scale={}, a={})", scale, a),
            LookupOp::Sign => "SIGN".into(),
            LookupOp::GreaterThan { .. } => "GREATER_THAN".into(),
            LookupOp::GreaterThanEqual { .. } => "GREATER_THAN_EQUAL".into(),
            LookupOp::LessThan { .. } => "LESS_THAN".into(),
            LookupOp::LessThanEqual { .. } => "LESS_THAN_EQUAL".into(),
            LookupOp::GreaterThan { a } => format!("GREATER_THAN(a={})", a),
            LookupOp::GreaterThanEqual { a } => format!("GREATER_THAN_EQUAL(a={})", a),
            LookupOp::LessThan { a } => format!("LESS_THAN(a={})", a),
            LookupOp::LessThanEqual { a } => format!("LESS_THAN_EQUAL(a={})", a),
            LookupOp::Recip {
                input_scale,
                output_scale,

@@ -29,7 +29,6 @@ pub mod region;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd> {
    pub(crate) output: Tensor<F>,
    pub(crate) intermediate_lookups: Vec<Tensor<i128>>,
}

/// A trait representing operations that can be represented as constraints in a circuit.
@@ -178,7 +177,6 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for Input {
    fn f(&self, x: &[Tensor<F>]) -> Result<ForwardResult<F>, TensorError> {
        Ok(ForwardResult {
            output: x[0].clone(),
            intermediate_lookups: vec![],
        })
    }

@@ -201,6 +199,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for Input {
                config,
                region,
                values[..].try_into()?,
                true,
            )?))
        }
        _ => Ok(Some(super::layouts::identity(
@@ -303,10 +302,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
    fn f(&self, _: &[Tensor<F>]) -> Result<ForwardResult<F>, TensorError> {
        let output = self.quantized_values.clone();

        Ok(ForwardResult {
            output,
            intermediate_lookups: vec![],
        })
        Ok(ForwardResult { output })
    }

    fn as_string(&self) -> String {

@@ -1,5 +1,6 @@
use crate::{
    circuit::layouts,
    fieldutils::felt_to_i128,
    tensor::{self, Tensor, TensorError},
};

@@ -9,6 +10,14 @@ use super::{base::BaseOp, *};
/// An enum representing the operations that can be expressed as arithmetic (non lookup) operations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum PolyOp {
    GatherElements {
        dim: usize,
        constant_idx: Option<Tensor<usize>>,
    },
    ScatterElements {
        dim: usize,
        constant_idx: Option<Tensor<usize>>,
    },
    MultiBroadcastTo {
        shape: Vec<usize>,
    },
@@ -81,6 +90,8 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<

    fn as_string(&self) -> String {
        match &self {
            PolyOp::GatherElements { dim, .. } => format!("GATHERELEMENTS (dim={})", dim),
            PolyOp::ScatterElements { dim, .. } => format!("SCATTERELEMENTS (dim={})", dim),
            PolyOp::MultiBroadcastTo { shape } => format!("MULTIBROADCASTTO (shape={:?})", shape),
            PolyOp::MoveAxis { .. } => "MOVEAXIS".into(),
            PolyOp::Downsample { .. } => "DOWNSAMPLE".into(),
@@ -203,14 +214,36 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
            if 1 != inputs.len() {
                return Err(TensorError::DimMismatch("slice inputs".to_string()));
            }
            Ok(tensor::ops::slice(&inputs[0], axis, start, end)?)
            tensor::ops::slice(&inputs[0], axis, start, end)
        }
        PolyOp::GatherElements { dim, constant_idx } => {
            let x = inputs[0].clone();
            let y = if let Some(idx) = constant_idx {
                idx.clone()
            } else {
                inputs[1].clone().map(|x| felt_to_i128(x) as usize)
            };
            tensor::ops::gather_elements(&x, &y, *dim)
        }
        PolyOp::ScatterElements { dim, constant_idx } => {
            let x = inputs[0].clone();

            let idx = if let Some(idx) = constant_idx {
                idx.clone()
            } else {
                inputs[1].clone().map(|x| felt_to_i128(x) as usize)
            };

            let src = if constant_idx.is_some() {
                inputs[1].clone()
            } else {
                inputs[2].clone()
            };
            tensor::ops::scatter(&x, &idx, &src, *dim)
        }
    }?;

    Ok(ForwardResult {
        output: res,
        intermediate_lookups: vec![],
    })
    Ok(ForwardResult { output: res })
}

fn layout(
@@ -251,6 +284,26 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
    PolyOp::Conv { padding, stride } => {
        layouts::conv(config, region, values[..].try_into()?, *padding, *stride)?
    }
    PolyOp::GatherElements { dim, constant_idx } => {
        if let Some(idx) = constant_idx {
            tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into()
        } else {
            layouts::gather_elements(config, region, values[..].try_into()?, *dim)?
        }
    }
    PolyOp::ScatterElements { dim, constant_idx } => {
        if let Some(idx) = constant_idx {
            tensor::ops::scatter(
                values[0].get_inner_tensor()?,
                idx,
                values[1].get_inner_tensor()?,
                *dim,
            )?
            .into()
        } else {
            layouts::scatter_elements(config, region, values[..].try_into()?, *dim)?
        }
    }
    PolyOp::DeConv {
        padding,
        output_padding,
@@ -352,6 +405,8 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
        vec![1, 2]
    } else if matches!(self, PolyOp::Concat { .. }) {
        (0..100).collect()
    } else if matches!(self, PolyOp::ScatterElements { .. }) {
        vec![0, 2]
    } else {
        vec![]
    }
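The two new ops follow the usual gather/scatter semantics. For reference, their one-dimensional forms in plain Rust (the tensor versions apply these per-lane along `dim`):

// gather_elements: out[i] = x[idx[i]].
fn gather_elements_1d<T: Copy>(x: &[T], idx: &[usize]) -> Vec<T> {
    idx.iter().map(|&i| x[i]).collect()
}

// scatter: start from x and overwrite out[idx[i]] = src[i].
fn scatter_1d<T: Copy>(x: &[T], idx: &[usize], src: &[T]) -> Vec<T> {
    let mut out = x.to_vec();
    for (&i, &s) in idx.iter().zip(src) {
        out[i] = s;
    }
    out
}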

@@ -16,8 +16,37 @@ use std::{
    },
};

use portable_atomic::AtomicI128 as AtomicInt;

use super::lookup::LookupOp;

/// Dynamic lookup index
#[derive(Clone, Debug, Default)]
pub struct DynamicLookupIndex {
    lookup_index: usize,
    col_coord: usize,
}

impl DynamicLookupIndex {
    /// Create a new dynamic lookup index
    pub fn new(lookup_index: usize, col_coord: usize) -> DynamicLookupIndex {
        DynamicLookupIndex {
            lookup_index,
            col_coord,
        }
    }

    /// Get the lookup index
    pub fn lookup_index(&self) -> usize {
        self.lookup_index
    }

    /// Get the column coord
    pub fn col_coord(&self) -> usize {
        self.col_coord
    }
}

/// Region error
#[derive(Debug, thiserror::Error)]
pub enum RegionError {
@@ -64,8 +93,13 @@ pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd> {
    linear_coord: usize,
    num_inner_cols: usize,
    total_constants: usize,
    dynamic_lookup_index: DynamicLookupIndex,
    used_lookups: HashSet<LookupOp>,
    used_range_checks: HashSet<Range>,
    max_lookup_inputs: i128,
    min_lookup_inputs: i128,
    max_range_size: i128,
    throw_range_check_error: bool,
}

impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
@@ -74,6 +108,21 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
        self.total_constants += n;
    }

    /// Increment the dynamic lookup index
    pub fn increment_dynamic_lookup_index(&mut self, n: usize) {
        self.dynamic_lookup_index.lookup_index += n;
    }

    /// Increment the dynamic lookup column coordinate
    pub fn increment_dynamic_lookup_col_coord(&mut self, n: usize) {
        self.dynamic_lookup_index.col_coord += n;
    }

    /// Whether to raise an error when a witness value falls outside a range check
    pub fn throw_range_check_error(&self) -> bool {
        self.throw_range_check_error
    }

    /// Create a new region context
    pub fn new(region: Region<'a, F>, row: usize, num_inner_cols: usize) -> RegionCtx<'a, F> {
        let region = Some(RefCell::new(region));
@@ -85,8 +134,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
            row,
            linear_coord,
            total_constants: 0,
            dynamic_lookup_index: DynamicLookupIndex::default(),
            used_lookups: HashSet::new(),
            used_range_checks: HashSet::new(),
            max_lookup_inputs: 0,
            min_lookup_inputs: 0,
            max_range_size: 0,
            throw_range_check_error: false,
        }
    }
    /// Create a new region context from a wrapped region
@@ -94,6 +148,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
        region: Option<RefCell<Region<'a, F>>>,
        row: usize,
        num_inner_cols: usize,
        dynamic_lookup_index: DynamicLookupIndex,
    ) -> RegionCtx<'a, F> {
        let linear_coord = row * num_inner_cols;
        RegionCtx {
@@ -102,13 +157,22 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
            linear_coord,
            row,
            total_constants: 0,
            dynamic_lookup_index,
            used_lookups: HashSet::new(),
            used_range_checks: HashSet::new(),
            max_lookup_inputs: 0,
            min_lookup_inputs: 0,
            max_range_size: 0,
            throw_range_check_error: false,
        }
    }

    /// Create a new region context
    pub fn new_dummy(row: usize, num_inner_cols: usize) -> RegionCtx<'a, F> {
    pub fn new_dummy(
        row: usize,
        num_inner_cols: usize,
        throw_range_check_error: bool,
    ) -> RegionCtx<'a, F> {
        let region = None;
        let linear_coord = row * num_inner_cols;

@@ -118,8 +182,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
            linear_coord,
            row,
            total_constants: 0,
            dynamic_lookup_index: DynamicLookupIndex::default(),
            used_lookups: HashSet::new(),
            used_range_checks: HashSet::new(),
            max_lookup_inputs: 0,
            min_lookup_inputs: 0,
            max_range_size: 0,
            throw_range_check_error,
        }
    }

@@ -129,8 +198,10 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
        linear_coord: usize,
        total_constants: usize,
        num_inner_cols: usize,
        dynamic_lookup_index: DynamicLookupIndex,
        used_lookups: HashSet<LookupOp>,
        used_range_checks: HashSet<Range>,
        throw_range_check_error: bool,
    ) -> RegionCtx<'a, F> {
        let region = None;
        RegionCtx {
@@ -139,8 +210,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
            linear_coord,
            row,
            total_constants,
            dynamic_lookup_index,
            used_lookups,
            used_range_checks,
            max_lookup_inputs: 0,
            min_lookup_inputs: 0,
            max_range_size: 0,
            throw_range_check_error,
        }
    }

@@ -180,6 +256,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {

    /// Create a new region context per loop iteration
    /// hacky but it works

    pub fn dummy_loop<T: TensorType + Send + Sync>(
        &mut self,
        output: &mut Tensor<T>,
@@ -190,8 +267,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
        let row = AtomicUsize::new(self.row());
        let linear_coord = AtomicUsize::new(self.linear_coord());
        let constants = AtomicUsize::new(self.total_constants());
        let max_lookup_inputs = AtomicInt::new(self.max_lookup_inputs());
        let min_lookup_inputs = AtomicInt::new(self.min_lookup_inputs());
        let lookups = Arc::new(Mutex::new(self.used_lookups.clone()));
        let range_checks = Arc::new(Mutex::new(self.used_range_checks.clone()));
        let dynamic_lookup_index = Arc::new(Mutex::new(self.dynamic_lookup_index.clone()));

        *output = output
            .par_enum_map(|idx, _| {
@@ -207,8 +287,10 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
                    starting_linear_coord,
                    starting_constants,
                    self.num_inner_cols,
                    DynamicLookupIndex::default(),
                    HashSet::new(),
                    HashSet::new(),
                    self.throw_range_check_error,
                );
                let res = inner_loop_function(idx, &mut local_reg);
                // we update the offset and constants
@@ -221,11 +303,18 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
                    local_reg.total_constants() - starting_constants,
                    Ordering::SeqCst,
                );

                max_lookup_inputs.fetch_max(local_reg.max_lookup_inputs(), Ordering::SeqCst);
                min_lookup_inputs.fetch_min(local_reg.min_lookup_inputs(), Ordering::SeqCst);
                // update the lookups
                let mut lookups = lookups.lock().unwrap();
                lookups.extend(local_reg.used_lookups());
                let mut range_checks = range_checks.lock().unwrap();
                range_checks.extend(local_reg.used_range_checks());
                let mut dynamic_lookup_index = dynamic_lookup_index.lock().unwrap();
                dynamic_lookup_index.lookup_index += local_reg.dynamic_lookup_index.lookup_index;
                dynamic_lookup_index.col_coord += local_reg.dynamic_lookup_index.col_coord;

                res
            })
            .map_err(|e| {
@@ -234,6 +323,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
            })?;
        self.total_constants = constants.into_inner();
        self.linear_coord = linear_coord.into_inner();
        #[allow(trivial_numeric_casts)]
        {
            self.max_lookup_inputs = max_lookup_inputs.into_inner();
            self.min_lookup_inputs = min_lookup_inputs.into_inner();
        }
        self.row = row.into_inner();
        self.used_lookups = Arc::try_unwrap(lookups)
            .map_err(|e| RegionError::from(format!("dummy_loop: failed to get lookups: {:?}", e)))?
@@ -249,23 +343,73 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
            .map_err(|e| {
                RegionError::from(format!("dummy_loop: failed to get range checks: {:?}", e))
            })?;
        self.dynamic_lookup_index = Arc::try_unwrap(dynamic_lookup_index)
            .map_err(|e| {
                RegionError::from(format!(
                    "dummy_loop: failed to get dynamic lookup index: {:?}",
                    e
                ))
            })?
            .into_inner()
            .map_err(|e| {
                RegionError::from(format!(
                    "dummy_loop: failed to get dynamic lookup index: {:?}",
                    e
                ))
            })?;

        Ok(())
    }
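The bookkeeping in `dummy_loop` folds per-iteration extrema into shared atomics so the parallel dry run can still size lookup tables afterwards. A reduced, self-contained sketch of that pattern; the sharding and initial values here are illustrative:

// Parallel workers fold per-shard extrema into shared 128-bit atomics
// from the portable_atomic crate (the same type the diff aliases as AtomicInt).
use portable_atomic::AtomicI128;
use std::sync::atomic::Ordering;

fn fold_extrema(shards: &[Vec<i128>]) -> (i128, i128) {
    let max = AtomicI128::new(i128::MIN);
    let min = AtomicI128::new(i128::MAX);
    std::thread::scope(|s| {
        for shard in shards {
            let (max, min) = (&max, &min);
            s.spawn(move || {
                for &v in shard {
                    max.fetch_max(v, Ordering::SeqCst);
                    min.fetch_min(v, Ordering::SeqCst);
                }
            });
        }
    });
    (min.into_inner(), max.into_inner())
}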

    /// Update the max and min from inputs
    pub fn update_max_min_lookup_inputs(
        &mut self,
        inputs: &[ValTensor<F>],
    ) -> Result<(), Box<dyn std::error::Error>> {
        let (mut min, mut max) = (0, 0);
        for i in inputs {
            max = max.max(i.get_int_evals()?.into_iter().max().unwrap_or_default());
            min = min.min(i.get_int_evals()?.into_iter().min().unwrap_or_default());
        }
        self.max_lookup_inputs = self.max_lookup_inputs.max(max);
        self.min_lookup_inputs = self.min_lookup_inputs.min(min);
        Ok(())
    }

    /// Update the max range size from a range
    pub fn update_max_min_lookup_range(
        &mut self,
        range: Range,
    ) -> Result<(), Box<dyn std::error::Error>> {
        if range.0 > range.1 {
            return Err("update_max_min_lookup_range: invalid range".into());
        }

        let range_size = (range.1 - range.0).abs();

        self.max_range_size = self.max_range_size.max(range_size);
        Ok(())
    }

    /// Check if the region is dummy
    pub fn is_dummy(&self) -> bool {
        self.region.is_none()
    }

    /// add used lookup
    pub fn add_used_lookup(&mut self, lookup: LookupOp) {
    pub fn add_used_lookup(
        &mut self,
        lookup: LookupOp,
        inputs: &[ValTensor<F>],
    ) -> Result<(), Box<dyn std::error::Error>> {
        self.used_lookups.insert(lookup);
        self.update_max_min_lookup_inputs(inputs)
    }

    /// add used range check
    pub fn add_used_range_check(&mut self, range: Range) {
    pub fn add_used_range_check(&mut self, range: Range) -> Result<(), Box<dyn std::error::Error>> {
        self.used_range_checks.insert(range);
        self.update_max_min_lookup_range(range)
    }

    /// Get the offset
@@ -283,6 +427,16 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
        self.total_constants
    }

    /// Get the dynamic lookup index
    pub fn dynamic_lookup_index(&self) -> usize {
        self.dynamic_lookup_index.lookup_index
    }

    /// Get the dynamic lookup column coordinate
    pub fn dynamic_lookup_col_coord(&self) -> usize {
        self.dynamic_lookup_index.col_coord
    }

    /// get used lookups
    pub fn used_lookups(&self) -> HashSet<LookupOp> {
        self.used_lookups.clone()
@@ -293,6 +447,21 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
        self.used_range_checks.clone()
    }

    /// max lookup inputs
    pub fn max_lookup_inputs(&self) -> i128 {
        self.max_lookup_inputs
    }

    /// min lookup inputs
    pub fn min_lookup_inputs(&self) -> i128 {
        self.min_lookup_inputs
    }

    /// max range check
    pub fn max_range_size(&self) -> i128 {
        self.max_range_size
    }

    /// Assign a constant value
    pub fn assign_constant(&mut self, var: &VarTensor, value: F) -> Result<ValType<F>, Error> {
        self.total_constants += 1;
@@ -317,6 +486,24 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
        }
    }

    /// Assign a valtensor to a vartensor at the dynamic lookup column offset
    pub fn assign_dynamic_lookup(
        &mut self,
        var: &VarTensor,
        values: &ValTensor<F>,
    ) -> Result<ValTensor<F>, Error> {
        self.total_constants += values.num_constants();
        if let Some(region) = &self.region {
            var.assign(
                &mut region.borrow_mut(),
                self.dynamic_lookup_col_coord(),
                values,
            )
        } else {
            Ok(values.clone())
        }
    }

    /// Assign a valtensor to a vartensor
    pub fn assign_with_omissions(
        &mut self,

@@ -130,14 +130,12 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
    pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
        2usize.pow(bits as u32) - reserved_blinding_rows
    }
}

    ///
    pub fn num_cols_required(range: Range, col_size: usize) -> usize {
        // double it to be safe
        let range_len = range.1 - range.0;
        // number of cols needed to store the range
        (range_len / (col_size as i128)) as usize + 1
    }
/// Number of columns required to store a range of the given length
pub fn num_cols_required(range_len: i128, col_size: usize) -> usize {
    // number of cols needed to store the range
    (range_len / (col_size as i128)) as usize + 1
}
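A quick worked instance of the column count, calling the helper defined above (the concrete sizes are illustrative):

#[test]
fn num_cols_required_example() {
    // a range of length 2^16 over columns of size 2^14 needs
    // 65536 / 16384 + 1 = 5 columns
    assert_eq!(num_cols_required(1 << 16, 1 << 14), 5);
}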

impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
@@ -152,7 +150,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
    let factors = cs.blinding_factors() + RESERVED_BLINDING_ROWS_PAD;
    let col_size = Self::cal_col_size(logrows, factors);
    // number of cols needed to store the range
    let num_cols = Self::num_cols_required(range, col_size);
    let num_cols = num_cols_required((range.1 - range.0).abs(), col_size);

    log::debug!("table range: {:?}", range);

@@ -265,7 +263,9 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
#[derive(Clone, Debug)]
pub struct RangeCheck<F: PrimeField> {
    /// Input to table.
    pub input: TableColumn,
    pub inputs: Vec<TableColumn>,
    /// col size
    pub col_size: usize,
    /// selector constructor
    pub selector_constructor: SelectorConstructor<F>,
    /// Flags if table has been previously assigned to.
@@ -277,8 +277,10 @@ pub struct RangeCheck<F: PrimeField> {

impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
    /// get first_element of column
    pub fn get_first_element(&self) -> F {
        i128_to_felt(self.range.0)
    pub fn get_first_element(&self, chunk: usize) -> F {
        let chunk = chunk as i128;
        // we index from 1 to prevent soundness issues
        i128_to_felt(chunk * (self.col_size as i128) + self.range.0)
    }

    ///
@@ -290,24 +292,58 @@ impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
    pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
        2usize.pow(bits as u32) - reserved_blinding_rows
    }

    /// get column index given input
    pub fn get_col_index(&self, input: F) -> F {
        // range is split up into chunks of size col_size; find the chunk that input is in
        let chunk =
            (crate::fieldutils::felt_to_i128(input) - self.range.0).abs() / (self.col_size as i128);

        i128_to_felt(chunk)
    }
}
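Concretely, the chunk arithmetic in `get_col_index` maps a value to the column holding its slice of the range. A worked model with illustrative numbers:

// With range = (-100, 100) and col_size = 64, the input 31 sits at offset
// 31 - (-100) = 131, and 131 / 64 = 2, so it lands in column 2.
fn col_index(input: i128, range_min: i128, col_size: i128) -> i128 {
    (input - range_min).abs() / col_size
}

#[test]
fn col_index_example() {
    assert_eq!(col_index(31, -100, 64), 2);
}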

impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
    /// Configures the table.
    pub fn configure(cs: &mut ConstraintSystem<F>, range: Range) -> RangeCheck<F> {
    pub fn configure(cs: &mut ConstraintSystem<F>, range: Range, logrows: usize) -> RangeCheck<F> {
        log::debug!("range check range: {:?}", range);

        let inputs = cs.lookup_table_column();
        let factors = cs.blinding_factors() + RESERVED_BLINDING_ROWS_PAD;
        let col_size = Self::cal_col_size(logrows, factors);
        // number of cols needed to store the range
        let num_cols = num_cols_required((range.1 - range.0).abs(), col_size);

        let inputs = {
            let mut cols = vec![];
            for _ in 0..num_cols {
                cols.push(cs.lookup_table_column());
            }
            cols
        };

        let num_cols = inputs.len();

        if num_cols > 1 {
            warn!("Using {} columns for range-check.", num_cols);
        }

        RangeCheck {
            input: inputs,
            inputs,
            col_size,
            is_assigned: false,
            selector_constructor: SelectorConstructor::new(2),
            selector_constructor: SelectorConstructor::new(num_cols),
            range,
            _marker: PhantomData,
        }
    }

    /// Take a linear coordinate and output the (column, row) position in the storage block.
    pub fn cartesian_coord(&self, linear_coord: usize) -> (usize, usize) {
        let x = linear_coord / self.col_size;
        let y = linear_coord % self.col_size;
        (x, y)
    }

    /// Assigns values to the constraints generated when calling `configure`.
    pub fn layout(&mut self, layouter: &mut impl Layouter<F>) -> Result<(), Box<dyn Error>> {
        if self.is_assigned {
@@ -318,28 +354,43 @@ impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
        let largest = self.range.1;

        let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i128_to_felt(x));
        let chunked_inputs = inputs.chunks(self.col_size);

        self.is_assigned = true;

        layouter.assign_table(
            || "range check table",
            |mut table| {
                let _ = inputs
                    .iter()
                    .enumerate()
                    .map(|(row_offset, input)| {
                        table.assign_cell(
                            || format!("rc_i_col row {}", row_offset),
                            self.input,
                            row_offset,
                            || Value::known(*input),
                        )?;
        let col_multipliers: Vec<F> = (0..chunked_inputs.len())
            .map(|x| self.selector_constructor.get_selector_val_at_idx(x))
            .collect();

        let _ = chunked_inputs
            .enumerate()
            .map(|(chunk_idx, inputs)| {
                layouter.assign_table(
                    || "range check table",
                    |mut table| {
                        let _ = inputs
                            .iter()
                            .enumerate()
                            .map(|(mut row_offset, input)| {
                                let col_multiplier = col_multipliers[chunk_idx];

                                row_offset += chunk_idx * self.col_size;
                                let (x, y) = self.cartesian_coord(row_offset);
                                table.assign_cell(
                                    || format!("rc_i_col row {}", row_offset),
                                    self.inputs[x],
                                    y,
                                    || Value::known(*input * col_multiplier),
                                )?;

                                Ok(())
                            })
                            .collect::<Result<Vec<()>, halo2_proofs::plonk::Error>>()?;
                        Ok(())
                    })
                    .collect::<Result<Vec<()>, halo2_proofs::plonk::Error>>()?;
                Ok(())
            },
        )?;
                    },
                )
            })
            .collect::<Result<Vec<()>, halo2_proofs::plonk::Error>>()?;
        Ok(())
    }
}
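For intuition, the chunked layout walks the inclusive range and places entry i at column i / col_size, row i % col_size, mirroring `cartesian_coord` above. A minimal model, with the per-column multipliers omitted:

// Returns (column, row, value) placements for a range table split
// across columns of col_size rows each. Sketch only.
fn chunked_layout(range: (i64, i64), col_size: usize) -> Vec<(usize, usize, i64)> {
    (range.0..=range.1)
        .enumerate()
        .map(|(i, v)| (i / col_size, i % col_size, v))
        .collect()
}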
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use crate::circuit::ops::hybrid::HybridOp;
|
||||
use crate::circuit::ops::poly::PolyOp;
|
||||
use crate::circuit::*;
|
||||
use crate::tensor::{Tensor, TensorType, ValTensor, VarTensor};
|
||||
@@ -246,7 +245,7 @@ mod matmul_col_overflow {
|
||||
#[cfg(test)]
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
mod matmul_col_ultra_overflow_double_col {
|
||||
use halo2_proofs::poly::commitment::ParamsProver;
|
||||
use halo2_proofs::poly::commitment::{Params, ParamsProver};
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -349,8 +348,13 @@ mod matmul_col_ultra_overflow_double_col {
|
||||
let strategy =
|
||||
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
|
||||
let vk = pk.get_vk();
|
||||
let result =
|
||||
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
|
||||
let result = crate::pfsys::verify_proof_circuit_kzg(
|
||||
params.verifier_params(),
|
||||
proof,
|
||||
vk,
|
||||
strategy,
|
||||
params.n(),
|
||||
);
|
||||
|
||||
assert!(result.is_ok());
|
||||
|
||||
@@ -361,7 +365,7 @@ mod matmul_col_ultra_overflow_double_col {
|
||||
#[cfg(test)]
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
mod matmul_col_ultra_overflow {
|
||||
use halo2_proofs::poly::commitment::ParamsProver;
|
||||
use halo2_proofs::poly::commitment::{Params, ParamsProver};
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -463,8 +467,13 @@ mod matmul_col_ultra_overflow {
|
||||
let strategy =
|
||||
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
|
||||
let vk = pk.get_vk();
|
||||
let result =
|
||||
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
|
||||
let result = crate::pfsys::verify_proof_circuit_kzg(
|
||||
params.verifier_params(),
|
||||
proof,
|
||||
vk,
|
||||
strategy,
|
||||
params.n(),
|
||||
);
|
||||
|
||||
assert!(result.is_ok());
|
||||
|
||||
@@ -1140,7 +1149,7 @@ mod conv {
|
||||
#[cfg(test)]
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
mod conv_col_ultra_overflow {
|
||||
use halo2_proofs::poly::commitment::ParamsProver;
|
||||
use halo2_proofs::poly::commitment::{Params, ParamsProver};
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -1262,8 +1271,13 @@ mod conv_col_ultra_overflow {
|
||||
let strategy =
|
||||
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
|
||||
let vk = pk.get_vk();
|
||||
let result =
|
||||
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
|
||||
let result = crate::pfsys::verify_proof_circuit_kzg(
|
||||
params.verifier_params(),
|
||||
proof,
|
||||
vk,
|
||||
strategy,
|
||||
params.n(),
|
||||
);
|
||||
|
||||
assert!(result.is_ok());
|
||||
|
||||
@@ -1275,7 +1289,7 @@ mod conv_col_ultra_overflow {
|
||||
// not wasm 32 unknown
|
||||
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
|
||||
mod conv_relu_col_ultra_overflow {
|
||||
use halo2_proofs::poly::commitment::ParamsProver;
|
||||
use halo2_proofs::poly::commitment::{Params, ParamsProver};
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -1412,8 +1426,13 @@ mod conv_relu_col_ultra_overflow {
|
||||
let strategy =
|
||||
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
|
||||
let vk = pk.get_vk();
|
||||
let result =
|
||||
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
|
||||
let result = crate::pfsys::verify_proof_circuit_kzg(
|
||||
params.verifier_params(),
|
||||
proof,
|
||||
vk,
|
||||
strategy,
|
||||
params.n(),
|
||||
);
|
||||
|
||||
assert!(result.is_ok());
|
||||
|
||||
@@ -1555,6 +1574,142 @@ mod add {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod dynamic_lookup {
use super::*;

const K: usize = 6;
const LEN: usize = 4;
const NUM_LOOP: usize = 5;

#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
tables: [[ValTensor<F>; 2]; NUM_LOOP],
lookups: [[ValTensor<F>; 2]; NUM_LOOP],
_marker: PhantomData<F>,
}

impl Circuit<F> for MyCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;

fn without_witnesses(&self) -> Self {
self.clone()
}

fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let a = VarTensor::new_advice(cs, K, 2, LEN);
let b = VarTensor::new_advice(cs, K, 2, LEN);
let c: VarTensor = VarTensor::new_advice(cs, K, 2, LEN);

let d = VarTensor::new_advice(cs, K, 1, LEN);
let e = VarTensor::new_advice(cs, K, 1, LEN);

let mut config =
Self::Config::configure(cs, &[a.clone(), b.clone()], &c, CheckMode::SAFE);
for _ in 0..NUM_LOOP {
config
.configure_dynamic_lookup(cs, &[a.clone(), b.clone()], &[d.clone(), e.clone()])
.unwrap();
}

config
}

fn synthesize(
&self,
config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
for i in 0..NUM_LOOP {
layouts::dynamic_lookup(
&config,
&mut region,
&self.lookups[i],
&self.tables[i],
)
.map_err(|_| Error::Synthesis)?;
}
assert_eq!(
region.dynamic_lookup_col_coord(),
NUM_LOOP * self.tables[0][0].len()
);
assert_eq!(region.dynamic_lookup_index(), NUM_LOOP);

Ok(())
},
)
.unwrap();
Ok(())
}
}

#[test]
fn dynamiclookupcircuit() {
// parameters
let tables = (0..NUM_LOOP)
.map(|loop_idx| {
[
ValTensor::from(Tensor::from(
(0..LEN).map(|i| Value::known(F::from((i * loop_idx) as u64 + 1))),
)),
ValTensor::from(Tensor::from(
(0..LEN).map(|i| Value::known(F::from((loop_idx * i * i) as u64 + 1))),
)),
]
})
.collect::<Vec<_>>();

let lookups = (0..NUM_LOOP)
.map(|loop_idx| {
[
ValTensor::from(Tensor::from(
(0..3).map(|i| Value::known(F::from((i * loop_idx) as u64 + 1))),
)),
ValTensor::from(Tensor::from(
(0..3).map(|i| Value::known(F::from((loop_idx * i * i) as u64 + 1))),
)),
]
})
.collect::<Vec<_>>();

let circuit = MyCircuit::<F> {
tables: tables.clone().try_into().unwrap(),
lookups: lookups.try_into().unwrap(),
_marker: PhantomData,
};

let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();

let lookups = (0..NUM_LOOP)
.map(|loop_idx| {
[
ValTensor::from(Tensor::from(
(0..2).map(|i| Value::known(F::from((i * loop_idx) as u64 + 1))),
)),
ValTensor::from(Tensor::from((0..2).map(|_| Value::known(F::from(10000))))),
]
})
.collect::<Vec<_>>();

let circuit = MyCircuit::<F> {
tables: tables.try_into().unwrap(),
lookups: lookups.try_into().unwrap(),
_marker: PhantomData,
};

let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
assert!(prover.verify().is_err());
}
}

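The assertions in the synthesize step above reduce to simple bookkeeping: each of the NUM_LOOP dynamic tables occupies LEN rows of the shared lookup column, so the final column coordinate is NUM_LOOP * LEN and the lookup index equals NUM_LOOP. A standalone check of that arithmetic with the test's constants:

fn main() {
    const LEN: usize = 4;      // rows per dynamic table, as in the test
    const NUM_LOOP: usize = 5; // number of table/lookup pairs
    // matches region.dynamic_lookup_col_coord() after all loops
    assert_eq!(NUM_LOOP * LEN, 20);
    // matches region.dynamic_lookup_index()
    assert_eq!(NUM_LOOP, 5);
}

The second MockProver run is expected to fail because the constant 10000 never appears in any of the witnessed tables.
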
#[cfg(test)]
mod add_with_overflow {
use super::*;
@@ -2120,148 +2275,6 @@ mod matmul_relu {
}
}

#[cfg(test)]
mod rangecheckpercent {
use crate::circuit::Tolerance;
use crate::{circuit, tensor::Tensor};
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
dev::MockProver,
plonk::{Circuit, ConstraintSystem, Error},
};

const RANGE: f32 = 1.0; // 1 percent error tolerance
const K: usize = 18;
const LEN: usize = 1;
const SCALE: usize = i128::pow(2, 7) as usize;

use super::*;

#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
input: ValTensor<F>,
output: ValTensor<F>,
_marker: PhantomData<F>,
}

impl Circuit<F> for MyCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;

fn without_witnesses(&self) -> Self {
self.clone()
}

fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let scale = utils::F32(SCALE as f32);
let a = VarTensor::new_advice(cs, K, 1, LEN);
let b = VarTensor::new_advice(cs, K, 1, LEN);
let output = VarTensor::new_advice(cs, K, 1, LEN);
let mut config =
Self::Config::configure(cs, &[a.clone(), b.clone()], &output, CheckMode::SAFE);
// set up a new GreaterThan and Recip tables
let nl = &LookupOp::GreaterThan {
a: circuit::utils::F32((RANGE * SCALE.pow(2) as f32) / 100.0),
};
config
.configure_lookup(cs, &b, &output, &a, (-32768, 32768), K, nl)
.unwrap();

config
.configure_lookup(
cs,
&b,
&output,
&a,
(-32768, 32768),
K,
&LookupOp::Recip {
input_scale: scale,
output_scale: scale,
},
)
.unwrap();
config
}

fn synthesize(
&self,
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
config.layout_tables(&mut layouter).unwrap();
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
&[self.output.clone(), self.input.clone()],
Box::new(HybridOp::RangeCheck(Tolerance {
val: RANGE,
scale: SCALE.into(),
})),
)
.map_err(|_| Error::Synthesis)
},
)
.unwrap();
Ok(())
}
}

#[test]
#[allow(clippy::assertions_on_constants)]
fn test_range_check_percent() {
// Successful cases
{
let inp = Tensor::new(Some(&[Value::<F>::known(F::from(100_u64))]), &[1]).unwrap();
let out = Tensor::new(Some(&[Value::<F>::known(F::from(101_u64))]), &[1]).unwrap();
let circuit = MyCircuit::<F> {
input: ValTensor::from(inp),
output: ValTensor::from(out),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}
{
let inp = Tensor::new(Some(&[Value::<F>::known(F::from(200_u64))]), &[1]).unwrap();
let out = Tensor::new(Some(&[Value::<F>::known(F::from(199_u64))]), &[1]).unwrap();
let circuit = MyCircuit::<F> {
input: ValTensor::from(inp),
output: ValTensor::from(out),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}

// Unsuccessful case
{
let inp = Tensor::new(Some(&[Value::<F>::known(F::from(100_u64))]), &[1]).unwrap();
let out = Tensor::new(Some(&[Value::<F>::known(F::from(102_u64))]), &[1]).unwrap();
let circuit = MyCircuit::<F> {
input: ValTensor::from(inp),
output: ValTensor::from(out),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
match prover.verify() {
Ok(_) => {
assert!(false)
}
Err(_) => {
assert!(true)
}
}
}
}
}

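The GreaterThan bound configured in the module above is RANGE * SCALE^2 / 100; with RANGE = 1.0 and SCALE = 2^7 this works out to 163.84, consistent with a 1 percent tolerance: a deviation of 1 on an input of 100 passes, a deviation of 2 does not. A quick check of that arithmetic:

fn main() {
    let range: f32 = 1.0;   // percent tolerance, as in the test
    let scale: f32 = 128.0; // 2^7
    let threshold = (range * scale * scale) / 100.0;
    assert!((threshold - 163.84).abs() < 1e-3); // the GreaterThan bound above
    // 1 percent of 100 is 1, so |101 - 100| passes and |102 - 100| fails
    assert!(1.0 <= 100.0_f32 * range / 100.0);
    assert!(2.0 > 100.0_f32 * range / 100.0);
}
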
#[cfg(test)]
mod relu {
use super::*;
@@ -2343,7 +2356,7 @@ mod lookup_ultra_overflow {
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
plonk::{Circuit, ConstraintSystem, Error},
poly::commitment::ParamsProver,
poly::commitment::{Params, ParamsProver},
};

#[derive(Clone)]
@@ -2447,121 +2460,16 @@ mod lookup_ultra_overflow {
let strategy =
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
let vk = pk.get_vk();
let result =
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
let result = crate::pfsys::verify_proof_circuit_kzg(
params.verifier_params(),
proof,
vk,
strategy,
params.n(),
);

assert!(result.is_ok());

println!("done.");
}
}

#[cfg(test)]
mod softmax {

use super::*;
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
dev::MockProver,
plonk::{Circuit, ConstraintSystem, Error},
};

const K: usize = 18;
const LEN: usize = 3;
const SCALE: f32 = 128.0;

#[derive(Clone)]
struct SoftmaxCircuit<F: PrimeField + TensorType + PartialOrd> {
pub input: ValTensor<F>,
_marker: PhantomData<F>,
}

impl Circuit<F> for SoftmaxCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;

fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let a = VarTensor::new_advice(cs, K, 1, LEN);
let b = VarTensor::new_advice(cs, K, 1, LEN);
let output = VarTensor::new_advice(cs, K, 1, LEN);
let mut config = Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE);
let advices = (0..3)
.map(|_| VarTensor::new_advice(cs, K, 1, LEN))
.collect::<Vec<_>>();

config
.configure_lookup(
cs,
&advices[0],
&advices[1],
&advices[2],
(-32768, 32768),
K,
&LookupOp::Exp {
scale: SCALE.into(),
},
)
.unwrap();
config
.configure_lookup(
cs,
&advices[0],
&advices[1],
&advices[2],
(-32768, 32768),
K,
&LookupOp::Recip {
input_scale: SCALE.into(),
output_scale: SCALE.into(),
},
)
.unwrap();
config
}

fn synthesize(
&self,
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
config.layout_tables(&mut layouter).unwrap();
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let _output = config
.layout(
&mut region,
&[self.input.clone()],
Box::new(HybridOp::Softmax {
scale: SCALE.into(),
axes: vec![0],
}),
)
.unwrap();
Ok(())
},
)
.unwrap();

Ok(())
}
}

#[test]
fn softmax_circuit() {
let input = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1))));

let circuit = SoftmaxCircuit::<F> {
input: ValTensor::from(input),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}
}
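For reference, the floating-point operation the softmax test above approximates in fixed point (this is the plain definition, not the circuit's lookup-based evaluation; the inputs 1, 2, 3 are dequantized at scale 128):

fn main() {
    let input: Vec<f64> = (0..3).map(|i| (i as f64 + 1.0) / 128.0).collect();
    let exps: Vec<f64> = input.iter().map(|x| x.exp()).collect();
    let sum: f64 = exps.iter().sum();
    let softmax: Vec<f64> = exps.iter().map(|e| e / sum).collect();
    // softmax outputs form a probability distribution
    assert!((softmax.iter().sum::<f64>() - 1.0).abs() < 1e-12);
}
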
134
src/commands.rs
@@ -1,4 +1,4 @@
use clap::{Parser, Subcommand, ValueEnum};
use clap::{Parser, Subcommand};
#[cfg(not(target_arch = "wasm32"))]
use ethers::types::H160;
#[cfg(feature = "python-bindings")]
@@ -9,8 +9,9 @@ use pyo3::{
types::PyString,
};
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::path::PathBuf;
use std::{error::Error, str::FromStr};
use tosubcommand::{ToFlags, ToSubcommand};

use crate::{pfsys::ProofType, RunArgs};

@@ -85,15 +86,11 @@ pub const DEFAULT_VK_SOL: &str = "vk.sol";
pub const DEFAULT_VK_ABI: &str = "vk.abi";
/// Default scale rebase multipliers for calibration
pub const DEFAULT_SCALE_REBASE_MULTIPLIERS: &str = "1,2,10";
/// Default use reduced srs for verification
pub const DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION: &str = "false";
/// Default only check for range check rebase
pub const DEFAULT_ONLY_RANGE_CHECK_REBASE: &str = "false";

impl std::fmt::Display for TranscriptType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.to_possible_value()
.expect("no values are skipped")
.get_name()
.fmt(f)
}
}
#[cfg(feature = "python-bindings")]
/// Converts TranscriptType into a PyObject (Required for TranscriptType to be compatible with Python)
impl IntoPy<PyObject> for TranscriptType {
@@ -138,17 +135,27 @@ impl Default for CalibrationTarget {
}
}

impl ToString for CalibrationTarget {
fn to_string(&self) -> String {
match self {
CalibrationTarget::Resources { col_overflow: true } => {
"resources/col-overflow".to_string()
impl std::fmt::Display for CalibrationTarget {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
CalibrationTarget::Resources { col_overflow: true } => {
"resources/col-overflow".to_string()
}
CalibrationTarget::Resources {
col_overflow: false,
} => "resources".to_string(),
CalibrationTarget::Accuracy => "accuracy".to_string(),
}
CalibrationTarget::Resources {
col_overflow: false,
} => "resources".to_string(),
CalibrationTarget::Accuracy => "accuracy".to_string(),
}
)
}
}

impl ToFlags for CalibrationTarget {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}

@@ -169,6 +176,36 @@ impl From<&str> for CalibrationTarget {
}
}

#[cfg(not(target_arch = "wasm32"))]
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
/// wrapper for H160 to make it easy to parse into flag vals
pub struct H160Flag {
inner: H160,
}

#[cfg(not(target_arch = "wasm32"))]
impl From<H160Flag> for H160 {
fn from(val: H160Flag) -> H160 {
val.inner
}
}

#[cfg(not(target_arch = "wasm32"))]
impl ToFlags for H160Flag {
fn to_flags(&self) -> Vec<String> {
vec![format!("{:#x}", self.inner)]
}
}

#[cfg(not(target_arch = "wasm32"))]
impl From<&str> for H160Flag {
fn from(s: &str) -> Self {
Self {
inner: H160::from_str(s).unwrap(),
}
}
}

#[cfg(feature = "python-bindings")]
/// Converts CalibrationTarget into a PyObject (Required for CalibrationTarget to be compatible with Python)
impl IntoPy<PyObject> for CalibrationTarget {
@@ -201,7 +238,7 @@ impl<'source> FromPyObject<'source> for CalibrationTarget {
}
}
}

// not wasm
use lazy_static::lazy_static;

// if CARGO VERSION is 0.0.0 replace with "source - no compatibility guaranteed"
@@ -242,7 +279,7 @@ impl Cli {
}

#[allow(missing_docs)]
#[derive(Debug, Subcommand, Clone, Deserialize, Serialize, PartialEq, PartialOrd)]
#[derive(Debug, Subcommand, Clone, Deserialize, Serialize, PartialEq, PartialOrd, ToSubcommand)]
pub enum Commands {
#[cfg(feature = "empty-cmd")]
/// Creates an empty buffer
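The H160Flag wrapper above exists so an address can round-trip through flag parsing and printing. A self-contained sketch of that round trip, using a stand-in inner type since ethers::types::H160 is an external dependency (the names here are illustrative, not part of the diff):

use std::str::FromStr;

// stand-in for ethers::types::H160
#[derive(Debug, Clone, PartialEq)]
struct Addr(String);

impl FromStr for Addr {
    type Err = String;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Addr(s.to_lowercase()))
    }
}

// mirrors the shape of H160Flag and its impls in the diff
struct AddrFlag {
    inner: Addr,
}

impl From<&str> for AddrFlag {
    fn from(s: &str) -> Self {
        Self { inner: Addr::from_str(s).unwrap() }
    }
}

impl AddrFlag {
    fn to_flags(&self) -> Vec<String> {
        vec![self.inner.0.clone()]
    }
}

fn main() {
    let addr = "0x0000000000000000000000000000000000000001";
    let flag = AddrFlag::from(addr);
    assert_eq!(flag.to_flags(), vec![addr.to_string()]);
    let raw: Addr = flag.inner; // unwrap back to the inner address
    assert_eq!(raw, Addr(addr.to_string()));
}
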
@@ -336,9 +373,9 @@ pub enum Commands {
/// max logrows to use for calibration, 26 is the max public SRS size
#[arg(long)]
max_logrows: Option<u32>,
// whether to fix the div_rebasing value truthiness during calibration. this changes how we rebase
#[arg(long)]
div_rebasing: Option<bool>,
// whether to only range check rebases (instead of trying both range check and lookup)
#[arg(long, default_value = DEFAULT_ONLY_RANGE_CHECK_REBASE)]
only_range_check_rebase: bool,
},

/// Generates a dummy SRS
@@ -434,7 +471,7 @@ pub enum Commands {
long,
require_equals = true,
num_args = 0..=1,
default_value_t = TranscriptType::EVM,
default_value_t = TranscriptType::default(),
value_enum
)]
transcript: TranscriptType,
@@ -489,13 +526,13 @@ pub enum Commands {
#[arg(short = 'W', long, default_value = DEFAULT_WITNESS)]
witness: PathBuf,
/// The path to the compiled model file (generated using the compile-circuit command)
#[arg(short = 'M', long, default_value = DEFAULT_COMPILED_CIRCUIT)]
#[arg(short = 'M', long)]
compiled_circuit: PathBuf,
#[arg(
long,
require_equals = true,
num_args = 0..=1,
default_value_t = TranscriptType::EVM,
default_value_t = TranscriptType::default(),
value_enum
)]
transcript: TranscriptType,
@@ -509,7 +546,7 @@ pub enum Commands {
#[cfg(not(target_arch = "wasm32"))]
/// Deploys a test contract that the data attester reads from and creates a data attestation formatted input.json file that contains call data information
#[command(arg_required_else_help = true)]
SetupTestEVMData {
SetupTestEvmData {
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
#[arg(short = 'D', long)]
data: PathBuf,
@@ -537,7 +574,7 @@ pub enum Commands {
TestUpdateAccountCalls {
/// The path to the verifier contract's address
#[arg(long)]
addr: H160,
addr: H160Flag,
/// The path to the .json data file.
#[arg(short = 'D', long)]
data: PathBuf,
@@ -587,9 +624,9 @@ pub enum Commands {
check_mode: CheckMode,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier for a single proof
/// Creates an Evm verifier for a single proof
#[command(name = "create-evm-verifier")]
CreateEVMVerifier {
CreateEvmVerifier {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
@@ -612,9 +649,9 @@ pub enum Commands {
render_vk_seperately: bool,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier for a single proof
/// Creates an Evm verifier for a single proof
#[command(name = "create-evm-vk")]
CreateEVMVK {
CreateEvmVK {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
@@ -632,9 +669,9 @@ pub enum Commands {
abi_path: PathBuf,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier that attests to on-chain inputs for a single proof
/// Creates an Evm verifier that attests to on-chain inputs for a single proof
#[command(name = "create-evm-da")]
CreateEVMDataAttestation {
CreateEvmDataAttestation {
/// The path to load circuit settings .json file from (generated using the gen-settings command)
#[arg(short = 'S', long, default_value = DEFAULT_SETTINGS)]
settings_path: PathBuf,
@@ -654,9 +691,9 @@ pub enum Commands {
},

#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier for an aggregate proof
/// Creates an Evm verifier for an aggregate proof
#[command(name = "create-evm-verifier-aggr")]
CreateEVMVerifierAggr {
CreateEvmVerifierAggr {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
@@ -695,6 +732,9 @@ pub enum Commands {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
/// Reduce SRS logrows to the number of instances rather than the number of logrows used for proofs (only works if the srs were generated in the same ceremony)
#[arg(long, default_value = DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION)]
reduced_srs: bool,
},
/// Verifies an aggregate proof, returning accept or reject
VerifyAggr {
@@ -776,31 +816,23 @@ pub enum Commands {
private_key: Option<String>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Verifies a proof using a local EVM executor, returning accept or reject
/// Verifies a proof using a local Evm executor, returning accept or reject
#[command(name = "verify-evm")]
VerifyEVM {
VerifyEvm {
/// The path to the proof file (generated using the prove command)
#[arg(long, default_value = DEFAULT_PROOF)]
proof_path: PathBuf,
/// The path to the verifier contract's address
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS)]
addr_verifier: H160,
addr_verifier: H160Flag,
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
#[arg(short = 'U', long)]
rpc_url: Option<String>,
/// does the verifier use data attestation?
#[arg(long)]
addr_da: Option<H160>,
addr_da: Option<H160Flag>,
// is the vk rendered separately, if so specify an address
#[arg(long)]
addr_vk: Option<H160>,
},

/// Print the proof in hexadecimal
#[command(name = "print-proof-hex")]
PrintProofHex {
/// The path to the proof file
#[arg(long, default_value = DEFAULT_PROOF)]
proof_path: PathBuf,
addr_vk: Option<H160Flag>,
},
}

178
src/execute.rs
@@ -3,6 +3,8 @@ use crate::circuit::CheckMode;
use crate::commands::CalibrationTarget;
use crate::commands::Commands;
#[cfg(not(target_arch = "wasm32"))]
use crate::commands::H160Flag;
#[cfg(not(target_arch = "wasm32"))]
use crate::eth::{deploy_contract_via_solidity, deploy_da_verifier_via_solidity};
#[cfg(not(target_arch = "wasm32"))]
#[allow(unused_imports)]
@@ -21,8 +23,6 @@ use crate::pfsys::{create_proof_circuit_kzg, verify_proof_circuit_kzg};
use crate::pfsys::{save_vk, srs::*};
use crate::tensor::TensorError;
use crate::RunArgs;
#[cfg(not(target_arch = "wasm32"))]
use ethers::types::H160;
use gag::Gag;
use halo2_proofs::dev::VerifyFailure;
use halo2_proofs::poly::commitment::Params;
@@ -178,7 +178,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
scales,
scale_rebase_multiplier,
max_logrows,
div_rebasing,
only_range_check_rebase,
} => calibrate(
model,
data,
@@ -187,7 +187,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
lookup_safety_margin,
scales,
scale_rebase_multiplier,
div_rebasing,
only_range_check_rebase,
max_logrows,
)
.map(|e| serde_json::to_string(&e).unwrap()),
@@ -202,7 +202,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
.map(|e| serde_json::to_string(&e).unwrap()),
Commands::Mock { model, witness } => mock(model, witness),
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEVMVerifier {
Commands::CreateEvmVerifier {
vk_path,
srs_path,
settings_path,
@@ -217,7 +217,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
abi_path,
render_vk_seperately,
),
Commands::CreateEVMVK {
Commands::CreateEvmVK {
vk_path,
srs_path,
settings_path,
@@ -225,14 +225,14 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
abi_path,
} => create_evm_vk(vk_path, srs_path, settings_path, sol_code_path, abi_path),
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEVMDataAttestation {
Commands::CreateEvmDataAttestation {
settings_path,
sol_code_path,
abi_path,
data,
} => create_evm_data_attestation(settings_path, sol_code_path, abi_path, data),
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEVMVerifierAggr {
Commands::CreateEvmVerifierAggr {
vk_path,
srs_path,
sol_code_path,
@@ -270,7 +270,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
compress_selectors,
),
#[cfg(not(target_arch = "wasm32"))]
Commands::SetupTestEVMData {
Commands::SetupTestEvmData {
data,
compiled_circuit,
test_data,
@@ -366,7 +366,8 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
settings_path,
vk_path,
srs_path,
} => verify(proof_path, settings_path, vk_path, srs_path)
reduced_srs,
} => verify(proof_path, settings_path, vk_path, srs_path, reduced_srs)
.map(|e| serde_json::to_string(&e).unwrap()),
Commands::VerifyAggr {
proof_path,
@@ -433,14 +434,13 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::VerifyEVM {
Commands::VerifyEvm {
proof_path,
addr_verifier,
rpc_url,
addr_da,
addr_vk,
} => verify_evm(proof_path, addr_verifier, rpc_url, addr_da, addr_vk).await,
Commands::PrintProofHex { proof_path } => print_proof_hex(proof_path),
}
}

@@ -488,8 +488,14 @@ async fn fetch_srs(uri: &str) -> Result<Vec<u8>, Box<dyn Error>> {

#[cfg(not(target_arch = "wasm32"))]
fn check_srs_hash(logrows: u32, srs_path: Option<PathBuf>) -> Result<String, Box<dyn Error>> {
use std::io::Read;

let path = get_srs_path(logrows, srs_path);
let hash = sha256::digest(std::fs::read(path.clone())?);
let file = std::fs::File::open(path.clone())?;
let mut buffer = vec![];
let bytes_read = std::io::BufReader::new(file).read_to_end(&mut buffer)?;
debug!("read {} bytes from SRS file", bytes_read);
let hash = sha256::digest(buffer);
info!("SRS hash: {}", hash);

let predefined_hash = match { crate::srs_sha::PUBLIC_SRS_SHA256_HASHES.get(&logrows) } {
@@ -612,7 +618,7 @@ pub(crate) async fn gen_witness(

let start_time = Instant::now();

let witness = circuit.forward(&mut input, vk.as_ref(), srs.as_ref())?;
let witness = circuit.forward(&mut input, vk.as_ref(), srs.as_ref(), false)?;

// print each variable tuple (symbol, value) as symbol=value
trace!(
@@ -628,7 +634,7 @@ pub(crate) async fn gen_witness(
);

if let Some(output_path) = output {
serde_json::to_writer(&File::create(output_path)?, &witness)?;
witness.save(output_path)?;
}

// print the witness in debug
@@ -726,11 +732,11 @@ impl AccuracyResults {
let percentage_error = error.enum_map(|i, x| Ok::<_, TensorError>(x / original[i]))?;
let abs_percentage_error = percentage_error.map(|x| x.abs());

errors.extend(error.into_iter());
abs_errors.extend(abs_error.into_iter());
squared_errors.extend(squared_error.into_iter());
percentage_errors.extend(percentage_error.into_iter());
abs_percentage_errors.extend(abs_percentage_error.into_iter());
errors.extend(error);
abs_errors.extend(abs_error);
squared_errors.extend(squared_error);
percentage_errors.extend(percentage_error);
abs_percentage_errors.extend(abs_percentage_error);
}

let mean_percent_error =
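The rewritten check_srs_hash above reads the SRS through a BufReader before hashing rather than slurping the file in a single call. A minimal sketch of the same read-then-digest shape, hashing an in-memory byte string in place of a real SRS file (assumes the sha256 crate the diff already uses):

use std::io::Read;

fn main() -> std::io::Result<()> {
    // any reader works the same way as the SRS file handle
    let file = std::io::Cursor::new(b"not a real srs".to_vec());
    let mut buffer = vec![];
    let bytes_read = std::io::BufReader::new(file).read_to_end(&mut buffer)?;
    println!("read {} bytes", bytes_read);
    let hash = sha256::digest(buffer); // same call as in the diff
    println!("hash: {}", hash);
    Ok(())
}
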
@@ -780,6 +786,7 @@ impl AccuracyResults {
/// Calibrate the circuit parameters to a given dataset
#[cfg(not(target_arch = "wasm32"))]
#[allow(trivial_casts)]
#[allow(clippy::too_many_arguments)]
pub(crate) fn calibrate(
model_path: PathBuf,
data: PathBuf,
@@ -788,7 +795,7 @@ pub(crate) fn calibrate(
lookup_safety_margin: i128,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
div_rebasing: Option<bool>,
only_range_check_rebase: bool,
max_logrows: Option<u32>,
) -> Result<GraphSettings, Box<dyn Error>> {
use std::collections::HashMap;
@@ -801,16 +808,7 @@ pub(crate) fn calibrate(
// we load the model to get the input and output shapes
// check if gag already exists

#[cfg(unix)]
let _r = match Gag::stdout() {
Ok(r) => Some(r),
Err(_) => None,
};

let model = Model::from_run_args(&settings.run_args, &model_path)?;
// drop the gag
#[cfg(unix)]
std::mem::drop(_r);

let chunks = data.split_into_batches(model.graph.input_shapes()?)?;
info!("num of calibration batches: {}", chunks.len());
@@ -826,14 +824,11 @@ pub(crate) fn calibrate(
let range = if let Some(scales) = scales {
scales
} else {
match target {
CalibrationTarget::Resources { .. } => (8..10).collect::<Vec<crate::Scale>>(),
CalibrationTarget::Accuracy => (10..14).collect::<Vec<crate::Scale>>(),
}
(11..14).collect::<Vec<crate::Scale>>()
};

let div_rebasing = if let Some(div_rebasing) = div_rebasing {
vec![div_rebasing]
let div_rebasing = if only_range_check_rebase {
vec![false]
} else {
vec![true, false]
};
@@ -892,16 +887,6 @@ pub(crate) fn calibrate(
input_scale, param_scale, scale_rebase_multiplier, div_rebasing
));

#[cfg(unix)]
let _r = match Gag::stdout() {
Ok(r) => Some(r),
Err(_) => None,
};
#[cfg(unix)]
let _q = match Gag::stderr() {
Ok(r) => Some(r),
Err(_) => None,
};
let key = (input_scale, param_scale, scale_rebase_multiplier);
forward_pass_res.insert(key, vec![]);

@@ -916,17 +901,12 @@ pub(crate) fn calibrate(
let mut circuit = match GraphCircuit::from_run_args(&local_run_args, &model_path) {
Ok(c) => c,
Err(e) => {
// drop the gag
#[cfg(unix)]
std::mem::drop(_r);
#[cfg(unix)]
std::mem::drop(_q);
debug!("circuit creation from run args failed: {:?}", e);
continue;
}
};

chunks
let forward_res = chunks
.iter()
.map(|chunk| {
let chunk = chunk.clone();
@@ -936,7 +916,7 @@ pub(crate) fn calibrate(
.map_err(|e| format!("failed to load circuit inputs: {}", e))?;

let forward_res = circuit
.forward(&mut data.clone(), None, None)
.forward(&mut data.clone(), None, None, true)
.map_err(|e| format!("failed to forward: {}", e))?;

// push result to the hashmap
@@ -947,7 +927,16 @@ pub(crate) fn calibrate(

Ok(()) as Result<(), String>
})
.collect::<Result<Vec<()>, String>>()?;
.collect::<Result<Vec<()>, String>>();

match forward_res {
Ok(_) => (),
// typically errors will be due to the circuit overflowing the i128 limit
Err(e) => {
debug!("forward pass failed: {:?}", e);
continue;
}
}

let min_lookup_range = forward_pass_res
.get(&key)
@@ -965,19 +954,21 @@ pub(crate) fn calibrate(
.max()
.unwrap_or(0);

let res = circuit.calibrate_from_min_max(
min_lookup_range,
max_lookup_range,
let max_range_size = forward_pass_res
.get(&key)
.unwrap()
.iter()
.map(|x| x.max_range_size)
.max()
.unwrap_or(0);

let res = circuit.calc_min_logrows(
(min_lookup_range, max_lookup_range),
max_range_size,
max_logrows,
lookup_safety_margin,
);

// drop the gag
#[cfg(unix)]
std::mem::drop(_r);
#[cfg(unix)]
std::mem::drop(_q);

if res.is_ok() {
let new_settings = circuit.settings().clone();

@@ -1172,16 +1163,6 @@ pub(crate) fn mock(
Ok(String::new())
}

pub(crate) fn print_proof_hex(proof_path: PathBuf) -> Result<String, Box<dyn Error>> {
let proof = Snark::load::<KZGCommitmentScheme<Bn256>>(&proof_path)?;
for instance in proof.instances {
println!("{:?}", instance);
}
let hex_str = hex::encode(proof.proof);
info!("0x{}", hex_str);
Ok(format!("0x{}", hex_str))
}

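The calibration loop above now sweeps a fixed scale range when none is supplied and collapses the div_rebasing axis when only_range_check_rebase is set. A sketch of the resulting search-grid size, assuming input and param scales are swept independently, as the (input_scale, param_scale, scale_rebase_multiplier) key above suggests (scale_rebase_multiplier held fixed here for simplicity):

fn main() {
    let scales: Vec<i32> = (11..14).collect(); // default when none supplied
    let only_range_check_rebase = false;
    let div_rebasing = if only_range_check_rebase {
        vec![false]
    } else {
        vec![true, false]
    };
    // one forward pass per (input_scale, param_scale, div_rebasing) combination
    let combos = scales.len() * scales.len() * div_rebasing.len();
    assert_eq!(combos, 18);
}
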
#[cfg(feature = "render")]
pub(crate) fn render(
model: PathBuf,
@@ -1400,10 +1381,10 @@ pub(crate) async fn deploy_evm(
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn verify_evm(
proof_path: PathBuf,
addr_verifier: H160,
addr_verifier: H160Flag,
rpc_url: Option<String>,
addr_da: Option<H160>,
addr_vk: Option<H160>,
addr_da: Option<H160Flag>,
addr_vk: Option<H160Flag>,
) -> Result<String, Box<dyn Error>> {
use crate::eth::verify_proof_with_data_attestation;
check_solc_requirement();
@@ -1413,14 +1394,20 @@ pub(crate) async fn verify_evm(
let result = if let Some(addr_da) = addr_da {
verify_proof_with_data_attestation(
proof.clone(),
addr_verifier,
addr_da,
addr_vk,
addr_verifier.into(),
addr_da.into(),
addr_vk.map(|s| s.into()),
rpc_url.as_deref(),
)
.await?
} else {
verify_proof_via_solidity(proof.clone(), addr_verifier, addr_vk, rpc_url.as_deref()).await?
verify_proof_via_solidity(
proof.clone(),
addr_verifier.into(),
addr_vk.map(|s| s.into()),
rpc_url.as_deref(),
)
.await?
};

info!("Solidity verification result: {}", result);
@@ -1576,14 +1563,14 @@ pub(crate) async fn setup_test_evm_witness(
use crate::pfsys::ProofType;
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn test_update_account_calls(
addr: H160,
addr: H160Flag,
data: PathBuf,
rpc_url: Option<String>,
) -> Result<String, Box<dyn Error>> {
use crate::eth::update_account_calls;

check_solc_requirement();
update_account_calls(addr, data, rpc_url.as_deref()).await?;
update_account_calls(addr.into(), data, rpc_url.as_deref()).await?;

Ok(String::new())
}
@@ -1728,6 +1715,7 @@ pub(crate) fn fuzz(
bad_proof,
pk.get_vk(),
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1758,6 +1746,7 @@ pub(crate) fn fuzz(
bad_proof,
pk.get_vk(),
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1794,6 +1783,7 @@ pub(crate) fn fuzz(
proof.clone(),
bad_vk,
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1825,6 +1815,7 @@ pub(crate) fn fuzz(
bad_proof,
pk.get_vk(),
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1860,6 +1851,7 @@ pub(crate) fn fuzz(
bad_proof,
pk.get_vk(),
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -2045,15 +2037,29 @@ pub(crate) fn verify(
settings_path: PathBuf,
vk_path: PathBuf,
srs_path: Option<PathBuf>,
reduced_srs: bool,
) -> Result<bool, Box<dyn Error>> {
let circuit_settings = GraphSettings::load(&settings_path)?;
let params = load_params_cmd(srs_path, circuit_settings.run_args.logrows)?;

let params = if reduced_srs {
load_params_cmd(srs_path, circuit_settings.log2_total_instances())?
} else {
load_params_cmd(srs_path, circuit_settings.run_args.logrows)?
};

let proof = Snark::load::<KZGCommitmentScheme<Bn256>>(&proof_path)?;

let strategy = KZGSingleStrategy::new(params.verifier_params());
let vk = load_vk::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk_path, circuit_settings)?;
let vk =
load_vk::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk_path, circuit_settings.clone())?;
let now = Instant::now();
let result = verify_proof_circuit_kzg(params.verifier_params(), proof, &vk, strategy);
let result = verify_proof_circuit_kzg(
params.verifier_params(),
proof,
&vk,
strategy,
1 << circuit_settings.run_args.logrows,
);
let elapsed = now.elapsed();
info!(
"verify took {}.{}",
@@ -2077,7 +2083,7 @@ pub(crate) fn verify_aggr(
let strategy = AccumulatorStrategy::new(params.verifier_params());
let vk = load_vk::<KZGCommitmentScheme<Bn256>, Fr, AggregationCircuit>(vk_path, ())?;
let now = Instant::now();
let result = verify_proof_circuit_kzg(&params, proof, &vk, strategy);
let result = verify_proof_circuit_kzg(&params, proof, &vk, strategy, 1 << logrows);

let elapsed = now.elapsed();
info!(

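verify above can now size the SRS from the instance count instead of the proof's logrows. A sketch of the two sizes, mirroring the log2_total_instances helper added to src/graph/mod.rs later in this diff (the concrete numbers are assumptions for illustration):

fn main() {
    let logrows: u32 = 17;        // assumed run_args.logrows
    let total_instances = 4usize; // assumed sum of instance counts
    let full: u64 = 1 << logrows;
    // max between 1 and ceil(log2(sum)), as in log2_total_instances
    let reduced_logrows = std::cmp::max((total_instances as f64).log2().ceil() as u32, 1);
    let reduced: u64 = 1 << reduced_logrows;
    assert_eq!((full, reduced), (131_072, 4));
}
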
@@ -4,6 +4,7 @@ use crate::circuit::InputType;
use crate::fieldutils::i128_to_felt;
#[cfg(not(target_arch = "wasm32"))]
use crate::tensor::Tensor;
use crate::EZKL_BUF_CAPACITY;
use halo2curves::bn256::Fr as Fp;
#[cfg(not(target_arch = "wasm32"))]
use postgres::{Client, NoTls};
@@ -15,6 +16,8 @@ use pyo3::types::PyDict;
use pyo3::ToPyObject;
use serde::ser::SerializeStruct;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::io::BufReader;
use std::io::BufWriter;
use std::io::Read;
use std::panic::UnwindSafe;
#[cfg(not(target_arch = "wasm32"))]
@@ -490,16 +493,20 @@ impl GraphData {

/// Load the model input from a file
pub fn from_path(path: std::path::PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
let mut file = std::fs::File::open(path.clone())
.map_err(|_| format!("failed to open input at {}", path.display()))?;
let mut data = String::new();
file.read_to_string(&mut data)?;
serde_json::from_str(&data).map_err(|e| e.into())
let reader = std::fs::File::open(path)?;
let mut reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, reader);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
let graph_input = serde_json::from_str(&buf)?;
Ok(graph_input)
}

/// Save the model input to a file
pub fn save(&self, path: std::path::PathBuf) -> Result<(), Box<dyn std::error::Error>> {
serde_json::to_writer(std::fs::File::create(path)?, &self).map_err(|e| e.into())
// buf writer
let writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, std::fs::File::create(path)?);
serde_json::to_writer(writer, self)?;
Ok(())
}

///
@@ -617,13 +624,13 @@ impl ToPyObject for DataSource {
}

#[cfg(feature = "python-bindings")]
use crate::pfsys::field_to_string_montgomery;
use crate::pfsys::field_to_string;

#[cfg(feature = "python-bindings")]
impl ToPyObject for FileSourceInner {
fn to_object(&self, py: Python) -> PyObject {
match self {
FileSourceInner::Field(data) => field_to_string_montgomery(data).to_object(py),
FileSourceInner::Field(data) => field_to_string(data).to_object(py),
FileSourceInner::Bool(data) => data.to_object(py),
FileSourceInner::Float(data) => data.to_object(py),
}

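The GraphData changes above route JSON through sized buffers. A self-contained round trip in the same shape (BUF_CAPACITY is a stand-in for *EZKL_BUF_CAPACITY; assumes serde_json, which the file already uses):

use std::io::{BufReader, BufWriter};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    const BUF_CAPACITY: usize = 8000; // stand-in for *EZKL_BUF_CAPACITY
    let path = std::env::temp_dir().join("graph_data_demo.json");

    // save: serialize through a sized BufWriter, as GraphData::save now does
    let writer = BufWriter::with_capacity(BUF_CAPACITY, std::fs::File::create(&path)?);
    serde_json::to_writer(writer, &vec![1, 2, 3])?;

    // load: deserialize through a sized BufReader, as GraphData::from_path now does
    let reader = BufReader::with_capacity(BUF_CAPACITY, std::fs::File::open(&path)?);
    let data: Vec<i32> = serde_json::from_reader(reader)?;
    assert_eq!(data, vec![1, 2, 3]);
    Ok(())
}
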
536
src/graph/mod.rs
@@ -16,6 +16,7 @@ use halo2_proofs::plonk::VerifyingKey;
use halo2_proofs::poly::kzg::commitment::ParamsKZG;
pub use input::DataSource;
use itertools::Itertools;
use tosubcommand::ToFlags;

#[cfg(not(target_arch = "wasm32"))]
use self::input::OnChainSource;
@@ -23,19 +24,22 @@ use self::input::{FileSource, GraphData};
use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSizes};
use crate::circuit::lookup::LookupOp;
use crate::circuit::modules::ModulePlanner;
use crate::circuit::table::{Range, Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::table::{num_cols_required, Range, Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::{CheckMode, InputType};
use crate::fieldutils::felt_to_f64;
use crate::pfsys::PrettyElements;
use crate::tensor::{Tensor, ValTensor};
use crate::RunArgs;
use crate::{RunArgs, EZKL_BUF_CAPACITY};

use halo2_proofs::{
circuit::Layouter,
plonk::{Circuit, ConstraintSystem, Error as PlonkError},
};
use halo2curves::bn256::{self, Bn256, Fr as Fp, G1Affine};
use halo2curves::ff::PrimeField;
use log::{debug, error, info, trace, warn};
#[cfg(not(target_arch = "wasm32"))]
use lazy_static::lazy_static;
use log::{debug, error, trace, warn};
use maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
pub use model::*;
pub use node::*;
@@ -46,20 +50,36 @@ use pyo3::types::PyDict;
#[cfg(feature = "python-bindings")]
use pyo3::ToPyObject;
use serde::{Deserialize, Serialize};
use std::io::{Read, Write};
use std::ops::Deref;
use thiserror::Error;
pub use utilities::*;
pub use vars::*;

#[cfg(feature = "python-bindings")]
use crate::pfsys::field_to_string_montgomery;
use crate::pfsys::field_to_string;

/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: i128 = 2;

/// The maximum number of columns in a lookup table.
pub const MAX_NUM_LOOKUP_COLS: usize = 12;

/// Max representation of a lookup table input
pub const MAX_LOOKUP_ABS: i128 = 8 * 2_i128.pow(MAX_PUBLIC_SRS);
pub const MAX_LOOKUP_ABS: i128 = (MAX_NUM_LOOKUP_COLS as i128) * 2_i128.pow(MAX_PUBLIC_SRS);

#[cfg(not(target_arch = "wasm32"))]
lazy_static! {
/// Max circuit area
pub static ref EZKL_MAX_CIRCUIT_AREA: Option<usize> =
if let Ok(max_circuit_area) = std::env::var("EZKL_MAX_CIRCUIT_AREA") {
Some(max_circuit_area.parse().unwrap_or(0))
} else {
None
};
}

#[cfg(target_arch = "wasm32")]
const EZKL_MAX_CIRCUIT_AREA: Option<usize> = None;

/// circuit related errors.
#[derive(Debug, Error)]
@@ -117,15 +137,16 @@ pub enum GraphError {
MissingResults,
}

const ASSUMED_BLINDING_FACTORS: usize = 5;
///
pub const ASSUMED_BLINDING_FACTORS: usize = 5;
/// The minimum number of rows in the grid
pub const MIN_LOGROWS: u32 = 6;

/// 26
pub const MAX_PUBLIC_SRS: u32 = bn256::Fr::S - 2;

/// Lookup deg
pub const LOOKUP_DEG: usize = 5;
///
pub const RESERVED_BLINDING_ROWS: usize = ASSUMED_BLINDING_FACTORS + RESERVED_BLINDING_ROWS_PAD;

use std::cell::RefCell;

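A worked check of the constant change above: with MAX_PUBLIC_SRS = 26 (bn256::Fr::S - 2, per the doc comment), the lookup bound grows from 8 * 2^26 to MAX_NUM_LOOKUP_COLS * 2^26:

fn main() {
    const MAX_PUBLIC_SRS: u32 = 26;
    const MAX_NUM_LOOKUP_COLS: i128 = 12;
    let old_bound = 8 * 2_i128.pow(MAX_PUBLIC_SRS);
    let new_bound = MAX_NUM_LOOKUP_COLS * 2_i128.pow(MAX_PUBLIC_SRS);
    assert_eq!(old_bound, 536_870_912);
    assert_eq!(new_bound, 805_306_368);
}
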
@@ -154,6 +175,8 @@ pub struct GraphWitness {
pub max_lookup_inputs: i128,
/// min lookup input
pub min_lookup_inputs: i128,
/// max range check size
pub max_range_size: i128,
}

impl GraphWitness {
@@ -181,6 +204,7 @@ impl GraphWitness {
processed_outputs: None,
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
}
}

@@ -191,42 +215,41 @@ impl GraphWitness {
output_scales: Vec<crate::Scale>,
visibility: VarVisibility,
) {
let mut pretty_elements = PrettyElements::default();
pretty_elements.rescaled_inputs = self
.inputs
.iter()
.enumerate()
.map(|(i, t)| {
let scale = input_scales[i];
t.iter()
.map(|x| dequantize(*x, scale, 0.).to_string())
.collect()
})
.collect();

pretty_elements.inputs = self
.inputs
.iter()
.map(|t| t.iter().map(|x| format!("{:?}", x)).collect())
.collect();

pretty_elements.rescaled_outputs = self
.outputs
.iter()
.enumerate()
.map(|(i, t)| {
let scale = output_scales[i];
t.iter()
.map(|x| dequantize(*x, scale, 0.).to_string())
.collect()
})
.collect();

pretty_elements.outputs = self
.outputs
.iter()
.map(|t| t.iter().map(|x| format!("{:?}", x)).collect())
.collect();
let mut pretty_elements = PrettyElements {
rescaled_inputs: self
.inputs
.iter()
.enumerate()
.map(|(i, t)| {
let scale = input_scales[i];
t.iter()
.map(|x| dequantize(*x, scale, 0.).to_string())
.collect()
})
.collect(),
inputs: self
.inputs
.iter()
.map(|t| t.iter().map(|x| format!("{:?}", x)).collect())
.collect(),
rescaled_outputs: self
.outputs
.iter()
.enumerate()
.map(|(i, t)| {
let scale = output_scales[i];
t.iter()
.map(|x| dequantize(*x, scale, 0.).to_string())
.collect()
})
.collect(),
outputs: self
.outputs
.iter()
.map(|t| t.iter().map(|x| format!("{:?}", x)).collect())
.collect(),
..Default::default()
};

if let Some(processed_inputs) = self.processed_inputs.clone() {
pretty_elements.processed_inputs = processed_inputs
@@ -292,16 +315,20 @@ impl GraphWitness {

/// Load the model input from a file
pub fn from_path(path: std::path::PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
let mut file = std::fs::File::open(path.clone())
let file = std::fs::File::open(path.clone())
.map_err(|_| format!("failed to load model at {}", path.display()))?;
let mut data = String::new();
file.read_to_string(&mut data)?;
serde_json::from_str(&data).map_err(|e| e.into())

let reader = std::io::BufReader::with_capacity(*EZKL_BUF_CAPACITY, file);
serde_json::from_reader(reader).map_err(|e| e.into())
}

/// Save the model input to a file
pub fn save(&self, path: std::path::PathBuf) -> Result<(), Box<dyn std::error::Error>> {
serde_json::to_writer(std::fs::File::create(path)?, &self).map_err(|e| e.into())
// use buf writer
let writer =
std::io::BufWriter::with_capacity(*EZKL_BUF_CAPACITY, std::fs::File::create(path)?);

serde_json::to_writer(writer, &self).map_err(|e| e.into())
}

///
@@ -335,19 +362,23 @@ impl ToPyObject for GraphWitness {
let inputs: Vec<Vec<String>> = self
.inputs
.iter()
.map(|x| x.iter().map(field_to_string_montgomery).collect())
.map(|x| x.iter().map(field_to_string).collect())
.collect();

let outputs: Vec<Vec<String>> = self
.outputs
.iter()
.map(|x| x.iter().map(field_to_string_montgomery).collect())
.map(|x| x.iter().map(field_to_string).collect())
.collect();

dict.set_item("inputs", inputs).unwrap();
dict.set_item("outputs", outputs).unwrap();
dict.set_item("max_lookup_inputs", self.max_lookup_inputs)
.unwrap();
dict.set_item("min_lookup_inputs", self.min_lookup_inputs)
.unwrap();
dict.set_item("max_range_size", self.max_range_size)
.unwrap();

if let Some(processed_inputs) = &self.processed_inputs {
//poseidon_hash
@@ -389,10 +420,7 @@ impl ToPyObject for GraphWitness {

#[cfg(feature = "python-bindings")]
fn insert_poseidon_hash_pydict(pydict: &PyDict, poseidon_hash: &Vec<Fp>) -> Result<(), PyErr> {
let poseidon_hash: Vec<String> = poseidon_hash
.iter()
.map(field_to_string_montgomery)
.collect();
let poseidon_hash: Vec<String> = poseidon_hash.iter().map(field_to_string).collect();
pydict.set_item("poseidon_hash", poseidon_hash)?;

Ok(())
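The PrettyElements construction above swaps field-by-field mutation of a default value for a single struct literal ending in ..Default::default(). A minimal, self-contained illustration of that pattern (the demo struct is hypothetical, not the real PrettyElements):

#[derive(Default)]
struct PrettyElementsDemo {
    inputs: Vec<String>,
    outputs: Vec<String>,
    processed_inputs: Vec<String>,
}

fn main() {
    let demo = PrettyElementsDemo {
        inputs: vec!["1".into()],
        outputs: vec!["2".into()],
        ..Default::default() // remaining fields keep their defaults
    };
    assert!(demo.processed_inputs.is_empty());
}
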
@@ -421,6 +449,10 @@ pub struct GraphSettings {
pub total_assignments: usize,
/// total const size
pub total_const_size: usize,
/// total dynamic column size
pub total_dynamic_col_size: usize,
/// number of dynamic lookups
pub num_dynamic_lookups: usize,
/// the shape of public inputs to the model (in order of appearance)
pub model_instance_shapes: Vec<Vec<usize>>,
/// model output scales
@@ -444,6 +476,24 @@ pub struct GraphSettings {
}

impl GraphSettings {
fn model_constraint_logrows(&self) -> u32 {
(self.num_rows as f64 + RESERVED_BLINDING_ROWS as f64)
.log2()
.ceil() as u32
}

fn dynamic_lookup_logrows(&self) -> u32 {
(self.total_dynamic_col_size as f64).log2().ceil() as u32
}

fn module_constraint_logrows(&self) -> u32 {
(self.module_sizes.max_constraints() as f64).log2().ceil() as u32
}

fn constants_logrows(&self) -> u32 {
(self.total_const_size as f64).log2().ceil() as u32
}

/// calculate the total number of instances
pub fn total_instances(&self) -> Vec<usize> {
let mut instances: Vec<usize> = self
@@ -456,22 +506,33 @@ impl GraphSettings {
instances
}

/// calculate the log2 of the total number of instances
pub fn log2_total_instances(&self) -> u32 {
let sum = self.total_instances().iter().sum::<usize>();

// max between 1 and the log2 of the sums
std::cmp::max((sum as f64).log2().ceil() as u32, 1)
}

/// save params to file
pub fn save(&self, path: &std::path::PathBuf) -> Result<(), std::io::Error> {
let encoded = serde_json::to_string(&self)?;
let mut file = std::fs::File::create(path)?;
file.write_all(encoded.as_bytes())
// buf writer
let writer =
std::io::BufWriter::with_capacity(*EZKL_BUF_CAPACITY, std::fs::File::create(path)?);
serde_json::to_writer(writer, &self).map_err(|e| {
error!("failed to save settings file at {}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
}
/// load params from file
pub fn load(path: &std::path::PathBuf) -> Result<Self, std::io::Error> {
let mut file = std::fs::File::open(path).map_err(|e| {
error!("failed to open settings file at {}", e);
e
})?;
let mut data = String::new();
file.read_to_string(&mut data)?;
let res = serde_json::from_str(&data)?;
Ok(res)
// buf reader
let reader =
std::io::BufReader::with_capacity(*EZKL_BUF_CAPACITY, std::fs::File::open(path)?);
serde_json::from_reader(reader).map_err(|e| {
error!("failed to load settings file at {}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
}

/// Export the ezkl configuration as json
@@ -517,6 +578,11 @@ impl GraphSettings {
|| self.run_args.param_visibility.is_hashed()
}

/// requires dynamic lookup
pub fn requires_dynamic_lookup(&self) -> bool {
self.num_dynamic_lookups > 0
}

/// any kzg visibility
pub fn module_requires_kzg(&self) -> bool {
self.run_args.input_visibility.is_kzgcommit()
@@ -530,6 +596,7 @@ impl GraphSettings {
pub struct GraphConfig {
model_config: ModelConfig,
module_configs: ModuleConfigs,
circuit_size: CircuitSize,
}

/// Defines the circuit for a computational graph / model loaded from a `.onnx` file.
@@ -566,7 +633,7 @@ impl GraphCircuit {
///
pub fn save(&self, path: std::path::PathBuf) -> Result<(), Box<dyn std::error::Error>> {
let f = std::fs::File::create(path)?;
let writer = std::io::BufWriter::new(f);
let writer = std::io::BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
bincode::serialize_into(writer, &self)?;
Ok(())
}
@@ -574,11 +641,10 @@ impl GraphCircuit {
///
pub fn load(path: std::path::PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
// read bytes from file
let mut f = std::fs::File::open(&path)?;
let metadata = std::fs::metadata(&path)?;
let mut buffer = vec![0; metadata.len() as usize];
f.read_exact(&mut buffer)?;
let result = bincode::deserialize(&buffer)?;
let f = std::fs::File::open(path)?;
let reader = std::io::BufReader::with_capacity(*EZKL_BUF_CAPACITY, f);
let result: GraphCircuit = bincode::deserialize_from(reader)?;

Ok(result)
}
}
@@ -593,6 +659,17 @@ pub enum TestDataSource {
OnChain,
}

impl std::fmt::Display for TestDataSource {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TestDataSource::File => write!(f, "file"),
TestDataSource::OnChain => write!(f, "on-chain"),
}
}
}

impl ToFlags for TestDataSource {}

impl From<String> for TestDataSource {
fn from(value: String) -> Self {
match value.to_lowercase().as_str() {
@@ -809,7 +886,7 @@ impl GraphCircuit {
let shapes = self.model().graph.input_shapes()?;
let scales = self.model().graph.get_input_scales();
let input_types = self.model().graph.get_input_types()?;
info!("input scales: {:?}", scales);
debug!("input scales: {:?}", scales);

match &data.input_data {
DataSource::File(file_data) => {
@@ -828,7 +905,7 @@ impl GraphCircuit {
let shapes = self.model().graph.input_shapes()?;
let scales = self.model().graph.get_input_scales();
let input_types = self.model().graph.get_input_types()?;
info!("input scales: {:?}", scales);
debug!("input scales: {:?}", scales);

self.process_data_source(&data.input_data, shapes, scales, input_types)
.await
@@ -954,18 +1031,10 @@ impl GraphCircuit {
Ok(data)
}

fn reserved_blinding_rows() -> f64 {
(ASSUMED_BLINDING_FACTORS + RESERVED_BLINDING_ROWS_PAD) as f64
}

fn calc_safe_lookup_range(
min_lookup_inputs: i128,
max_lookup_inputs: i128,
lookup_safety_margin: i128,
) -> Range {
fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: i128) -> Range {
let mut margin = (
lookup_safety_margin * min_lookup_inputs,
lookup_safety_margin * max_lookup_inputs,
lookup_safety_margin * min_max_lookup.0,
lookup_safety_margin * min_max_lookup.1,
);
if lookup_safety_margin == 1 {
margin.0 += 4;
@@ -975,18 +1044,34 @@ impl GraphCircuit {
margin
}

fn calc_num_cols(safe_range: Range, max_logrows: u32) -> usize {
let max_col_size = Table::<Fp>::cal_col_size(
max_logrows as usize,
Self::reserved_blinding_rows() as usize,
);
Table::<Fp>::num_cols_required(safe_range, max_col_size)
fn calc_num_cols(range_len: i128, max_logrows: u32) -> usize {
let max_col_size = Table::<Fp>::cal_col_size(max_logrows as usize, RESERVED_BLINDING_ROWS);
num_cols_required(range_len, max_col_size)
}

fn calc_min_logrows(
fn table_size_logrows(
&self,
safe_lookup_range: Range,
max_range_size: i128,
) -> Result<u32, Box<dyn std::error::Error>> {
// pick the range with the largest absolute size safe_lookup_range or max_range_size
let safe_range = std::cmp::max(
(safe_lookup_range.1 - safe_lookup_range.0).abs(),
max_range_size,
);

let min_bits = (safe_range as f64 + RESERVED_BLINDING_ROWS as f64 + 1.)
.log2()
.ceil() as u32;

Ok(min_bits)
}

/// calculate the minimum logrows required for the circuit
pub fn calc_min_logrows(
&mut self,
min_lookup_inputs: i128,
max_lookup_inputs: i128,
min_max_lookup: Range,
max_range_size: i128,
max_logrows: Option<u32>,
lookup_safety_margin: i128,
) -> Result<(), Box<dyn std::error::Error>> {
@@ -996,125 +1081,91 @@ impl GraphCircuit {
let mut max_logrows = std::cmp::max(max_logrows, MIN_LOGROWS);
let mut min_logrows = MIN_LOGROWS;

let reserved_blinding_rows = Self::reserved_blinding_rows();
let safe_lookup_range = Self::calc_safe_lookup_range(min_max_lookup, lookup_safety_margin);

// check if has overflowed max lookup input
if max_lookup_inputs > MAX_LOOKUP_ABS / lookup_safety_margin
|| min_lookup_inputs < -MAX_LOOKUP_ABS / lookup_safety_margin
{
let err_string = format!("max lookup input ({}) is too large", max_lookup_inputs);
if (min_max_lookup.1 - min_max_lookup.0).abs() > MAX_LOOKUP_ABS / lookup_safety_margin {
let err_string = format!("max lookup input {:?} is too large", min_max_lookup);
return Err(err_string.into());
}

let safe_range = Self::calc_safe_lookup_range(
min_lookup_inputs,
max_lookup_inputs,
lookup_safety_margin,
if max_range_size.abs() > MAX_LOOKUP_ABS {
let err_string = format!("max range check size {:?} is too large", max_range_size);
return Err(err_string.into());
}

// These are hard lower limits, we can't overflow instances or modules constraints
let instance_logrows = self.settings().log2_total_instances();
let module_constraint_logrows = self.settings().module_constraint_logrows();
let dynamic_lookup_logrows = self.settings().dynamic_lookup_logrows();
min_logrows = std::cmp::max(
min_logrows,
// max of the instance logrows and the module constraint logrows and the dynamic lookup logrows is the lower limit
*[
instance_logrows,
module_constraint_logrows,
dynamic_lookup_logrows,
]
.iter()
.max()
.unwrap(),
);

// These are upper limits, going above these is wasteful, but they are not hard limits
let model_constraint_logrows = self.settings().model_constraint_logrows();
let min_bits = self.table_size_logrows(safe_lookup_range, max_range_size)?;
let constants_logrows = self.settings().constants_logrows();
max_logrows = std::cmp::min(
max_logrows,
// max of the model constraint logrows, min_bits, and the constants logrows is the upper limit
*[model_constraint_logrows, min_bits, constants_logrows]
.iter()
.max()
.unwrap(),
);

// we now have a min and max logrows
max_logrows = std::cmp::max(min_logrows, max_logrows);

// degrade the max logrows until the extended k is small enough
while min_logrows < max_logrows
&& !self.extended_k_is_small_enough(
min_logrows,
Self::calc_num_cols(safe_range, min_logrows),
)
{
min_logrows += 1;
}

if !self
.extended_k_is_small_enough(min_logrows, Self::calc_num_cols(safe_range, min_logrows))
{
let err_string = format!(
"extended k is too large to accommodate the quotient polynomial with logrows {}",
min_logrows
);
error!("{}", err_string);
return Err(err_string.into());
}

while min_logrows < max_logrows
&& !self.extended_k_is_small_enough(
max_logrows,
Self::calc_num_cols(safe_range, max_logrows),
)
&& !self.extended_k_is_small_enough(max_logrows, safe_lookup_range, max_range_size)
{
max_logrows -= 1;
}

if !self
.extended_k_is_small_enough(max_logrows, Self::calc_num_cols(safe_range, max_logrows))
{
if !self.extended_k_is_small_enough(max_logrows, safe_lookup_range, max_range_size) {
let err_string = format!(
"extended k is too large to accommodate the quotient polynomial with logrows {}",
max_logrows
);
error!("{}", err_string);
debug!("{}", err_string);
return Err(err_string.into());
}

let min_bits = ((safe_range.1 - safe_range.0) as f64 + reserved_blinding_rows + 1.)
.log2()
.ceil() as usize;

let min_rows_from_constraints = (self.settings().num_rows as f64 + reserved_blinding_rows)
.log2()
.ceil() as usize;

let mut logrows = std::cmp::max(min_bits, min_rows_from_constraints);

// if public input then public inputs col will have public inputs len
if self.settings().run_args.input_visibility.is_public()
|| self.settings().run_args.output_visibility.is_public()
{
let mut max_instance_len = self
.model()
.instance_shapes()?
.iter()
.fold(0, |acc, x| std::cmp::max(acc, x.iter().product::<usize>()))
as f64
+ reserved_blinding_rows;
// if there are modules then we need to add the max module size
if self.settings().uses_modules() {
max_instance_len += self
.settings()
.module_sizes
.num_instances()
.iter()
.sum::<usize>() as f64;
}
let instance_len_logrows = (max_instance_len).log2().ceil() as usize;
logrows = std::cmp::max(logrows, instance_len_logrows);
// this is for fixed const columns
}

// ensure logrows is at least 4
logrows = std::cmp::max(logrows, min_logrows as usize);
logrows = std::cmp::min(logrows, max_logrows as usize);
let logrows = max_logrows;

let model = self.model().clone();
|
||||
let settings_mut = self.settings_mut();
|
||||
settings_mut.run_args.lookup_range = safe_range;
|
||||
settings_mut.run_args.logrows = logrows as u32;
|
||||
settings_mut.run_args.lookup_range = safe_lookup_range;
|
||||
settings_mut.run_args.logrows = logrows;
|
||||
|
||||
*settings_mut = GraphCircuit::new(model, &settings_mut.run_args)?
|
||||
.settings()
|
||||
.clone();
|
||||
|
||||
// recalculate the total const size give nthe new logrows
|
||||
let total_const_len = settings_mut.total_const_size;
|
||||
let const_len_logrows = (total_const_len as f64).log2().ceil() as u32;
|
||||
settings_mut.run_args.logrows =
|
||||
std::cmp::max(settings_mut.run_args.logrows, const_len_logrows);
|
||||
// recalculate the total number of constraints given the new logrows
|
||||
let min_rows_from_constraints = (settings_mut.num_rows as f64 + reserved_blinding_rows)
|
||||
.log2()
|
||||
.ceil() as u32;
|
||||
settings_mut.run_args.logrows =
|
||||
std::cmp::max(settings_mut.run_args.logrows, min_rows_from_constraints);
|
||||
// recalculate the logrows if there has been overflow on the constants
|
||||
settings_mut.run_args.logrows = std::cmp::max(
|
||||
settings_mut.run_args.logrows,
|
||||
settings_mut.constants_logrows(),
|
||||
);
|
||||
// recalculate the logrows if there has been overflow for the model constraints
|
||||
settings_mut.run_args.logrows = std::cmp::max(
|
||||
settings_mut.run_args.logrows,
|
||||
settings_mut.model_constraint_logrows(),
|
||||
);
|
||||
|
||||
settings_mut.run_args.logrows = std::cmp::min(max_logrows, settings_mut.run_args.logrows);
|
||||
|
||||
info!(
|
||||
debug!(
|
||||
"setting lookup_range to: {:?}, setting logrows to: {}",
|
||||
self.settings().run_args.lookup_range,
|
||||
self.settings().run_args.logrows
|
||||
@@ -1123,12 +1174,29 @@ impl GraphCircuit {
|
||||
Ok(())
|
||||
}
|
||||

    fn extended_k_is_small_enough(&self, k: u32, num_lookup_cols: usize) -> bool {
        let max_degree = self.settings().run_args.num_inner_cols + 2;
        let max_lookup_degree = LOOKUP_DEG + num_lookup_cols - 1; // num_lookup_cols - 1 is the degree of the lookup synthetic selector
    fn extended_k_is_small_enough(
        &self,
        k: u32,
        safe_lookup_range: Range,
        max_range_size: i128,
    ) -> bool {
        // if num cols is too large then the extended k is too large
        if Self::calc_num_cols(safe_lookup_range.1 - safe_lookup_range.0, k) > MAX_NUM_LOOKUP_COLS {
            return false;
        } else if Self::calc_num_cols(max_range_size, k) > MAX_NUM_LOOKUP_COLS {
            return false;
        }

        let max_degree = std::cmp::max(max_degree, max_lookup_degree);
        let mut settings = self.settings().clone();
        settings.run_args.lookup_range = safe_lookup_range;
        settings.run_args.logrows = k;
        settings.required_range_checks = vec![(0, max_range_size)];
        let mut cs = ConstraintSystem::default();
        Self::configure_with_params(&mut cs, settings);
        #[cfg(feature = "mv-lookup")]
        let cs = cs.chunk_lookups();
        // quotient_poly_degree * params.n - 1 is the degree of the quotient polynomial
        let max_degree = cs.degree();
        let quotient_poly_degree = (max_degree - 1) as u64;
        // n = 2^k
        let n = 1u64 << k;
@@ -1143,29 +1211,13 @@ impl GraphCircuit {
        true
    }
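For intuition on the extended-k check: the quotient polynomial has degree roughly (d - 1) * n, where d is the constraint system's degree, so the prover must FFT over an extended domain of size at least (d - 1) * n. A hedged standalone sketch of that bound (the cap of 28 on the extended domain is an assumption for illustration, not the crate's constant):

    /// Sketch: would the extended FFT domain for logrows `k` and constraint-system
    /// degree `max_degree` stay within an assumed cap? Mirrors the check above.
    fn extended_k_is_small_enough(k: u32, max_degree: usize) -> bool {
        const MAX_EXTENDED_K: u32 = 28; // illustrative cap, not the crate's constant
        let quotient_poly_degree = (max_degree - 1) as u64;
        let n = 1u64 << k;
        // smallest extended_k with 2^extended_k >= quotient_poly_degree * n
        let mut extended_k = k;
        while (1u64 << extended_k) < quotient_poly_degree * n {
            extended_k += 1;
            if extended_k > MAX_EXTENDED_K {
                return false;
            }
        }
        true
    }

    fn main() {
        assert!(extended_k_is_small_enough(17, 5)); // 4 * 2^17 needs extended_k = 19
        assert!(!extended_k_is_small_enough(27, 9)); // 8 * 2^27 needs extended_k = 30
    }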

    /// Calibrate the circuit to the supplied data.
    pub fn calibrate_from_min_max(
        &mut self,
        min_lookup_inputs: i128,
        max_lookup_inputs: i128,
        max_logrows: Option<u32>,
        lookup_safety_margin: i128,
    ) -> Result<(), Box<dyn std::error::Error>> {
        self.calc_min_logrows(
            min_lookup_inputs,
            max_lookup_inputs,
            max_logrows,
            lookup_safety_margin,
        )?;
        Ok(())
    }

    /// Runs the forward pass of the model / graph of computations and any associated hashing.
    pub fn forward(
        &self,
        inputs: &mut [Tensor<Fp>],
        vk: Option<&VerifyingKey<G1Affine>>,
        srs: Option<&ParamsKZG<Bn256>>,
        throw_range_check_error: bool,
    ) -> Result<GraphWitness, Box<dyn std::error::Error>> {
        let original_inputs = inputs.to_vec();

@@ -1206,7 +1258,9 @@ impl GraphCircuit {
            }
        }

        let mut model_results = self.model().forward(inputs)?;
        let mut model_results =
            self.model()
                .forward(inputs, &self.settings().run_args, throw_range_check_error)?;

        if visibility.output.requires_processing() {
            let module_outlets = visibility.output.overwrites_inputs();
@@ -1249,6 +1303,7 @@ impl GraphCircuit {
            processed_outputs,
            max_lookup_inputs: model_results.max_lookup_inputs,
            min_lookup_inputs: model_results.min_lookup_inputs,
            max_range_size: model_results.max_range_size,
        };

        witness.generate_rescaled_elements(
@@ -1366,7 +1421,6 @@ impl GraphCircuit {
    }
}

#[cfg(not(target_arch = "wasm32"))]
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
struct CircuitSize {
    num_instances: usize,
@@ -1374,20 +1428,22 @@ struct CircuitSize {
    num_fixed: usize,
    num_challenges: usize,
    num_selectors: usize,
    logrows: u32,
}

#[cfg(not(target_arch = "wasm32"))]
impl CircuitSize {
    pub fn from_cs(cs: &ConstraintSystem<Fp>) -> Self {
    pub fn from_cs(cs: &ConstraintSystem<Fp>, logrows: u32) -> Self {
        CircuitSize {
            num_instances: cs.num_instance_columns(),
            num_advice_columns: cs.num_advice_columns(),
            num_fixed: cs.num_fixed_columns(),
            num_challenges: cs.num_challenges(),
            num_selectors: cs.num_selectors(),
            logrows,
        }
    }

    #[cfg(not(target_arch = "wasm32"))]
    /// Export the ezkl configuration as json
    pub fn as_json(&self) -> Result<String, Box<dyn std::error::Error>> {
        let serialized = match serde_json::to_string(&self) {
@@ -1398,6 +1454,25 @@ impl CircuitSize {
        };
        Ok(serialized)
    }

    /// number of columns
    pub fn num_columns(&self) -> usize {
        self.num_instances + self.num_advice_columns + self.num_fixed
    }

    /// area of the circuit
    pub fn area(&self) -> usize {
        self.num_columns() * (1 << self.logrows)
    }

    /// area less than max
    pub fn area_less_than_max(&self) -> bool {
        if EZKL_MAX_CIRCUIT_AREA.is_some() {
            self.area() < EZKL_MAX_CIRCUIT_AREA.unwrap()
        } else {
            true
        }
    }
}
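The new `area`/`area_less_than_max` methods gate synthesis on total cell count: columns times 2^logrows. A standalone sketch of the gating logic (the optional cap stands in for whatever populates the `EZKL_MAX_CIRCUIT_AREA` static referenced above; the threshold value is an example):

    // Sketch: gate synthesis on total cell count, as area_less_than_max does.
    // The cap would come from an EZKL_MAX_CIRCUIT_AREA-style setting.
    fn area(num_columns: usize, logrows: u32) -> usize {
        num_columns * (1usize << logrows)
    }

    fn area_less_than_max(num_columns: usize, logrows: u32, max_area: Option<usize>) -> bool {
        match max_area {
            Some(max) => area(num_columns, logrows) < max,
            None => true, // no cap configured
        }
    }

    fn main() {
        let cap = Some(1_000_000);
        assert!(area_less_than_max(4, 17, cap)); // 524288 cells, under the cap
        assert!(!area_less_than_max(8, 17, cap)); // 1048576 cells, over the cap
    }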

impl Circuit<Fp> for GraphCircuit {
@@ -1435,34 +1510,18 @@ impl Circuit<Fp> for GraphCircuit {
            params.run_args.logrows as usize,
        );

        let mut vars = ModelVars::new(
            cs,
            params.run_args.logrows as usize,
            params.total_assignments,
            params.run_args.num_inner_cols,
            params.total_const_size,
            params.module_requires_fixed(),
        );
        let mut vars = ModelVars::new(cs, &params);

        module_configs.configure_complex_modules(cs, visibility, params.module_sizes.clone());

        vars.instantiate_instance(
            cs,
            params.model_instance_shapes,
            params.model_instance_shapes.clone(),
            params.run_args.input_scale,
            module_configs.instance,
        );

        let base = Model::configure(
            cs,
            &vars,
            params.run_args.lookup_range,
            params.run_args.logrows as usize,
            params.required_lookups,
            params.required_range_checks,
            params.check_mode,
        )
        .unwrap();
        let base = Model::configure(cs, &vars, &params).unwrap();

        let model_config = ModelConfig { base, vars };

@@ -1472,10 +1531,12 @@ impl Circuit<Fp> for GraphCircuit {
            (cs.degree() as f32).log2().ceil()
        );

        let circuit_size = CircuitSize::from_cs(cs, params.run_args.logrows);

        #[cfg(not(target_arch = "wasm32"))]
        info!(
        debug!(
            "circuit size: \n {}",
            CircuitSize::from_cs(cs)
            circuit_size
                .as_json()
                .unwrap()
                .to_colored_json_auto()
@@ -1485,6 +1546,7 @@ impl Circuit<Fp> for GraphCircuit {
        GraphConfig {
            model_config,
            module_configs,
            circuit_size,
        }
    }

@@ -1497,6 +1559,16 @@ impl Circuit<Fp> for GraphCircuit {
        config: Self::Config,
        mut layouter: impl Layouter<Fp>,
    ) -> Result<(), PlonkError> {
        // check if the circuit area is less than the max
        if !config.circuit_size.area_less_than_max() {
            error!(
                "circuit area {} is larger than the max allowed area {}",
                config.circuit_size.area(),
                EZKL_MAX_CIRCUIT_AREA.unwrap()
            );
            return Err(PlonkError::Synthesis);
        }

        trace!("Setting input in synthesize");
        let input_vis = &self.settings().run_args.input_visibility;
        let output_vis = &self.settings().run_args.output_visibility;

@@ -10,7 +10,6 @@ use crate::circuit::table::Range;
use crate::circuit::Input;
use crate::circuit::InputType;
use crate::circuit::Unknown;
use crate::fieldutils::felt_to_i128;
use crate::tensor::ValType;
use crate::{
    circuit::{lookup::LookupOp, BaseConfig as PolyConfig, CheckMode, Op},
@@ -57,6 +56,8 @@ use unzip_n::unzip_n;

unzip_n!(pub 3);

#[cfg(not(target_arch = "wasm32"))]
type TractResult = (Graph<TypedFact, Box<dyn TypedOp>>, SymbolValues);
/// The result of a forward pass.
#[derive(Clone, Debug)]
pub struct ForwardResult {
@@ -66,6 +67,19 @@ pub struct ForwardResult {
    pub max_lookup_inputs: i128,
    /// The minimum value of any input to a lookup operation.
    pub min_lookup_inputs: i128,
    /// The max range check size
    pub max_range_size: i128,
}

impl From<DummyPassRes> for ForwardResult {
    fn from(res: DummyPassRes) -> Self {
        Self {
            outputs: res.outputs,
            max_lookup_inputs: res.max_lookup_inputs,
            min_lookup_inputs: res.min_lookup_inputs,
            max_range_size: res.max_range_size,
        }
    }
}

/// A circuit configuration for the entirety of a model loaded from an Onnx file.
@@ -85,6 +99,10 @@ pub type NodeGraph = BTreeMap<usize, NodeType>;
pub struct DummyPassRes {
    /// number of rows used
    pub num_rows: usize,
    /// num dynamic lookups
    pub num_dynamic_lookups: usize,
    /// dynamic lookup col size
    pub dynamic_lookup_col_coord: usize,
    /// linear coordinate
    pub linear_coord: usize,
    /// total const size
@@ -93,6 +111,14 @@ pub struct DummyPassRes {
    pub lookup_ops: HashSet<LookupOp>,
    /// range checks
    pub range_checks: HashSet<Range>,
    /// max lookup inputs
    pub max_lookup_inputs: i128,
    /// min lookup inputs
    pub min_lookup_inputs: i128,
    /// max range check size
    pub max_range_size: i128,
    /// outputs
    pub outputs: Vec<Tensor<Fp>>,
}

/// A struct for loading from an Onnx file and converting a computational graph to a circuit.
@@ -478,7 +504,7 @@ impl Model {
    ) -> Result<GraphSettings, Box<dyn Error>> {
        let instance_shapes = self.instance_shapes()?;
        #[cfg(not(target_arch = "wasm32"))]
        info!(
        debug!(
            "{} {} {}",
            "model has".blue(),
            instance_shapes.len().to_string().blue(),
@@ -486,7 +512,25 @@ impl Model {
        );
        // this is the total number of variables we will need to allocate
        // for the circuit
        let res = self.dummy_layout(run_args, &self.graph.input_shapes()?)?;
        let default_value = if !self.visibility.input.is_fixed() {
            ValType::Value(Value::<Fp>::unknown())
        } else {
            ValType::Constant(Fp::ONE)
        };

        let inputs: Vec<ValTensor<Fp>> = self
            .graph
            .input_shapes()?
            .iter()
            .map(|shape| {
                let mut t: ValTensor<Fp> =
                    vec![default_value.clone(); shape.iter().product()].into();
                t.reshape(shape)?;
                Ok(t)
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;

        let res = self.dummy_layout(run_args, &inputs, false)?;

        // if we're using percentage tolerance, we need to add the necessary range check ops for it.

@@ -500,6 +544,8 @@ impl Model {
            required_range_checks: res.range_checks.into_iter().collect(),
            model_output_scales: self.graph.get_output_scales()?,
            model_input_scales: self.graph.get_input_scales(),
            num_dynamic_lookups: res.num_dynamic_lookups,
            total_dynamic_col_size: res.dynamic_lookup_col_coord,
            total_const_size: res.total_const_size,
            check_mode,
            version: env!("CARGO_PKG_VERSION").to_string(),
@@ -521,207 +567,18 @@ impl Model {
    /// * `reader` - A reader for an Onnx file.
    /// * `model_inputs` - A vector of [Tensor]s to use as inputs to the model.
    /// * `run_args` - [RunArgs]
    pub fn forward(&self, model_inputs: &[Tensor<Fp>]) -> Result<ForwardResult, Box<dyn Error>> {
        let mut results: BTreeMap<&usize, Vec<Tensor<Fp>>> = BTreeMap::new();
        let mut max_lookup_inputs = 0;
        let mut min_lookup_inputs = 0;

        let input_shapes = self.graph.input_shapes()?;

        for (i, input_idx) in self.graph.inputs.iter().enumerate() {
            let mut input = model_inputs[i].clone();
            input.reshape(&input_shapes[i])?;
            results.insert(input_idx, vec![input]);
        }

        for (idx, n) in self.graph.nodes.iter() {
            let mut inputs = vec![];
            if n.is_input() {
                let t = results.get(idx).ok_or(GraphError::MissingResults)?[0].clone();
                inputs.push(t);
            } else {
                for (idx, outlet) in n.inputs().iter() {
                    match results.get(&idx) {
                        Some(value) => inputs.push(value[*outlet].clone()),
                        None => return Err(Box::new(GraphError::MissingNode(*idx))),
                    }
                }
            };

            debug!("executing {}: {}", idx, n.as_str());
            debug!("dims: {:?}", n.out_dims());
            debug!(
                "input_dims: {:?}",
                inputs.iter().map(|x| x.dims()).collect::<Vec<_>>()
            );

            debug!("input nodes: {:?}", n.inputs());

            if n.is_lookup() {
                let (mut min, mut max) = (0, 0);
                for i in &inputs {
                    max = max.max(
                        i.iter()
                            .map(|x| felt_to_i128(*x))
                            .max()
                            .ok_or("missing max")?,
                    );
                    min = min.min(
                        i.iter()
                            .map(|x| felt_to_i128(*x))
                            .min()
                            .ok_or("missing min")?,
                    );
                }
                max_lookup_inputs = max_lookup_inputs.max(max);
                min_lookup_inputs = min_lookup_inputs.min(min);
                debug!("max lookup inputs: {}", max);
                debug!("min lookup inputs: {}", min);
            }

            match n {
                NodeType::Node(n) => {
                    // execute the op
                    let start = instant::Instant::now();
                    let mut res = Op::<Fp>::f(&n.opkind, &inputs)?;
                    res.output.reshape(&n.out_dims)?;
                    let elapsed = start.elapsed();
                    trace!("op took: {:?}", elapsed);
                    // see if any of the intermediate lookup calcs are the max
                    if !res.intermediate_lookups.is_empty() {
                        let (mut min, mut max) = (0, 0);
                        for i in &res.intermediate_lookups {
                            max = max.max(i.clone().into_iter().max().ok_or("missing max")?);
                            min = min.min(i.clone().into_iter().min().ok_or("missing min")?);
                        }
                        max_lookup_inputs = max_lookup_inputs.max(max);
                        min_lookup_inputs = min_lookup_inputs.min(min);
                        debug!("intermediate max lookup inputs: {}", max);
                        debug!("intermediate min lookup inputs: {}", min);
                    }
                    debug!(
                        "------------ output node int {}: {} \n ------------ float: {} \n ------------ max: {} \n ------------ min: {} \n ------------ scale: {}",
                        idx,
                        res.output.map(crate::fieldutils::felt_to_i32).show(),
                        res.output
                            .map(|x| crate::fieldutils::felt_to_f64(x)
                                / scale_to_multiplier(n.out_scale))
                            .show(),
                        res.output.clone().into_iter().map(crate::fieldutils::felt_to_i128).max().unwrap_or(0),
                        res.output.clone().into_iter().map(crate::fieldutils::felt_to_i128).min().unwrap_or(0),
                        n.out_scale
                    );
                    results.insert(idx, vec![res.output]);
                }
                NodeType::SubGraph {
                    model,
                    output_mappings,
                    input_mappings,
                    inputs: input_tuple,
                    ..
                } => {
                    let orig_inputs = inputs.clone();
                    let input_mappings = input_mappings.clone();

                    let input_dims = inputs.iter().map(|inp| inp.dims());
                    let num_iter = number_of_iterations(&input_mappings, input_dims.collect());

                    debug!(
                        "{} iteration(s) in a subgraph with inputs {:?} and sources {:?}",
                        num_iter, input_tuple, model.graph.inputs
                    );

                    debug!("input_mappings: {:?}", input_mappings);

                    let mut full_results: Vec<Tensor<Fp>> = vec![];

                    for i in 0..num_iter {
                        // replace the Stacked input with the current chunk iter
                        for ((mapping, inp), og_input) in
                            input_mappings.iter().zip(&mut inputs).zip(&orig_inputs)
                        {
                            if let InputMapping::Stacked { axis, chunk } = mapping {
                                let start = i * chunk;
                                let end = (i + 1) * chunk;
                                let t = crate::tensor::ops::slice(og_input, axis, &start, &end)?;
                                *inp = t;
                            }
                        }

                        let res = model.forward(&inputs)?;
                        // recursively get the max lookup inputs for subgraphs
                        max_lookup_inputs = max_lookup_inputs.max(res.max_lookup_inputs);
                        min_lookup_inputs = min_lookup_inputs.min(res.min_lookup_inputs);

                        let mut outlets = BTreeMap::new();
                        for (mappings, outlet_res) in output_mappings.iter().zip(res.outputs) {
                            for mapping in mappings {
                                match mapping {
                                    OutputMapping::Single { outlet, .. } => {
                                        outlets.insert(outlet, outlet_res.clone());
                                    }
                                    OutputMapping::Stacked { outlet, axis, .. } => {
                                        if !full_results.is_empty() {
                                            let stacked_res = crate::tensor::ops::concat(
                                                &[&full_results[*outlet], &outlet_res],
                                                *axis,
                                            )?;

                                            outlets.insert(outlet, stacked_res);
                                        } else {
                                            outlets.insert(outlet, outlet_res.clone());
                                        }
                                    }
                                }
                            }
                        }

                        full_results = outlets.into_values().collect_vec();

                        let output_states = output_state_idx(output_mappings);
                        let input_states = input_state_idx(&input_mappings);

                        assert_eq!(input_states.len(), output_states.len());

                        for (input_idx, output_idx) in input_states.iter().zip(output_states) {
                            inputs[*input_idx] = full_results[output_idx].clone();
                        }
                    }

                    trace!(
                        "------------ output subgraph node {}: {:?}",
                        idx,
                        full_results
                            .iter()
                            .map(|x|
                            // convert to tensor i32
                            x.map(crate::fieldutils::felt_to_i32).show())
                            .collect_vec()
                    );

                    results.insert(idx, full_results);
                }
            }
        }

        let output_nodes = self.graph.outputs.iter();
        debug!(
            "model outputs are nodes: {:?}",
            output_nodes.clone().collect_vec()
        );
        let outputs = output_nodes
            .map(|(idx, outlet)| {
                Ok(results.get(&idx).ok_or(GraphError::MissingResults)?[*outlet].clone())
            })
            .collect::<Result<Vec<_>, GraphError>>()?;

        let res = ForwardResult {
            outputs,
            max_lookup_inputs,
            min_lookup_inputs,
        };

        Ok(res)
    pub fn forward(
        &self,
        model_inputs: &[Tensor<Fp>],
        run_args: &RunArgs,
        throw_range_check_error: bool,
    ) -> Result<ForwardResult, Box<dyn Error>> {
        let valtensor_inputs: Vec<ValTensor<Fp>> = model_inputs
            .iter()
            .map(|x| x.map(|elem| ValType::Value(Value::known(elem))).into())
            .collect();
        let res = self.dummy_layout(run_args, &valtensor_inputs, throw_range_check_error)?;
        Ok(res.into())
    }

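The rewritten `Model::forward` simply lifts the input tensors into `ValTensor`s and reuses `dummy_layout`, so the forward pass and the constraint-counting pass share one code path and the lookup/range statistics come from the same region context. A hedged usage sketch of the new calling convention (the surrounding types come from the crate, and the real call site lives in `GraphCircuit::forward`, so treat this as illustrative only):

    // Sketch of the new calling convention, with placeholder bindings.
    // `model`, `inputs`, and `run_args` are assumed to be constructed elsewhere.
    fn run_forward(
        model: &Model,
        inputs: &[Tensor<Fp>],
        run_args: &RunArgs,
    ) -> Result<(), Box<dyn std::error::Error>> {
        // `true` makes out-of-range lookup inputs an error instead of a statistic
        let res = model.forward(inputs, run_args, true)?;
        println!(
            "lookup inputs span [{}, {}], largest range check: {}",
            res.min_lookup_inputs, res.max_lookup_inputs, res.max_range_size
        );
        Ok(())
    }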
    /// Loads an Onnx model from a specified path.
@@ -733,7 +590,7 @@ impl Model {
    fn load_onnx_using_tract(
        reader: &mut dyn std::io::Read,
        run_args: &RunArgs,
    ) -> Result<(Graph<TypedFact, Box<dyn TypedOp>>, SymbolValues), Box<dyn Error>> {
    ) -> Result<TractResult, Box<dyn Error>> {
        use tract_onnx::{
            tract_core::internal::IntoArcTensor, tract_hir::internal::GenericFactoid,
        };
@@ -772,7 +629,7 @@ impl Model {
        for (symbol, value) in run_args.variables.iter() {
            let symbol = model.symbol_table.sym(symbol);
            symbol_values = symbol_values.with(&symbol, *value as i64);
            info!("set {} to {}", symbol, value);
            debug!("set {} to {}", symbol, value);
        }

        // Note: do not optimize the model, as the layout will depend on underlying hardware
@@ -1152,35 +1009,44 @@ impl Model {
    /// # Arguments
    /// * `meta` - The constraint system.
    /// * `vars` - The variables for the circuit.
    /// * `run_args` - [RunArgs]
    /// * `required_lookups` - The required lookup operations for the circuit.
    /// * `settings` - [GraphSettings]
    pub fn configure(
        meta: &mut ConstraintSystem<Fp>,
        vars: &ModelVars<Fp>,
        lookup_range: Range,
        logrows: usize,
        required_lookups: Vec<LookupOp>,
        required_range_checks: Vec<Range>,
        check_mode: CheckMode,
        settings: &GraphSettings,
    ) -> Result<PolyConfig<Fp>, Box<dyn Error>> {
        info!("configuring model");
        debug!("configuring model");

        let lookup_range = settings.run_args.lookup_range;
        let logrows = settings.run_args.logrows as usize;
        let num_dynamic_lookups = settings.num_dynamic_lookups;
        let required_lookups = settings.required_lookups.clone();
        let required_range_checks = settings.required_range_checks.clone();

        let mut base_gate = PolyConfig::configure(
            meta,
            vars.advices[0..2].try_into()?,
            &vars.advices[2],
            check_mode,
            settings.check_mode,
        );
        // set scale for HybridOp::RangeCheck and call self.conf_lookup on that op for percentage tolerance case
        let input = &vars.advices[0];
        let output = &vars.advices[1];
        let index = &vars.advices[2];
        let output = &vars.advices[2];
        let index = &vars.advices[1];
        for op in required_lookups {
            base_gate.configure_lookup(meta, input, output, index, lookup_range, logrows, &op)?;
        }

        for range in required_range_checks {
            base_gate.configure_range_check(meta, input, range)?;
            base_gate.configure_range_check(meta, input, index, range, logrows)?;
        }

        for _ in 0..num_dynamic_lookups {
            base_gate.configure_dynamic_lookup(
                meta,
                vars.advices[0..2].try_into()?,
                vars.advices[3..5].try_into()?,
            )?;
        }

        Ok(base_gate)
@@ -1293,7 +1159,7 @@ impl Model {

        // Then number of columns in the circuits
        #[cfg(not(target_arch = "wasm32"))]
        info!(
        debug!(
            "{} {} {} (coord={}, constants={})",
            "model uses".blue(),
            num_rows.to_string().blue(),
@@ -1335,18 +1201,29 @@ impl Model {
            };

            debug!(
                "laying out {}: {}, row:{}, coord:{}, total_constants: {}",
                "laying out {}: {}, row:{}, coord:{}, total_constants: {}, max_lookup_inputs: {}, min_lookup_inputs: {}",
                idx,
                node.as_str(),
                region.row(),
                region.linear_coord(),
                region.total_constants()
                region.total_constants(),
                region.max_lookup_inputs(),
                region.min_lookup_inputs()
            );
            debug!("dims: {:?}", node.out_dims());
            debug!(
                "input_dims {:?}",
                values.iter().map(|v| v.dims()).collect_vec()
            );
            debug!("output scales: {:?}", node.out_scales());
            debug!("input indices: {:?}", node.inputs());
            debug!(
                "input scales: {:?}",
                node.inputs()
                    .iter()
                    .map(|(idx, outlet)| self.graph.nodes[idx].out_scales()[*outlet])
                    .collect_vec()
            );

            match &node {
                NodeType::Node(n) => {
@@ -1489,28 +1366,14 @@ impl Model {
    pub fn dummy_layout(
        &self,
        run_args: &RunArgs,
        input_shapes: &[Vec<usize>],
        inputs: &[ValTensor<Fp>],
        throw_range_check_error: bool,
    ) -> Result<DummyPassRes, Box<dyn Error>> {
        info!("calculating num of constraints using dummy model layout...");
        debug!("calculating num of constraints using dummy model layout...");

        let start_time = instant::Instant::now();

        let mut results = BTreeMap::<usize, Vec<ValTensor<Fp>>>::new();
        let default_value = if !self.visibility.input.is_fixed() {
            ValType::Value(Value::<Fp>::unknown())
        } else {
            ValType::Constant(Fp::ONE)
        };

        let inputs: Vec<ValTensor<Fp>> = input_shapes
            .iter()
            .map(|shape| {
                let mut t: ValTensor<Fp> =
                    vec![default_value.clone(); shape.iter().product()].into();
                t.reshape(shape)?;
                Ok(t)
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;

        for (i, input_idx) in self.graph.inputs.iter().enumerate() {
            results.insert(*input_idx, vec![inputs[i].clone()]);
@@ -1523,7 +1386,7 @@ impl Model {
            vars: ModelVars::new_dummy(),
        };

        let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols);
        let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols, throw_range_check_error);

        let outputs = self.layout_nodes(&mut model_config, &mut region, &mut results)?;

@@ -1534,27 +1397,26 @@ impl Model {
            ValType::Constant(Fp::ONE)
        };

        let comparator = outputs
        let output_scales = self.graph.get_output_scales()?;
        let res = outputs
            .iter()
            .map(|x| {
                let mut v: ValTensor<Fp> =
                    vec![default_value.clone(); x.dims().iter().product::<usize>()].into();
                v.reshape(x.dims())?;
                Ok(v)
            })
            .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
            .enumerate()
            .map(|(i, output)| {
                let mut tolerance = run_args.tolerance;
                tolerance.scale = scale_to_multiplier(output_scales[i]).into();

                let mut comparator: ValTensor<Fp> =
                    vec![default_value.clone(); output.dims().iter().product::<usize>()].into();
                comparator.reshape(output.dims())?;

        let _ = outputs
            .into_iter()
            .zip(comparator)
            .map(|(o, c)| {
                dummy_config.layout(
                    &mut region,
                    &[o, c],
                    Box::new(HybridOp::RangeCheck(run_args.tolerance)),
                    &[output.clone(), comparator],
                    Box::new(HybridOp::RangeCheck(tolerance)),
                )
            })
            .collect::<Result<Vec<_>, _>>()?;
            .collect::<Result<Vec<_>, _>>();
        res?;
        } else if !self.visibility.output.is_private() {
            for output in &outputs {
                region.increment_total_constants(output.num_constants());
@@ -1566,7 +1428,7 @@ impl Model {

        // Then number of columns in the circuits
        #[cfg(not(target_arch = "wasm32"))]
        info!(
        debug!(
            "{} {} {} (coord={}, constants={})",
            "model uses".blue(),
            region.row().to_string().blue(),
@@ -1575,12 +1437,26 @@ impl Model {
            region.total_constants().to_string().red()
        );

        let outputs = outputs
            .iter()
            .map(|x| {
                x.get_felt_evals()
                    .unwrap_or(Tensor::new(Some(&[Fp::ZERO]), &[1]).unwrap())
            })
            .collect();

        let res = DummyPassRes {
            num_rows: region.row(),
            linear_coord: region.linear_coord(),
            total_const_size: region.total_constants(),
            lookup_ops: region.used_lookups(),
            range_checks: region.used_range_checks(),
            max_lookup_inputs: region.max_lookup_inputs(),
            min_lookup_inputs: region.min_lookup_inputs(),
            max_range_size: region.max_range_size(),
            num_dynamic_lookups: region.dynamic_lookup_index(),
            dynamic_lookup_col_coord: region.dynamic_lookup_col_coord(),
            outputs,
        };

        Ok(res)

@@ -148,7 +148,7 @@ impl RebaseScale {
        SupportedOp::RebaseScale(RebaseScale {
            inner: op.inner.clone(),
            target_scale: op.target_scale,
            multiplier: multiplier,
            multiplier,
            rebase_op: HybridOp::Div {
                denom: crate::circuit::utils::F32((multiplier) as f32),
                use_range_check_for_int: !div_rebasing,
@@ -219,8 +219,6 @@ impl Op<Fp> for RebaseScale {
        let mut res = Op::<Fp>::f(&*self.inner, x)?;
        let rebase_res = Op::<Fp>::f(&self.rebase_op, &[res.output])?;
        res.output = rebase_res.output;
        res.intermediate_lookups
            .extend(rebase_res.intermediate_lookups);

        Ok(res)
    }
@@ -499,6 +497,7 @@ impl Node {
    /// * `public_params` - flag if parameters of model are public
    /// * `idx` - The node's unique identifier.
    #[cfg(not(target_arch = "wasm32"))]
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
        other_nodes: &mut BTreeMap<usize, super::NodeType>,

@@ -439,17 +439,16 @@ pub fn new_op_from_onnx(
        let op = load_op::<ScatterElements>(node.op(), idx, node.op().name().to_string())?;
        let axis = op.axis;

        let mut op =
            SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::ScatterElements {
                dim: axis,
                constant_idx: None,
            });
        let mut op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterElements {
            dim: axis,
            constant_idx: None,
        });

        // if param_visibility.is_public() {
        if let Some(c) = inputs[1].opkind().get_mutable_constant() {
            inputs[1].decrement_use();
            deleted_indices.push(1);
            op = SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::ScatterElements {
            op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterElements {
                dim: axis,
                constant_idx: Some(c.raw_values.map(|x| x as usize)),
            })
@@ -478,17 +477,16 @@ pub fn new_op_from_onnx(
        let op = load_op::<GatherElements>(node.op(), idx, node.op().name().to_string())?;
        let axis = op.axis;

        let mut op =
            SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::GatherElements {
                dim: axis,
                constant_idx: None,
            });
        let mut op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherElements {
            dim: axis,
            constant_idx: None,
        });

        // if param_visibility.is_public() {
        if let Some(c) = inputs[1].opkind().get_mutable_constant() {
            inputs[1].decrement_use();
            deleted_indices.push(inputs.len() - 1);
            op = SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::GatherElements {
            op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherElements {
                dim: axis,
                constant_idx: Some(c.raw_values.map(|x| x as usize)),
            })
@@ -736,7 +734,7 @@ pub fn new_op_from_onnx(
            SupportedOp::Hybrid(HybridOp::Recip {
                input_scale: (scale_to_multiplier(in_scale) as f32).into(),
                output_scale: (scale_to_multiplier(max_scale) as f32).into(),
                use_range_check_for_int: false,
                use_range_check_for_int: true,
            })
        }

@@ -1,4 +1,5 @@
use std::error::Error;
use std::fmt::Display;

use crate::tensor::TensorType;
use crate::tensor::{ValTensor, VarTensor};
@@ -14,6 +15,7 @@ use pyo3::{
};

use serde::{Deserialize, Serialize};
use tosubcommand::ToFlags;

use super::*;

@@ -40,6 +42,33 @@ pub enum Visibility {
    Fixed,
}

impl Display for Visibility {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Visibility::KZGCommit => write!(f, "kzgcommit"),
            Visibility::Private => write!(f, "private"),
            Visibility::Public => write!(f, "public"),
            Visibility::Fixed => write!(f, "fixed"),
            Visibility::Hashed {
                hash_is_public,
                outlets,
            } => {
                if *hash_is_public {
                    write!(f, "hashed/public")
                } else {
                    write!(f, "hashed/private/{}", outlets.iter().join(","))
                }
            }
        }
    }
}

impl ToFlags for Visibility {
    fn to_flags(&self) -> Vec<String> {
        vec![format!("{}", self)]
    }
}
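With `Display` now emitting the same strings that `From<&str>` parses (`public`, `hashed/public`, `hashed/private/<outlets>`, and so on), visibility flags can round-trip through the CLI. A small standalone sketch of the invariant the two impls are meant to uphold (a cut-down enum for illustration, not the crate's full type):

    // Standalone illustration of the Display/parse round-trip contract.
    #[derive(Debug, PartialEq)]
    enum Visibility {
        Private,
        Public,
        HashedPublic,
    }

    impl std::fmt::Display for Visibility {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
            match self {
                Visibility::Private => write!(f, "private"),
                Visibility::Public => write!(f, "public"),
                Visibility::HashedPublic => write!(f, "hashed/public"),
            }
        }
    }

    impl From<&str> for Visibility {
        fn from(s: &str) -> Self {
            match s {
                "public" => Visibility::Public,
                "hashed/public" => Visibility::HashedPublic,
                _ => Visibility::Private,
            }
        }
    }

    fn main() {
        for v in [Visibility::Private, Visibility::Public, Visibility::HashedPublic] {
            assert_eq!(Visibility::from(v.to_string().as_str()), v);
        }
    }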

impl<'a> From<&'a str> for Visibility {
    fn from(s: &'a str) -> Self {
        if s.contains("hashed/private") {
@@ -202,17 +231,6 @@ impl Visibility {
            vec![]
        }
    }
impl std::fmt::Display for Visibility {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Visibility::KZGCommit => write!(f, "kzgcommit"),
            Visibility::Private => write!(f, "private"),
            Visibility::Public => write!(f, "public"),
            Visibility::Fixed => write!(f, "fixed"),
            Visibility::Hashed { .. } => write!(f, "hashed"),
        }
    }
}

/// Represents the scale of the model input, model parameters.
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, PartialOrd)]
@@ -402,20 +420,31 @@ impl<F: PrimeField + TensorType + PartialOrd> ModelVars<F> {
    }

    /// Allocate all columns that will be assigned to by a model.
    pub fn new(
        cs: &mut ConstraintSystem<F>,
        logrows: usize,
        var_len: usize,
        num_inner_cols: usize,
        num_constants: usize,
        module_requires_fixed: bool,
    ) -> Self {
        info!("number of blinding factors: {}", cs.blinding_factors());
    pub fn new(cs: &mut ConstraintSystem<F>, params: &GraphSettings) -> Self {
        debug!("number of blinding factors: {}", cs.blinding_factors());

        let advices = (0..3)
        let logrows = params.run_args.logrows as usize;
        let var_len = params.total_assignments;
        let num_inner_cols = params.run_args.num_inner_cols;
        let num_constants = params.total_const_size;
        let module_requires_fixed = params.module_requires_fixed();
        let requires_dynamic_lookup = params.requires_dynamic_lookup();
        let dynamic_lookup_size = params.total_dynamic_col_size;

        let mut advices = (0..3)
            .map(|_| VarTensor::new_advice(cs, logrows, num_inner_cols, var_len))
            .collect_vec();

        if requires_dynamic_lookup {
            for _ in 0..2 {
                let dynamic_lookup = VarTensor::new_advice(cs, logrows, 1, dynamic_lookup_size);
                if dynamic_lookup.num_blocks() > 1 {
                    panic!("dynamic lookup should only have one block");
                };
                advices.push(dynamic_lookup);
            }
        }

        debug!(
            "model uses {} advice blocks (size={})",
            advices.iter().map(|v| v.num_blocks()).sum::<usize>(),

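The two extra advice columns for dynamic lookups are allocated with `num_inner_cols = 1`, and the panic above enforces that each stays a single block: a lookup argument must reference one concrete column, so the `VarTensor` backing it cannot be split across several. A sketch of the block arithmetic behind that invariant (the capacity-to-blocks formula here is an illustrative assumption):

    // Illustrative arithmetic: a VarTensor of `capacity` cells over columns of
    // 2^logrows rows needs this many blocks; dynamic lookups require exactly 1.
    fn num_blocks(capacity: usize, logrows: u32) -> usize {
        let col_size = 1usize << logrows;
        capacity.div_ceil(col_size)
    }

    fn main() {
        assert_eq!(num_blocks(100_000, 17), 1); // fits one column of 131072 rows
        assert_eq!(num_blocks(200_000, 17), 2); // would trip the panic above
    }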
67 src/lib.rs
@@ -7,7 +7,6 @@
    overflowing_literals,
    path_statements,
    patterns_in_fns_without_body,
    private_in_public,
    unconditional_recursion,
    unused,
    unused_allocation,
@@ -33,6 +32,7 @@ use circuit::{table::Range, CheckMode, Tolerance};
use clap::Args;
use graph::Visibility;
use serde::{Deserialize, Serialize};
use tosubcommand::ToFlags;

/// Methods for configuring tensor operations and assigning values to them in a Halo2 circuit.
pub mod circuit;
@@ -71,11 +71,34 @@ pub mod tensor;
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
pub mod wasm;

#[cfg(not(target_arch = "wasm32"))]
use lazy_static::lazy_static;

/// The denominator in the fixed point representation used when quantizing inputs
pub type Scale = i32;

#[cfg(not(target_arch = "wasm32"))]
// Buf writer capacity
lazy_static! {
    /// The capacity of the buffer used for writing to disk
    pub static ref EZKL_BUF_CAPACITY: usize = std::env::var("EZKL_BUF_CAPACITY")
        .unwrap_or("8000".to_string())
        .parse()
        .unwrap();

    /// The serialization format for the keys
    pub static ref EZKL_KEY_FORMAT: String = std::env::var("EZKL_KEY_FORMAT")
        .unwrap_or("raw-bytes".to_string());
}
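Both statics resolve once, on first use, from the environment with a default. A sketch of the same read-with-fallback pattern as free functions, which is handy for seeing what values a given shell environment would produce (the variable names come from the diff; the comment's override value is an example):

    // Mirrors the lazy_static initializers above: read once, fall back to the
    // documented defaults when the variables are unset.
    fn buf_capacity() -> usize {
        std::env::var("EZKL_BUF_CAPACITY")
            .unwrap_or("8000".to_string())
            .parse()
            .unwrap()
    }

    fn key_format() -> String {
        std::env::var("EZKL_KEY_FORMAT").unwrap_or("raw-bytes".to_string())
    }

    fn main() {
        // e.g. `EZKL_BUF_CAPACITY=16000 ezkl ...` would raise the buffer to 16 kB
        println!("buf capacity: {}", buf_capacity());
        println!("key format:   {}", key_format());
    }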

#[cfg(target_arch = "wasm32")]
const EZKL_KEY_FORMAT: &str = "raw-bytes";

#[cfg(target_arch = "wasm32")]
const EZKL_BUF_CAPACITY: &usize = &8000;

/// Parameters specific to a proving run
#[derive(Debug, Args, Deserialize, Serialize, Clone, PartialEq, PartialOrd)]
#[derive(Debug, Args, Deserialize, Serialize, Clone, PartialEq, PartialOrd, ToFlags)]
pub struct RunArgs {
    /// The tolerance for error on model outputs
    #[arg(short = 'T', long, default_value = "0")]
@@ -90,7 +113,7 @@ pub struct RunArgs {
    #[arg(long, default_value = "1")]
    pub scale_rebase_multiplier: u32,
    /// The min and max elements in the lookup table input column
    #[arg(short = 'B', long, value_parser = parse_tuple::<i128>, default_value = "(-32768,32768)")]
    #[arg(short = 'B', long, value_parser = parse_key_val::<i128, i128>, default_value = "-32768->32768")]
    pub lookup_range: Range,
    /// The log_2 number of rows
    #[arg(short = 'K', long, default_value = "17")]
@@ -99,7 +122,7 @@ pub struct RunArgs {
    #[arg(short = 'N', long, default_value = "2")]
    pub num_inner_cols: usize,
    /// Hand-written parser for graph variables, e.g. batch_size=1
    #[arg(short = 'V', long, value_parser = parse_key_val::<String, usize>, default_value = "batch_size=1", value_delimiter = ',')]
    #[arg(short = 'V', long, value_parser = parse_key_val::<String, usize>, default_value = "batch_size->1", value_delimiter = ',')]
    pub variables: Vec<(String, usize)>,
    /// Flags whether inputs are public, private, hashed
    #[arg(long, default_value = "private")]
@@ -157,6 +180,9 @@ impl RunArgs {
        if self.num_inner_cols < 1 {
            return Err("num_inner_cols must be >= 1".into());
        }
        if self.tolerance.val > 0.0 && self.output_visibility != Visibility::Public {
            return Err("tolerance > 0.0 requires output_visibility to be public".into());
        }
        Ok(())
    }

@@ -181,34 +207,15 @@ fn parse_key_val<T, U>(
    s: &str,
) -> Result<(T, U), Box<dyn std::error::Error + Send + Sync + 'static>>
where
    T: std::str::FromStr,
    T: std::str::FromStr + std::fmt::Debug,
    T::Err: std::error::Error + Send + Sync + 'static,
    U: std::str::FromStr,
    U: std::str::FromStr + std::fmt::Debug,
    U::Err: std::error::Error + Send + Sync + 'static,
{
    let pos = s
        .find('=')
        .ok_or_else(|| format!("invalid KEY=value: no `=` found in `{s}`"))?;
    Ok((s[..pos].parse()?, s[pos + 1..].parse()?))
}

/// Parse a tuple
fn parse_tuple<T>(s: &str) -> Result<(T, T), Box<dyn std::error::Error + Send + Sync + 'static>>
where
    T: std::str::FromStr + Clone,
    T::Err: std::error::Error + Send + Sync + 'static,
{
    let res = s.trim_matches(|p| p == '(' || p == ')').split(',');

    let res = res
        .map(|x| {
            // remove blank space
            let x = x.trim();
            x.parse::<T>()
        })
        .collect::<Result<Vec<_>, _>>()?;
    if res.len() != 2 {
        return Err("invalid tuple".into());
    }
    Ok((res[0].clone(), res[1].clone()))
        .find("->")
        .ok_or_else(|| format!("invalid x->y: no `->` found in `{s}`"))?;
    let a = s[..pos].parse()?;
    let b = s[pos + 2..].parse()?;
    Ok((a, b))
}

122 src/pfsys/mod.rs
@@ -8,6 +8,7 @@ use crate::circuit::CheckMode;
use crate::graph::GraphWitness;
use crate::pfsys::evm::aggregation::PoseidonTranscript;
use crate::tensor::TensorType;
use crate::{EZKL_BUF_CAPACITY, EZKL_KEY_FORMAT};
use clap::ValueEnum;
use halo2_proofs::circuit::Value;
use halo2_proofs::plonk::{
@@ -39,9 +40,19 @@ use std::io::{self, BufReader, BufWriter, Cursor, Write};
use std::ops::Deref;
use std::path::PathBuf;
use thiserror::Error as thisError;
use tosubcommand::ToFlags;

use halo2curves::bn256::{Bn256, Fr, G1Affine};

fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
    match s {
        "processed" => halo2_proofs::SerdeFormat::Processed,
        "raw-bytes-unchecked" => halo2_proofs::SerdeFormat::RawBytesUnchecked,
        "raw-bytes" => halo2_proofs::SerdeFormat::RawBytes,
        _ => panic!("invalid serde format"),
    }
}
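`serde_format_from_str` maps the `EZKL_KEY_FORMAT` string onto halo2's three key encodings, which trade compactness against read-time validation. A hedged stand-in sketch of the mapping (the enum here is a local illustration, not `halo2_proofs::SerdeFormat` itself, and the trade-off comments are a summary rather than authoritative documentation):

    // Stand-in for halo2_proofs::SerdeFormat, to illustrate the mapping only.
    #[derive(Debug, PartialEq)]
    enum SerdeFormat {
        Processed,         // compact encoding, validated on read
        RawBytes,          // internal representation, validated
        RawBytesUnchecked, // internal representation, no checks: fastest, trusts the file
    }

    fn serde_format_from_str(s: &str) -> SerdeFormat {
        match s {
            "processed" => SerdeFormat::Processed,
            "raw-bytes-unchecked" => SerdeFormat::RawBytesUnchecked,
            "raw-bytes" => SerdeFormat::RawBytes,
            _ => panic!("invalid serde format"),
        }
    }

    fn main() {
        // default when EZKL_KEY_FORMAT is unset, per the lazy_static above
        assert_eq!(serde_format_from_str("raw-bytes"), SerdeFormat::RawBytes);
    }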

#[allow(missing_docs)]
#[derive(
    ValueEnum, Copy, Clone, Default, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd,
@@ -52,6 +63,25 @@ pub enum ProofType {
    ForAggr,
}

impl std::fmt::Display for ProofType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            match self {
                ProofType::Single => "single",
                ProofType::ForAggr => "for-aggr",
            }
        )
    }
}

impl ToFlags for ProofType {
    fn to_flags(&self) -> Vec<String> {
        vec![format!("{}", self)]
    }
}

impl From<ProofType> for TranscriptType {
    fn from(val: ProofType) -> Self {
        match val {
@@ -154,6 +184,25 @@ pub enum TranscriptType {
    EVM,
}

impl std::fmt::Display for TranscriptType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            match self {
                TranscriptType::Poseidon => "poseidon",
                TranscriptType::EVM => "evm",
            }
        )
    }
}

impl ToFlags for TranscriptType {
    fn to_flags(&self) -> Vec<String> {
        vec![format!("{}", self)]
    }
}

#[cfg(feature = "python-bindings")]
impl ToPyObject for TranscriptType {
    fn to_object(&self, py: Python) -> PyObject {
@@ -167,8 +216,8 @@ impl ToPyObject for TranscriptType {
#[cfg(feature = "python-bindings")]
///
pub fn g1affine_to_pydict(g1affine_dict: &PyDict, g1affine: &G1Affine) {
    let g1affine_x = field_to_string_montgomery(&g1affine.x);
    let g1affine_y = field_to_string_montgomery(&g1affine.y);
    let g1affine_x = field_to_string(&g1affine.x);
    let g1affine_y = field_to_string(&g1affine.y);
    g1affine_dict.set_item("x", g1affine_x).unwrap();
    g1affine_dict.set_item("y", g1affine_y).unwrap();
}
@@ -178,23 +227,23 @@ use halo2curves::bn256::G1;
#[cfg(feature = "python-bindings")]
///
pub fn g1_to_pydict(g1_dict: &PyDict, g1: &G1) {
    let g1_x = field_to_string_montgomery(&g1.x);
    let g1_y = field_to_string_montgomery(&g1.y);
    let g1_z = field_to_string_montgomery(&g1.z);
    let g1_x = field_to_string(&g1.x);
    let g1_y = field_to_string(&g1.y);
    let g1_z = field_to_string(&g1.z);
    g1_dict.set_item("x", g1_x).unwrap();
    g1_dict.set_item("y", g1_y).unwrap();
    g1_dict.set_item("z", g1_z).unwrap();
}

/// converts fp into `Vec<u64>` in Montgomery form
pub fn field_to_string_montgomery<F: PrimeField + SerdeObject + Serialize>(fp: &F) -> String {
/// converts fp into a little endian Hex string
pub fn field_to_string<F: PrimeField + SerdeObject + Serialize>(fp: &F) -> String {
    let repr = serde_json::to_string(&fp).unwrap();
    let b: String = serde_json::from_str(&repr).unwrap();
    b
}

/// converts `Vec<u64>` in Montgomery form into fp
pub fn string_to_field_montgomery<F: PrimeField + SerdeObject + Serialize + DeserializeOwned>(
/// converts a little endian Hex string into a field element
pub fn string_to_field<F: PrimeField + SerdeObject + Serialize + DeserializeOwned>(
    b: &String,
) -> F {
    let repr = serde_json::to_string(&b).unwrap();
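The rename away from `*_montgomery` reflects a representation change: field elements are now serialized as little-endian hex strings rather than as `Vec<u64>` limbs in Montgomery form. The intended contract is a lossless round-trip; a hedged sketch of that property (assuming, as the functions above do, serde_json-backed Serialize/Deserialize impls on the field type):

    // Round-trip sketch for the (de)serialization contract of
    // field_to_string / string_to_field, assuming serde_json-backed impls.
    use halo2curves::bn256::Fr;

    fn main() {
        let felt = Fr::from(42u64);
        // field_to_string: serialize, then pull the inner string back out
        let repr = serde_json::to_string(&felt).unwrap();
        let s: String = serde_json::from_str(&repr).unwrap();
        // string_to_field: re-wrap the string and deserialize to a field element
        let repr2 = serde_json::to_string(&s).unwrap();
        let back: Fr = serde_json::from_str(&repr2).unwrap();
        assert_eq!(felt, back);
    }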
@@ -259,7 +308,7 @@ where
        let field_elems: Vec<Vec<String>> = self
            .instances
            .iter()
            .map(|x| x.iter().map(|fp| field_to_string_montgomery(fp)).collect())
            .map(|x| x.iter().map(|fp| field_to_string(fp)).collect())
            .collect::<Vec<_>>();
        dict.set_item("instances", field_elems).unwrap();
        let hex_proof = hex::encode(&self.proof);
@@ -315,7 +364,7 @@ where
    /// Saves the Proof to a specified `proof_path`.
    pub fn save(&self, proof_path: &PathBuf) -> Result<(), Box<dyn Error>> {
        let file = std::fs::File::create(proof_path)?;
        let mut writer = BufWriter::new(file);
        let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, file);
        serde_json::to_writer(&mut writer, &self)?;
        Ok(())
    }
@@ -328,8 +377,10 @@ where
        <C as CurveAffine>::ScalarExt: FromUniformBytes<64>,
    {
        trace!("reading proof");
        let data = std::fs::read_to_string(proof_path)?;
        serde_json::from_str(&data).map_err(|e| e.into())
        let file = std::fs::File::open(proof_path)?;
        let reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, file);
        let proof: Self = serde_json::from_reader(reader)?;
        Ok(proof)
    }
}

@@ -555,6 +606,7 @@ where
            verifier_params,
            pk.get_vk(),
            strategy,
            verifier_params.n(),
        )?;
    }
    let elapsed = now.elapsed();
@@ -642,6 +694,7 @@ pub fn verify_proof_circuit<
    params: &'params Scheme::ParamsVerifier,
    vk: &VerifyingKey<Scheme::Curve>,
    strategy: Strategy,
    orig_n: u64,
) -> Result<Strategy::Output, halo2_proofs::plonk::Error>
where
    Scheme::Scalar: SerdeObject
@@ -662,7 +715,7 @@ where
    trace!("instances {:?}", instances);

    let mut transcript = TranscriptReadBuffer::init(Cursor::new(snark.proof.clone()));
    verify_proof::<Scheme, V, _, TR, _>(params, vk, strategy, instances, &mut transcript)
    verify_proof::<Scheme, V, _, TR, _>(params, vk, strategy, instances, &mut transcript, orig_n)
}

/// Loads a [VerifyingKey] at `path`.
@@ -678,13 +731,14 @@ where
    info!("loading verification key from {:?}", path);
    let f =
        File::open(path.clone()).map_err(|_| format!("failed to load vk at {}", path.display()))?;
    let mut reader = BufReader::new(f);
    VerifyingKey::<Scheme::Curve>::read::<_, C>(
    let mut reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, f);
    let vk = VerifyingKey::<Scheme::Curve>::read::<_, C>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        serde_format_from_str(&EZKL_KEY_FORMAT),
        params,
    )
    .map_err(Box::<dyn Error>::from)
    )?;
    info!("done loading verification key ✅");
    Ok(vk)
}

/// Loads a [ProvingKey] at `path`.
@@ -700,19 +754,20 @@ where
    info!("loading proving key from {:?}", path);
    let f =
        File::open(path.clone()).map_err(|_| format!("failed to load pk at {}", path.display()))?;
    let mut reader = BufReader::new(f);
    ProvingKey::<Scheme::Curve>::read::<_, C>(
    let mut reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, f);
    let pk = ProvingKey::<Scheme::Curve>::read::<_, C>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        serde_format_from_str(&EZKL_KEY_FORMAT),
        params,
    )
    .map_err(Box::<dyn Error>::from)
    )?;
    info!("done loading proving key ✅");
    Ok(pk)
}

/// Saves a [ProvingKey] to `path`.
pub fn save_pk<Scheme: CommitmentScheme>(
    path: &PathBuf,
    vk: &ProvingKey<Scheme::Curve>,
    pk: &ProvingKey<Scheme::Curve>,
) -> Result<(), io::Error>
where
    Scheme::Curve: SerdeObject + CurveAffine,
@@ -720,9 +775,10 @@ where
{
    info!("saving proving key 💾");
    let f = File::create(path)?;
    let mut writer = BufWriter::new(f);
    vk.write(&mut writer, halo2_proofs::SerdeFormat::RawBytes)?;
    let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
    pk.write(&mut writer, serde_format_from_str(&EZKL_KEY_FORMAT))?;
    writer.flush()?;
    info!("done saving proving key ✅");
    Ok(())
}

@@ -737,9 +793,10 @@ where
{
    info!("saving verification key 💾");
    let f = File::create(path)?;
    let mut writer = BufWriter::new(f);
    vk.write(&mut writer, halo2_proofs::SerdeFormat::RawBytes)?;
    let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
    vk.write(&mut writer, serde_format_from_str(&EZKL_KEY_FORMAT))?;
    writer.flush()?;
    info!("done saving verification key ✅");
    Ok(())
}

@@ -750,7 +807,7 @@ pub fn save_params<Scheme: CommitmentScheme>(
) -> Result<(), io::Error> {
    info!("saving parameters 💾");
    let f = File::create(path)?;
    let mut writer = BufWriter::new(f);
    let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
    params.write(&mut writer)?;
    writer.flush()?;
    Ok(())
@@ -840,6 +897,7 @@ pub(crate) fn verify_proof_circuit_kzg<
    proof: Snark<Fr, G1Affine>,
    vk: &VerifyingKey<G1Affine>,
    strategy: Strategy,
    orig_n: u64,
) -> Result<Strategy::Output, halo2_proofs::plonk::Error> {
    match proof.transcript_type {
        TranscriptType::EVM => verify_proof_circuit::<
@@ -849,7 +907,7 @@ pub(crate) fn verify_proof_circuit_kzg<
            _,
            _,
            EvmTranscript<G1Affine, _, _, _>,
        >(&proof, params, vk, strategy),
        >(&proof, params, vk, strategy, orig_n),
        TranscriptType::Poseidon => verify_proof_circuit::<
            Fr,
            VerifierSHPLONK<'_, Bn256>,
@@ -857,7 +915,7 @@ pub(crate) fn verify_proof_circuit_kzg<
            _,
            _,
            PoseidonTranscript<NativeLoader, _>,
        >(&proof, params, vk, strategy),
        >(&proof, params, vk, strategy, orig_n),
    }
}

118 src/python.rs
@@ -15,10 +15,9 @@ use crate::graph::{
|
||||
use crate::pfsys::evm::aggregation::AggregationCircuit;
|
||||
use crate::pfsys::{
|
||||
load_pk, load_vk, save_params, save_vk, srs::gen_srs as ezkl_gen_srs, srs::load_srs, ProofType,
|
||||
Snark, TranscriptType,
|
||||
TranscriptType,
|
||||
};
|
||||
use crate::RunArgs;
|
||||
use ethers::types::H160;
|
||||
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
|
||||
use halo2curves::bn256::{Bn256, Fq, Fr, G1Affine, G1};
|
||||
use pyo3::exceptions::{PyIOError, PyRuntimeError};
|
||||
@@ -26,7 +25,6 @@ use pyo3::prelude::*;
|
||||
use pyo3::wrap_pyfunction;
|
||||
use pyo3_log;
|
||||
use snark_verifier::util::arithmetic::PrimeField;
|
||||
use std::str::FromStr;
|
||||
use std::{fs::File, path::PathBuf};
|
||||
use tokio::runtime::Runtime;
|
||||
|
||||
@@ -65,9 +63,9 @@ struct PyG1 {
|
||||
impl From<G1> for PyG1 {
|
||||
fn from(g1: G1) -> Self {
|
||||
PyG1 {
|
||||
x: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.x),
|
||||
y: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.y),
|
||||
z: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.z),
|
||||
x: crate::pfsys::field_to_string::<Fq>(&g1.x),
|
||||
y: crate::pfsys::field_to_string::<Fq>(&g1.y),
|
||||
z: crate::pfsys::field_to_string::<Fq>(&g1.z),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -75,9 +73,9 @@ impl From<G1> for PyG1 {
|
||||
impl From<PyG1> for G1 {
|
||||
fn from(val: PyG1) -> Self {
|
||||
G1 {
|
||||
x: crate::pfsys::string_to_field_montgomery::<Fq>(&val.x),
|
||||
y: crate::pfsys::string_to_field_montgomery::<Fq>(&val.y),
|
||||
z: crate::pfsys::string_to_field_montgomery::<Fq>(&val.z),
|
||||
x: crate::pfsys::string_to_field::<Fq>(&val.x),
|
||||
y: crate::pfsys::string_to_field::<Fq>(&val.y),
|
||||
z: crate::pfsys::string_to_field::<Fq>(&val.z),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -108,8 +106,8 @@ pub struct PyG1Affine {
|
||||
impl From<G1Affine> for PyG1Affine {
|
||||
fn from(g1: G1Affine) -> Self {
|
||||
PyG1Affine {
|
||||
x: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.x),
|
||||
y: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.y),
|
||||
x: crate::pfsys::field_to_string::<Fq>(&g1.x),
|
||||
y: crate::pfsys::field_to_string::<Fq>(&g1.y),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -117,8 +115,8 @@ impl From<G1Affine> for PyG1Affine {
|
||||
impl From<PyG1Affine> for G1Affine {
|
||||
fn from(val: PyG1Affine) -> Self {
|
||||
G1Affine {
|
||||
x: crate::pfsys::string_to_field_montgomery::<Fq>(&val.x),
|
||||
y: crate::pfsys::string_to_field_montgomery::<Fq>(&val.y),
|
||||
x: crate::pfsys::string_to_field::<Fq>(&val.x),
|
||||
y: crate::pfsys::string_to_field::<Fq>(&val.y),
|
||||
}
|
||||
}
|
||||
}
|
||||
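The *_montgomery string helpers give way to plain field_to_string / string_to_field throughout this file. The exact encoding lives in pfsys, but the round trip it implies can be sketched with ff::PrimeField::to_repr; treat the byte order and the fixed [u8; 32] repr below as illustrative assumptions rather than the crate's actual encoding:

use ff::PrimeField;
use halo2curves::bn256::Fq;

// Sketch: canonical (non-Montgomery) hex encoding of a field element.
fn to_hex<F: PrimeField<Repr = [u8; 32]>>(x: &F) -> String {
    let mut bytes = x.to_repr(); // canonical little-endian bytes
    bytes.reverse(); // print big-endian, matching the "0x..." debug form
    format!("0x{}", hex::encode(bytes))
}

fn main() {
    let x = Fq::from(42u64);
    println!("{}", to_hex(&x)); // 0x0000...002a
}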
@@ -219,53 +217,51 @@ impl Into<PyRunArgs> for RunArgs {
    }
}

/// Converts 4 u64s to a field element
/// Converts a felt to big endian
#[pyfunction(signature = (
    array,
    felt,
))]
fn string_to_felt(array: PyFelt) -> PyResult<String> {
    Ok(format!(
        "{:?}",
        crate::pfsys::string_to_field_montgomery::<Fr>(&array)
    ))
fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
    let felt = crate::pfsys::string_to_field::<Fr>(&felt);
    Ok(format!("{:?}", felt))
}

/// Converts 4 u64s representing a field element directly to an integer
/// Converts a field element hex string to an integer
#[pyfunction(signature = (
    array,
))]
fn string_to_int(array: PyFelt) -> PyResult<i128> {
    let felt = crate::pfsys::string_to_field_montgomery::<Fr>(&array);
fn felt_to_int(array: PyFelt) -> PyResult<i128> {
    let felt = crate::pfsys::string_to_field::<Fr>(&array);
    let int_rep = felt_to_i128(felt);
    Ok(int_rep)
}

/// Converts 4 u64s representing a field element directly to a (rescaled from fixed point scaling) floating point
/// Converts a field element hex string to a floating point number
#[pyfunction(signature = (
    array,
    scale
))]
fn string_to_float(array: PyFelt, scale: crate::Scale) -> PyResult<f64> {
    let felt = crate::pfsys::string_to_field_montgomery::<Fr>(&array);
fn felt_to_float(array: PyFelt, scale: crate::Scale) -> PyResult<f64> {
    let felt = crate::pfsys::string_to_field::<Fr>(&array);
    let int_rep = felt_to_i128(felt);
    let multiplier = scale_to_multiplier(scale);
    let float_rep = int_rep as f64 / multiplier;
    Ok(float_rep)
}

/// Converts a floating point element to 4 u64s representing a fixed point field element
/// Converts a floating point element to a field element hex string
#[pyfunction(signature = (
    input,
    scale
))]
fn float_to_string(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
    let int_rep = quantize_float(&input, 0.0, scale)
        .map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
    let felt = i128_to_felt(int_rep);
    Ok(crate::pfsys::field_to_string_montgomery::<Fr>(&felt))
    Ok(crate::pfsys::field_to_string::<Fr>(&felt))
}

/// Converts a buffer to vector of 4 u64s representing a fixed point field element
/// Converts a buffer to vector of field elements
#[pyfunction(signature = (
    buffer
))]

@@ -318,7 +314,10 @@ fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
        .map(|x| PrimeField::from_u128(u8_array_to_u128_le(*x)))
        .collect();

    let field_elements: Vec<String> = field_elements.iter().map(|x| format!("{:?}", x)).collect();
    let field_elements: Vec<String> = field_elements
        .iter()
        .map(|x| crate::pfsys::field_to_string::<Fr>(x))
        .collect();

    Ok(field_elements)
}
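The renames make the fixed-point contract of these bindings explicit: a float is quantized to an integer at 2^scale, carried as a felt, and rescaled on the way out. A minimal sketch of that arithmetic with plain integers standing in for field elements (the real quantize_float also takes a shift argument, fixed here at 0.0):

// Quantize a float at the given power-of-two scale, as float_to_felt /
// felt_to_float do around their field-element encoding.
fn quantize(x: f64, scale: u32) -> i128 {
    (x * f64::from(2u32.pow(scale))).round() as i128
}

fn dequantize(q: i128, scale: u32) -> f64 {
    q as f64 / f64::from(2u32.pow(scale))
}

fn main() {
    let scale = 7;
    let q = quantize(890.0, scale);
    assert_eq!(dequantize(q, scale), 890.0); // exact for integral inputs
    println!("890.0 -> {} at scale {}", q, scale);
}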
@@ -330,7 +329,7 @@ fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
    let message: Vec<Fr> = message
        .iter()
        .map(crate::pfsys::string_to_field_montgomery::<Fr>)
        .map(crate::pfsys::string_to_field::<Fr>)
        .collect::<Vec<_>>();

    let output =

@@ -341,7 +340,7 @@ fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {

    let hash = output[0]
        .iter()
        .map(crate::pfsys::field_to_string_montgomery::<Fr>)
        .map(crate::pfsys::field_to_string::<Fr>)
        .collect::<Vec<_>>();
    Ok(hash)
}

@@ -361,7 +360,7 @@ fn kzg_commit(
) -> PyResult<Vec<PyG1Affine>> {
    let message: Vec<Fr> = message
        .iter()
        .map(crate::pfsys::string_to_field_montgomery::<Fr>)
        .map(crate::pfsys::string_to_field::<Fr>)
        .collect::<Vec<_>>();

    let settings = GraphSettings::load(&settings_path)

@@ -525,7 +524,7 @@ fn gen_settings(
    scales = None,
    scale_rebase_multiplier = DEFAULT_SCALE_REBASE_MULTIPLIERS.split(",").map(|x| x.parse().unwrap()).collect(),
    max_logrows = None,
    div_rebasing = None,
    only_range_check_rebase = DEFAULT_ONLY_RANGE_CHECK_REBASE.parse().unwrap(),
))]
fn calibrate_settings(
    data: PathBuf,

@@ -536,7 +535,7 @@ fn calibrate_settings(
    scales: Option<Vec<crate::Scale>>,
    scale_rebase_multiplier: Vec<u32>,
    max_logrows: Option<u32>,
    div_rebasing: Option<bool>,
    only_range_check_rebase: bool,
) -> Result<bool, PyErr> {
    crate::execute::calibrate(
        model,

@@ -546,7 +545,7 @@ fn calibrate_settings(
        lookup_safety_margin,
        scales,
        scale_rebase_multiplier,
        div_rebasing,
        only_range_check_rebase,
        max_logrows,
    )
    .map_err(|e| {

@@ -689,14 +688,23 @@ fn prove(
    settings_path=PathBuf::from(DEFAULT_SETTINGS),
    vk_path=PathBuf::from(DEFAULT_VK),
    srs_path=None,
    non_reduced_srs=DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION.parse::<bool>().unwrap(),
))]
fn verify(
    proof_path: PathBuf,
    settings_path: PathBuf,
    vk_path: PathBuf,
    srs_path: Option<PathBuf>,
    non_reduced_srs: bool,
) -> Result<bool, PyErr> {
    crate::execute::verify(proof_path, settings_path, vk_path, srs_path).map_err(|e| {
    crate::execute::verify(
        proof_path,
        settings_path,
        vk_path,
        srs_path,
        non_reduced_srs,
    )
    .map_err(|e| {
        let err_str = format!("Failed to run verify: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;

@@ -1022,24 +1030,15 @@ fn verify_evm(
    addr_da: Option<&str>,
    addr_vk: Option<&str>,
) -> Result<bool, PyErr> {
    let addr_verifier = H160::from_str(addr_verifier).map_err(|e| {
        let err_str = format!("address is invalid: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    let addr_verifier = H160Flag::from(addr_verifier);
    let addr_da = if let Some(addr_da) = addr_da {
        let addr_da = H160::from_str(addr_da).map_err(|e| {
            let err_str = format!("address is invalid: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
        let addr_da = H160Flag::from(addr_da);
        Some(addr_da)
    } else {
        None
    };
    let addr_vk = if let Some(addr_vk) = addr_vk {
        let addr_vk = H160::from_str(addr_vk).map_err(|e| {
            let err_str = format!("address is invalid: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
        let addr_vk = H160Flag::from(addr_vk);
        Some(addr_vk)
    } else {
        None

@@ -1097,16 +1096,6 @@ fn create_evm_verifier_aggr(
    Ok(true)
}

/// print hex representation of a proof
#[pyfunction(signature = (proof_path))]
fn print_proof_hex(proof_path: PathBuf) -> Result<String, PyErr> {
    let proof = Snark::load::<KZGCommitmentScheme<Bn256>>(&proof_path)
        .map_err(|_| PyIOError::new_err("Failed to load proof"))?;

    let hex_str = hex::encode(proof.proof);
    Ok(format!("0x{}", hex_str))
}

// Python Module
#[pymodule]
fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {

@@ -1115,13 +1104,13 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_class::<PyG1Affine>()?;
    m.add_class::<PyG1>()?;
    m.add_class::<PyTestDataSource>()?;
    m.add_function(wrap_pyfunction!(string_to_felt, m)?)?;
    m.add_function(wrap_pyfunction!(string_to_int, m)?)?;
    m.add_function(wrap_pyfunction!(string_to_float, m)?)?;
    m.add_function(wrap_pyfunction!(felt_to_big_endian, m)?)?;
    m.add_function(wrap_pyfunction!(felt_to_int, m)?)?;
    m.add_function(wrap_pyfunction!(felt_to_float, m)?)?;
    m.add_function(wrap_pyfunction!(kzg_commit, m)?)?;
    m.add_function(wrap_pyfunction!(swap_proof_commitments, m)?)?;
    m.add_function(wrap_pyfunction!(poseidon_hash, m)?)?;
    m.add_function(wrap_pyfunction!(float_to_string, m)?)?;
    m.add_function(wrap_pyfunction!(float_to_felt, m)?)?;
    m.add_function(wrap_pyfunction!(buffer_to_felts, m)?)?;
    m.add_function(wrap_pyfunction!(gen_vk_from_pk_aggr, m)?)?;
    m.add_function(wrap_pyfunction!(gen_vk_from_pk_single, m)?)?;

@@ -1145,7 +1134,6 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(deploy_vk_evm, m)?)?;
    m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
    m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
    m.add_function(wrap_pyfunction!(print_proof_hex, m)?)?;
    m.add_function(wrap_pyfunction!(setup_test_evm_witness, m)?)?;
    m.add_function(wrap_pyfunction!(create_evm_verifier_aggr, m)?)?;
    m.add_function(wrap_pyfunction!(create_evm_data_attestation, m)?)?;
@@ -580,16 +580,16 @@ impl<T: Clone + TensorType> Tensor<T> {
    /// use ezkl::tensor::Tensor;
    /// let mut a = Tensor::<i32>::new(Some(&[1,2,3,4,5,6]), &[2, 3]).unwrap();
    /// let expected = Tensor::<i32>::new(Some(&[1, 2, 3, 4, 5, 6, 0, 0]), &[8]).unwrap();
    /// assert_eq!(a.pad_to_zero_rem(4).unwrap(), expected);
    /// assert_eq!(a.pad_to_zero_rem(4, 0).unwrap(), expected);
    ///
    /// let expected = Tensor::<i32>::new(Some(&[1, 2, 3, 4, 5, 6, 0, 0, 0]), &[9]).unwrap();
    /// assert_eq!(a.pad_to_zero_rem(9).unwrap(), expected);
    /// assert_eq!(a.pad_to_zero_rem(9, 0).unwrap(), expected);
    /// ```
    pub fn pad_to_zero_rem(&self, n: usize) -> Result<Tensor<T>, TensorError> {
    pub fn pad_to_zero_rem(&self, n: usize, pad: T) -> Result<Tensor<T>, TensorError> {
        let mut inner = self.inner.clone();
        let remainder = self.len() % n;
        if remainder != 0 {
            inner.resize(self.len() + n - remainder, T::zero().unwrap());
            inner.resize(self.len() + n - remainder, pad);
        }
        Tensor::new(Some(&inner), &[inner.len()])
    }
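The new pad argument generalizes what was a hard-coded T::zero(). The length arithmetic is simply "round up to a multiple of n", as this self-contained sketch of the same resize logic shows:

// Pad `v` with `pad` until its length is a multiple of `n` (no-op if it
// already is), mirroring pad_to_zero_rem's resize logic above.
fn pad_to_multiple<T: Clone>(mut v: Vec<T>, n: usize, pad: T) -> Vec<T> {
    let rem = v.len() % n;
    if rem != 0 {
        v.resize(v.len() + n - rem, pad);
    }
    v
}

fn main() {
    assert_eq!(pad_to_multiple(vec![1, 2, 3, 4, 5, 6], 4, 0), vec![1, 2, 3, 4, 5, 6, 0, 0]);
    assert_eq!(pad_to_multiple(vec![1, 2, 3, 4, 5, 6], 9, 0), vec![1, 2, 3, 4, 5, 6, 0, 0, 0]);
}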
@@ -243,7 +243,7 @@ pub fn and<
    /// Some(&[1, 0, 1, 0, 1, 0]),
    /// &[2, 3],
    /// ).unwrap();
    /// let result = equals(&a, &b).unwrap().0;
    /// let result = equals(&a, &b).unwrap();
    /// let expected = Tensor::<i128>::new(Some(&[1, 0, 1, 0, 1, 1]), &[2, 3]).unwrap();
    /// assert_eq!(result, expected);
    /// ```

@@ -260,7 +260,7 @@ pub fn equals<
>(
    a: &Tensor<T>,
    b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
    let a = a.clone();
    let b = b.clone();

@@ -268,7 +268,7 @@ pub fn equals<

    let result = nonlinearities::kronecker_delta(&diff);

    Ok((result, vec![diff]))
    Ok(result)
}

/// Greater than operation.

@@ -289,7 +289,7 @@ pub fn equals<
    /// ).unwrap();
    /// let result = greater(&a, &b).unwrap();
    /// let expected = Tensor::<i128>::new(Some(&[0, 1, 1, 0, 0, 0]), &[2, 3]).unwrap();
    /// assert_eq!(result.0, expected);
    /// assert_eq!(result, expected);
    /// ```
pub fn greater<
    T: TensorType

@@ -302,7 +302,7 @@ pub fn greater<
>(
    a: &Tensor<T>,
    b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
    let mask_inter = (a.clone() - b.clone())?;
    let mask = mask_inter.map(|x| {
        if x > T::zero().ok_or(TensorError::Unsupported).unwrap() {

@@ -311,7 +311,7 @@ pub fn greater<
            T::zero().ok_or(TensorError::Unsupported).unwrap()
        }
    });
    Ok((mask, vec![mask_inter]))
    Ok(mask)
}

/// Greater than or equal operation.

@@ -332,7 +332,7 @@ pub fn greater<
    /// ).unwrap();
    /// let result = greater_equal(&a, &b).unwrap();
    /// let expected = Tensor::<i128>::new(Some(&[1, 1, 1, 1, 0, 0]), &[2, 3]).unwrap();
    /// assert_eq!(result.0, expected);
    /// assert_eq!(result, expected);
    /// ```
pub fn greater_equal<
    T: TensorType

@@ -345,7 +345,7 @@ pub fn greater_equal<
>(
    a: &Tensor<T>,
    b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
    let mask_inter = (a.clone() - b.clone())?;
    let mask = mask_inter.map(|x| {
        if x >= T::zero().ok_or(TensorError::Unsupported).unwrap() {

@@ -354,7 +354,7 @@ pub fn greater_equal<
            T::zero().ok_or(TensorError::Unsupported).unwrap()
        }
    });
    Ok((mask, vec![mask_inter]))
    Ok(mask)
}

/// Less than operation.

@@ -375,7 +375,7 @@ pub fn greater_equal<
    /// ).unwrap();
    /// let result = less(&a, &b).unwrap();
    /// let expected = Tensor::<i128>::new(Some(&[0, 1, 0, 0, 0, 1]), &[2, 3]).unwrap();
    /// assert_eq!(result.0, expected);
    /// assert_eq!(result, expected);
    /// ```
    ///
pub fn less<

@@ -389,7 +389,7 @@ pub fn less<
>(
    a: &Tensor<T>,
    b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
    // a < b <=> b > a
    greater(b, a)
}

@@ -412,7 +412,7 @@ pub fn less<
    /// ).unwrap();
    /// let result = less_equal(&a, &b).unwrap();
    /// let expected = Tensor::<i128>::new(Some(&[1, 1, 0, 1, 1, 1]), &[2, 3]).unwrap();
    /// assert_eq!(result.0, expected);
    /// assert_eq!(result, expected);
    /// ```
    ///
pub fn less_equal<

@@ -426,7 +426,7 @@ pub fn less_equal<
>(
    a: &Tensor<T>,
    b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
    // a <= b <=> b >= a
    greater_equal(b, a)
}
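All of these comparison ops now return the mask tensor alone rather than a (result, intermediates) pair, and the identities a < b <=> b > a and a <= b <=> b >= a let less and less_equal delegate to their mirrored counterparts. A scalar-level sketch of the masks being computed:

// Elementwise comparison masks over i128 slices, as greater/greater_equal
// compute them after the subtraction a - b.
fn greater_mask(a: &[i128], b: &[i128]) -> Vec<i128> {
    a.iter().zip(b).map(|(x, y)| i128::from(x - y > 0)).collect()
}

fn less_equal_mask(a: &[i128], b: &[i128]) -> Vec<i128> {
    // a <= b  <=>  b >= a, so reuse the >= direction with arguments swapped.
    a.iter().zip(b).map(|(x, y)| i128::from(y - x >= 0)).collect()
}

fn main() {
    let (a, b) = ([1, 12, 6], [2, 3, 6]);
    assert_eq!(greater_mask(&a, &b), vec![0, 1, 0]);
    assert_eq!(less_equal_mask(&a, &b), vec![1, 0, 1]);
}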
@@ -2300,12 +2300,12 @@ pub fn deconv<
    /// Some(&[5, 2, 3, 0, 4, -1, 3, 1, 6]),
    /// &[1, 1, 3, 3],
    /// ).unwrap();
    /// let pooled = sumpool(&x, [(0, 0); 2], (1, 1), (2, 2), false).unwrap().0;
    /// let pooled = sumpool(&x, [(0, 0); 2], (1, 1), (2, 2), false).unwrap();
    /// let expected: Tensor<i128> = Tensor::<i128>::new(Some(&[11, 8, 8, 10]), &[1, 1, 2, 2]).unwrap();
    /// assert_eq!(pooled, expected);
    ///
    /// // This time with normalization
    /// let pooled = sumpool(&x, [(0, 0); 2], (1, 1), (2, 2), true).unwrap().0;
    /// let pooled = sumpool(&x, [(0, 0); 2], (1, 1), (2, 2), true).unwrap();
    /// let expected: Tensor<i128> = Tensor::<i128>::new(Some(&[3, 2, 2, 3]), &[1, 1, 2, 2]).unwrap();
    /// assert_eq!(pooled, expected);
    /// ```

@@ -2315,7 +2315,7 @@ pub fn sumpool(
    stride: (usize, usize),
    kernel_shape: (usize, usize),
    normalize: bool,
) -> Result<(Tensor<i128>, Vec<Tensor<i128>>), TensorError> {
) -> Result<Tensor<i128>, TensorError> {
    let image_dims = image.dims();
    let batch_size = image_dims[0];
    let image_channels = image_dims[1];

@@ -2345,15 +2345,12 @@ pub fn sumpool(
    let mut combined = res.combine()?;
    combined.reshape(&[&[batch_size, image_channels], shape].concat())?;

    let mut inter = vec![];

    if normalize {
        inter.push(combined.clone());
        let norm = kernel.len();
        combined = nonlinearities::const_div(&combined, norm as f64);
    }

    Ok((combined, inter))
    Ok(combined)
}

/// Applies 2D max pooling over a 4D tensor of shape B x C x H x W.
@@ -3048,11 +3045,7 @@ pub mod nonlinearities {
    }

    /// softmax layout
    pub fn softmax_axes(
        a: &Tensor<i128>,
        scale: f64,
        axes: &[usize],
    ) -> (Tensor<i128>, Vec<Tensor<i128>>) {
    pub fn softmax_axes(a: &Tensor<i128>, scale: f64, axes: &[usize]) -> Tensor<i128> {
        // we want this to be as small as possible so we set the output scale to 1
        let dims = a.dims();

@@ -3060,8 +3053,6 @@ pub mod nonlinearities {
            return softmax(a, scale);
        }

        let mut intermediate_values = vec![];

        let cartesian_coord = dims[..dims.len() - 1]
            .iter()
            .map(|x| 0..*x)

@@ -3084,8 +3075,7 @@ pub mod nonlinearities {

            let res = softmax(&softmax_input, scale);

            outputs.push(res.0);
            intermediate_values.extend(res.1);
            outputs.push(res);
        }

        let mut res = Tensor::new(Some(&outputs), &[outputs.len()])

@@ -3093,7 +3083,7 @@ pub mod nonlinearities {
            .combine()
            .unwrap();
        res.reshape(dims).unwrap();
        (res, intermediate_values)
        res
    }

    /// Applies softmax

@@ -3110,24 +3100,20 @@ pub mod nonlinearities {
    /// Some(&[2, 2, 3, 2, 2, 0]),
    /// &[2, 3],
    /// ).unwrap();
    /// let result = softmax(&x, 128.0).0;
    /// let result = softmax(&x, 128.0);
    /// // doubles the scale of the input
    /// let expected = Tensor::<i128>::new(Some(&[2730, 2730, 2751, 2730, 2730, 2688]), &[2, 3]).unwrap();
    /// assert_eq!(result, expected);
    /// ```
    pub fn softmax(a: &Tensor<i128>, scale: f64) -> (Tensor<i128>, Vec<Tensor<i128>>) {
    pub fn softmax(a: &Tensor<i128>, scale: f64) -> Tensor<i128> {
        // the more accurate calculation is commented out and we implement as below so it matches the steps in layout
        let mut intermediate_values = vec![];

        intermediate_values.push(a.clone());

        let exp = exp(a, scale);

        let sum = sum(&exp).unwrap();
        intermediate_values.push(sum.clone());
        let inv_denom = recip(&sum, scale, scale);

        ((exp * inv_denom).unwrap(), intermediate_values)
        (exp * inv_denom).unwrap()
    }
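Note the step order in softmax above: exponentiate at the input scale, sum, take a fixed-point reciprocal of the sum, then multiply, deliberately matching the in-circuit layout rather than the numerically nicer max-subtraction formulation. A floating-point sketch of that exact ordering:

// Softmax computed in the same step order as the circuit layout:
// exp -> sum -> reciprocal -> elementwise multiply.
fn softmax_layout_order(xs: &[f64]) -> Vec<f64> {
    let exps: Vec<f64> = xs.iter().map(|x| x.exp()).collect();
    let sum: f64 = exps.iter().sum();
    let inv_denom = 1.0 / sum; // recip(&sum, scale, scale) in the quantized version
    exps.iter().map(|e| e * inv_denom).collect()
}

fn main() {
    let probs = softmax_layout_order(&[2.0, 2.0, 3.0]);
    assert!((probs.iter().sum::<f64>() - 1.0).abs() < 1e-12);
    println!("{:?}", probs);
}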
/// Applies range_check_percent

@@ -3787,6 +3773,30 @@ pub mod nonlinearities {
        .unwrap()
    }

    /// Elementwise inverse.
    /// # Arguments
    /// * `out_scale` - Single value
    /// # Examples
    /// ```
    /// use ezkl::tensor::Tensor;
    /// use ezkl::tensor::ops::nonlinearities::zero_recip;
    /// let k = 2_f64;
    /// let result = zero_recip(1.0);
    /// let expected = Tensor::<i128>::new(Some(&[4503599627370496]), &[1]).unwrap();
    /// assert_eq!(result, expected);
    /// ```
    pub fn zero_recip(out_scale: f64) -> Tensor<i128> {
        let a = Tensor::<i128>::new(Some(&[0]), &[1]).unwrap();

        a.par_enum_map(|_, a_i| {
            let rescaled = a_i as f64;
            let denom = (1_f64) / (rescaled + f64::EPSILON);
            let d_inv_x = out_scale * denom;
            Ok::<_, TensorError>(d_inv_x.round() as i128)
        })
        .unwrap()
    }
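The doctest constant in zero_recip is not arbitrary: with a zero input the function computes out_scale / (0 + f64::EPSILON), and since f64::EPSILON is 2^-52, an out_scale of 1.0 yields exactly 2^52 = 4503599627370496:

fn main() {
    // f64::EPSILON is 2^-52, so 1.0 / f64::EPSILON is exactly 2^52.
    assert_eq!((1.0_f64 / f64::EPSILON) as i128, 4_503_599_627_370_496);
}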
    /// Elementwise greater than
    /// # Arguments
    ///
@@ -454,12 +454,12 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
    }

    /// Calls `pad_to_zero_rem` on the inner tensor.
    pub fn pad_to_zero_rem(&mut self, n: usize) -> Result<(), Box<dyn Error>> {
    pub fn pad_to_zero_rem(&mut self, n: usize, pad: ValType<F>) -> Result<(), Box<dyn Error>> {
        match self {
            ValTensor::Value {
                inner: v, dims: d, ..
            } => {
                *v = v.pad_to_zero_rem(n)?;
                *v = v.pad_to_zero_rem(n, pad)?;
                *d = v.dims().to_vec();
            }
            ValTensor::Instance { .. } => {

@@ -672,7 +672,7 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
                }
                Ok(indices)
            }
            ValTensor::Instance { .. } => Err(TensorError::WrongMethod),
            ValTensor::Instance { .. } => Ok(vec![]),
        }
    }

@@ -690,7 +690,7 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
                }
                Ok(indices)
            }
            ValTensor::Instance { .. } => Err(TensorError::WrongMethod),
            ValTensor::Instance { .. } => Ok(vec![]),
        }
    }

@@ -709,7 +709,11 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
                *d = v.dims().to_vec();
            }
            ValTensor::Instance { .. } => {
                return Err(TensorError::WrongMethod);
                if indices.is_empty() {
                    return Ok(());
                } else {
                    return Err(TensorError::WrongMethod);
                }
            }
        }
        Ok(())

@@ -871,3 +875,30 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
        }
    }
}

impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
    /// inverts the inner values
    pub fn inverse(&self) -> Result<ValTensor<F>, Box<dyn Error>> {
        let mut cloned_self = self.clone();

        match &mut cloned_self {
            ValTensor::Value {
                inner: v, dims: d, ..
            } => {
                *v = v.map(|x| match x {
                    ValType::AssignedValue(v) => ValType::AssignedValue(v.invert()),
                    ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
                        ValType::AssignedValue(v.value_field().invert())
                    }
                    ValType::Value(v) => ValType::Value(v.map(|x| x.invert().unwrap_or(F::ZERO))),
                    ValType::Constant(v) => ValType::Constant(v.invert().unwrap_or(F::ZERO)),
                });
                *d = v.dims().to_vec();
            }
            ValTensor::Instance { .. } => {
                return Err(Box::new(TensorError::WrongMethod));
            }
        };
        Ok(cloned_self)
    }
}
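A subtlety in the new inverse implementation: ff's invert returns an empty CtOption for zero, so the unwrap_or(F::ZERO) arms define the inverse of zero as zero instead of erroring. A self-contained check of that convention (using bn256's Fr for concreteness):

use ff::Field;
use halo2curves::bn256::Fr;

fn main() {
    // 0 has no multiplicative inverse; map it to 0 by convention.
    let zero_inv = Fr::ZERO.invert().unwrap_or(Fr::ZERO);
    assert_eq!(zero_inv, Fr::ZERO);
    // Nonzero elements invert normally.
    let two_inv = Fr::from(2).invert().unwrap();
    assert_eq!(two_inv * Fr::from(2), Fr::ONE);
}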
@@ -33,10 +33,7 @@ pub enum VarTensor {
impl VarTensor {
    ///
    pub fn is_advice(&self) -> bool {
        match self {
            VarTensor::Advice { .. } => true,
            _ => false,
        }
        matches!(self, VarTensor::Advice { .. })
    }

    ///
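The is_advice change is the standard clippy rewrite: a two-arm boolean match collapses into matches!. The same shape applies to any enum predicate, for example:

enum Var {
    Advice { col: usize },
    Fixed,
}

fn is_advice(v: &Var) -> bool {
    // Equivalent to `match v { Var::Advice { .. } => true, _ => false }`.
    matches!(v, Var::Advice { .. })
}

fn main() {
    assert!(is_advice(&Var::Advice { col: 0 }));
    assert!(!is_advice(&Var::Fixed));
}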
54  src/wasm.rs

@@ -69,19 +69,30 @@ pub fn encodeVerifierCalldata(
    Ok(encoded)
}

/// Converts 4 u64s to a field element
/// Converts a hex string to a byte array
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn stringToFelt(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
pub fn feltToBigEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
    let felt: Fr = serde_json::from_slice(&array[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
    Ok(format!("{:?}", felt))
}

/// Converts 4 u64s representing a field element directly to an integer
/// Converts a felt to a little endian string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn stringToInt(
pub fn feltToLittleEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
    let felt: Fr = serde_json::from_slice(&array[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
    let repr = serde_json::to_string(&felt).unwrap();
    let b: String = serde_json::from_str(&repr).unwrap();
    Ok(b)
}

/// Converts a hex string to a byte array
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToInt(
    array: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    let felt: Fr = serde_json::from_slice(&array[..])

@@ -92,10 +103,10 @@ pub fn stringToInt(
    ))
}

/// Converts 4 u64s representing a field element directly to a (rescaled from fixed point scaling) floating point
/// Converts felts to a floating point element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn stringToFloat(
pub fn feltToFloat(
    array: wasm_bindgen::Clamped<Vec<u8>>,
    scale: crate::Scale,
) -> Result<f64, JsError> {

@@ -106,26 +117,26 @@ pub fn stringToFloat(
    Ok(int_rep as f64 / multiplier)
}

/// Converts a floating point element to 4 u64s representing a fixed point field element
/// Converts a floating point number to a hex string representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn floatTostring(
pub fn floatToFelt(
    input: f64,
    scale: crate::Scale,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    let int_rep =
        quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
    let felt = i128_to_felt(int_rep);
    let vec = crate::pfsys::field_to_string_montgomery::<halo2curves::bn256::Fr>(&felt);
    let vec = crate::pfsys::field_to_string::<halo2curves::bn256::Fr>(&felt);
    Ok(wasm_bindgen::Clamped(serde_json::to_vec(&vec).map_err(
        |e| JsError::new(&format!("Failed to serialize string_montgomery{}", e)),
        |e| JsError::new(&format!("Failed to serialize a float to felt{}", e)),
    )?))
}

/// Converts a buffer to vector of 4 u64s representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn bufferToVecOfstring(
pub fn bufferToVecOfFelt(
    buffer: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    // Convert the buffer to a slice

@@ -211,7 +222,7 @@ pub fn genWitness(
        .map_err(|e| JsError::new(&format!("{}", e)))?;

    let witness = circuit
        .forward(&mut input, None, None)
        .forward(&mut input, None, None, false)
        .map_err(|e| JsError::new(&format!("{}", e)))?;

    serde_json::to_vec(&witness)

@@ -311,13 +322,19 @@ pub fn verify(
    let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        circuit_settings,
        circuit_settings.clone(),
    )
    .map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;

    let strategy = KZGSingleStrategy::new(params.verifier_params());

    let result = verify_proof_circuit_kzg(params.verifier_params(), snark, &vk, strategy);
    let result = verify_proof_circuit_kzg(
        params.verifier_params(),
        snark,
        &vk,
        strategy,
        1 << circuit_settings.run_args.logrows,
    );

    match result {
        Ok(_) => Ok(true),

@@ -387,15 +404,6 @@ pub fn prove(
    .into_bytes())
}

/// print hex representation of a proof
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn printProofHex(proof: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
    let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
    let hex_str = hex::encode(proof.proof);
    Ok(format!("0x{}", hex_str))
}
// VALIDATION FUNCTIONS

/// Witness file validation
@@ -2,6 +2,8 @@
#[cfg(test)]
mod native_tests {

    use ezkl::circuit::Tolerance;
    use ezkl::fieldutils::{felt_to_i128, i128_to_felt};
    // use ezkl::circuit::table::RESERVED_BLINDING_ROWS_PAD;
    use ezkl::graph::input::{FileSource, FileSourceInner, GraphData};
    use ezkl::graph::{DataSource, GraphSettings, GraphWitness};

@@ -192,90 +194,90 @@ mod native_tests {
    ];

    const TESTS: [&str; 77] = [
        "1l_mlp",
        "1l_mlp", //0
        "1l_slice",
        "1l_concat",
        "1l_flatten",
        // "1l_average",
        "1l_div",
        "1l_pad",
        "1l_pad", // 5
        "1l_reshape",
        "1l_eltwise_div",
        "1l_sigmoid",
        "1l_sqrt",
        "1l_softmax",
        "1l_softmax", //10
        // "1l_instance_norm",
        "1l_batch_norm",
        "1l_prelu",
        "1l_leakyrelu",
        "1l_gelu_noappx",
        // "1l_gelu_tanh_appx",
        "1l_relu",
        "1l_relu", //15
        "1l_downsample",
        "1l_tanh",
        "2l_relu_sigmoid_small",
        "2l_relu_fc",
        "2l_relu_small",
        "2l_relu_small", //20
        "2l_relu_sigmoid",
        "1l_conv",
        "2l_sigmoid_small",
        "2l_relu_sigmoid_conv",
        "3l_relu_conv_fc",
        "3l_relu_conv_fc", //25
        "4l_relu_conv_fc",
        "1l_erf",
        "1l_var",
        "1l_elu", //30
        "min",
        "1l_elu",
        "min", //30
        "max",
        "1l_max_pool",
        "1l_conv_transpose",
        "1l_upsample", //35
        "1l_identity",
        "1l_upsample",
        "1l_identity", //35
        "idolmodel",
        "trig",
        "prelu_gmm",
        "lstm", //40
        "rnn",
        "lstm",
        "rnn", //40
        "quantize_dequantize",
        "1l_where",
        "boolean",
        "boolean_identity",
        "decision_tree", // "variable_cnn",
        "decision_tree", // 45
        "random_forest",
        "gradient_boosted_trees",
        "1l_topk",
        "xgboost", //50
        "lightgbm",
        "xgboost",
        "lightgbm", //50
        "hummingbird_decision_tree",
        "oh_decision_tree",
        "linear_svc",
        "gather_elements",
        "less",
        "less", //55
        "xgboost_reg",
        "1l_powf",
        "scatter_elements",
        "1l_linear", //60
        "linear_regression",
        "1l_linear",
        "linear_regression", //60
        "sklearn_mlp",
        "1l_mean",
        "rounding_ops",
        // "mean_as_constrain",
        "arange",
        "layernorm",
        "layernorm", //65
        "bitwise_ops",
        "blackman_window",
        "softsign", //70
        "softsign", //68
        "softplus",
        "selu",
        "selu", //70
        "hard_sigmoid",
        "log_softmax",
        "eye",
        "ltsf",
        "remainder",
        "remainder", //75
        "bitshift",
    ];

    const WASM_TESTS: [&str; 48] = [
    const WASM_TESTS: [&str; 46] = [
        "1l_mlp",
        "1l_slice",
        "1l_concat",

@@ -324,8 +326,6 @@ mod native_tests {
        "1l_where",
        "boolean",
        "boolean_identity",
        "decision_tree", // "variable_cnn",
        "random_forest",
        "gradient_boosted_trees",
        "1l_topk",
        // "xgboost",

@@ -477,6 +477,7 @@ mod native_tests {
    use crate::native_tests::kzg_fuzz;
    use crate::native_tests::render_circuit;
    use crate::native_tests::model_serialization_different_binaries;
    use rand::Rng;
    use tempdir::TempDir;

    #[test]

@@ -496,7 +497,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "public", "fixed", "public", 1, "accuracy", None);
    mock(path, test.to_string(), "public", "fixed", "public", 1, "accuracy", None, 0.0);
    test_dir.close().unwrap();
    }
    });

@@ -560,7 +561,7 @@ mod native_tests {
    crate::native_tests::setup_py_env();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 18.0, false);
    accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 3.1, false);
    test_dir.close().unwrap();
    }

@@ -569,10 +570,23 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "private", "private", "public", 1, "resources", None);
    mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

    #(#[test_case(TESTS[N])])*
    fn mock_tolerance_public_outputs_(test: &str) {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    // gen random number between 0.0 and 1.0
    let tolerance = rand::thread_rng().gen_range(0.0..1.0) * 100.0;
    mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, tolerance);
    test_dir.close().unwrap();
    }

    #(#[test_case(TESTS[N])])*
    fn mock_large_batch_public_outputs_(test: &str) {
    crate::native_tests::init_binary();

@@ -580,7 +594,7 @@ mod native_tests {
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    let large_batch_dir = &format!("large_batches_{}", test);
    crate::native_tests::mk_data_batches_(path, test, &large_batch_dir, 10);
    mock(path, large_batch_dir.to_string(), "private", "private", "public", 10, "resources", None);
    mock(path, large_batch_dir.to_string(), "private", "private", "public", 10, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -589,7 +603,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "public", "private", "private", 1, "resources", None);
    mock(path, test.to_string(), "public", "private", "private", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -598,7 +612,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "fixed", "private", "private", 1, "resources", None);
    mock(path, test.to_string(), "fixed", "private", "private", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -607,7 +621,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "private", "private", "fixed", 1, "resources", None);
    mock(path, test.to_string(), "private", "private", "fixed", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -616,7 +630,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "private", "fixed", "private", 1, "resources", None);
    mock(path, test.to_string(), "private", "fixed", "private", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -625,7 +639,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "hashed", "private", "public", 1, "resources", None);
    mock(path, test.to_string(), "hashed", "private", "public", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -634,7 +648,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "kzgcommit", "private", "public", 1, "resources", None);
    mock(path, test.to_string(), "kzgcommit", "private", "public", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -644,7 +658,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "private", "hashed", "public", 1, "resources", None);
    mock(path, test.to_string(), "private", "hashed", "public", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -654,7 +668,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "private", "kzgcommit", "public", 1, "resources", None);
    mock(path, test.to_string(), "private", "kzgcommit", "public", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -663,7 +677,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "public", "private", "hashed", 1, "resources", None);
    mock(path, test.to_string(), "public", "private", "hashed", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -673,7 +687,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "public", "private", "kzgcommit", 1, "resources", None);
    mock(path, test.to_string(), "public", "private", "kzgcommit", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -682,7 +696,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "public", "fixed", "hashed", 1, "resources", None);
    mock(path, test.to_string(), "public", "fixed", "hashed", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -692,7 +706,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "public", "kzgcommit", "hashed", 1, "resources", None);
    mock(path, test.to_string(), "public", "kzgcommit", "hashed", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -702,7 +716,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "kzgcommit", "kzgcommit", "kzgcommit", 1, "resources", None);
    mock(path, test.to_string(), "kzgcommit", "kzgcommit", "kzgcommit", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -712,7 +726,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "hashed", "private", "hashed", 1, "resources", None);
    mock(path, test.to_string(), "hashed", "private", "hashed", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -722,7 +736,7 @@ mod native_tests {
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    // needs an extra row for the large model
    mock(path, test.to_string(),"hashed", "hashed", "public", 1, "resources", None);
    mock(path, test.to_string(),"hashed", "hashed", "public", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }

@@ -732,7 +746,7 @@ mod native_tests {
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    // needs an extra row for the large model
    mock(path, test.to_string(),"hashed", "hashed", "hashed", 1, "resources", None);
    mock(path, test.to_string(),"hashed", "hashed", "hashed", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }
@@ -828,7 +842,7 @@ mod native_tests {

    });

    seq!(N in 0..=47 {
    seq!(N in 0..=45 {

    #(#[test_case(WASM_TESTS[N])])*
    fn kzg_prove_and_verify_with_overflow_(test: &str) {

@@ -876,7 +890,7 @@ mod native_tests {
    crate::native_tests::init_binary();
    let test_dir = TempDir::new(test).unwrap();
    let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
    mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None);
    mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None, 0.0);
    test_dir.close().unwrap();
    }
    });

@@ -1273,7 +1287,9 @@ mod native_tests {
        batch_size: usize,
        cal_target: &str,
        scales_to_use: Option<Vec<u32>>,
        tolerance: f32,
    ) {
        let mut tolerance = tolerance;
        gen_circuit_settings_and_witness(
            test_dir,
            example_name.clone(),

@@ -1285,19 +1301,131 @@ mod native_tests {
            scales_to_use,
            2,
            false,
            &mut tolerance,
        );

        let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
            .args([
                "mock",
                "-W",
                format!("{}/{}/witness.json", test_dir, example_name).as_str(),
                "-M",
                format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
            ])
            .status()
            .expect("failed to execute process");
        assert!(status.success());
        if tolerance > 0.0 {
            // load witness and shift the output by a small amount that is less than tolerance percent
            let witness = GraphWitness::from_path(
                format!("{}/{}/witness.json", test_dir, example_name).into(),
            )
            .unwrap();
            let witness = witness.clone();
            let outputs = witness.outputs.clone();

            // get values as i128
            let output_perturbed_safe: Vec<Vec<halo2curves::bn256::Fr>> = outputs
                .iter()
                .map(|sv| {
                    sv.iter()
                        .map(|v| {
                            // randomly perturb by a small amount less than tolerance
                            let perturbation = if v == &halo2curves::bn256::Fr::zero() {
                                halo2curves::bn256::Fr::zero()
                            } else {
                                i128_to_felt(
                                    (felt_to_i128(*v) as f32
                                        * (rand::thread_rng().gen_range(-0.01..0.01) * tolerance))
                                        as i128,
                                )
                            };

                            *v + perturbation
                        })
                        .collect::<Vec<_>>()
                })
                .collect::<Vec<_>>();

            // get values as i128
            let output_perturbed_bad: Vec<Vec<halo2curves::bn256::Fr>> = outputs
                .iter()
                .map(|sv| {
                    sv.iter()
                        .map(|v| {
                            // randomly perturb by an amount larger than the tolerance
                            let perturbation = if v == &halo2curves::bn256::Fr::zero() {
                                halo2curves::bn256::Fr::from(2)
                            } else {
                                i128_to_felt(
                                    (felt_to_i128(*v) as f32
                                        * (rand::thread_rng().gen_range(0.02..0.1) * tolerance))
                                        as i128,
                                )
                            };
                            *v + perturbation
                        })
                        .collect::<Vec<_>>()
                })
                .collect::<Vec<_>>();

            let good_witness = GraphWitness {
                outputs: output_perturbed_safe,
                ..witness.clone()
            };

            // save
            good_witness
                .save(format!("{}/{}/witness_ok.json", test_dir, example_name).into())
                .unwrap();

            let bad_witness = GraphWitness {
                outputs: output_perturbed_bad,
                ..witness.clone()
            };

            // save
            bad_witness
                .save(format!("{}/{}/witness_bad.json", test_dir, example_name).into())
                .unwrap();

            let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
                .args([
                    "mock",
                    "-W",
                    format!("{}/{}/witness.json", test_dir, example_name).as_str(),
                    "-M",
                    format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
                ])
                .status()
                .expect("failed to execute process");
            assert!(status.success());

            let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
                .args([
                    "mock",
                    "-W",
                    format!("{}/{}/witness_ok.json", test_dir, example_name).as_str(),
                    "-M",
                    format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
                ])
                .status()
                .expect("failed to execute process");
            assert!(status.success());

            let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
                .args([
                    "mock",
                    "-W",
                    format!("{}/{}/witness_bad.json", test_dir, example_name).as_str(),
                    "-M",
                    format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
                ])
                .status()
                .expect("failed to execute process");
            assert!(!status.success());
        } else {
            let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
                .args([
                    "mock",
                    "-W",
                    format!("{}/{}/witness.json", test_dir, example_name).as_str(),
                    "-M",
                    format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
                ])
                .status()
                .expect("failed to execute process");
            assert!(status.success());
        }
    }
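The witness_ok / witness_bad pair above probes one inequality from both sides: a perturbation of at most 1% of the tolerance percentage must still pass the mock prover, while one of 2 to 10% of it must fail. A plain sketch of the percent-tolerance acceptance predicate being exercised (the in-circuit version is the range_check_percent op; its exact rounding may differ):

// Accept `observed` if it lies within `tol_percent`% of `expected`, the
// check the witness_ok / witness_bad pair tests from both sides.
fn within_tolerance(expected: i128, observed: i128, tol_percent: f32) -> bool {
    let bound = (expected.abs() as f32) * tol_percent / 100.0;
    ((observed - expected).abs() as f32) <= bound
}

fn main() {
    assert!(within_tolerance(1000, 1004, 1.0));  // 0.4% off: accepted
    assert!(!within_tolerance(1000, 1050, 1.0)); // 5% off: rejected
}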
    #[allow(clippy::too_many_arguments)]

@@ -1312,6 +1440,7 @@ mod native_tests {
        scales_to_use: Option<Vec<u32>>,
        num_inner_columns: usize,
        div_rebasing: bool,
        tolerance: &mut f32,
    ) {
        let mut args = vec![
            "gen-settings".to_string(),

@@ -1321,11 +1450,12 @@ mod native_tests {
                "--settings-path={}/{}/settings.json",
                test_dir, example_name
            ),
            format!("--variables=batch_size={}", batch_size),
            format!("--variables=batch_size->{}", batch_size),
            format!("--input-visibility={}", input_visibility),
            format!("--param-visibility={}", param_visibility),
            format!("--output-visibility={}", output_visibility),
            format!("--num-inner-cols={}", num_inner_columns),
            format!("--tolerance={}", tolerance),
        ];

        if div_rebasing {

@@ -1368,6 +1498,24 @@ mod native_tests {
            .expect("failed to execute process");
        assert!(status.success());

        let mut settings =
            GraphSettings::load(&format!("{}/{}/settings.json", test_dir, example_name).into())
                .unwrap();

        let any_output_scales_smol = settings.model_output_scales.iter().any(|s| *s <= 0);

        if any_output_scales_smol {
            // set the tolerance to 0.0
            settings.run_args.tolerance = Tolerance {
                val: 0.0,
                scale: 0.0.into(),
            };
            settings
                .save(&format!("{}/{}/settings.json", test_dir, example_name).into())
                .unwrap();
            *tolerance = 0.0;
        }

        let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
            .args([
                "compile-circuit",

@@ -1402,6 +1550,7 @@ mod native_tests {
    }

    // Mock prove (fast, but does not cover some potential issues)
    #[allow(clippy::too_many_arguments)]
    fn accuracy_measurement(
        test_dir: &str,
        example_name: String,

@@ -1424,6 +1573,7 @@ mod native_tests {
            None,
            2,
            div_rebasing,
            &mut 0.0,
        );

        println!(

@@ -1455,7 +1605,7 @@ mod native_tests {
            format!("{}/{}/network.onnx", test_dir, example_name).as_str(),
            "-O",
            format!("{}/{}/render.png", test_dir, example_name).as_str(),
            "--lookup-range=(-32768,32768)",
            "--lookup-range=-32768->32768",
            "-K=17",
        ])
        .status()

@@ -1683,6 +1833,7 @@ mod native_tests {
            scales_to_use,
            num_inner_columns,
            false,
            &mut 0.0,
        );

        let settings_path = format!("{}/{}/settings.json", test_dir, example_name);

@@ -1745,6 +1896,30 @@ mod native_tests {
            .status()
            .expect("failed to execute process");
        assert!(status.success());

        // load settings file
        let settings =
            std::fs::read_to_string(settings_path.clone()).expect("failed to read settings file");

        let graph_settings = serde_json::from_str::<GraphSettings>(&settings)
            .expect("failed to parse settings file");

        // get_srs for the graph_settings_num_instances
        download_srs(graph_settings.log2_total_instances());

        let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
            .args([
                "verify",
                format!("--settings-path={}", settings_path).as_str(),
                "--proof-path",
                &format!("{}/{}/proof.pf", test_dir, example_name),
                "--vk-path",
                &format!("{}/{}/key.vk", test_dir, example_name),
                "--reduced-srs",
            ])
            .status()
            .expect("failed to execute process");
        assert!(status.success());
    }

    // prove-serialize-verify, the usual full path

@@ -1760,6 +1935,7 @@ mod native_tests {
            None,
            2,
            false,
            &mut 0.0,
        );

        let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))

@@ -2036,6 +2212,7 @@ mod native_tests {
            Some(vec![4]),
            1,
            false,
            &mut 0.0,
        );

        let model_path = format!("{}/{}/network.compiled", test_dir, example_name);
@@ -12,7 +12,7 @@ def get_ezkl_output(witness_file, settings_file):
    outputs = witness_output['outputs']
    with open(settings_file) as f:
        settings = json.load(f)
    ezkl_outputs = [[ezkl.string_to_float(
    ezkl_outputs = [[ezkl.felt_to_float(
        outputs[i][j], settings['model_output_scales'][i]) for j in range(len(outputs[i]))] for i in range(len(outputs))]
    return ezkl_outputs

@@ -91,9 +91,7 @@ def compare_outputs(zk_output, onnx_output):
            print("------- zk_output: ", list1_i)
            print("------- onnx_output: ", list2_i)

    return np.mean(np.abs(res))
    return res

if __name__ == '__main__':

@@ -113,6 +111,9 @@ if __name__ == '__main__':
    onnx_output = get_onnx_output(model_file, input_file)
    # compare the outputs
    percentage_difference = compare_outputs(ezkl_output, onnx_output)
    mean_percentage_difference = np.mean(np.abs(percentage_difference))
    max_percentage_difference = np.max(np.abs(percentage_difference))
    # print the percentage difference
    print("mean percent diff: ", percentage_difference)
    assert percentage_difference < target, "Percentage difference is too high"
    print("mean percent diff: ", mean_percentage_difference)
    print("max percent diff: ", max_percentage_difference)
    assert mean_percentage_difference < target, "Percentage difference is too high"

@@ -131,7 +131,7 @@ mod py_tests {
    "simple_demo_aggregated_proofs.ipynb",
    "ezkl_demo.ipynb", // 10
    "lstm.ipynb",
    "set_membership.ipynb",
    "set_membership.ipynb", // 12
    "decision_tree.ipynb",
    "random_forest.ipynb",
    "gradient_boosted_trees.ipynb", // 15

@@ -56,9 +56,9 @@ def test_poseidon_hash():
    Test for poseidon_hash
    """
    message = [1.0, 2.0, 3.0, 4.0]
    message = [ezkl.float_to_string(x, 7) for x in message]
    message = [ezkl.float_to_felt(x, 7) for x in message]
    res = ezkl.poseidon_hash(message)
    assert ezkl.string_to_felt(
    assert ezkl.felt_to_big_endian(
        res[0]) == "0x0da7e5e5c8877242fa699f586baf770d731defd54f952d4adeb85047a0e32f45"

@@ -70,14 +70,14 @@ def test_field_serialization():

    input = 890
    scale = 7
    felt = ezkl.float_to_string(input, scale)
    roundtrip_input = ezkl.string_to_float(felt, scale)
    felt = ezkl.float_to_felt(input, scale)
    roundtrip_input = ezkl.felt_to_float(felt, scale)
    assert input == roundtrip_input

    input = -700
    scale = 7
    felt = ezkl.float_to_string(input, scale)
    roundtrip_input = ezkl.string_to_float(felt, scale)
    felt = ezkl.float_to_felt(input, scale)
    roundtrip_input = ezkl.felt_to_float(felt, scale)
    assert input == roundtrip_input

@@ -88,12 +88,12 @@ def test_buffer_to_felts():
    buffer = bytearray("a sample string!", 'utf-8')
    felts = ezkl.buffer_to_felts(buffer)
    ref_felt_1 = "0x0000000000000000000000000000000021676e6972747320656c706d61732061"
    assert felts == [ref_felt_1]
    assert ezkl.felt_to_big_endian(felts[0]) == ref_felt_1

    buffer = bytearray("a sample string!"+"high", 'utf-8')
    felts = ezkl.buffer_to_felts(buffer)
    ref_felt_2 = "0x0000000000000000000000000000000000000000000000000000000068676968"
    assert felts == [ref_felt_1, ref_felt_2]
    assert [ezkl.felt_to_big_endian(felts[0]), ezkl.felt_to_big_endian(felts[1])] == [ref_felt_1, ref_felt_2]

def test_gen_srs():

@@ -113,13 +113,13 @@ def test_calibrate_over_user_range():
    data_path = os.path.join(
        examples_path,
        'onnx',
        '1l_average',
        '1l_relu',
        'input.json'
    )
    model_path = os.path.join(
        examples_path,
        'onnx',
        '1l_average',
        '1l_relu',
        'network.onnx'
    )
    output_path = os.path.join(

@@ -147,13 +147,13 @@ def test_calibrate():
    data_path = os.path.join(
        examples_path,
        'onnx',
        '1l_average',
        '1l_relu',
        'input.json'
    )
    model_path = os.path.join(
        examples_path,
        'onnx',
        '1l_average',
        '1l_relu',
        'network.onnx'
    )
    output_path = os.path.join(

@@ -183,7 +183,7 @@ def test_model_compile():
    model_path = os.path.join(
        examples_path,
        'onnx',
        '1l_average',
        '1l_relu',
        'network.onnx'
    )
    compiled_model_path = os.path.join(

@@ -205,7 +205,7 @@ def test_forward():
    data_path = os.path.join(
        examples_path,
        'onnx',
        '1l_average',
        '1l_relu',
        'input.json'
    )
    model_path = os.path.join(

@@ -392,9 +392,7 @@ def test_prove_evm():
    assert res['transcript_type'] == 'EVM'
    assert os.path.isfile(proof_path)

    res = ezkl.print_proof_hex(proof_path)
    # to figure out a better way of testing print_proof_hex
    assert type(res) == str

def test_create_evm_verifier():
@@ -8,9 +8,9 @@ mod wasm32 {
     use ezkl::graph::GraphWitness;
     use ezkl::pfsys;
     use ezkl::wasm::{
-        bufferToVecOfstring, compiledCircuitValidation, encodeVerifierCalldata, genPk, genVk,
-        genWitness, inputValidation, pkValidation, poseidonHash, printProofHex, proofValidation,
-        prove, settingsValidation, srsValidation, stringToFelt, stringToFloat, stringToInt,
+        bufferToVecOfFelt, compiledCircuitValidation, encodeVerifierCalldata, feltToBigEndian,
+        feltToFloat, feltToInt, feltToLittleEndian, genPk, genVk, genWitness, inputValidation,
+        pkValidation, poseidonHash, proofValidation, prove, settingsValidation, srsValidation,
         u8_array_to_u128_le, verify, vkValidation, witnessValidation,
     };
     use halo2_solidity_verifier::encode_calldata;
@@ -76,22 +76,29 @@ mod wasm32 {
         for i in 0..32 {
             let field_element = Fr::from(i);
             let serialized = serde_json::to_vec(&field_element).unwrap();

             let clamped = wasm_bindgen::Clamped(serialized);
             let scale = 2;
-            let floating_point = stringToFloat(clamped.clone(), scale)
+            let floating_point = feltToFloat(clamped.clone(), scale)
                 .map_err(|_| "failed")
                 .unwrap();
             assert_eq!(floating_point, (i as f64) / 4.0);

-            let integer: i128 = serde_json::from_slice(
-                &stringToInt(clamped.clone()).map_err(|_| "failed").unwrap(),
-            )
-            .unwrap();
+            let integer: i128 =
+                serde_json::from_slice(&feltToInt(clamped.clone()).map_err(|_| "failed").unwrap())
+                    .unwrap();
             assert_eq!(integer, i as i128);

-            let hex_string = format!("{:?}", field_element);
-            let returned_string = stringToFelt(clamped).map_err(|_| "failed").unwrap();
+            let hex_string = format!("{:?}", field_element.clone());
+            let returned_string: String = feltToBigEndian(clamped.clone())
+                .map_err(|_| "failed")
+                .unwrap();
             assert_eq!(hex_string, returned_string);
+
+            let repr = serde_json::to_string(&field_element).unwrap();
+            let little_endian_string: String = serde_json::from_str(&repr).unwrap();
+            let returned_string: String =
+                feltToLittleEndian(clamped).map_err(|_| "failed").unwrap();
+            assert_eq!(little_endian_string, returned_string);
         }
     }
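The big- vs little-endian pair exercised above reduces to a byte reversal; in Python terms (an illustration only, not part of the test suite):

    # a felt's raw serialization is little-endian (cf. the witness JSON at the end
    # of this diff), while the display form is 0x-prefixed big-endian hex
    raw_le = "0200000000000000000000000000000000000000000000000000000000000000"
    assert "0x" + bytes.fromhex(raw_le)[::-1].hex() == "0x" + "00" * 31 + "02"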
@@ -101,7 +108,7 @@ mod wasm32 {
         let mut buffer = string_high.clone().into_bytes();
         let clamped = wasm_bindgen::Clamped(buffer.clone());

-        let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
+        let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();

         let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -118,7 +125,7 @@ mod wasm32 {
         let buffer = string_sample.clone().into_bytes();
         let clamped = wasm_bindgen::Clamped(buffer.clone());

-        let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
+        let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();

         let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -133,7 +140,7 @@ mod wasm32 {
         let buffer = string_concat.into_bytes();
         let clamped = wasm_bindgen::Clamped(buffer.clone());

-        let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
+        let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();

         let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -258,15 +265,6 @@ mod wasm32 {
         assert!(value);
     }

-    #[wasm_bindgen_test]
-    async fn print_proof_hex_test() {
-        let proof = printProofHex(wasm_bindgen::Clamped(PROOF.to_vec()))
-            .map_err(|_| "failed")
-            .unwrap();
-
-        assert!(proof.len() > 0);
-    }
-
     #[wasm_bindgen_test]
     async fn verify_validations() {
         // Run witness validation on network (should fail)
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -27,6 +27,8 @@
     "check_mode": "UNSAFE"
   },
   "num_rows": 16,
+  "total_dynamic_col_size": 0,
+  "num_dynamic_lookups": 0,
   "total_assignments": 32,
   "total_const_size": 8,
   "model_instance_shapes": [
@@ -38,7 +38,10 @@ describe('Generate witness, prove and verify', () => {
         let pk = await readEzklArtifactsFile(path, example, 'key.pk');
         let circuit_ser = await readEzklArtifactsFile(path, example, 'network.compiled');
         circuit_settings_ser = await readEzklArtifactsFile(path, example, 'settings.json');
-        params_ser = await readEzklSrsFile(path, example);
+        // get the log rows from the circuit settings
+        const circuit_settings = deserialize(circuit_settings_ser) as any;
+        const logrows = circuit_settings.run_args.logrows as string;
+        params_ser = await readEzklSrsFile(logrows);
         const startTimeProve = Date.now();
         result = wasmFunctions.prove(witness, pk, circuit_ser, params_ser);
         const endTimeProve = Date.now();
@@ -54,6 +57,7 @@ describe('Generate witness, prove and verify', () => {
         let result
         const vk = await readEzklArtifactsFile(path, example, 'key.vk');
         const startTimeVerify = Date.now();
+        params_ser = await readEzklSrsFile("1");
         result = wasmFunctions.verify(proof_ser, vk, circuit_settings_ser, params_ser);
         const result_ref = wasmFunctions.verify(proof_ser_ref, vk, circuit_settings_ser, params_ser);
         const endTimeVerify = Date.now();
@@ -16,15 +16,7 @@ export async function readEzklArtifactsFile(path: string, example: string, filen
     return new Uint8ClampedArray(buffer.buffer);
 }

-export async function readEzklSrsFile(path: string, example: string): Promise<Uint8ClampedArray> {
-    // const settingsPath = path.join(__dirname, '..', '..', 'ezkl', 'examples', 'onnx', example, 'settings.json');
-
-    const settingsPath = `${path}/${example}/settings.json`
-    const settingsBuffer = await fs.readFile(settingsPath, { encoding: 'utf-8' });
-    const settings = JSONBig.parse(settingsBuffer);
-    const logrows = settings.run_args.logrows;
-    // const filePath = path.join(__dirname, '..', '..', 'ezkl', 'examples', 'onnx', `kzg${logrows}.srs`);
+// srs path is at $HOME/.ezkl/srs
+export async function readEzklSrsFile(logrows: string): Promise<Uint8ClampedArray> {
     const filePath = `${userHomeDir}/.ezkl/srs/kzg${logrows}.srs`
     const buffer = await fs.readFile(filePath);
     return new Uint8ClampedArray(buffer.buffer);
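The lookup the rewritten readEzklSrsFile performs can be mirrored in Python; srs_path_for is a hypothetical helper, but the $HOME/.ezkl/srs/kzg{logrows}.srs layout and the run_args.logrows settings field both appear in the diff above:

    import json, os

    def srs_path_for(settings_file: str) -> str:
        # read logrows from the circuit settings, then point at the shared SRS cache
        with open(settings_file) as f:
            logrows = json.load(f)["run_args"]["logrows"]
        return os.path.join(os.path.expanduser("~"), ".ezkl", "srs", f"kzg{logrows}.srs")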
Binary file not shown.
@@ -1 +1 @@
-{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":-1}
+{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":-1,"max_range_size":0}