Compare commits

...

29 Commits

Author SHA1 Message Date
dante
a59e3780b2 chore: rm recip_int helper (#733) 2024-03-05 21:51:14 +00:00
dante
345fb5672a chore: cleanup unused args (#732) 2024-03-05 13:43:29 +00:00
dante
70daaff2e4 chore: cleanup calibrate (#731) 2024-03-04 17:52:11 +00:00
dante
a437d8a51f feat: "sub"-dynamic tables (#730) 2024-03-04 10:35:28 +00:00
Ethan Cemer
fe535c1cac feat: wasm felt to little endian string (#729) 2024-03-01 14:06:20 +00:00
Co-authored-by: Alexander Camuto <45801863+alexander-camuto@users.noreply.github.com>
dante
3e8dcb001a chore: test for reduced-srs on wasm bundle (#728) 2024-03-01 13:23:07 +00:00
Co-authored-by: Ethan <tylercemer@gmail.com>
dante
14786acb95 feat: dynamic lookups (#727) 2024-03-01 01:44:45 +00:00
dante
80a3c44cb4 feat: lookup-less recip by default (#725) 2024-02-28 16:35:20 +00:00
dante
1656846d1a fix: transcript should serialize as lc flag (#726) 2024-02-26 22:02:47 +00:00
dante
88098b8190 fix!: cleanup felt serialization language in python and wasm (#724)
BREAKING CHANGE: python and wasm felt utilities have new names
2024-02-25 14:06:48 +00:00
dante
6c0c17c9be fix: include tol check in fwd pass (#723) 2024-02-23 01:28:59 +00:00
dante
bf69b16fc1 fix: rm optional bool flags (#722) 2024-02-21 12:45:42 +00:00
dante
74feb829da feat: parse command ast into flag strings (#720) 2024-02-21 00:38:26 +00:00
dante
d429e7edab fix: buffer data read and writes (#719) 2024-02-19 11:49:15 +00:00
dante
f0e5b82787 refactor: selectable key ser (#718) 2024-02-19 11:26:18 +00:00
dante
3f7261f50b fix: set buf capacity for witness, settings, proof (#717) 2024-02-16 21:59:20 +00:00
dante
678a249dcb feat: allow for reduced n srs for verification (#716) 2024-02-16 18:28:54 +00:00
dante
0291eb2d0f fix: reduce verbosity of common operations (#715) 2024-02-15 17:27:33 +00:00
dante
1b637a70b0 refactor: print_proof_hex is redundant with proof file (#713) 2024-02-14 15:25:28 +00:00
dante
abcd5380db feat: programmable buffer capacity (#712) 2024-02-13 15:49:14 +00:00
dante
076b737108 chore: allow for a max circuit area cap (#711) 2024-02-12 14:36:51 +00:00
dante
97d9832591 refactor: calibration for resources and accuracy over same scale range (#710) 2024-02-11 15:03:38 +00:00
dante
e0771683a6 chore: update h2 curves (#709) 2024-02-10 22:54:38 +00:00
dante
319c222307 chore: more descriptive debug logs on forward pass (#708) 2024-02-10 16:10:33 +00:00
dante
85ee6e7f9d refactor: use layout as the forward function (#707) 2024-02-08 21:15:46 +00:00
dante
4c8daf773c refactor: lookup-less layer norm (#706) 2024-02-07 21:19:17 +00:00
dante
80041ac523 refactor: equals argument without lookups (#705) 2024-02-07 14:20:13 +00:00
dante
2a1ee1102c refactor: range check recip (#703) 2024-02-05 14:42:26 +00:00
dante
95d4fd4a70 feat: power of 2 div using type system (#702) 2024-02-04 02:43:38 +00:00
59 changed files with 4282 additions and 3036 deletions

View File

@@ -11,7 +11,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- name: nanoGPT Mock

View File

@@ -45,7 +45,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- name: Checkout repo

View File

@@ -26,7 +26,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- name: Build
@@ -38,7 +38,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- name: Docs
@@ -50,7 +50,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -73,7 +73,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -106,7 +106,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -139,7 +139,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -172,7 +172,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -189,7 +189,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
@@ -198,10 +198,8 @@ jobs:
# chromedriver-version: "115.0.5790.102"
- name: Install wasm32-unknown-unknown
run: rustup target add wasm32-unknown-unknown
- name: Install wasm runner
run: cargo install wasm-server-runner
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2023-08-24-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
- name: Run wasm verifier tests
# on mac:
# AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
@@ -214,7 +212,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -231,13 +229,15 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: public outputs and tolerance > 0
run: cargo nextest run --release --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
- name: public outputs + batch size == 10
run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 32
- name: kzg inputs
@@ -286,7 +286,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -312,7 +312,7 @@ jobs:
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
- name: KZG prove and verify tests (EVM + VK rendered seperately)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_render_seperately_ --test-threads 1
- name: KZG prove and verify tests (EVM + kzg all)
@@ -345,18 +345,15 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown
- name: Install wasm-server-runner
run: cargo install wasm-server-runner
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2023-08-24-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- name: Use pnpm 8
uses: pnpm/action-setup@v2
@@ -416,11 +413,11 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2023-08-24-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- uses: baptiste0928/cargo-install@v1
with:
@@ -450,7 +447,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -460,7 +457,7 @@ jobs:
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
- name: fuzz tests (EVM)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_fuzz_ --test-threads 2
# - name: fuzz tests
@@ -473,7 +470,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -491,7 +488,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -503,12 +500,12 @@ jobs:
prove-and-verify-aggr-tests:
runs-on: large-self-hosted
needs: [build, library-tests, python-tests]
needs: [build, library-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -516,16 +513,16 @@ jobs:
crate: cargo-nextest
locked: true
- name: KZG tests
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 8 -- --include-ignored
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 4 -- --include-ignored
prove-and-verify-aggr-evm-tests:
runs-on: large-self-hosted
needs: [build, library-tests, python-tests]
needs: [build, library-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -535,7 +532,7 @@ jobs:
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
- name: KZG prove and verify aggr tests
run: cargo nextest run --release --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored
@@ -546,7 +543,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -568,7 +565,7 @@ jobs:
python-version: "3.7"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- name: Install solc
@@ -576,9 +573,9 @@ jobs:
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
- name: Build python ezkl
run: source .env/bin/activate; maturin develop --features python-bindings --release
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
- name: Run pytest
run: source .env/bin/activate; pytest -vv
@@ -592,7 +589,7 @@ jobs:
python-version: "3.7"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -602,7 +599,7 @@ jobs:
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
- name: Build python ezkl
run: source .env/bin/activate; maturin develop --features python-bindings --release
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
- name: Div rebase
run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_div_rebase_
- name: Public inputs
@@ -623,7 +620,7 @@ jobs:
python-version: "3.9"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -633,11 +630,11 @@ jobs:
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 95a93cd397f25f3f8d49d2851eb52bc2d52dd983 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
- name: Build python ezkl
run: source .env/bin/activate; maturin develop --features python-bindings --release
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
# - name: authenticate-kaggle-cli
# shell: bash
# env:

View File

@@ -22,18 +22,15 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2023-08-24
toolchain: nightly-2024-01-04
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown
- name: Install wasm-server-runner
run: cargo install wasm-server-runner
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2023-08-24-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
- name: Install binaryen
run: |
set -e

558
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -17,7 +17,7 @@ crate-type = ["cdylib", "rlib"]
[dependencies]
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch= "main" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch= "main" }
halo2curves = { version = "0.6.0", features = ["derive_serde"] }
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev="9fff22c", features=["derive_serde"] }
rand = { version = "0.8", default_features = false }
itertools = { version = "0.10.3", default_features = false }
clap = { version = "4.3.3", features = ["derive"]}
@@ -34,10 +34,13 @@ bincode = { version = "1.3.3", default_features = false }
ark-std = { version = "^0.3.0", default-features = false }
unzip-n = "0.1.2"
num = "0.4.1"
portable-atomic = "1.6.0"
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand" }
# evm related deps
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ethers = { version = "2.0.7", default_features = false, features = ["ethers-solc"] }
ethers = { version = "2.0.11", default_features = false, features = ["ethers-solc"] }
indicatif = {version = "0.17.5", features = ["rayon"]}
gag = { version = "1.0.0", default_features = false}
instant = { version = "0.1" }
@@ -158,6 +161,7 @@ mv-lookup = ["halo2_proofs/mv-lookup", "snark-verifier/mv-lookup", "halo2_solidi
det-prove = []
icicle = ["halo2_proofs/icicle_gpu"]
empty-cmd = []
no-banner = []
# icicle patch to 0.1.0 if feature icicle is enabled
[patch.'https://github.com/ingonyama-zk/icicle']

View File

@@ -6,6 +6,7 @@ use ezkl::fieldutils;
use ezkl::fieldutils::i32_to_felt;
use ezkl::tensor::*;
use halo2_proofs::dev::MockProver;
use halo2_proofs::poly::commitment::Params;
use halo2_proofs::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK};
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
@@ -489,6 +490,7 @@ pub fn runconv() {
strategy,
pi_for_real_prover,
&mut transcript,
params.n(),
);
assert!(verify.is_ok());

View File

@@ -309,7 +309,7 @@
"metadata": {},
"outputs": [],
"source": [
"print(ezkl.string_to_felt(res['processed_outputs']['poseidon_hash'][0]))"
"print(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]))"
]
},
{
@@ -325,7 +325,7 @@
"metadata": {},
"outputs": [],
"source": [
"from web3 import Web3, HTTPProvider, utils\n",
"from web3 import Web3, HTTPProvider\n",
"from solcx import compile_standard\n",
"from decimal import Decimal\n",
"import json\n",
@@ -338,7 +338,7 @@
"\n",
"def test_on_chain_data(res):\n",
" # Step 0: Convert the tensor to a flat list\n",
" data = [int(ezkl.string_to_felt(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
" data = [int(ezkl.felt_to_big_endian(res['processed_outputs']['poseidon_hash'][0]), 0)]\n",
"\n",
" # Step 1: Prepare the data\n",
" # Step 2: Prepare and compile the contract.\n",
@@ -648,7 +648,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
"version": "3.9.15"
},
"orig_nbformat": 4
},

View File

@@ -695,7 +695,7 @@
"formatted_output = \"[\"\n",
"for i, value in enumerate(proof[\"instances\"]):\n",
" for j, field_element in enumerate(value):\n",
" onchain_input_array.append(ezkl.string_to_felt(field_element))\n",
" onchain_input_array.append(ezkl.felt_to_big_endian(field_element))\n",
" formatted_output += str(onchain_input_array[-1])\n",
" if j != len(value) - 1:\n",
" formatted_output += \", \"\n",
@@ -705,7 +705,7 @@
"# copy them over to remix and see if they verify\n",
"# What happens when you change a value?\n",
"print(\"pubInputs: \", formatted_output)\n",
"print(\"proof: \", \"0x\" + proof[\"proof\"])"
"print(\"proof: \", proof[\"proof\"])"
]
},
{
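Note on the hunk above: per the breaking change in #724, the Python felt helpers were renamed, so `string_to_felt` becomes `felt_to_big_endian`, and the manual `"0x" +` prefix on the proof hex is dropped. A minimal sketch of the renamed call, assuming the Python bindings built from this branch and a proof JSON shaped as in the notebook (`instances` is a list of lists of felt strings); the path is illustrative:

```python
import json

import ezkl  # assumes the bindings from this branch (maturin develop --features python-bindings)

proof = json.load(open("proof.json", "r"))  # hypothetical path

onchain_input_array = []
for value in proof["instances"]:
    for field_element in value:
        # felt_to_big_endian replaces string_to_felt for formatting public inputs
        onchain_input_array.append(ezkl.felt_to_big_endian(field_element))

print("pubInputs: ", onchain_input_array)
print("proof: ", proof["proof"])  # printed as stored; the "0x" prefix is no longer added by hand
```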

View File

@@ -343,7 +343,6 @@
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" compress_selectors=True,\n",
" )\n",
"\n",
" assert res == True\n",

View File

@@ -122,8 +122,8 @@
"# Loop through each element in the y tensor\n",
"for e in y_input:\n",
" # Apply the custom function and append the result to the list\n",
" print(ezkl.float_to_string(e,7))\n",
" result.append(ezkl.poseidon_hash([ezkl.float_to_string(e, 7)])[0])\n",
" print(ezkl.float_to_felt(e,7))\n",
" result.append(ezkl.poseidon_hash([ezkl.float_to_felt(e, 7)])[0])\n",
"\n",
"y = y.unsqueeze(0)\n",
"y = y.reshape(1, 9)\n",

View File

@@ -126,7 +126,7 @@
"# Loop through each element in the y tensor\n",
"for e in user_preimages:\n",
" # Apply the custom function and append the result to the list\n",
" users.append(ezkl.poseidon_hash([ezkl.float_to_string(e, 0)])[0])\n",
" users.append(ezkl.poseidon_hash([ezkl.float_to_felt(e, 0)])[0])\n",
"\n",
"users_t = torch.tensor(user_preimages)\n",
"users_t = users_t.reshape(1, 6)\n",
@@ -303,7 +303,7 @@
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
"# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
"witness = json.load(open(witness_path, \"r\"))\n",
"witness[\"outputs\"][0] = [ezkl.float_to_string(1.0, 0)]\n",
"witness[\"outputs\"][0] = [ezkl.float_to_felt(1.0, 0)]\n",
"json.dump(witness, open(witness_path, \"w\"))"
]
},
@@ -417,7 +417,7 @@
"# we force the output to be 1 this corresponds to the solvency test being true -- and we set this to a fixed vis output\n",
"# this means that the output is fixed and the verifier can see it but that if the input is not in the set the output will not be 0 and the verifier will reject\n",
"witness = json.load(open(witness_path, \"r\"))\n",
"witness[\"outputs\"][0] = [ezkl.float_to_string(1.0, 0)]\n",
"witness[\"outputs\"][0] = [ezkl.float_to_felt(1.0, 0)]\n",
"json.dump(witness, open(witness_path, \"w\"))\n"
]
},
@@ -510,7 +510,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.13"
"version": "3.9.15"
}
},
"nbformat": 4,

View File

@@ -633,7 +633,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [4])"
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\", scales = [11])"
]
},
{
@@ -664,7 +664,6 @@
" compiled_model_path,\n",
" vk_path,\n",
" pk_path,\n",
" \n",
")"
]
},

View File

@@ -503,11 +503,11 @@
"pyplot.arrow(0, 0, 1, 0, width=0.02, alpha=0.5)\n",
"pyplot.arrow(0, 0, 0, 1, width=0.02, alpha=0.5)\n",
"\n",
"arrow_x = ezkl.string_to_float(witness['outputs'][0][0], out_scale)\n",
"arrow_y = ezkl.string_to_float(witness['outputs'][0][1], out_scale)\n",
"arrow_x = ezkl.felt_to_float(witness['outputs'][0][0], out_scale)\n",
"arrow_y = ezkl.felt_to_float(witness['outputs'][0][1], out_scale)\n",
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)\n",
"arrow_x = ezkl.string_to_float(witness['outputs'][0][2], out_scale)\n",
"arrow_y = ezkl.string_to_float(witness['outputs'][0][3], out_scale)\n",
"arrow_x = ezkl.felt_to_float(witness['outputs'][0][2], out_scale)\n",
"arrow_y = ezkl.felt_to_float(witness['outputs'][0][3], out_scale)\n",
"pyplot.arrow(0, 0, arrow_x, arrow_y, width=0.02)"
]
}
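Going the other way, the plotting hunk above decodes witness outputs back into floats with `felt_to_float` (formerly `string_to_float`). A sketch under the same assumptions, with an illustrative `out_scale`; in the notebook it comes from the circuit settings:

```python
import json

import ezkl  # bindings from this branch

witness = json.load(open("witness.json", "r"))  # hypothetical path
out_scale = 7  # illustrative; the notebook reads the output scale from its settings

# felt_to_float rescales each felt string in the first output tensor back into a float
outputs = [ezkl.felt_to_float(f, out_scale) for f in witness["outputs"][0]]
print(outputs)
```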

View File

@@ -0,0 +1,39 @@
from torch import nn
import torch
import json


class Circuit(nn.Module):
    def __init__(self, inplace=False):
        super(Circuit, self).__init__()

    def forward(self, x):
        return x / 10000


circuit = Circuit()
x = torch.empty(1, 8).random_(0, 2)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
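The new `network.onnx` and `input.json` from this example would normally be driven through the Python bindings. A rough sketch of that flow, using only calls that appear elsewhere in this diff (`calibrate_settings`, the three-argument `setup`) plus `gen_settings`/`compile_circuit` from the same bindings; the paths are illustrative and an SRS is assumed to be available already:

```python
import ezkl  # bindings from this branch

model_path = "network.onnx"   # written by the export script above
cal_path = "input.json"       # written by the export script above
settings_path = "settings.json"
compiled_model_path = "network.compiled"
vk_path = "test.vk"
pk_path = "test.pk"

# generate settings, calibrate against the sample input, then compile the circuit
ezkl.gen_settings(model_path, settings_path)
ezkl.calibrate_settings(cal_path, model_path, settings_path, "resources")
ezkl.compile_circuit(model_path, compiled_model_path, settings_path)

# setup now takes only the compiled model and key paths (compress_selectors was
# dropped in one of the notebook hunks); an SRS must have been fetched beforehand
res = ezkl.setup(compiled_model_path, vk_path, pk_path)
assert res == True
```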

View File

@@ -0,0 +1 @@
{"input_data": [[1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0]]}

Binary file not shown.

View File

@@ -11,8 +11,8 @@ use ezkl::execute::run;
#[cfg(not(target_arch = "wasm32"))]
use ezkl::logger::init_logger;
#[cfg(not(target_arch = "wasm32"))]
use log::{error, info};
#[cfg(not(target_arch = "wasm32"))]
use log::{debug, error, info};
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
use rand::prelude::SliceRandom;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(feature = "icicle")]
@@ -25,6 +25,7 @@ use std::error::Error;
pub async fn main() -> Result<(), Box<dyn Error>> {
let args = Cli::parse();
init_logger();
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
banner();
#[cfg(feature = "icicle")]
if env::var("ENABLE_ICICLE_GPU").is_ok() {
@@ -32,7 +33,7 @@ pub async fn main() -> Result<(), Box<dyn Error>> {
} else {
info!("Running with CPU");
}
info!("command: \n {}", &args.as_json()?.to_colored_json_auto()?);
debug!("command: \n {}", &args.as_json()?.to_colored_json_auto()?);
let res = run(args.command).await;
match &res {
Ok(_) => info!("succeeded"),
@@ -44,7 +45,7 @@ pub async fn main() -> Result<(), Box<dyn Error>> {
#[cfg(target_arch = "wasm32")]
pub fn main() {}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(not(any(target_arch = "wasm32", feature = "no-banner")))]
fn banner() {
let ell: Vec<&str> = vec![
"for Neural Networks",

View File

@@ -41,7 +41,7 @@ pub struct KZGChip {
}
impl KZGChip {
/// Returns the number of inputs to the hash function
/// Commit to the message using the KZG commitment scheme
pub fn commit(
message: Vec<Fp>,
degree: u32,

View File

@@ -15,7 +15,7 @@ use halo2_proofs::{
Instance, Selector, TableColumn,
},
};
use log::{trace, warn};
use log::{debug, trace};
/// A simple [`FloorPlanner`] that performs minimal optimizations.
#[derive(Debug)]
@@ -119,7 +119,7 @@ impl<'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> Layouter<F> for ModuleLayo
Error::Synthesis
})?;
if !self.regions.contains_key(&index) {
warn!("spawning module {}", index)
debug!("spawning module {}", index)
};
self.current_module = index;
}

View File

@@ -12,15 +12,11 @@ pub enum BaseOp {
DotInit,
CumProdInit,
CumProd,
Identity,
Add,
Mult,
Sub,
SumInit,
Sum,
Neg,
Range { tol: i32 },
IsZero,
IsBoolean,
}
@@ -36,12 +32,8 @@ impl BaseOp {
let (a, b) = inputs;
match &self {
BaseOp::Add => a + b,
BaseOp::Identity => b,
BaseOp::Neg => -b,
BaseOp::Sub => a - b,
BaseOp::Mult => a * b,
BaseOp::Range { .. } => b,
BaseOp::IsZero => b,
BaseOp::IsBoolean => b,
_ => panic!("nonaccum_f called on accumulating operation"),
}
@@ -73,19 +65,15 @@ impl BaseOp {
/// display func
pub fn as_str(&self) -> &'static str {
match self {
BaseOp::Identity => "IDENTITY",
BaseOp::Dot => "DOT",
BaseOp::DotInit => "DOTINIT",
BaseOp::CumProdInit => "CUMPRODINIT",
BaseOp::CumProd => "CUMPROD",
BaseOp::Add => "ADD",
BaseOp::Neg => "NEG",
BaseOp::Sub => "SUB",
BaseOp::Mult => "MULT",
BaseOp::Sum => "SUM",
BaseOp::SumInit => "SUMINIT",
BaseOp::Range { .. } => "RANGE",
BaseOp::IsZero => "ISZERO",
BaseOp::IsBoolean => "ISBOOLEAN",
}
}
@@ -93,8 +81,6 @@ impl BaseOp {
/// Returns the range of the query offset for this operation.
pub fn query_offset_rng(&self) -> (i32, usize) {
match self {
BaseOp::Identity => (0, 1),
BaseOp::Neg => (0, 1),
BaseOp::DotInit => (0, 1),
BaseOp::Dot => (-1, 2),
BaseOp::CumProd => (-1, 2),
@@ -104,8 +90,6 @@ impl BaseOp {
BaseOp::Mult => (0, 1),
BaseOp::Sum => (-1, 2),
BaseOp::SumInit => (0, 1),
BaseOp::Range { .. } => (0, 1),
BaseOp::IsZero => (0, 1),
BaseOp::IsBoolean => (0, 1),
}
}
@@ -113,8 +97,6 @@ impl BaseOp {
/// Returns the number of inputs for this operation.
pub fn num_inputs(&self) -> usize {
match self {
BaseOp::Identity => 1,
BaseOp::Neg => 1,
BaseOp::DotInit => 2,
BaseOp::Dot => 2,
BaseOp::CumProdInit => 1,
@@ -124,28 +106,22 @@ impl BaseOp {
BaseOp::Mult => 2,
BaseOp::Sum => 1,
BaseOp::SumInit => 1,
BaseOp::Range { .. } => 1,
BaseOp::IsZero => 1,
BaseOp::IsBoolean => 1,
BaseOp::IsBoolean => 0,
}
}
/// Returns the number of outputs for this operation.
pub fn constraint_idx(&self) -> usize {
match self {
BaseOp::Identity => 0,
BaseOp::Neg => 0,
BaseOp::DotInit => 0,
BaseOp::Dot => 1,
BaseOp::Add => 0,
BaseOp::Sub => 0,
BaseOp::Mult => 0,
BaseOp::Range { .. } => 0,
BaseOp::Sum => 1,
BaseOp::SumInit => 0,
BaseOp::CumProd => 1,
BaseOp::CumProdInit => 0,
BaseOp::IsZero => 0,
BaseOp::IsBoolean => 0,
}
}

View File

@@ -16,10 +16,11 @@ use pyo3::{
types::PyString,
};
use serde::{Deserialize, Serialize};
use tosubcommand::ToFlags;
use crate::{
circuit::ops::base::BaseOp,
circuit::{
ops::base::BaseOp,
table::{Range, RangeCheck, Table},
utils,
},
@@ -61,6 +62,22 @@ pub enum CheckMode {
UNSAFE,
}
impl std::fmt::Display for CheckMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
CheckMode::SAFE => write!(f, "safe"),
CheckMode::UNSAFE => write!(f, "unsafe"),
}
}
}
impl ToFlags for CheckMode {
/// Convert the struct to a subcommand string
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl From<String> for CheckMode {
fn from(value: String) -> Self {
match value.to_lowercase().as_str() {
@@ -83,6 +100,19 @@ pub struct Tolerance {
pub scale: utils::F32,
}
impl std::fmt::Display for Tolerance {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:.2}", self.val)
}
}
impl ToFlags for Tolerance {
/// Convert the struct to a subcommand string
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl FromStr for Tolerance {
type Err = String;
@@ -158,31 +188,158 @@ impl<'source> FromPyObject<'source> for Tolerance {
}
}
/// A struct representing the selectors for the dynamic lookup tables
#[derive(Clone, Debug, Default)]
pub struct DynamicLookups {
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
pub lookup_selectors: BTreeMap<(usize, usize), Selector>,
/// Selectors for the dynamic lookup tables
pub table_selectors: Vec<Selector>,
/// Inputs:
pub inputs: Vec<VarTensor>,
/// tables
pub tables: Vec<VarTensor>,
}
impl DynamicLookups {
/// Returns a new [DynamicLookups] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
let dummy_var = VarTensor::dummy(col_size, num_inner_cols);
let single_col_dummy_var = VarTensor::dummy(col_size, 1);
Self {
lookup_selectors: BTreeMap::new(),
table_selectors: vec![],
inputs: vec![dummy_var.clone(), dummy_var.clone(), dummy_var.clone()],
tables: vec![
single_col_dummy_var.clone(),
single_col_dummy_var.clone(),
single_col_dummy_var.clone(),
],
}
}
}
/// A struct representing the selectors for the dynamic lookup tables
#[derive(Clone, Debug, Default)]
pub struct Shuffles {
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
pub input_selectors: BTreeMap<(usize, usize), Selector>,
/// Selectors for the dynamic lookup tables
pub reference_selectors: Vec<Selector>,
/// Inputs:
pub inputs: Vec<VarTensor>,
/// tables
pub references: Vec<VarTensor>,
}
impl Shuffles {
/// Returns a new [DynamicLookups] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
let dummy_var = VarTensor::dummy(col_size, num_inner_cols);
let single_col_dummy_var = VarTensor::dummy(col_size, 1);
Self {
input_selectors: BTreeMap::new(),
reference_selectors: vec![],
inputs: vec![dummy_var.clone(), dummy_var.clone()],
references: vec![single_col_dummy_var.clone(), single_col_dummy_var.clone()],
}
}
}
/// A struct representing the selectors for the static lookup tables
#[derive(Clone, Debug, Default)]
pub struct StaticLookups<F: PrimeField + TensorType + PartialOrd> {
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
pub selectors: BTreeMap<(LookupOp, usize, usize), Selector>,
/// Selectors for the dynamic lookup tables
pub tables: BTreeMap<LookupOp, Table<F>>,
///
pub index: VarTensor,
///
pub output: VarTensor,
///
pub input: VarTensor,
}
impl<F: PrimeField + TensorType + PartialOrd> StaticLookups<F> {
/// Returns a new [StaticLookups] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
let dummy_var = VarTensor::dummy(col_size, num_inner_cols);
Self {
selectors: BTreeMap::new(),
tables: BTreeMap::new(),
index: dummy_var.clone(),
output: dummy_var.clone(),
input: dummy_var,
}
}
}
/// A struct representing the selectors for custom gates
#[derive(Clone, Debug, Default)]
pub struct CustomGates {
/// the inputs to the accumulated operations.
pub inputs: Vec<VarTensor>,
/// the (currently singular) output of the accumulated operations.
pub output: VarTensor,
/// selector
pub selectors: BTreeMap<(BaseOp, usize, usize), Selector>,
}
impl CustomGates {
/// Returns a new [CustomGates] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
let dummy_var = VarTensor::dummy(col_size, num_inner_cols);
Self {
inputs: vec![dummy_var.clone(), dummy_var.clone()],
output: dummy_var,
selectors: BTreeMap::new(),
}
}
}
/// A struct representing the selectors for the range checks
#[derive(Clone, Debug, Default)]
pub struct RangeChecks<F: PrimeField + TensorType + PartialOrd> {
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
pub selectors: BTreeMap<(Range, usize, usize), Selector>,
/// Selectors for the dynamic lookup tables
pub ranges: BTreeMap<Range, RangeCheck<F>>,
///
pub index: VarTensor,
///
pub input: VarTensor,
}
impl<F: PrimeField + TensorType + PartialOrd> RangeChecks<F> {
/// Returns a new [RangeChecks] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
let dummy_var = VarTensor::dummy(col_size, num_inner_cols);
Self {
selectors: BTreeMap::new(),
ranges: BTreeMap::new(),
index: dummy_var.clone(),
input: dummy_var,
}
}
}
/// Configuration for an accumulated arg.
#[derive(Clone, Debug, Default)]
pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
/// the inputs to the accumulated operations.
pub inputs: Vec<VarTensor>,
/// the VarTensor reserved for lookup operations (could be an element of inputs)
/// Note that you should be careful to ensure that the lookup_input is not simultaneously assigned to by other non-lookup operations eg. in the case of composite ops.
pub lookup_input: VarTensor,
/// the (currently singular) output of the accumulated operations.
pub output: VarTensor,
/// the VarTensor reserved for lookup operations (could be an element of inputs or the same as output)
/// Note that you should be careful to ensure that the lookup_output is not simultaneously assigned to by other non-lookup operations eg. in the case of composite ops.
pub lookup_output: VarTensor,
///
pub lookup_index: VarTensor,
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure [BaseOp].
pub selectors: BTreeMap<(BaseOp, usize, usize), Selector>,
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many lookup ops.
pub lookup_selectors: BTreeMap<(LookupOp, usize, usize), Selector>,
///
pub tables: BTreeMap<LookupOp, Table<F>>,
///
pub range_checks: BTreeMap<Range, RangeCheck<F>>,
/// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many lookup ops.
pub range_check_selectors: BTreeMap<(Range, usize, usize), Selector>,
/// Custom gates
pub custom_gates: CustomGates,
/// StaticLookups
pub static_lookups: StaticLookups<F>,
/// [Selector]s for the dynamic lookup tables
pub dynamic_lookups: DynamicLookups,
/// [Selector]s for the range checks
pub range_checks: RangeChecks<F>,
/// [Selector]s for the shuffles
pub shuffles: Shuffles,
/// Activate sanity checks
pub check_mode: CheckMode,
_marker: PhantomData<F>,
@@ -191,19 +348,12 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
/// Returns a new [BaseConfig] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
let dummy_var = VarTensor::dummy(col_size, num_inner_cols);
Self {
inputs: vec![dummy_var.clone(), dummy_var.clone()],
lookup_input: dummy_var.clone(),
output: dummy_var.clone(),
lookup_output: dummy_var.clone(),
lookup_index: dummy_var,
selectors: BTreeMap::new(),
lookup_selectors: BTreeMap::new(),
range_check_selectors: BTreeMap::new(),
tables: BTreeMap::new(),
range_checks: BTreeMap::new(),
custom_gates: CustomGates::dummy(col_size, num_inner_cols),
static_lookups: StaticLookups::dummy(col_size, num_inner_cols),
dynamic_lookups: DynamicLookups::dummy(col_size, num_inner_cols),
shuffles: Shuffles::dummy(col_size, num_inner_cols),
range_checks: RangeChecks::dummy(col_size, num_inner_cols),
check_mode: CheckMode::SAFE,
_marker: PhantomData,
}
@@ -236,10 +386,7 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
for j in 0..output.num_inner_cols() {
nonaccum_selectors.insert((BaseOp::Add, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::Sub, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::Neg, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::Mult, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::IsZero, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::Identity, i, j), meta.selector());
nonaccum_selectors.insert((BaseOp::IsBoolean, i, j), meta.selector());
}
}
@@ -276,9 +423,14 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
let constraints = match base_op {
BaseOp::IsBoolean => {
vec![(qis[1].clone()) * (qis[1].clone() - Expression::Constant(F::from(1)))]
let expected_output: Tensor<Expression<F>> = output
.query_rng(meta, *block_idx, *inner_col_idx, 0, 1)
.expect("non accum: output query failed");
let output = expected_output[base_op.constraint_idx()].clone();
vec![(output.clone()) * (output.clone() - Expression::Constant(F::from(1)))]
}
BaseOp::IsZero => vec![qis[1].clone()],
_ => {
let expected_output: Tensor<Expression<F>> = output
.query_rng(meta, *block_idx, *inner_col_idx, rotation_offset, rng)
@@ -332,16 +484,15 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
.collect();
Self {
selectors,
lookup_selectors: BTreeMap::new(),
range_check_selectors: BTreeMap::new(),
inputs: inputs.to_vec(),
lookup_input: VarTensor::Empty,
lookup_output: VarTensor::Empty,
lookup_index: VarTensor::Empty,
tables: BTreeMap::new(),
range_checks: BTreeMap::new(),
output: output.clone(),
custom_gates: CustomGates {
inputs: inputs.to_vec(),
output: output.clone(),
selectors,
},
static_lookups: StaticLookups::default(),
dynamic_lookups: DynamicLookups::default(),
shuffles: Shuffles::default(),
range_checks: RangeChecks::default(),
check_mode,
_marker: PhantomData,
}
@@ -362,8 +513,6 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
where
F: Field,
{
let mut selectors = BTreeMap::new();
if !index.is_advice() {
return Err("wrong input type for lookup index".into());
}
@@ -376,9 +525,9 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
// we borrow mutably twice so we need to do this dance
let table = if !self.tables.contains_key(nl) {
let table = if !self.static_lookups.tables.contains_key(nl) {
// as all tables have the same input we see if there's another table who's input we can reuse
let table = if let Some(table) = self.tables.values().next() {
let table = if let Some(table) = self.static_lookups.tables.values().next() {
Table::<F>::configure(
cs,
lookup_range,
@@ -389,7 +538,7 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
} else {
Table::<F>::configure(cs, lookup_range, logrows, nl, None)
};
self.tables.insert(nl.clone(), table.clone());
self.static_lookups.tables.insert(nl.clone(), table.clone());
table
} else {
return Ok(());
@@ -473,49 +622,218 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
res
});
}
selectors.insert((nl.clone(), x, y), multi_col_selector);
self.static_lookups
.selectors
.insert((nl.clone(), x, y), multi_col_selector);
}
}
self.lookup_selectors.extend(selectors);
// if we haven't previously initialized the input/output, do so now
if let VarTensor::Empty = self.lookup_input {
if let VarTensor::Empty = self.static_lookups.input {
debug!("assigning lookup input");
self.lookup_input = input.clone();
self.static_lookups.input = input.clone();
}
if let VarTensor::Empty = self.lookup_output {
if let VarTensor::Empty = self.static_lookups.output {
debug!("assigning lookup output");
self.lookup_output = output.clone();
self.static_lookups.output = output.clone();
}
if let VarTensor::Empty = self.lookup_index {
if let VarTensor::Empty = self.static_lookups.index {
debug!("assigning lookup index");
self.lookup_index = index.clone();
self.static_lookups.index = index.clone();
}
Ok(())
}
/// Configures and creates lookup selectors
#[allow(clippy::too_many_arguments)]
pub fn configure_dynamic_lookup(
&mut self,
cs: &mut ConstraintSystem<F>,
lookups: &[VarTensor; 3],
tables: &[VarTensor; 3],
) -> Result<(), Box<dyn Error>>
where
F: Field,
{
for l in lookups.iter() {
if !l.is_advice() {
return Err("wrong input type for dynamic lookup".into());
}
}
for t in tables.iter() {
if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
return Err("wrong table type for dynamic lookup".into());
}
}
let one = Expression::Constant(F::ONE);
let s_ltable = cs.complex_selector();
for x in 0..lookups[0].num_blocks() {
for y in 0..lookups[0].num_inner_cols() {
let s_lookup = cs.complex_selector();
cs.lookup_any("lookup", |cs| {
let s_lookupq = cs.query_selector(s_lookup);
let mut expression = vec![];
let s_ltableq = cs.query_selector(s_ltable);
let mut lookup_queries = vec![one.clone()];
for lookup in lookups {
lookup_queries.push(match lookup {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
});
}
let mut table_queries = vec![one.clone()];
for table in tables {
table_queries.push(match table {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[0][0], Rotation(0))
}
_ => unreachable!(),
});
}
let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone());
let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone());
expression.extend(lhs.zip(rhs));
expression
});
self.dynamic_lookups
.lookup_selectors
.entry((x, y))
.or_insert(s_lookup);
}
}
self.dynamic_lookups.table_selectors.push(s_ltable);
// if we haven't previously initialized the input/output, do so now
if self.dynamic_lookups.tables.is_empty() {
debug!("assigning dynamic lookup table");
self.dynamic_lookups.tables = tables.to_vec();
}
if self.dynamic_lookups.inputs.is_empty() {
debug!("assigning dynamic lookup input");
self.dynamic_lookups.inputs = lookups.to_vec();
}
Ok(())
}
/// Configures and creates lookup selectors
#[allow(clippy::too_many_arguments)]
pub fn configure_shuffles(
&mut self,
cs: &mut ConstraintSystem<F>,
inputs: &[VarTensor; 2],
references: &[VarTensor; 2],
) -> Result<(), Box<dyn Error>>
where
F: Field,
{
for l in inputs.iter() {
if !l.is_advice() {
return Err("wrong input type for dynamic lookup".into());
}
}
for t in references.iter() {
if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
return Err("wrong table type for dynamic lookup".into());
}
}
let one = Expression::Constant(F::ONE);
let s_reference = cs.complex_selector();
for x in 0..inputs[0].num_blocks() {
for y in 0..inputs[0].num_inner_cols() {
let s_input = cs.complex_selector();
cs.lookup_any("lookup", |cs| {
let s_inputq = cs.query_selector(s_input);
let mut expression = vec![];
let s_referenceq = cs.query_selector(s_reference);
let mut input_queries = vec![one.clone()];
for input in inputs {
input_queries.push(match input {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
});
}
let mut ref_queries = vec![one.clone()];
for reference in references {
ref_queries.push(match reference {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[0][0], Rotation(0))
}
_ => unreachable!(),
});
}
let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone());
let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone());
expression.extend(lhs.zip(rhs));
expression
});
self.shuffles
.input_selectors
.entry((x, y))
.or_insert(s_input);
}
}
self.shuffles.reference_selectors.push(s_reference);
// if we haven't previously initialized the input/output, do so now
if self.shuffles.references.is_empty() {
debug!("assigning shuffles reference");
self.shuffles.references = references.to_vec();
}
if self.shuffles.inputs.is_empty() {
debug!("assigning shuffles input");
self.shuffles.inputs = inputs.to_vec();
}
Ok(())
}
/// Configures and creates lookup selectors
#[allow(clippy::too_many_arguments)]
pub fn configure_range_check(
&mut self,
cs: &mut ConstraintSystem<F>,
input: &VarTensor,
index: &VarTensor,
range: Range,
logrows: usize,
) -> Result<(), Box<dyn Error>>
where
F: Field,
{
let mut selectors = BTreeMap::new();
if !input.is_advice() {
return Err("wrong input type for lookup input".into());
}
// we borrow mutably twice so we need to do this dance
let range_check = if !self.range_checks.contains_key(&range) {
let range_check = if let std::collections::btree_map::Entry::Vacant(e) =
self.range_checks.ranges.entry(range)
{
// as all tables have the same input we see if there's another table who's input we can reuse
let range_check = RangeCheck::<F>::configure(cs, range);
self.range_checks.insert(range, range_check.clone());
let range_check = RangeCheck::<F>::configure(cs, range, logrows);
e.insert(range_check.clone());
range_check
} else {
return Ok(());
@@ -523,39 +841,73 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
for x in 0..input.num_blocks() {
for y in 0..input.num_inner_cols() {
let single_col_sel = cs.complex_selector();
let len = range_check.selector_constructor.degree;
let multi_col_selector = cs.complex_selector();
cs.lookup("", |cs| {
let mut res = vec![];
let sel = cs.query_selector(single_col_sel);
for (col_idx, input_col) in range_check.inputs.iter().enumerate() {
cs.lookup("", |cs| {
let mut res = vec![];
let sel = cs.query_selector(multi_col_selector);
let input_query = match &input {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
};
let synthetic_sel = match len {
1 => Expression::Constant(F::from(1)),
_ => match index {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
},
};
let default_x = range_check.get_first_element();
let input_query = match &input {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
};
let not_sel = Expression::Constant(F::ONE) - sel.clone();
let default_x = range_check.get_first_element(col_idx);
res.extend([(
sel.clone() * input_query.clone()
+ not_sel.clone() * Expression::Constant(default_x),
range_check.input,
)]);
let col_expr = sel.clone()
* range_check
.selector_constructor
.get_expr_at_idx(col_idx, synthetic_sel);
res
});
selectors.insert((range, x, y), single_col_sel);
let multiplier = range_check
.selector_constructor
.get_selector_val_at_idx(col_idx);
let not_expr = Expression::Constant(multiplier) - col_expr.clone();
res.extend([(
col_expr.clone() * input_query.clone()
+ not_expr.clone() * Expression::Constant(default_x),
*input_col,
)]);
log::trace!("---------------- col {:?} ------------------", col_idx,);
log::trace!("expr: {:?}", col_expr,);
log::trace!("multiplier: {:?}", multiplier);
log::trace!("not_expr: {:?}", not_expr);
log::trace!("default x: {:?}", default_x);
res
});
}
self.range_checks
.selectors
.insert((range, x, y), multi_col_selector);
}
}
self.range_check_selectors.extend(selectors);
// if we haven't previously initialized the input/output, do so now
if let VarTensor::Empty = self.lookup_input {
debug!("assigning lookup input");
self.lookup_input = input.clone();
if let VarTensor::Empty = self.range_checks.input {
debug!("assigning range check input");
self.range_checks.input = input.clone();
}
if let VarTensor::Empty = self.range_checks.index {
debug!("assigning range check index");
self.range_checks.index = index.clone();
}
Ok(())
@@ -563,7 +915,7 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
/// layout_tables must be called before layout.
pub fn layout_tables(&mut self, layouter: &mut impl Layouter<F>) -> Result<(), Box<dyn Error>> {
for (i, table) in self.tables.values_mut().enumerate() {
for (i, table) in self.static_lookups.tables.values_mut().enumerate() {
if !table.is_assigned {
debug!(
"laying out table for {}",
@@ -584,7 +936,7 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
&mut self,
layouter: &mut impl Layouter<F>,
) -> Result<(), Box<dyn Error>> {
for range_check in self.range_checks.values_mut() {
for range_check in self.range_checks.ranges.values_mut() {
if !range_check.is_assigned {
debug!("laying out range check for {:?}", range_check.range);
range_check.layout(layouter)?;

View File

@@ -1,11 +1,11 @@
use super::*;
use crate::{
circuit::{self, layouts, utils, Tolerance},
circuit::{layouts, utils, Tolerance},
fieldutils::{felt_to_i128, i128_to_felt},
graph::multiplier_to_scale,
tensor::{self, Tensor, TensorError, TensorType, ValTensor},
};
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
// import run args from model
@@ -13,6 +13,15 @@ use serde::{Deserialize, Serialize};
/// An enum representing the operations that consist of both lookups and arithmetic operations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum HybridOp {
Recip {
input_scale: utils::F32,
output_scale: utils::F32,
use_range_check_for_int: bool,
},
Div {
denom: utils::F32,
use_range_check_for_int: bool,
},
ReduceMax {
axes: Vec<usize>,
},
@@ -59,14 +68,6 @@ pub enum HybridOp {
dim: usize,
num_classes: usize,
},
GatherElements {
dim: usize,
constant_idx: Option<Tensor<usize>>,
},
ScatterElements {
dim: usize,
constant_idx: Option<Tensor<usize>>,
},
}
impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
@@ -74,7 +75,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
fn requires_homogenous_input_scales(&self) -> Vec<usize> {
match self {
HybridOp::Greater | HybridOp::Less | HybridOp::Equals => vec![0, 1],
HybridOp::ScatterElements { .. } => vec![0, 2],
HybridOp::GreaterEqual | HybridOp::LessEqual => vec![0, 1],
_ => vec![],
}
}
@@ -87,142 +88,42 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
fn f(&self, inputs: &[Tensor<F>]) -> Result<ForwardResult<F>, TensorError> {
let x = inputs[0].clone().map(|x| felt_to_i128(x));
let (res, intermediate_lookups) = match &self {
HybridOp::ReduceMax { axes, .. } => {
let res = tensor::ops::max_axes(&x, axes)?;
let max_minus_one =
Tensor::from(vec![x.clone().into_iter().max().unwrap() - 1].into_iter());
let unit = Tensor::from(vec![1].into_iter());
// relu(x - max(x - 1)
let inter_1 = (x.clone() - max_minus_one)?;
// relu(1 - sum(relu(inter_1)))
let inter_2 = (unit
- tensor::ops::sum(&tensor::ops::nonlinearities::leakyrelu(&inter_1, 0.0))?)?;
(res.clone(), vec![inter_1, inter_2])
}
HybridOp::ReduceMin { axes, .. } => {
let res = tensor::ops::min_axes(&x, axes)?;
let min_plus_one =
Tensor::from(vec![x.clone().into_iter().min().unwrap() + 1].into_iter());
let unit = Tensor::from(vec![1].into_iter());
// relu(min(x + 1) - x)
let inter_1 = (min_plus_one - x.clone())?;
// relu(1 - sum(relu(inter_1)))
let inter_2 = (unit
- tensor::ops::sum(&tensor::ops::nonlinearities::leakyrelu(&inter_1, 0.0))?)?;
(res.clone(), vec![inter_1, inter_2])
}
HybridOp::ReduceArgMax { dim } => {
let res = tensor::ops::argmax_axes(&x, *dim)?;
let indices = Tensor::from(0..x.dims()[*dim] as i128);
let mut inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
let inter =
Op::f(&HybridOp::ReduceMax { axes: vec![*dim] }, inputs)?.intermediate_lookups;
inter_equals.extend(inter);
(res.clone(), inter_equals)
}
HybridOp::ReduceArgMin { dim } => {
let res = tensor::ops::argmin_axes(&x, *dim)?;
let indices = Tensor::from(0..x.dims()[*dim] as i128);
let mut inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
let inter =
Op::f(&HybridOp::ReduceMin { axes: vec![*dim] }, inputs)?.intermediate_lookups;
inter_equals.extend(inter);
(res.clone(), inter_equals)
let res = match &self {
HybridOp::ReduceMax { axes, .. } => tensor::ops::max_axes(&x, axes)?,
HybridOp::ReduceMin { axes, .. } => tensor::ops::min_axes(&x, axes)?,
HybridOp::Div { denom, .. } => {
crate::tensor::ops::nonlinearities::const_div(&x, denom.0 as f64)
}
HybridOp::Recip {
input_scale,
output_scale,
..
} => crate::tensor::ops::nonlinearities::recip(
&x,
input_scale.0 as f64,
output_scale.0 as f64,
),
HybridOp::ReduceArgMax { dim } => tensor::ops::argmax_axes(&x, *dim)?,
HybridOp::ReduceArgMin { dim } => tensor::ops::argmin_axes(&x, *dim)?,
HybridOp::Gather { dim, constant_idx } => {
if let Some(idx) = constant_idx {
log::debug!("idx: {}", idx.show());
let res = tensor::ops::gather(&x, idx, *dim)?;
(res.clone(), vec![])
tensor::ops::gather(&x, idx, *dim)?
} else {
let y = inputs[1].clone().map(|x| felt_to_i128(x));
let indices = Tensor::from(0..x.dims()[*dim] as i128);
let inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
let res = tensor::ops::gather(&x, &y.map(|x| x as usize), *dim)?;
(res.clone(), inter_equals)
tensor::ops::gather(&x, &y.map(|x| x as usize), *dim)?
}
}
HybridOp::OneHot { dim, num_classes } => {
let indices = Tensor::from(0..x.dims()[*dim] as i128);
let inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
let res = tensor::ops::one_hot(&x, *num_classes, *dim)?;
(res.clone(), inter_equals)
tensor::ops::one_hot(&x, *num_classes, *dim)?.clone()
}
HybridOp::TopK { dim, k, largest } => {
let res = tensor::ops::topk_axes(&x, *k, *dim, *largest)?;
let mut inter_equals = x
.clone()
.into_iter()
.flat_map(|elem| {
tensor::ops::equals(&res, &vec![elem].into_iter().into())
.unwrap()
.1
})
.collect::<Vec<_>>();
// sort in descending order and take pairwise differences
inter_equals.push(
x.into_iter()
.sorted()
.tuple_windows()
.map(|(a, b)| b - a)
.into(),
);
(res.clone(), inter_equals)
}
HybridOp::GatherElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
log::debug!("idx: {}", idx.show());
let res = tensor::ops::gather_elements(&x, idx, *dim)?;
(res.clone(), vec![])
} else {
let y = inputs[1].clone().map(|x| felt_to_i128(x));
let indices = Tensor::from(0..x.dims()[*dim] as i128);
let inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
let res = tensor::ops::gather_elements(&x, &y.map(|x| x as usize), *dim)?;
(res.clone(), inter_equals)
}
}
HybridOp::ScatterElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
log::debug!("idx: {}", idx.show());
let src = inputs[1].clone().map(|x| felt_to_i128(x));
let res = tensor::ops::scatter(&x, idx, &src, *dim)?;
(res.clone(), vec![])
} else {
let idx = inputs[1].clone().map(|x| felt_to_i128(x) as usize);
let src = inputs[2].clone().map(|x| felt_to_i128(x));
let indices = Tensor::from(0..x.dims()[*dim] as i128);
let inter_equals: Vec<Tensor<i128>> = vec![indices.clone(), -indices];
let res = tensor::ops::scatter(&x, &idx, &src, *dim)?;
(res.clone(), inter_equals)
}
}
HybridOp::TopK { dim, k, largest } => tensor::ops::topk_axes(&x, *k, *dim, *largest)?,
HybridOp::MaxPool2d {
padding,
stride,
pool_dims,
..
} => {
let max_minus_one =
Tensor::from(vec![x.clone().into_iter().max().unwrap() - 1].into_iter());
let unit = Tensor::from(vec![1].into_iter());
// relu(x - max(x - 1)
let inter_1 = (x.clone() - max_minus_one)?;
// relu(1 - sum(relu(inter_1)))
let inter_2 = (unit
- tensor::ops::sum(&tensor::ops::nonlinearities::leakyrelu(&inter_1, 0.0))?)?;
(
tensor::ops::max_pool2d(&x, padding, stride, pool_dims)?,
vec![inter_1, inter_2],
)
}
} => tensor::ops::max_pool2d(&x, padding, stride, pool_dims)?,
HybridOp::SumPool {
padding,
stride,
@@ -234,10 +135,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
}
HybridOp::RangeCheck(tol) => {
let y = inputs[1].clone().map(|x| felt_to_i128(x));
(
tensor::ops::nonlinearities::range_check_percent(&[x, y], 128, 128, tol.val),
vec![],
)
tensor::ops::nonlinearities::range_check_percent(&[x, y], 128, 128, tol.val)
}
HybridOp::Greater => {
let y = inputs[1].clone().map(|x| felt_to_i128(x));
@@ -264,14 +162,26 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
// convert back to felt
let output = res.map(|x| i128_to_felt(x));
Ok(ForwardResult {
output,
intermediate_lookups,
})
Ok(ForwardResult { output })
}
fn as_string(&self) -> String {
match self {
HybridOp::Recip {
input_scale,
output_scale,
use_range_check_for_int,
} => format!(
"RECIP (input_scale={}, output_scale={}, use_range_check_for_int={})",
input_scale, output_scale, use_range_check_for_int
),
HybridOp::Div {
denom,
use_range_check_for_int,
} => format!(
"DIV (denom={}, use_range_check_for_int={})",
denom, use_range_check_for_int
),
HybridOp::SumPool {
padding,
stride,
@@ -306,8 +216,6 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
HybridOp::TopK { k, dim, largest } => {
format!("TOPK (k={}, dim={}, largest={})", k, dim, largest)
}
HybridOp::GatherElements { dim, .. } => format!("GATHERELEMENTS (dim={})", dim),
HybridOp::ScatterElements { dim, .. } => format!("SCATTERELEMENTS (dim={})", dim),
HybridOp::OneHot { dim, num_classes } => {
format!("ONEHOT (dim={}, num_classes={})", dim, num_classes)
}
@@ -335,6 +243,55 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
*kernel_shape,
*normalized,
)?,
HybridOp::Recip {
input_scale,
output_scale,
use_range_check_for_int,
} => {
if input_scale.0.fract() == 0.0
&& output_scale.0.fract() == 0.0
&& *use_range_check_for_int
{
layouts::recip(
config,
region,
values[..].try_into()?,
i128_to_felt(input_scale.0 as i128),
i128_to_felt(output_scale.0 as i128),
)?
} else {
layouts::nonlinearity(
config,
region,
values.try_into()?,
&LookupOp::Recip {
input_scale: *input_scale,
output_scale: *output_scale,
},
)?
}
}
HybridOp::Div {
denom,
use_range_check_for_int,
..
} => {
if denom.0.fract() == 0.0 && *use_range_check_for_int {
layouts::loop_div(
config,
region,
values[..].try_into()?,
i128_to_felt(denom.0 as i128),
)?
} else {
layouts::nonlinearity(
config,
region,
values.try_into()?,
&LookupOp::Div { denom: *denom },
)?
}
}
HybridOp::Gather { dim, constant_idx } => {
if let Some(idx) = constant_idx {
tensor::ops::gather(values[0].get_inner_tensor()?, idx, *dim)?.into()
@@ -342,26 +299,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
layouts::gather(config, region, values[..].try_into()?, *dim)?
}
}
HybridOp::GatherElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into()
} else {
layouts::gather_elements(config, region, values[..].try_into()?, *dim)?
}
}
HybridOp::ScatterElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
tensor::ops::scatter(
values[0].get_inner_tensor()?,
idx,
values[1].get_inner_tensor()?,
*dim,
)?
.into()
} else {
layouts::scatter_elements(config, region, values[..].try_into()?, *dim)?
}
}
HybridOp::MaxPool2d {
padding,
stride,
@@ -422,86 +360,12 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
| HybridOp::OneHot { .. }
| HybridOp::ReduceArgMin { .. } => 0,
HybridOp::Softmax { .. } => 2 * in_scales[0],
HybridOp::Recip { output_scale, .. } => multiplier_to_scale(output_scale.0 as f64),
_ => in_scales[0],
};
Ok(scale)
}
fn required_lookups(&self) -> Vec<LookupOp> {
match self {
HybridOp::ReduceMax { .. }
| HybridOp::ReduceMin { .. }
| HybridOp::MaxPool2d { .. } => Op::<F>::required_lookups(&LookupOp::ReLU),
HybridOp::Softmax { scale, .. } => {
vec![
LookupOp::Exp { scale: *scale },
LookupOp::Recip {
scale: scale.0.powf(2.0).into(),
},
]
}
HybridOp::RangeCheck(tol) => {
let mut lookups = vec![];
if tol.val > 0.0 {
let scale_squared = tol.scale.0.powf(2.0);
lookups.extend([
LookupOp::Recip {
scale: scale_squared.into(),
},
LookupOp::GreaterThan {
a: circuit::utils::F32((tol.val * scale_squared) / 100.0),
},
]);
}
lookups
}
HybridOp::Greater { .. } | HybridOp::Less { .. } => {
vec![LookupOp::GreaterThan {
a: circuit::utils::F32(0.),
}]
}
HybridOp::GreaterEqual { .. } | HybridOp::LessEqual { .. } => {
vec![LookupOp::GreaterThanEqual {
a: circuit::utils::F32(0.),
}]
}
HybridOp::TopK { .. } => {
vec![
LookupOp::GreaterThan {
a: circuit::utils::F32(0.),
},
LookupOp::KroneckerDelta,
]
}
HybridOp::Gather {
constant_idx: None, ..
}
| HybridOp::OneHot { .. }
| HybridOp::GatherElements {
constant_idx: None, ..
}
| HybridOp::ScatterElements {
constant_idx: None, ..
}
| HybridOp::Equals { .. } => {
vec![LookupOp::KroneckerDelta]
}
HybridOp::ReduceArgMax { .. } | HybridOp::ReduceArgMin { .. } => {
vec![LookupOp::ReLU, LookupOp::KroneckerDelta]
}
HybridOp::SumPool {
kernel_shape,
normalized: true,
..
} => {
vec![LookupOp::Div {
denom: utils::F32((kernel_shape.0 * kernel_shape.1) as f32),
}]
}
_ => vec![],
}
}
fn clone_dyn(&self) -> Box<dyn Op<F>> {
Box::new(self.clone()) // Forward to the derive(Clone) impl
}
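A hedged aside on the Recip arm above: the authoritative rounding lives in tensor::ops::nonlinearities::recip, but the working assumption is that a value x quantized at input_scale maps to roughly round(input_scale * output_scale / x) quantized at output_scale, which is why the layout can take the lookup-free range-check path when both scales are integral and use_range_check_for_int is set. The sketch below (recip_fixed_point is a made-up name, not crate code) only illustrates that convention:

// Sketch of the assumed fixed-point reciprocal convention behind HybridOp::Recip.
// Check tensor::ops::nonlinearities::recip for the authoritative definition,
// including how zero inputs are handled.
fn recip_fixed_point(x: i128, input_scale: f64, output_scale: f64) -> i128 {
    if x == 0 {
        return 0; // assumption: placeholder for the crate's zero handling
    }
    ((input_scale * output_scale) / (x as f64)).round() as i128
}
// e.g. recip_fixed_point(256, 128.0, 128.0) == 64, i.e. 1/2.0 == 0.5 at output scale 128.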

File diff suppressed because it is too large


@@ -5,7 +5,7 @@ use std::error::Error;
use crate::{
circuit::{layouts, table::Range, utils},
fieldutils::{felt_to_i128, i128_to_felt},
graph::{multiplier_to_scale, scale_to_multiplier},
graph::multiplier_to_scale,
tensor::{self, Tensor, TensorError, TensorType},
};
@@ -17,42 +17,112 @@ use halo2curves::ff::PrimeField;
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize)]
pub enum LookupOp {
Abs,
Div { denom: utils::F32 },
Cast { scale: utils::F32 },
Div {
denom: utils::F32,
},
Cast {
scale: utils::F32,
},
ReLU,
Max { scale: utils::F32, a: utils::F32 },
Min { scale: utils::F32, a: utils::F32 },
Ceil { scale: utils::F32 },
Floor { scale: utils::F32 },
Round { scale: utils::F32 },
RoundHalfToEven { scale: utils::F32 },
Sqrt { scale: utils::F32 },
Rsqrt { scale: utils::F32 },
Recip { scale: utils::F32 },
LeakyReLU { slope: utils::F32 },
Sigmoid { scale: utils::F32 },
Ln { scale: utils::F32 },
Exp { scale: utils::F32 },
Cos { scale: utils::F32 },
ACos { scale: utils::F32 },
Cosh { scale: utils::F32 },
ACosh { scale: utils::F32 },
Sin { scale: utils::F32 },
ASin { scale: utils::F32 },
Sinh { scale: utils::F32 },
ASinh { scale: utils::F32 },
Tan { scale: utils::F32 },
ATan { scale: utils::F32 },
Tanh { scale: utils::F32 },
ATanh { scale: utils::F32 },
Erf { scale: utils::F32 },
GreaterThan { a: utils::F32 },
LessThan { a: utils::F32 },
GreaterThanEqual { a: utils::F32 },
LessThanEqual { a: utils::F32 },
Max {
scale: utils::F32,
a: utils::F32,
},
Min {
scale: utils::F32,
a: utils::F32,
},
Ceil {
scale: utils::F32,
},
Floor {
scale: utils::F32,
},
Round {
scale: utils::F32,
},
RoundHalfToEven {
scale: utils::F32,
},
Sqrt {
scale: utils::F32,
},
Rsqrt {
scale: utils::F32,
},
Recip {
input_scale: utils::F32,
output_scale: utils::F32,
},
LeakyReLU {
slope: utils::F32,
},
Sigmoid {
scale: utils::F32,
},
Ln {
scale: utils::F32,
},
Exp {
scale: utils::F32,
},
Cos {
scale: utils::F32,
},
ACos {
scale: utils::F32,
},
Cosh {
scale: utils::F32,
},
ACosh {
scale: utils::F32,
},
Sin {
scale: utils::F32,
},
ASin {
scale: utils::F32,
},
Sinh {
scale: utils::F32,
},
ASinh {
scale: utils::F32,
},
Tan {
scale: utils::F32,
},
ATan {
scale: utils::F32,
},
Tanh {
scale: utils::F32,
},
ATanh {
scale: utils::F32,
},
Erf {
scale: utils::F32,
},
GreaterThan {
a: utils::F32,
},
LessThan {
a: utils::F32,
},
GreaterThanEqual {
a: utils::F32,
},
LessThanEqual {
a: utils::F32,
},
Sign,
KroneckerDelta,
Pow { scale: utils::F32, a: utils::F32 },
Pow {
scale: utils::F32,
a: utils::F32,
},
}
impl LookupOp {
@@ -120,7 +190,14 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
&x,
f32::from(*scale).into(),
)),
LookupOp::Recip { scale } => Ok(tensor::ops::nonlinearities::recip(&x, scale.into())),
LookupOp::Recip {
input_scale,
output_scale,
} => Ok(tensor::ops::nonlinearities::recip(
&x,
input_scale.into(),
output_scale.into(),
)),
LookupOp::ReLU => Ok(tensor::ops::nonlinearities::leakyrelu(&x, 0_f64)),
LookupOp::LeakyReLU { slope: a } => {
@@ -150,10 +227,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
let output = res.map(|x| i128_to_felt(x));
Ok(ForwardResult {
output,
intermediate_lookups: vec![],
})
Ok(ForwardResult { output })
}
/// Returns the name of the operation
@@ -169,11 +243,17 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
LookupOp::Max { scale, a } => format!("MAX(scale={}, a={})", scale, a),
LookupOp::Min { scale, a } => format!("MIN(scale={}, a={})", scale, a),
LookupOp::Sign => "SIGN".into(),
LookupOp::GreaterThan { .. } => "GREATER_THAN".into(),
LookupOp::GreaterThanEqual { .. } => "GREATER_THAN_EQUAL".into(),
LookupOp::LessThan { .. } => "LESS_THAN".into(),
LookupOp::LessThanEqual { .. } => "LESS_THAN_EQUAL".into(),
LookupOp::Recip { scale, .. } => format!("RECIP(scale={})", scale),
LookupOp::GreaterThan { a } => format!("GREATER_THAN(a={})", a),
LookupOp::GreaterThanEqual { a } => format!("GREATER_THAN_EQUAL(a={})", a),
LookupOp::LessThan { a } => format!("LESS_THAN(a={})", a),
LookupOp::LessThanEqual { a } => format!("LESS_THAN_EQUAL(a={})", a),
LookupOp::Recip {
input_scale,
output_scale,
} => format!(
"RECIP(input_scale={}, output_scale={})",
input_scale, output_scale
),
LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
LookupOp::Cast { scale } => format!("CAST(scale={})", scale),
LookupOp::Ln { scale } => format!("LN(scale={})", scale),
@@ -220,12 +300,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
let in_scale = inputs_scale[0];
in_scale + multiplier_to_scale(1. / scale.0 as f64)
}
LookupOp::Recip { scale } => {
let mut out_scale = inputs_scale[0];
out_scale +=
multiplier_to_scale(scale.0 as f64 / scale_to_multiplier(out_scale).powf(2.0));
out_scale
}
LookupOp::Recip { output_scale, .. } => multiplier_to_scale(output_scale.into()),
LookupOp::Sign
| LookupOp::GreaterThan { .. }
| LookupOp::LessThan { .. }
@@ -237,10 +312,6 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
Ok(scale)
}
fn required_lookups(&self) -> Vec<LookupOp> {
vec![self.clone()]
}
fn clone_dyn(&self) -> Box<dyn Op<F>> {
Box::new(self.clone()) // Forward to the derive(Clone) impl
}
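For the out_scale change above, a quick worked example under the assumption that multiplier_to_scale is essentially a base-2 log of the multiplier (the multiplier_to_scale name is the crate's, the helper below is not):

// Hedged sketch: recip_out_scale is illustrative only. With output_scale = 128.0 the
// Recip lookup output is declared at fixed-point scale 7, independent of the input
// scale, unlike the old formula which derived it from the input scale and a single
// `scale` parameter.
fn recip_out_scale(output_scale: f64) -> i32 {
    output_scale.log2().round() as i32 // e.g. 128.0 -> 7
}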


@@ -10,8 +10,6 @@ use halo2curves::ff::PrimeField;
use self::{lookup::LookupOp, region::RegionCtx};
use super::table::Range;
///
pub mod base;
///
@@ -31,7 +29,6 @@ pub mod region;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd> {
pub(crate) output: Tensor<F>,
pub(crate) intermediate_lookups: Vec<Tensor<i128>>,
}
/// A trait representing operations that can be represented as constraints in a circuit.
@@ -57,16 +54,6 @@ pub trait Op<F: PrimeField + TensorType + PartialOrd>: std::fmt::Debug + Send +
vec![]
}
/// Returns the lookups required by the operation.
fn required_lookups(&self) -> Vec<LookupOp> {
vec![]
}
/// Returns the range checks required by the operation.
fn required_range_checks(&self) -> Vec<Range> {
vec![]
}
/// Returns true if the operation is an input.
fn is_input(&self) -> bool {
false
@@ -190,7 +177,6 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for Input {
fn f(&self, x: &[Tensor<F>]) -> Result<ForwardResult<F>, TensorError> {
Ok(ForwardResult {
output: x[0].clone(),
intermediate_lookups: vec![],
})
}
@@ -213,6 +199,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for Input {
config,
region,
values[..].try_into()?,
true,
)?))
}
_ => Ok(Some(super::layouts::identity(
@@ -315,10 +302,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
fn f(&self, _: &[Tensor<F>]) -> Result<ForwardResult<F>, TensorError> {
let output = self.quantized_values.clone();
Ok(ForwardResult {
output,
intermediate_lookups: vec![],
})
Ok(ForwardResult { output })
}
fn as_string(&self) -> String {


@@ -1,5 +1,6 @@
use crate::{
circuit::layouts,
fieldutils::felt_to_i128,
tensor::{self, Tensor, TensorError},
};
@@ -9,6 +10,14 @@ use super::{base::BaseOp, *};
/// An enum representing the operations that can be expressed as arithmetic (non lookup) operations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum PolyOp {
GatherElements {
dim: usize,
constant_idx: Option<Tensor<usize>>,
},
ScatterElements {
dim: usize,
constant_idx: Option<Tensor<usize>>,
},
MultiBroadcastTo {
shape: Vec<usize>,
},
@@ -33,7 +42,9 @@ pub enum PolyOp {
Sub,
Neg,
Mult,
Identity,
Identity {
out_scale: Option<crate::Scale>,
},
Reshape(Vec<usize>),
MoveAxis {
source: usize,
@@ -49,8 +60,6 @@ pub enum PolyOp {
len_prod: usize,
},
Pow(u32),
Pack(u32, u32),
GlobalSumPool,
Concat {
axis: usize,
},
@@ -79,13 +88,17 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
fn as_string(&self) -> String {
match &self {
PolyOp::GatherElements { dim, .. } => format!("GATHERELEMENTS (dim={})", dim),
PolyOp::ScatterElements { dim, .. } => format!("SCATTERELEMENTS (dim={})", dim),
PolyOp::MultiBroadcastTo { shape } => format!("MULTIBROADCASTTO (shape={:?})", shape),
PolyOp::MoveAxis { .. } => "MOVEAXIS".into(),
PolyOp::Downsample { .. } => "DOWNSAMPLE".into(),
PolyOp::Resize { .. } => "RESIZE".into(),
PolyOp::Iff => "IFF".into(),
PolyOp::Einsum { equation, .. } => format!("EINSUM {}", equation),
PolyOp::Identity => "IDENTITY".into(),
PolyOp::Identity { out_scale } => {
format!("IDENTITY (out_scale={:?})", out_scale)
}
PolyOp::Reshape(shape) => format!("RESHAPE (shape={:?})", shape),
PolyOp::Flatten(_) => "FLATTEN".into(),
PolyOp::Pad(_) => "PAD".into(),
@@ -95,8 +108,6 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
PolyOp::Sum { .. } => "SUM".into(),
PolyOp::Prod { .. } => "PROD".into(),
PolyOp::Pow(_) => "POW".into(),
PolyOp::Pack(_, _) => "PACK".into(),
PolyOp::GlobalSumPool => "GLOBALSUMPOOL".into(),
PolyOp::Conv { .. } => "CONV".into(),
PolyOp::DeConv { .. } => "DECONV".into(),
PolyOp::Concat { axis } => format!("CONCAT (axis={})", axis),
@@ -135,7 +146,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
PolyOp::Resize { scale_factor } => tensor::ops::resize(&inputs[0], scale_factor),
PolyOp::Iff => tensor::ops::iff(&inputs[0], &inputs[1], &inputs[2]),
PolyOp::Einsum { equation } => tensor::ops::einsum(equation, &inputs),
PolyOp::Identity => Ok(inputs[0].clone()),
PolyOp::Identity { .. } => Ok(inputs[0].clone()),
PolyOp::Reshape(new_dims) => {
let mut t = inputs[0].clone();
t.reshape(new_dims)?;
@@ -166,13 +177,6 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
output_padding,
stride,
} => tensor::ops::deconv(&inputs, *padding, *output_padding, *stride),
PolyOp::Pack(base, scale) => {
if 1 != inputs.len() {
return Err(TensorError::DimMismatch("pack inputs".to_string()));
}
tensor::ops::pack(&inputs[0], F::from(*base as u64), *scale)
}
PolyOp::Pow(u) => {
if 1 != inputs.len() {
return Err(TensorError::DimMismatch("pow inputs".to_string()));
@@ -191,7 +195,6 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
}
tensor::ops::prod_axes(&inputs[0], axes)
}
PolyOp::GlobalSumPool => unreachable!(),
PolyOp::Concat { axis } => {
tensor::ops::concat(&inputs.iter().collect::<Vec<_>>(), *axis)
}
@@ -199,14 +202,36 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
if 1 != inputs.len() {
return Err(TensorError::DimMismatch("slice inputs".to_string()));
}
Ok(tensor::ops::slice(&inputs[0], axis, start, end)?)
tensor::ops::slice(&inputs[0], axis, start, end)
}
PolyOp::GatherElements { dim, constant_idx } => {
let x = inputs[0].clone();
let y = if let Some(idx) = constant_idx {
idx.clone()
} else {
inputs[1].clone().map(|x| felt_to_i128(x) as usize)
};
tensor::ops::gather_elements(&x, &y, *dim)
}
PolyOp::ScatterElements { dim, constant_idx } => {
let x = inputs[0].clone();
let idx = if let Some(idx) = constant_idx {
idx.clone()
} else {
inputs[1].clone().map(|x| felt_to_i128(x) as usize)
};
let src = if constant_idx.is_some() {
inputs[1].clone()
} else {
inputs[2].clone()
};
tensor::ops::scatter(&x, &idx, &src, *dim)
}
}?;
Ok(ForwardResult {
output: res,
intermediate_lookups: vec![],
})
Ok(ForwardResult { output: res })
}
fn layout(
@@ -247,6 +272,26 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
PolyOp::Conv { padding, stride } => {
layouts::conv(config, region, values[..].try_into()?, *padding, *stride)?
}
PolyOp::GatherElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into()
} else {
layouts::gather_elements(config, region, values[..].try_into()?, *dim)?
}
}
PolyOp::ScatterElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
tensor::ops::scatter(
values[0].get_inner_tensor()?,
idx,
values[1].get_inner_tensor()?,
*dim,
)?
.into()
} else {
layouts::scatter_elements(config, region, values[..].try_into()?, *dim)?
}
}
PolyOp::DeConv {
padding,
output_padding,
@@ -264,7 +309,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
PolyOp::Mult => {
layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Mult)?
}
PolyOp::Identity => layouts::identity(config, region, values[..].try_into()?)?,
PolyOp::Identity { .. } => layouts::identity(config, region, values[..].try_into()?)?,
PolyOp::Reshape(d) | PolyOp::Flatten(d) => layouts::reshape(values[..].try_into()?, d)?,
PolyOp::Pad(p) => {
if values.len() != 1 {
@@ -277,10 +322,6 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
input
}
PolyOp::Pow(exp) => layouts::pow(config, region, values[..].try_into()?, *exp)?,
PolyOp::Pack(base, scale) => {
layouts::pack(config, region, values[..].try_into()?, *base, *scale)?
}
PolyOp::GlobalSumPool => unreachable!(),
PolyOp::Concat { axis } => layouts::concat(values[..].try_into()?, axis)?,
PolyOp::Slice { axis, start, end } => {
layouts::slice(config, region, values[..].try_into()?, axis, start, end)?
@@ -322,9 +363,8 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
output_scale
}
PolyOp::Add => {
let mut scale_a = 0;
let scale_b = in_scales[0];
scale_a += in_scales[1];
let scale_a = in_scales[0];
let scale_b = in_scales[1];
assert_eq!(scale_a, scale_b);
scale_a
}
@@ -336,19 +376,21 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
}
PolyOp::Reshape(_) | PolyOp::Flatten(_) => in_scales[0],
PolyOp::Pow(pow) => in_scales[0] * (*pow as crate::Scale),
PolyOp::Identity { out_scale } => out_scale.unwrap_or(in_scales[0]),
_ => in_scales[0],
};
Ok(scale)
}
fn requires_homogenous_input_scales(&self) -> Vec<usize> {
if matches!(
self,
PolyOp::Add { .. } | PolyOp::Sub | PolyOp::Concat { .. }
) {
if matches!(self, PolyOp::Add { .. } | PolyOp::Sub) {
vec![0, 1]
} else if matches!(self, PolyOp::Iff) {
vec![1, 2]
} else if matches!(self, PolyOp::Concat { .. }) {
(0..100).collect()
} else if matches!(self, PolyOp::ScatterElements { .. }) {
vec![0, 2]
} else {
vec![]
}


@@ -1,4 +1,7 @@
use crate::tensor::{Tensor, TensorError, TensorType, ValTensor, ValType, VarTensor};
use crate::{
circuit::table::Range,
tensor::{Tensor, TensorError, TensorType, ValTensor, ValType, VarTensor},
};
use halo2_proofs::{
circuit::Region,
plonk::{Error, Selector},
@@ -7,9 +10,76 @@ use halo2curves::ff::PrimeField;
use std::{
cell::RefCell,
collections::HashSet,
sync::atomic::{AtomicUsize, Ordering},
sync::{
atomic::{AtomicUsize, Ordering},
Arc, Mutex,
},
};
use portable_atomic::AtomicI128 as AtomicInt;
use super::lookup::LookupOp;
/// Dynamic lookup index
#[derive(Clone, Debug, Default)]
pub struct DynamicLookupIndex {
index: usize,
col_coord: usize,
}
impl DynamicLookupIndex {
/// Create a new dynamic lookup index
pub fn new(index: usize, col_coord: usize) -> DynamicLookupIndex {
DynamicLookupIndex { index, col_coord }
}
/// Get the lookup index
pub fn index(&self) -> usize {
self.index
}
/// Get the column coord
pub fn col_coord(&self) -> usize {
self.col_coord
}
/// update with another dynamic lookup index
pub fn update(&mut self, other: &DynamicLookupIndex) {
self.index += other.index;
self.col_coord += other.col_coord;
}
}
/// Shuffle index
#[derive(Clone, Debug, Default)]
pub struct ShuffleIndex {
index: usize,
col_coord: usize,
}
impl ShuffleIndex {
/// Create a new shuffle index
pub fn new(index: usize, col_coord: usize) -> ShuffleIndex {
ShuffleIndex { index, col_coord }
}
/// Get the shuffle index
pub fn index(&self) -> usize {
self.index
}
/// Get the column coord
pub fn col_coord(&self) -> usize {
self.col_coord
}
/// update with another shuffle index
pub fn update(&mut self, other: &ShuffleIndex) {
self.index += other.index;
self.col_coord += other.col_coord;
}
}
/// Region error
#[derive(Debug, thiserror::Error)]
pub enum RegionError {
@@ -56,6 +126,14 @@ pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd> {
linear_coord: usize,
num_inner_cols: usize,
total_constants: usize,
dynamic_lookup_index: DynamicLookupIndex,
shuffle_index: ShuffleIndex,
used_lookups: HashSet<LookupOp>,
used_range_checks: HashSet<Range>,
max_lookup_inputs: i128,
min_lookup_inputs: i128,
max_range_size: i128,
throw_range_check_error: bool,
}
impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
@@ -64,6 +142,31 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
self.total_constants += n;
}
/// Increment the dynamic lookup index by `n`
pub fn increment_dynamic_lookup_index(&mut self, n: usize) {
self.dynamic_lookup_index.index += n;
}
/// Increment the dynamic lookup column coordinate by `n`
pub fn increment_dynamic_lookup_col_coord(&mut self, n: usize) {
self.dynamic_lookup_index.col_coord += n;
}
/// Increment the shuffle index by `n`
pub fn increment_shuffle_index(&mut self, n: usize) {
self.shuffle_index.index += n;
}
/// Increment the shuffle column coordinate by `n`
pub fn increment_shuffle_col_coord(&mut self, n: usize) {
self.shuffle_index.col_coord += n;
}
/// Whether dummy layout passes should error on out-of-range range checks
pub fn throw_range_check_error(&self) -> bool {
self.throw_range_check_error
}
/// Create a new region context
pub fn new(region: Region<'a, F>, row: usize, num_inner_cols: usize) -> RegionCtx<'a, F> {
let region = Some(RefCell::new(region));
@@ -75,6 +178,14 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
row,
linear_coord,
total_constants: 0,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
throw_range_check_error: false,
}
}
/// Create a new region context from a wrapped region
@@ -82,6 +193,8 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
region: Option<RefCell<Region<'a, F>>>,
row: usize,
num_inner_cols: usize,
dynamic_lookup_index: DynamicLookupIndex,
shuffle_index: ShuffleIndex,
) -> RegionCtx<'a, F> {
let linear_coord = row * num_inner_cols;
RegionCtx {
@@ -90,11 +203,23 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
linear_coord,
row,
total_constants: 0,
dynamic_lookup_index,
shuffle_index,
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
throw_range_check_error: false,
}
}
/// Create a new region context
pub fn new_dummy(row: usize, num_inner_cols: usize) -> RegionCtx<'a, F> {
pub fn new_dummy(
row: usize,
num_inner_cols: usize,
throw_range_check_error: bool,
) -> RegionCtx<'a, F> {
let region = None;
let linear_coord = row * num_inner_cols;
@@ -104,6 +229,14 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
linear_coord,
row,
total_constants: 0,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
throw_range_check_error,
}
}
@@ -111,8 +244,9 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
pub fn new_dummy_with_constants(
row: usize,
linear_coord: usize,
constants: usize,
total_constants: usize,
num_inner_cols: usize,
throw_range_check_error: bool,
) -> RegionCtx<'a, F> {
let region = None;
RegionCtx {
@@ -120,7 +254,15 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
num_inner_cols,
linear_coord,
row,
total_constants: constants,
total_constants,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
throw_range_check_error,
}
}
@@ -160,6 +302,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
/// Create a new region context per loop iteration
/// hacky but it works
pub fn dummy_loop<T: TensorType + Send + Sync>(
&mut self,
output: &mut Tensor<T>,
@@ -170,6 +313,12 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
let row = AtomicUsize::new(self.row());
let linear_coord = AtomicUsize::new(self.linear_coord());
let constants = AtomicUsize::new(self.total_constants());
let max_lookup_inputs = AtomicInt::new(self.max_lookup_inputs());
let min_lookup_inputs = AtomicInt::new(self.min_lookup_inputs());
let lookups = Arc::new(Mutex::new(self.used_lookups.clone()));
let range_checks = Arc::new(Mutex::new(self.used_range_checks.clone()));
let dynamic_lookup_index = Arc::new(Mutex::new(self.dynamic_lookup_index.clone()));
let shuffle_index = Arc::new(Mutex::new(self.shuffle_index.clone()));
*output = output
.par_enum_map(|idx, _| {
@@ -177,12 +326,15 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
let starting_offset = row.load(Ordering::SeqCst);
let starting_linear_coord = linear_coord.load(Ordering::SeqCst);
let starting_constants = constants.load(Ordering::SeqCst);
// get inner value of the locked lookups
// we need to make sure that the region is not shared between threads
let mut local_reg = Self::new_dummy_with_constants(
starting_offset,
starting_linear_coord,
starting_constants,
self.num_inner_cols,
self.throw_range_check_error,
);
let res = inner_loop_function(idx, &mut local_reg);
// we update the offset and constants
@@ -195,15 +347,100 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
local_reg.total_constants() - starting_constants,
Ordering::SeqCst,
);
max_lookup_inputs.fetch_max(local_reg.max_lookup_inputs(), Ordering::SeqCst);
min_lookup_inputs.fetch_min(local_reg.min_lookup_inputs(), Ordering::SeqCst);
// update the lookups
let mut lookups = lookups.lock().unwrap();
lookups.extend(local_reg.used_lookups());
// update the range checks
let mut range_checks = range_checks.lock().unwrap();
range_checks.extend(local_reg.used_range_checks());
// update the dynamic lookup index
let mut dynamic_lookup_index = dynamic_lookup_index.lock().unwrap();
dynamic_lookup_index.update(&local_reg.dynamic_lookup_index);
// update the shuffle index
let mut shuffle_index = shuffle_index.lock().unwrap();
shuffle_index.update(&local_reg.shuffle_index);
res
})
.map_err(|e| {
log::error!("dummy_loop: {:?}", e);
Error::Synthesis
})?;
.map_err(|e| RegionError::from(format!("dummy_loop: {:?}", e)))?;
self.total_constants = constants.into_inner();
self.linear_coord = linear_coord.into_inner();
#[allow(trivial_numeric_casts)]
{
self.max_lookup_inputs = max_lookup_inputs.into_inner();
self.min_lookup_inputs = min_lookup_inputs.into_inner();
}
self.row = row.into_inner();
self.used_lookups = Arc::try_unwrap(lookups)
.map_err(|e| RegionError::from(format!("dummy_loop: failed to get lookups: {:?}", e)))?
.into_inner()
.map_err(|e| {
RegionError::from(format!("dummy_loop: failed to get lookups: {:?}", e))
})?;
self.used_range_checks = Arc::try_unwrap(range_checks)
.map_err(|e| {
RegionError::from(format!("dummy_loop: failed to get range checks: {:?}", e))
})?
.into_inner()
.map_err(|e| {
RegionError::from(format!("dummy_loop: failed to get range checks: {:?}", e))
})?;
self.dynamic_lookup_index = Arc::try_unwrap(dynamic_lookup_index)
.map_err(|e| {
RegionError::from(format!(
"dummy_loop: failed to get dynamic lookup index: {:?}",
e
))
})?
.into_inner()
.map_err(|e| {
RegionError::from(format!(
"dummy_loop: failed to get dynamic lookup index: {:?}",
e
))
})?;
self.shuffle_index = Arc::try_unwrap(shuffle_index)
.map_err(|e| {
RegionError::from(format!("dummy_loop: failed to get shuffle index: {:?}", e))
})?
.into_inner()
.map_err(|e| {
RegionError::from(format!("dummy_loop: failed to get shuffle index: {:?}", e))
})?;
Ok(())
}
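The bookkeeping in dummy_loop reduces to one pattern: each worker runs against a thread-local dummy region, and the parent then folds the workers' used lookups, range checks, extrema and coordinate offsets back in. A stripped-down sketch of that merge pattern in plain Rust (a sequential stand-in for par_enum_map, with a made-up function name):

use std::collections::HashSet;
use std::sync::atomic::{AtomicI64, Ordering};
use std::sync::{Arc, Mutex};

// Illustration only: each iteration records what it used, and the parent absorbs the
// union afterwards. In the real code every worker holds a clone of the Arc.
fn merge_worker_state(items: Vec<i64>) -> (HashSet<i64>, i64) {
    let used = Arc::new(Mutex::new(HashSet::new()));
    let max_seen = AtomicI64::new(0);
    items.iter().for_each(|v| {
        used.lock().unwrap().insert(*v);
        max_seen.fetch_max(*v, Ordering::SeqCst);
    });
    let used = Arc::try_unwrap(used).unwrap().into_inner().unwrap();
    (used, max_seen.into_inner())
}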
/// Update the max and min from inputs
pub fn update_max_min_lookup_inputs(
&mut self,
inputs: &[ValTensor<F>],
) -> Result<(), Box<dyn std::error::Error>> {
let (mut min, mut max) = (0, 0);
for i in inputs {
max = max.max(i.get_int_evals()?.into_iter().max().unwrap_or_default());
min = min.min(i.get_int_evals()?.into_iter().min().unwrap_or_default());
}
self.max_lookup_inputs = self.max_lookup_inputs.max(max);
self.min_lookup_inputs = self.min_lookup_inputs.min(min);
Ok(())
}
/// Update the max range size from a range check
pub fn update_max_min_lookup_range(
&mut self,
range: Range,
) -> Result<(), Box<dyn std::error::Error>> {
if range.0 > range.1 {
return Err("update_max_min_lookup_range: invalid range".into());
}
let range_size = (range.1 - range.0).abs();
self.max_range_size = self.max_range_size.max(range_size);
Ok(())
}
@@ -212,15 +449,20 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
self.region.is_none()
}
/// duplicate_dummy
pub fn duplicate_dummy(&self) -> Self {
Self {
region: None,
linear_coord: self.linear_coord,
num_inner_cols: self.num_inner_cols,
row: self.row,
total_constants: self.total_constants,
}
/// add used lookup
pub fn add_used_lookup(
&mut self,
lookup: LookupOp,
inputs: &[ValTensor<F>],
) -> Result<(), Box<dyn std::error::Error>> {
self.used_lookups.insert(lookup);
self.update_max_min_lookup_inputs(inputs)
}
/// add used range check
pub fn add_used_range_check(&mut self, range: Range) -> Result<(), Box<dyn std::error::Error>> {
self.used_range_checks.insert(range);
self.update_max_min_lookup_range(range)
}
/// Get the offset
@@ -238,6 +480,51 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
self.total_constants
}
/// Get the dynamic lookup index
pub fn dynamic_lookup_index(&self) -> usize {
self.dynamic_lookup_index.index
}
/// Get the dynamic lookup column coordinate
pub fn dynamic_lookup_col_coord(&self) -> usize {
self.dynamic_lookup_index.col_coord
}
/// Get the shuffle index
pub fn shuffle_index(&self) -> usize {
self.shuffle_index.index
}
/// Get the shuffle column coordinate
pub fn shuffle_col_coord(&self) -> usize {
self.shuffle_index.col_coord
}
/// get used lookups
pub fn used_lookups(&self) -> HashSet<LookupOp> {
self.used_lookups.clone()
}
/// get used range checks
pub fn used_range_checks(&self) -> HashSet<Range> {
self.used_range_checks.clone()
}
/// max lookup inputs
pub fn max_lookup_inputs(&self) -> i128 {
self.max_lookup_inputs
}
/// min lookup inputs
pub fn min_lookup_inputs(&self) -> i128 {
self.min_lookup_inputs
}
/// max range check
pub fn max_range_size(&self) -> i128 {
self.max_range_size
}
/// Assign a constant value
pub fn assign_constant(&mut self, var: &VarTensor, value: F) -> Result<ValType<F>, Error> {
self.total_constants += 1;
@@ -262,6 +549,38 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
}
}
/// Combined column coordinate across dynamic lookups and shuffles
pub fn combined_dynamic_shuffle_coord(&self) -> usize {
self.dynamic_lookup_col_coord() + self.shuffle_col_coord()
}
/// Assign a valtensor to a vartensor
pub fn assign_dynamic_lookup(
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, Error> {
self.total_constants += values.num_constants();
if let Some(region) = &self.region {
var.assign(
&mut region.borrow_mut(),
self.combined_dynamic_shuffle_coord(),
values,
)
} else {
Ok(values.clone())
}
}
/// Assign a valtensor to a vartensor
pub fn assign_shuffle(
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, Error> {
self.assign_dynamic_lookup(var, values)
}
/// Assign a valtensor to a vartensor
pub fn assign_with_omissions(
&mut self,


@@ -130,14 +130,12 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
2usize.pow(bits as u32) - reserved_blinding_rows
}
}
///
pub fn num_cols_required(range: Range, col_size: usize) -> usize {
// double it to be safe
let range_len = range.1 - range.0;
// number of cols needed to store the range
(range_len / (col_size as i128)) as usize + 1
}
///
pub fn num_cols_required(range_len: i128, col_size: usize) -> usize {
// number of cols needed to store the range
(range_len / (col_size as i128)) as usize + 1
}
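To make the column-count rule above concrete, a standalone re-implementation for illustration (num_cols_required_sketch is not the crate's function, and the reserved-row figure of 20 is picked arbitrarily):

// A range of length range_len is split across table columns of col_size rows each:
// integer division, plus one column for the remainder.
fn num_cols_required_sketch(range_len: i128, col_size: usize) -> usize {
    (range_len / (col_size as i128)) as usize + 1
}
// e.g. the range (-32768, 32768) has length 65536: with col_size = 2^17 - 20 it fits
// in a single column, while col_size = 2^15 - 20 needs 3 columns.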
impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
@@ -152,7 +150,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
let factors = cs.blinding_factors() + RESERVED_BLINDING_ROWS_PAD;
let col_size = Self::cal_col_size(logrows, factors);
// number of cols needed to store the range
let num_cols = Self::num_cols_required(range, col_size);
let num_cols = num_cols_required((range.1 - range.0).abs(), col_size);
log::debug!("table range: {:?}", range);
@@ -265,7 +263,9 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
#[derive(Clone, Debug)]
pub struct RangeCheck<F: PrimeField> {
/// Input to table.
pub input: TableColumn,
pub inputs: Vec<TableColumn>,
/// col size
pub col_size: usize,
/// selector constructor
pub selector_constructor: SelectorConstructor<F>,
/// Flags if table has been previously assigned to.
@@ -277,8 +277,10 @@ pub struct RangeCheck<F: PrimeField> {
impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
/// get first_element of column
pub fn get_first_element(&self) -> F {
i128_to_felt(self.range.0)
pub fn get_first_element(&self, chunk: usize) -> F {
let chunk = chunk as i128;
// we index from 1 to prevent soundness issues
i128_to_felt(chunk * (self.col_size as i128) + self.range.0)
}
///
@@ -290,24 +292,58 @@ impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
2usize.pow(bits as u32) - reserved_blinding_rows
}
/// get column index given input
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
let chunk =
(crate::fieldutils::felt_to_i128(input) - self.range.0).abs() / (self.col_size as i128);
i128_to_felt(chunk)
}
}
impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
/// Configures the table.
pub fn configure(cs: &mut ConstraintSystem<F>, range: Range) -> RangeCheck<F> {
pub fn configure(cs: &mut ConstraintSystem<F>, range: Range, logrows: usize) -> RangeCheck<F> {
log::debug!("range check range: {:?}", range);
let inputs = cs.lookup_table_column();
let factors = cs.blinding_factors() + RESERVED_BLINDING_ROWS_PAD;
let col_size = Self::cal_col_size(logrows, factors);
// number of cols needed to store the range
let num_cols = num_cols_required((range.1 - range.0).abs(), col_size);
let inputs = {
let mut cols = vec![];
for _ in 0..num_cols {
cols.push(cs.lookup_table_column());
}
cols
};
let num_cols = inputs.len();
if num_cols > 1 {
warn!("Using {} columns for range-check.", num_cols);
}
RangeCheck {
input: inputs,
inputs,
col_size,
is_assigned: false,
selector_constructor: SelectorConstructor::new(2),
selector_constructor: SelectorConstructor::new(num_cols),
range,
_marker: PhantomData,
}
}
/// Take a linear coordinate and output the (column, row) position in the storage block.
pub fn cartesian_coord(&self, linear_coord: usize) -> (usize, usize) {
let x = linear_coord / self.col_size;
let y = linear_coord % self.col_size;
(x, y)
}
/// Assigns values to the constraints generated when calling `configure`.
pub fn layout(&mut self, layouter: &mut impl Layouter<F>) -> Result<(), Box<dyn Error>> {
if self.is_assigned {
@@ -318,28 +354,43 @@ impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
let largest = self.range.1;
let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i128_to_felt(x));
let chunked_inputs = inputs.chunks(self.col_size);
self.is_assigned = true;
layouter.assign_table(
|| "range check table",
|mut table| {
let _ = inputs
.iter()
.enumerate()
.map(|(row_offset, input)| {
table.assign_cell(
|| format!("rc_i_col row {}", row_offset),
self.input,
row_offset,
|| Value::known(*input),
)?;
let col_multipliers: Vec<F> = (0..chunked_inputs.len())
.map(|x| self.selector_constructor.get_selector_val_at_idx(x))
.collect();
let _ = chunked_inputs
.enumerate()
.map(|(chunk_idx, inputs)| {
layouter.assign_table(
|| "range check table",
|mut table| {
let _ = inputs
.iter()
.enumerate()
.map(|(mut row_offset, input)| {
let col_multiplier = col_multipliers[chunk_idx];
row_offset += chunk_idx * self.col_size;
let (x, y) = self.cartesian_coord(row_offset);
table.assign_cell(
|| format!("rc_i_col row {}", row_offset),
self.inputs[x],
y,
|| Value::known(*input * col_multiplier),
)?;
Ok(())
})
.collect::<Result<Vec<()>, halo2_proofs::plonk::Error>>()?;
Ok(())
})
.collect::<Result<Vec<()>, halo2_proofs::plonk::Error>>()?;
Ok(())
},
)?;
},
)
})
.collect::<Result<Vec<()>, halo2_proofs::plonk::Error>>()?;
Ok(())
}
}
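Putting the chunked range-check pieces together, a hedged sketch of where a looked-up value is expected to land, mirroring get_col_index, get_first_element and cartesian_coord above (chunk_position and the concrete numbers are illustrative, not crate code):

// Sketch only: how a value maps into the multi-column range-check table.
fn chunk_position(v: i128, range: (i128, i128), col_size: i128) -> (i128, i128, i128) {
    let chunk = (v - range.0).abs() / col_size;        // which table column
    let first_element = chunk * col_size + range.0;    // first value stored in that column
    let row = (v - range.0) % col_size;                // row within the column
    (chunk, first_element, row)
}
// e.g. chunk_position(37, (-100, 100), 64) == (2, 28, 9): the range needs
// num_cols_required(200, 64) = 4 columns, and 37 sits in column 2 at row 9,
// whose first element is 28. Each column also gets its own selector multiplier
// from SelectorConstructor::new(num_cols).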


@@ -1,4 +1,3 @@
use crate::circuit::ops::hybrid::HybridOp;
use crate::circuit::ops::poly::PolyOp;
use crate::circuit::*;
use crate::tensor::{Tensor, TensorType, ValTensor, VarTensor};
@@ -246,7 +245,7 @@ mod matmul_col_overflow {
#[cfg(test)]
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
mod matmul_col_ultra_overflow_double_col {
use halo2_proofs::poly::commitment::ParamsProver;
use halo2_proofs::poly::commitment::{Params, ParamsProver};
use super::*;
@@ -349,19 +348,22 @@ mod matmul_col_ultra_overflow_double_col {
let strategy =
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
let vk = pk.get_vk();
let result =
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
let result = crate::pfsys::verify_proof_circuit_kzg(
params.verifier_params(),
proof,
vk,
strategy,
params.n(),
);
assert!(result.is_ok());
println!("done.");
}
}
#[cfg(test)]
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
mod matmul_col_ultra_overflow {
use halo2_proofs::poly::commitment::ParamsProver;
use halo2_proofs::poly::commitment::{Params, ParamsProver};
use super::*;
@@ -463,12 +465,15 @@ mod matmul_col_ultra_overflow {
let strategy =
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
let vk = pk.get_vk();
let result =
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
let result = crate::pfsys::verify_proof_circuit_kzg(
params.verifier_params(),
proof,
vk,
strategy,
params.n(),
);
assert!(result.is_ok());
println!("done.");
}
}
@@ -1140,7 +1145,7 @@ mod conv {
#[cfg(test)]
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
mod conv_col_ultra_overflow {
use halo2_proofs::poly::commitment::ParamsProver;
use halo2_proofs::poly::commitment::{Params, ParamsProver};
use super::*;
@@ -1262,12 +1267,15 @@ mod conv_col_ultra_overflow {
let strategy =
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
let vk = pk.get_vk();
let result =
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
let result = crate::pfsys::verify_proof_circuit_kzg(
params.verifier_params(),
proof,
vk,
strategy,
params.n(),
);
assert!(result.is_ok());
println!("done.");
}
}
@@ -1275,7 +1283,7 @@ mod conv_col_ultra_overflow {
// not wasm 32 unknown
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
mod conv_relu_col_ultra_overflow {
use halo2_proofs::poly::commitment::ParamsProver;
use halo2_proofs::poly::commitment::{Params, ParamsProver};
use super::*;
@@ -1412,12 +1420,15 @@ mod conv_relu_col_ultra_overflow {
let strategy =
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
let vk = pk.get_vk();
let result =
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
let result = crate::pfsys::verify_proof_circuit_kzg(
params.verifier_params(),
proof,
vk,
strategy,
params.n(),
);
assert!(result.is_ok());
println!("done.");
}
}
@@ -1555,6 +1566,280 @@ mod add {
}
}
#[cfg(test)]
mod dynamic_lookup {
use super::*;
const K: usize = 6;
const LEN: usize = 4;
const NUM_LOOP: usize = 5;
#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
tables: [[ValTensor<F>; 2]; NUM_LOOP],
lookups: [[ValTensor<F>; 2]; NUM_LOOP],
_marker: PhantomData<F>,
}
impl Circuit<F> for MyCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;
fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let a = VarTensor::new_advice(cs, K, 2, LEN);
let b = VarTensor::new_advice(cs, K, 2, LEN);
let c: VarTensor = VarTensor::new_advice(cs, K, 2, LEN);
let d = VarTensor::new_advice(cs, K, 1, LEN);
let e = VarTensor::new_advice(cs, K, 1, LEN);
let f: VarTensor = VarTensor::new_advice(cs, K, 1, LEN);
let _constant = VarTensor::constant_cols(cs, K, LEN * NUM_LOOP, false);
let mut config =
Self::Config::configure(cs, &[a.clone(), b.clone()], &c, CheckMode::SAFE);
config
.configure_dynamic_lookup(
cs,
&[a.clone(), b.clone(), c.clone()],
&[d.clone(), e.clone(), f.clone()],
)
.unwrap();
config
}
fn synthesize(
&self,
config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
for i in 0..NUM_LOOP {
layouts::dynamic_lookup(
&config,
&mut region,
&self.lookups[i],
&self.tables[i],
)
.map_err(|_| Error::Synthesis)?;
}
assert_eq!(
region.dynamic_lookup_col_coord(),
NUM_LOOP * self.tables[0][0].len()
);
assert_eq!(region.dynamic_lookup_index(), NUM_LOOP);
Ok(())
},
)
.unwrap();
Ok(())
}
}
#[test]
fn dynamiclookupcircuit() {
// parameters
let tables = (0..NUM_LOOP)
.map(|loop_idx| {
[
ValTensor::from(Tensor::from(
(0..LEN).map(|i| Value::known(F::from((i * loop_idx) as u64 + 1))),
)),
ValTensor::from(Tensor::from(
(0..LEN).map(|i| Value::known(F::from((loop_idx * i * i) as u64 + 1))),
)),
]
})
.collect::<Vec<_>>();
let lookups = (0..NUM_LOOP)
.map(|loop_idx| {
[
ValTensor::from(Tensor::from(
(0..3).map(|i| Value::known(F::from((i * loop_idx) as u64 + 1))),
)),
ValTensor::from(Tensor::from(
(0..3).map(|i| Value::known(F::from((loop_idx * i * i) as u64 + 1))),
)),
]
})
.collect::<Vec<_>>();
let circuit = MyCircuit::<F> {
tables: tables.clone().try_into().unwrap(),
lookups: lookups.try_into().unwrap(),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
let lookups = (0..NUM_LOOP)
.map(|loop_idx| {
let prev_idx = if loop_idx == 0 {
NUM_LOOP - 1
} else {
loop_idx - 1
};
[
ValTensor::from(Tensor::from(
(0..3).map(|i| Value::known(F::from((i * prev_idx) as u64 + 1))),
)),
ValTensor::from(Tensor::from(
(0..3).map(|i| Value::known(F::from((prev_idx * i * i) as u64 + 1))),
)),
]
})
.collect::<Vec<_>>();
let circuit = MyCircuit::<F> {
tables: tables.try_into().unwrap(),
lookups: lookups.try_into().unwrap(),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
assert!(prover.verify().is_err());
}
}
#[cfg(test)]
mod shuffle {
use super::*;
const K: usize = 6;
const LEN: usize = 4;
const NUM_LOOP: usize = 5;
#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
inputs: [[ValTensor<F>; 1]; NUM_LOOP],
references: [[ValTensor<F>; 1]; NUM_LOOP],
_marker: PhantomData<F>,
}
impl Circuit<F> for MyCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;
fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let a = VarTensor::new_advice(cs, K, 2, LEN);
let b = VarTensor::new_advice(cs, K, 2, LEN);
let c: VarTensor = VarTensor::new_advice(cs, K, 2, LEN);
let d = VarTensor::new_advice(cs, K, 1, LEN);
let e = VarTensor::new_advice(cs, K, 1, LEN);
let _constant = VarTensor::constant_cols(cs, K, LEN * NUM_LOOP, false);
let mut config =
Self::Config::configure(cs, &[a.clone(), b.clone()], &c, CheckMode::SAFE);
config
.configure_shuffles(cs, &[a.clone(), b.clone()], &[d.clone(), e.clone()])
.unwrap();
config
}
fn synthesize(
&self,
config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
for i in 0..NUM_LOOP {
layouts::shuffles(
&config,
&mut region,
&self.inputs[i],
&self.references[i],
)
.map_err(|_| Error::Synthesis)?;
}
assert_eq!(
region.shuffle_col_coord(),
NUM_LOOP * self.references[0][0].len()
);
assert_eq!(region.shuffle_index(), NUM_LOOP);
Ok(())
},
)
.unwrap();
Ok(())
}
}
#[test]
fn shufflecircuit() {
// parameters
let references = (0..NUM_LOOP)
.map(|loop_idx| {
[ValTensor::from(Tensor::from((0..LEN).map(|i| {
Value::known(F::from((i * loop_idx) as u64 + 1))
})))]
})
.collect::<Vec<_>>();
let inputs = (0..NUM_LOOP)
.map(|loop_idx| {
[ValTensor::from(Tensor::from((0..LEN).rev().map(|i| {
Value::known(F::from((i * loop_idx) as u64 + 1))
})))]
})
.collect::<Vec<_>>();
let circuit = MyCircuit::<F> {
references: references.clone().try_into().unwrap(),
inputs: inputs.try_into().unwrap(),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
let inputs = (0..NUM_LOOP)
.map(|loop_idx| {
let prev_idx = if loop_idx == 0 {
NUM_LOOP - 1
} else {
loop_idx - 1
};
[ValTensor::from(Tensor::from((0..LEN).rev().map(|i| {
Value::known(F::from((i * prev_idx) as u64 + 1))
})))]
})
.collect::<Vec<_>>();
let circuit = MyCircuit::<F> {
references: references.try_into().unwrap(),
inputs: inputs.try_into().unwrap(),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
assert!(prover.verify().is_err());
}
}
#[cfg(test)]
mod add_with_overflow {
use super::*;
@@ -1958,75 +2243,6 @@ mod pow {
}
}
#[cfg(test)]
mod pack {
use super::*;
const K: usize = 8;
const LEN: usize = 4;
#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
inputs: [ValTensor<F>; 1],
_marker: PhantomData<F>,
}
impl Circuit<F> for MyCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;
fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let a = VarTensor::new_advice(cs, K, 1, LEN);
let b = VarTensor::new_advice(cs, K, 1, LEN);
let output = VarTensor::new_advice(cs, K, 1, LEN);
Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE)
}
fn synthesize(
&self,
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
&self.inputs.clone(),
Box::new(PolyOp::Pack(2, 1)),
)
.map_err(|_| Error::Synthesis)
},
)
.unwrap();
Ok(())
}
}
#[test]
fn packcircuit() {
// parameters
let a = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1))));
let circuit = MyCircuit::<F> {
inputs: [ValTensor::from(a)],
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}
}
#[cfg(test)]
mod matmul_relu {
use super::*;
@@ -2120,144 +2336,6 @@ mod matmul_relu {
}
}
#[cfg(test)]
mod rangecheckpercent {
use crate::circuit::Tolerance;
use crate::{circuit, tensor::Tensor};
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
dev::MockProver,
plonk::{Circuit, ConstraintSystem, Error},
};
const RANGE: f32 = 1.0; // 1 percent error tolerance
const K: usize = 18;
const LEN: usize = 1;
const SCALE: usize = i128::pow(2, 7) as usize;
use super::*;
#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
input: ValTensor<F>,
output: ValTensor<F>,
_marker: PhantomData<F>,
}
impl Circuit<F> for MyCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;
fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let scale = utils::F32(SCALE.pow(2) as f32);
let a = VarTensor::new_advice(cs, K, 1, LEN);
let b = VarTensor::new_advice(cs, K, 1, LEN);
let output = VarTensor::new_advice(cs, K, 1, LEN);
let mut config =
Self::Config::configure(cs, &[a.clone(), b.clone()], &output, CheckMode::SAFE);
// set up a new GreaterThan and Recip tables
let nl = &LookupOp::GreaterThan {
a: circuit::utils::F32((RANGE * scale.0) / 100.0),
};
config
.configure_lookup(cs, &b, &output, &a, (-32768, 32768), K, nl)
.unwrap();
config
.configure_lookup(
cs,
&b,
&output,
&a,
(-32768, 32768),
K,
&LookupOp::Recip { scale },
)
.unwrap();
config
}
fn synthesize(
&self,
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
config.layout_tables(&mut layouter).unwrap();
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
&[self.output.clone(), self.input.clone()],
Box::new(HybridOp::RangeCheck(Tolerance {
val: RANGE,
scale: SCALE.into(),
})),
)
.map_err(|_| Error::Synthesis)
},
)
.unwrap();
Ok(())
}
}
#[test]
#[allow(clippy::assertions_on_constants)]
fn test_range_check_percent() {
// Successful cases
{
let inp = Tensor::new(Some(&[Value::<F>::known(F::from(100_u64))]), &[1]).unwrap();
let out = Tensor::new(Some(&[Value::<F>::known(F::from(101_u64))]), &[1]).unwrap();
let circuit = MyCircuit::<F> {
input: ValTensor::from(inp),
output: ValTensor::from(out),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}
{
let inp = Tensor::new(Some(&[Value::<F>::known(F::from(200_u64))]), &[1]).unwrap();
let out = Tensor::new(Some(&[Value::<F>::known(F::from(199_u64))]), &[1]).unwrap();
let circuit = MyCircuit::<F> {
input: ValTensor::from(inp),
output: ValTensor::from(out),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}
// Unsuccessful case
{
let inp = Tensor::new(Some(&[Value::<F>::known(F::from(100_u64))]), &[1]).unwrap();
let out = Tensor::new(Some(&[Value::<F>::known(F::from(102_u64))]), &[1]).unwrap();
let circuit = MyCircuit::<F> {
input: ValTensor::from(inp),
output: ValTensor::from(out),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
match prover.verify() {
Ok(_) => {
assert!(false)
}
Err(_) => {
assert!(true)
}
}
}
}
}
#[cfg(test)]
mod relu {
use super::*;
@@ -2339,7 +2417,7 @@ mod lookup_ultra_overflow {
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
plonk::{Circuit, ConstraintSystem, Error},
poly::commitment::ParamsProver,
poly::commitment::{Params, ParamsProver},
};
#[derive(Clone)]
@@ -2443,120 +2521,14 @@ mod lookup_ultra_overflow {
let strategy =
halo2_proofs::poly::kzg::strategy::SingleStrategy::new(params.verifier_params());
let vk = pk.get_vk();
let result =
crate::pfsys::verify_proof_circuit_kzg(params.verifier_params(), proof, vk, strategy);
let result = crate::pfsys::verify_proof_circuit_kzg(
params.verifier_params(),
proof,
vk,
strategy,
params.n(),
);
assert!(result.is_ok());
println!("done.");
}
}
#[cfg(test)]
mod softmax {
use super::*;
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
dev::MockProver,
plonk::{Circuit, ConstraintSystem, Error},
};
const K: usize = 18;
const LEN: usize = 3;
const SCALE: f32 = 128.0;
#[derive(Clone)]
struct SoftmaxCircuit<F: PrimeField + TensorType + PartialOrd> {
pub input: ValTensor<F>,
_marker: PhantomData<F>,
}
impl Circuit<F> for SoftmaxCircuit<F> {
type Config = BaseConfig<F>;
type FloorPlanner = SimpleFloorPlanner;
type Params = TestParams;
fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let a = VarTensor::new_advice(cs, K, 1, LEN);
let b = VarTensor::new_advice(cs, K, 1, LEN);
let output = VarTensor::new_advice(cs, K, 1, LEN);
let mut config = Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE);
let advices = (0..3)
.map(|_| VarTensor::new_advice(cs, K, 1, LEN))
.collect::<Vec<_>>();
config
.configure_lookup(
cs,
&advices[0],
&advices[1],
&advices[2],
(-32768, 32768),
K,
&LookupOp::Exp {
scale: SCALE.into(),
},
)
.unwrap();
config
.configure_lookup(
cs,
&advices[0],
&advices[1],
&advices[2],
(-32768, 32768),
K,
&LookupOp::Recip {
scale: SCALE.powf(2.0).into(),
},
)
.unwrap();
config
}
fn synthesize(
&self,
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
config.layout_tables(&mut layouter).unwrap();
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1);
let _output = config
.layout(
&mut region,
&[self.input.clone()],
Box::new(HybridOp::Softmax {
scale: SCALE.into(),
axes: vec![0],
}),
)
.unwrap();
Ok(())
},
)
.unwrap();
Ok(())
}
}
#[test]
fn softmax_circuit() {
let input = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1))));
let circuit = SoftmaxCircuit::<F> {
input: ValTensor::from(input),
_marker: PhantomData,
};
let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}
}


@@ -1,4 +1,4 @@
use clap::{Parser, Subcommand, ValueEnum};
use clap::{Parser, Subcommand};
#[cfg(not(target_arch = "wasm32"))]
use ethers::types::H160;
#[cfg(feature = "python-bindings")]
@@ -9,8 +9,9 @@ use pyo3::{
types::PyString,
};
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::path::PathBuf;
use std::{error::Error, str::FromStr};
use tosubcommand::{ToFlags, ToSubcommand};
use crate::{pfsys::ProofType, RunArgs};
@@ -76,7 +77,7 @@ pub const DEFAULT_CALIBRATION_FILE: &str = "calibration.json";
/// Default lookup safety margin
pub const DEFAULT_LOOKUP_SAFETY_MARGIN: &str = "2";
/// Default Compress selectors
pub const DEFAULT_COMPRESS_SELECTORS: &str = "false";
pub const DEFAULT_DISABLE_SELECTOR_COMPRESSION: &str = "false";
/// Default render vk separately
pub const DEFAULT_RENDER_VK_SEPERATELY: &str = "false";
/// Default VK sol path
@@ -85,15 +86,11 @@ pub const DEFAULT_VK_SOL: &str = "vk.sol";
pub const DEFAULT_VK_ABI: &str = "vk.abi";
/// Default scale rebase multipliers for calibration
pub const DEFAULT_SCALE_REBASE_MULTIPLIERS: &str = "1,2,10";
/// Default use reduced srs for verification
pub const DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION: &str = "false";
/// Default only check for range check rebase
pub const DEFAULT_ONLY_RANGE_CHECK_REBASE: &str = "false";
impl std::fmt::Display for TranscriptType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.to_possible_value()
.expect("no values are skipped")
.get_name()
.fmt(f)
}
}
#[cfg(feature = "python-bindings")]
/// Converts TranscriptType into a PyObject (Required for TranscriptType to be compatible with Python)
impl IntoPy<PyObject> for TranscriptType {
@@ -138,17 +135,27 @@ impl Default for CalibrationTarget {
}
}
impl ToString for CalibrationTarget {
fn to_string(&self) -> String {
match self {
CalibrationTarget::Resources { col_overflow: true } => {
"resources/col-overflow".to_string()
impl std::fmt::Display for CalibrationTarget {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
CalibrationTarget::Resources { col_overflow: true } => {
"resources/col-overflow".to_string()
}
CalibrationTarget::Resources {
col_overflow: false,
} => "resources".to_string(),
CalibrationTarget::Accuracy => "accuracy".to_string(),
}
CalibrationTarget::Resources {
col_overflow: false,
} => "resources".to_string(),
CalibrationTarget::Accuracy => "accuracy".to_string(),
}
)
}
}
impl ToFlags for CalibrationTarget {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
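An illustrative round trip for the Display and ToFlags pair above (not taken from the crate's tests; assumes CalibrationTarget and ToFlags are in scope):

fn calibration_target_flags_sketch() {
    let t = CalibrationTarget::Resources { col_overflow: true };
    assert_eq!(t.to_string(), "resources/col-overflow");
    assert_eq!(t.to_flags(), vec!["resources/col-overflow".to_string()]);
}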
@@ -169,6 +176,36 @@ impl From<&str> for CalibrationTarget {
}
}
#[cfg(not(target_arch = "wasm32"))]
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
/// wrapper for H160 to make it easy to parse into flag vals
pub struct H160Flag {
inner: H160,
}
#[cfg(not(target_arch = "wasm32"))]
impl From<H160Flag> for H160 {
fn from(val: H160Flag) -> H160 {
val.inner
}
}
#[cfg(not(target_arch = "wasm32"))]
impl ToFlags for H160Flag {
fn to_flags(&self) -> Vec<String> {
vec![format!("{:#x}", self.inner)]
}
}
#[cfg(not(target_arch = "wasm32"))]
impl From<&str> for H160Flag {
fn from(s: &str) -> Self {
Self {
inner: H160::from_str(s).unwrap(),
}
}
}
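And a similarly hedged sketch for H160Flag (the address is made up; this assumes H160::from_str accepts a 0x-prefixed string and that the alternate hex formatter prints the full 20-byte address, so treat it as illustrative only):

fn h160_flag_sketch() {
    let addr = H160Flag::from("0x000000000000000000000000000000000000dead");
    assert_eq!(
        addr.to_flags(),
        vec!["0x000000000000000000000000000000000000dead".to_string()]
    );
}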
#[cfg(feature = "python-bindings")]
/// Converts CalibrationTarget into a PyObject (Required for CalibrationTarget to be compatible with Python)
impl IntoPy<PyObject> for CalibrationTarget {
@@ -201,7 +238,7 @@ impl<'source> FromPyObject<'source> for CalibrationTarget {
}
}
}
// not wasm
use lazy_static::lazy_static;
// if CARGO VERSION is 0.0.0 replace with "source - no compatibility guaranteed"
@@ -242,7 +279,7 @@ impl Cli {
}
#[allow(missing_docs)]
#[derive(Debug, Subcommand, Clone, Deserialize, Serialize, PartialEq, PartialOrd)]
#[derive(Debug, Subcommand, Clone, Deserialize, Serialize, PartialEq, PartialOrd, ToSubcommand)]
pub enum Commands {
#[cfg(feature = "empty-cmd")]
/// Creates an empty buffer
@@ -336,9 +373,9 @@ pub enum Commands {
/// max logrows to use for calibration, 26 is the max public SRS size
#[arg(long)]
max_logrows: Option<u32>,
// whether to fix the div_rebasing value truthiness during calibration. this changes how we rebase
#[arg(long)]
div_rebasing: Option<bool>,
// whether to only range check rebases (instead of trying both range check and lookup)
#[arg(long, default_value = DEFAULT_ONLY_RANGE_CHECK_REBASE)]
only_range_check_rebase: bool,
},
/// Generates a dummy SRS
@@ -413,8 +450,8 @@ pub enum Commands {
#[arg(long, default_value = DEFAULT_SPLIT)]
split_proofs: bool,
/// compress selectors
#[arg(long, default_value = DEFAULT_COMPRESS_SELECTORS)]
compress_selectors: bool,
#[arg(long, default_value = DEFAULT_DISABLE_SELECTOR_COMPRESSION)]
disable_selector_compression: bool,
},
/// Aggregates proofs :)
Aggregate {
@@ -434,7 +471,7 @@ pub enum Commands {
long,
require_equals = true,
num_args = 0..=1,
default_value_t = TranscriptType::EVM,
default_value_t = TranscriptType::default(),
value_enum
)]
transcript: TranscriptType,
@@ -478,8 +515,8 @@ pub enum Commands {
#[arg(short = 'W', long)]
witness: Option<PathBuf>,
/// compress selectors
#[arg(long, default_value = DEFAULT_COMPRESS_SELECTORS)]
compress_selectors: bool,
#[arg(long, default_value = DEFAULT_DISABLE_SELECTOR_COMPRESSION)]
disable_selector_compression: bool,
},
#[cfg(not(target_arch = "wasm32"))]
@@ -489,13 +526,13 @@ pub enum Commands {
#[arg(short = 'W', long, default_value = DEFAULT_WITNESS)]
witness: PathBuf,
/// The path to the compiled model file (generated using the compile-circuit command)
#[arg(short = 'M', long, default_value = DEFAULT_COMPILED_CIRCUIT)]
#[arg(short = 'M', long)]
compiled_circuit: PathBuf,
#[arg(
long,
require_equals = true,
num_args = 0..=1,
default_value_t = TranscriptType::EVM,
default_value_t = TranscriptType::default(),
value_enum
)]
transcript: TranscriptType,
@@ -503,13 +540,13 @@ pub enum Commands {
#[arg(long, default_value = DEFAULT_FUZZ_RUNS)]
num_runs: usize,
/// compress selectors
#[arg(long, default_value = DEFAULT_COMPRESS_SELECTORS)]
compress_selectors: bool,
#[arg(long, default_value = DEFAULT_DISABLE_SELECTOR_COMPRESSION)]
disable_selector_compression: bool,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys a test contract that the data attester reads from and creates a data attestation formatted input.json file that contains call data information
#[command(arg_required_else_help = true)]
SetupTestEVMData {
SetupTestEvmData {
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
#[arg(short = 'D', long)]
data: PathBuf,
@@ -537,7 +574,7 @@ pub enum Commands {
TestUpdateAccountCalls {
/// The path to the verifier contract's address
#[arg(long)]
addr: H160,
addr: H160Flag,
/// The path to the .json data file.
#[arg(short = 'D', long)]
data: PathBuf,
@@ -587,9 +624,9 @@ pub enum Commands {
check_mode: CheckMode,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier for a single proof
/// Creates an Evm verifier for a single proof
#[command(name = "create-evm-verifier")]
CreateEVMVerifier {
CreateEvmVerifier {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
@@ -612,9 +649,9 @@ pub enum Commands {
render_vk_seperately: bool,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier for a single proof
/// Creates an Evm verification key (VK) for a single proof
#[command(name = "create-evm-vk")]
CreateEVMVK {
CreateEvmVK {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
@@ -632,9 +669,9 @@ pub enum Commands {
abi_path: PathBuf,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier that attests to on-chain inputs for a single proof
/// Creates an Evm verifier that attests to on-chain inputs for a single proof
#[command(name = "create-evm-da")]
CreateEVMDataAttestation {
CreateEvmDataAttestation {
/// The path to load circuit settings .json file from (generated using the gen-settings command)
#[arg(short = 'S', long, default_value = DEFAULT_SETTINGS)]
settings_path: PathBuf,
@@ -654,9 +691,9 @@ pub enum Commands {
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an EVM verifier for an aggregate proof
/// Creates an Evm verifier for an aggregate proof
#[command(name = "create-evm-verifier-aggr")]
CreateEVMVerifierAggr {
CreateEvmVerifierAggr {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
@@ -695,6 +732,9 @@ pub enum Commands {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long)]
srs_path: Option<PathBuf>,
/// Reduce SRS logrows to the number of instances rather than the number of logrows used for proofs (only works if the srs were generated in the same ceremony)
#[arg(long, default_value = DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION)]
reduced_srs: bool,
},
/// Verifies an aggregate proof, returning accept or reject
VerifyAggr {
@@ -776,31 +816,23 @@ pub enum Commands {
private_key: Option<String>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Verifies a proof using a local EVM executor, returning accept or reject
/// Verifies a proof using a local Evm executor, returning accept or reject
#[command(name = "verify-evm")]
VerifyEVM {
VerifyEvm {
/// The path to the proof file (generated using the prove command)
#[arg(long, default_value = DEFAULT_PROOF)]
proof_path: PathBuf,
/// The path to verifier contract's address
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS)]
addr_verifier: H160,
addr_verifier: H160Flag,
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
#[arg(short = 'U', long)]
rpc_url: Option<String>,
/// does the verifier use data attestation ?
#[arg(long)]
addr_da: Option<H160>,
addr_da: Option<H160Flag>,
// is the vk rendered separately, if so specify an address
#[arg(long)]
addr_vk: Option<H160>,
},
/// Print the proof in hexadecimal
#[command(name = "print-proof-hex")]
PrintProofHex {
/// The path to the proof file
#[arg(long, default_value = DEFAULT_PROOF)]
proof_path: PathBuf,
addr_vk: Option<H160Flag>,
},
}

View File

@@ -3,6 +3,8 @@ use crate::circuit::CheckMode;
use crate::commands::CalibrationTarget;
use crate::commands::Commands;
#[cfg(not(target_arch = "wasm32"))]
use crate::commands::H160Flag;
#[cfg(not(target_arch = "wasm32"))]
use crate::eth::{deploy_contract_via_solidity, deploy_da_verifier_via_solidity};
#[cfg(not(target_arch = "wasm32"))]
#[allow(unused_imports)]
@@ -21,8 +23,7 @@ use crate::pfsys::{create_proof_circuit_kzg, verify_proof_circuit_kzg};
use crate::pfsys::{save_vk, srs::*};
use crate::tensor::TensorError;
use crate::RunArgs;
#[cfg(not(target_arch = "wasm32"))]
use ethers::types::H160;
#[cfg(unix)]
use gag::Gag;
use halo2_proofs::dev::VerifyFailure;
use halo2_proofs::poly::commitment::Params;
@@ -63,7 +64,11 @@ use std::process::Command;
use std::sync::atomic::{AtomicBool, AtomicI64, Ordering};
#[cfg(not(target_arch = "wasm32"))]
use std::sync::OnceLock;
#[cfg(not(target_arch = "wasm32"))]
use crate::EZKL_BUF_CAPACITY;
#[cfg(not(target_arch = "wasm32"))]
use std::io::BufWriter;
use std::time::Duration;
use tabled::Tabled;
use thiserror::Error;
@@ -140,13 +145,13 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
compiled_circuit,
transcript,
num_runs,
compress_selectors,
disable_selector_compression,
} => fuzz(
compiled_circuit,
witness,
transcript,
num_runs,
compress_selectors,
disable_selector_compression,
),
Commands::GenSrs { srs_path, logrows } => gen_srs_cmd(srs_path, logrows as u32),
#[cfg(not(target_arch = "wasm32"))]
@@ -178,7 +183,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
scales,
scale_rebase_multiplier,
max_logrows,
div_rebasing,
only_range_check_rebase,
} => calibrate(
model,
data,
@@ -187,7 +192,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
lookup_safety_margin,
scales,
scale_rebase_multiplier,
div_rebasing,
only_range_check_rebase,
max_logrows,
)
.map(|e| serde_json::to_string(&e).unwrap()),
@@ -202,7 +207,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
.map(|e| serde_json::to_string(&e).unwrap()),
Commands::Mock { model, witness } => mock(model, witness),
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEVMVerifier {
Commands::CreateEvmVerifier {
vk_path,
srs_path,
settings_path,
@@ -217,7 +222,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
abi_path,
render_vk_seperately,
),
Commands::CreateEVMVK {
Commands::CreateEvmVK {
vk_path,
srs_path,
settings_path,
@@ -225,14 +230,14 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
abi_path,
} => create_evm_vk(vk_path, srs_path, settings_path, sol_code_path, abi_path),
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEVMDataAttestation {
Commands::CreateEvmDataAttestation {
settings_path,
sol_code_path,
abi_path,
data,
} => create_evm_data_attestation(settings_path, sol_code_path, abi_path, data),
#[cfg(not(target_arch = "wasm32"))]
Commands::CreateEVMVerifierAggr {
Commands::CreateEvmVerifierAggr {
vk_path,
srs_path,
sol_code_path,
@@ -260,17 +265,17 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
vk_path,
pk_path,
witness,
compress_selectors,
disable_selector_compression,
} => setup(
compiled_circuit,
srs_path,
vk_path,
pk_path,
witness,
compress_selectors,
disable_selector_compression,
),
#[cfg(not(target_arch = "wasm32"))]
Commands::SetupTestEVMData {
Commands::SetupTestEvmData {
data,
compiled_circuit,
test_data,
@@ -331,7 +336,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
srs_path,
logrows,
split_proofs,
compress_selectors,
disable_selector_compression,
} => setup_aggregate(
sample_snarks,
vk_path,
@@ -339,7 +344,7 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
srs_path,
logrows,
split_proofs,
compress_selectors,
disable_selector_compression,
),
Commands::Aggregate {
proof_path,
@@ -366,7 +371,8 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
settings_path,
vk_path,
srs_path,
} => verify(proof_path, settings_path, vk_path, srs_path)
reduced_srs,
} => verify(proof_path, settings_path, vk_path, srs_path, reduced_srs)
.map(|e| serde_json::to_string(&e).unwrap()),
Commands::VerifyAggr {
proof_path,
@@ -433,14 +439,13 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::VerifyEVM {
Commands::VerifyEvm {
proof_path,
addr_verifier,
rpc_url,
addr_da,
addr_vk,
} => verify_evm(proof_path, addr_verifier, rpc_url, addr_da, addr_vk).await,
Commands::PrintProofHex { proof_path } => print_proof_hex(proof_path),
}
}
@@ -488,8 +493,21 @@ async fn fetch_srs(uri: &str) -> Result<Vec<u8>, Box<dyn Error>> {
#[cfg(not(target_arch = "wasm32"))]
fn check_srs_hash(logrows: u32, srs_path: Option<PathBuf>) -> Result<String, Box<dyn Error>> {
use std::io::Read;
let path = get_srs_path(logrows, srs_path);
let hash = sha256::digest(std::fs::read(path.clone())?);
let file = std::fs::File::open(path.clone())?;
let mut buffer = vec![];
let mut reader = std::io::BufReader::with_capacity(*EZKL_BUF_CAPACITY, file);
let bytes_read = reader.read_to_end(&mut buffer)?;
info!(
"read {} bytes from SRS file (vector of len = {})",
bytes_read,
buffer.len()
);
let hash = sha256::digest(buffer);
info!("SRS hash: {}", hash);
let predefined_hash = match { crate::srs_sha::PUBLIC_SRS_SHA256_HASHES.get(&logrows) } {
@@ -553,7 +571,11 @@ pub(crate) async fn get_srs_cmd(
}
let mut file = std::fs::File::create(get_srs_path(k, srs_path.clone()))?;
file.write_all(reader.get_ref())?;
let mut buffer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, &mut file);
buffer.write_all(reader.get_ref())?;
buffer.flush()?;
info!("SRS downloaded");
} else {
info!("SRS already exists at that path");
@@ -612,7 +634,7 @@ pub(crate) async fn gen_witness(
let start_time = Instant::now();
let witness = circuit.forward(&mut input, vk.as_ref(), srs.as_ref())?;
let witness = circuit.forward(&mut input, vk.as_ref(), srs.as_ref(), false)?;
// print each variable tuple (symbol, value) as symbol=value
trace!(
@@ -628,8 +650,12 @@ pub(crate) async fn gen_witness(
);
if let Some(output_path) = output {
serde_json::to_writer(&File::create(output_path)?, &witness)?;
witness.save(output_path)?;
}
// print the witness in debug
debug!("witness: \n {}", witness.as_json()?.to_colored_json_auto()?);
Ok(witness)
}
@@ -722,11 +748,11 @@ impl AccuracyResults {
let percentage_error = error.enum_map(|i, x| Ok::<_, TensorError>(x / original[i]))?;
let abs_percentage_error = percentage_error.map(|x| x.abs());
errors.extend(error.into_iter());
abs_errors.extend(abs_error.into_iter());
squared_errors.extend(squared_error.into_iter());
percentage_errors.extend(percentage_error.into_iter());
abs_percentage_errors.extend(abs_percentage_error.into_iter());
errors.extend(error);
abs_errors.extend(abs_error);
squared_errors.extend(squared_error);
percentage_errors.extend(percentage_error);
abs_percentage_errors.extend(abs_percentage_error);
}
let mean_percent_error =
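For reference, a hedged sketch of the error statistics this block accumulates (mean error, MAE, MSE, mean percentage error, MAPE), computed over plain `f64` slices rather than tensors:

```rust
// Minimal accuracy-report sketch: calibrated vs original outputs.
fn accuracy_stats(original: &[f64], calibrated: &[f64]) -> (f64, f64, f64, f64, f64) {
    let n = original.len() as f64;
    let errors: Vec<f64> = original.iter().zip(calibrated).map(|(o, c)| c - o).collect();
    let mean_error = errors.iter().sum::<f64>() / n;
    let mae = errors.iter().map(|e| e.abs()).sum::<f64>() / n;
    let mse = errors.iter().map(|e| e * e).sum::<f64>() / n;
    let mean_pct = errors.iter().zip(original).map(|(e, o)| e / o).sum::<f64>() / n;
    let mape = errors.iter().zip(original).map(|(e, o)| (e / o).abs()).sum::<f64>() / n;
    (mean_error, mae, mse, mean_pct, mape)
}

fn main() {
    let (_, mae, _, _, mape) = accuracy_stats(&[1.0, 2.0, 4.0], &[1.1, 1.9, 4.0]);
    assert!((mae - 0.2 / 3.0).abs() < 1e-9);
    assert!((mape - 0.05).abs() < 1e-9);
}
```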
@@ -776,6 +802,7 @@ impl AccuracyResults {
/// Calibrate the circuit parameters to a given a dataset
#[cfg(not(target_arch = "wasm32"))]
#[allow(trivial_casts)]
#[allow(clippy::too_many_arguments)]
pub(crate) fn calibrate(
model_path: PathBuf,
data: PathBuf,
@@ -784,7 +811,7 @@ pub(crate) fn calibrate(
lookup_safety_margin: i128,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
div_rebasing: Option<bool>,
only_range_check_rebase: bool,
max_logrows: Option<u32>,
) -> Result<GraphSettings, Box<dyn Error>> {
use std::collections::HashMap;
@@ -795,18 +822,8 @@ pub(crate) fn calibrate(
let settings = GraphSettings::load(&settings_path)?;
// now retrieve the run args
// we load the model to get the input and output shapes
// check if gag already exists
#[cfg(unix)]
let _r = match Gag::stdout() {
Ok(r) => Some(r),
Err(_) => None,
};
let model = Model::from_run_args(&settings.run_args, &model_path)?;
// drop the gag
#[cfg(unix)]
std::mem::drop(_r);
let chunks = data.split_into_batches(model.graph.input_shapes()?)?;
info!("num of calibration batches: {}", chunks.len());
@@ -822,14 +839,11 @@ pub(crate) fn calibrate(
let range = if let Some(scales) = scales {
scales
} else {
match target {
CalibrationTarget::Resources { .. } => (8..10).collect::<Vec<crate::Scale>>(),
CalibrationTarget::Accuracy => (10..14).collect::<Vec<crate::Scale>>(),
}
(11..14).collect::<Vec<crate::Scale>>()
};
let div_rebasing = if let Some(div_rebasing) = div_rebasing {
vec![div_rebasing]
let div_rebasing = if only_range_check_rebase {
vec![false]
} else {
vec![true, false]
};
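Calibration therefore searches a grid of candidate settings. A small std-only sketch of the search space implied above, using the default scale range (11..14), the default rebase multipliers (1, 2, 10) and both `div_rebasing` options:

```rust
// Every combination of input/param scale, rebase multiplier and div_rebasing
// option is tried; the concrete defaults are taken from the diff.
fn main() {
    let scales: Vec<u32> = (11..14).collect();
    let rebase_multipliers = [1u32, 2, 10];
    let only_range_check_rebase = false;
    let div_rebasing: &[bool] = if only_range_check_rebase {
        &[false]
    } else {
        &[true, false]
    };

    let mut grid = Vec::new();
    for &input_scale in &scales {
        for &param_scale in &scales {
            for &mult in &rebase_multipliers {
                for &rebase in div_rebasing {
                    grid.push((input_scale, param_scale, mult, rebase));
                }
            }
        }
    }
    // 3 scales x 3 scales x 3 multipliers x 2 rebase options = 54 candidates
    println!("{} calibration candidates", grid.len());
}
```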
@@ -888,17 +902,12 @@ pub(crate) fn calibrate(
input_scale, param_scale, scale_rebase_multiplier, div_rebasing
));
#[cfg(unix)]
let _r = match Gag::stdout() {
Ok(r) => Some(r),
Err(_) => None,
};
#[cfg(unix)]
let _q = match Gag::stderr() {
Ok(r) => Some(r),
Err(_) => None,
};
let key = (input_scale, param_scale, scale_rebase_multiplier);
let key = (
input_scale,
param_scale,
scale_rebase_multiplier,
div_rebasing,
);
forward_pass_res.insert(key, vec![]);
let local_run_args = RunArgs {
@@ -909,20 +918,27 @@ pub(crate) fn calibrate(
..settings.run_args.clone()
};
// if unix get a gag
#[cfg(unix)]
let _r = match Gag::stdout() {
Ok(g) => Some(g),
_ => None,
};
#[cfg(unix)]
let _g = match Gag::stderr() {
Ok(g) => Some(g),
_ => None,
};
let mut circuit = match GraphCircuit::from_run_args(&local_run_args, &model_path) {
Ok(c) => c,
Err(e) => {
// drop the gag
#[cfg(unix)]
std::mem::drop(_r);
#[cfg(unix)]
std::mem::drop(_q);
debug!("circuit creation from run args failed: {:?}", e);
continue;
}
};
chunks
let forward_res = chunks
.iter()
.map(|chunk| {
let chunk = chunk.clone();
@@ -932,7 +948,7 @@ pub(crate) fn calibrate(
.map_err(|e| format!("failed to load circuit inputs: {}", e))?;
let forward_res = circuit
.forward(&mut data.clone(), None, None)
.forward(&mut data.clone(), None, None, true)
.map_err(|e| format!("failed to forward: {}", e))?;
// push result to the hashmap
@@ -943,37 +959,46 @@ pub(crate) fn calibrate(
Ok(()) as Result<(), String>
})
.collect::<Result<Vec<()>, String>>()?;
.collect::<Result<Vec<()>, String>>();
let min_lookup_range = forward_pass_res
.get(&key)
.unwrap()
match forward_res {
Ok(_) => (),
// typically errors will be due to the circuit overflowing the i128 limit
Err(e) => {
debug!("forward pass failed: {:?}", e);
continue;
}
}
// drop the gag
#[cfg(unix)]
drop(_r);
#[cfg(unix)]
drop(_g);
let result = forward_pass_res.get(&key).ok_or("key not found")?;
let min_lookup_range = result
.iter()
.map(|x| x.min_lookup_inputs)
.min()
.unwrap_or(0);
let max_lookup_range = forward_pass_res
.get(&key)
.unwrap()
let max_lookup_range = result
.iter()
.map(|x| x.max_lookup_inputs)
.max()
.unwrap_or(0);
let res = circuit.calibrate_from_min_max(
min_lookup_range,
max_lookup_range,
let max_range_size = result.iter().map(|x| x.max_range_size).max().unwrap_or(0);
let res = circuit.calc_min_logrows(
(min_lookup_range, max_lookup_range),
max_range_size,
max_logrows,
lookup_safety_margin,
);
// drop the gag
#[cfg(unix)]
std::mem::drop(_r);
#[cfg(unix)]
std::mem::drop(_q);
if res.is_ok() {
let new_settings = circuit.settings().clone();
@@ -1086,6 +1111,7 @@ pub(crate) fn calibrate(
best_params.run_args.input_scale,
best_params.run_args.param_scale,
best_params.run_args.scale_rebase_multiplier,
best_params.run_args.div_rebasing,
))
.ok_or("no params found")?
.iter()
@@ -1168,16 +1194,6 @@ pub(crate) fn mock(
Ok(String::new())
}
pub(crate) fn print_proof_hex(proof_path: PathBuf) -> Result<String, Box<dyn Error>> {
let proof = Snark::load::<KZGCommitmentScheme<Bn256>>(&proof_path)?;
for instance in proof.instances {
println!("{:?}", instance);
}
let hex_str = hex::encode(proof.proof);
info!("0x{}", hex_str);
Ok(format!("0x{}", hex_str))
}
#[cfg(feature = "render")]
pub(crate) fn render(
model: PathBuf,
@@ -1396,10 +1412,10 @@ pub(crate) async fn deploy_evm(
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn verify_evm(
proof_path: PathBuf,
addr_verifier: H160,
addr_verifier: H160Flag,
rpc_url: Option<String>,
addr_da: Option<H160>,
addr_vk: Option<H160>,
addr_da: Option<H160Flag>,
addr_vk: Option<H160Flag>,
) -> Result<String, Box<dyn Error>> {
use crate::eth::verify_proof_with_data_attestation;
check_solc_requirement();
@@ -1409,14 +1425,20 @@ pub(crate) async fn verify_evm(
let result = if let Some(addr_da) = addr_da {
verify_proof_with_data_attestation(
proof.clone(),
addr_verifier,
addr_da,
addr_vk,
addr_verifier.into(),
addr_da.into(),
addr_vk.map(|s| s.into()),
rpc_url.as_deref(),
)
.await?
} else {
verify_proof_via_solidity(proof.clone(), addr_verifier, addr_vk, rpc_url.as_deref()).await?
verify_proof_via_solidity(
proof.clone(),
addr_verifier.into(),
addr_vk.map(|s| s.into()),
rpc_url.as_deref(),
)
.await?
};
info!("Solidity verification result: {}", result);
@@ -1508,7 +1530,7 @@ pub(crate) fn setup(
vk_path: PathBuf,
pk_path: PathBuf,
witness: Option<PathBuf>,
compress_selectors: bool,
disable_selector_compression: bool,
) -> Result<String, Box<dyn Error>> {
// these aren't real values so the sanity checks are mostly meaningless
let mut circuit = GraphCircuit::load(compiled_circuit)?;
@@ -1522,7 +1544,7 @@ pub(crate) fn setup(
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&params,
compress_selectors,
disable_selector_compression,
)
.map_err(Box::<dyn Error>::from)?;
@@ -1572,14 +1594,14 @@ pub(crate) async fn setup_test_evm_witness(
use crate::pfsys::ProofType;
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn test_update_account_calls(
addr: H160,
addr: H160Flag,
data: PathBuf,
rpc_url: Option<String>,
) -> Result<String, Box<dyn Error>> {
use crate::eth::update_account_calls;
check_solc_requirement();
update_account_calls(addr, data, rpc_url.as_deref()).await?;
update_account_calls(addr.into(), data, rpc_url.as_deref()).await?;
Ok(String::new())
}
@@ -1663,7 +1685,7 @@ pub(crate) fn fuzz(
data_path: PathBuf,
transcript: TranscriptType,
num_runs: usize,
compress_selectors: bool,
disable_selector_compression: bool,
) -> Result<String, Box<dyn Error>> {
check_solc_requirement();
let passed = AtomicBool::new(true);
@@ -1682,7 +1704,7 @@ pub(crate) fn fuzz(
let pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&params,
compress_selectors,
disable_selector_compression,
)
.map_err(Box::<dyn Error>::from)?;
@@ -1703,7 +1725,7 @@ pub(crate) fn fuzz(
let bad_pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&new_params,
compress_selectors,
disable_selector_compression,
)
.map_err(|_| ())?;
@@ -1724,6 +1746,7 @@ pub(crate) fn fuzz(
bad_proof,
pk.get_vk(),
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1754,6 +1777,7 @@ pub(crate) fn fuzz(
bad_proof,
pk.get_vk(),
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1779,7 +1803,7 @@ pub(crate) fn fuzz(
let bad_pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
&circuit,
&new_params,
compress_selectors,
disable_selector_compression,
)
.map_err(|_| ())?;
@@ -1790,6 +1814,7 @@ pub(crate) fn fuzz(
proof.clone(),
bad_vk,
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1821,6 +1846,7 @@ pub(crate) fn fuzz(
bad_proof,
pk.get_vk(),
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1856,6 +1882,7 @@ pub(crate) fn fuzz(
bad_proof,
pk.get_vk(),
strategy.clone(),
params.n(),
)
.map_err(|_| ())
};
@@ -1955,7 +1982,7 @@ pub(crate) fn setup_aggregate(
srs_path: Option<PathBuf>,
logrows: u32,
split_proofs: bool,
compress_selectors: bool,
disable_selector_compression: bool,
) -> Result<String, Box<dyn Error>> {
// the K used for the aggregation circuit
let params = load_params_cmd(srs_path, logrows)?;
@@ -1969,7 +1996,7 @@ pub(crate) fn setup_aggregate(
let agg_pk = create_keys::<KZGCommitmentScheme<Bn256>, Fr, AggregationCircuit>(
&agg_circuit,
&params,
compress_selectors,
disable_selector_compression,
)?;
let agg_vk = agg_pk.get_vk();
@@ -2041,15 +2068,30 @@ pub(crate) fn verify(
settings_path: PathBuf,
vk_path: PathBuf,
srs_path: Option<PathBuf>,
reduced_srs: bool,
) -> Result<bool, Box<dyn Error>> {
let circuit_settings = GraphSettings::load(&settings_path)?;
let params = load_params_cmd(srs_path, circuit_settings.run_args.logrows)?;
let params = if reduced_srs {
// only need G_0 for the verification with shplonk
load_params_cmd(srs_path, 1)?
} else {
load_params_cmd(srs_path, circuit_settings.run_args.logrows)?
};
let proof = Snark::load::<KZGCommitmentScheme<Bn256>>(&proof_path)?;
let strategy = KZGSingleStrategy::new(params.verifier_params());
let vk = load_vk::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk_path, circuit_settings)?;
let vk =
load_vk::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk_path, circuit_settings.clone())?;
let now = Instant::now();
let result = verify_proof_circuit_kzg(params.verifier_params(), proof, &vk, strategy);
let result = verify_proof_circuit_kzg(
params.verifier_params(),
proof,
&vk,
strategy,
1 << circuit_settings.run_args.logrows,
);
let elapsed = now.elapsed();
info!(
"verify took {}.{}",
@@ -2073,7 +2115,7 @@ pub(crate) fn verify_aggr(
let strategy = AccumulatorStrategy::new(params.verifier_params());
let vk = load_vk::<KZGCommitmentScheme<Bn256>, Fr, AggregationCircuit>(vk_path, ())?;
let now = Instant::now();
let result = verify_proof_circuit_kzg(&params, proof, &vk, strategy);
let result = verify_proof_circuit_kzg(&params, proof, &vk, strategy, 1 << logrows);
let elapsed = now.elapsed();
info!(

View File

@@ -4,6 +4,7 @@ use crate::circuit::InputType;
use crate::fieldutils::i128_to_felt;
#[cfg(not(target_arch = "wasm32"))]
use crate::tensor::Tensor;
use crate::EZKL_BUF_CAPACITY;
use halo2curves::bn256::Fr as Fp;
#[cfg(not(target_arch = "wasm32"))]
use postgres::{Client, NoTls};
@@ -15,6 +16,8 @@ use pyo3::types::PyDict;
use pyo3::ToPyObject;
use serde::ser::SerializeStruct;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::io::BufReader;
use std::io::BufWriter;
use std::io::Read;
use std::panic::UnwindSafe;
#[cfg(not(target_arch = "wasm32"))]
@@ -490,16 +493,20 @@ impl GraphData {
/// Load the model input from a file
pub fn from_path(path: std::path::PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
let mut file = std::fs::File::open(path.clone())
.map_err(|_| format!("failed to open input at {}", path.display()))?;
let mut data = String::new();
file.read_to_string(&mut data)?;
serde_json::from_str(&data).map_err(|e| e.into())
let reader = std::fs::File::open(path)?;
let mut reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, reader);
let mut buf = String::new();
reader.read_to_string(&mut buf)?;
let graph_input = serde_json::from_str(&buf)?;
Ok(graph_input)
}
/// Save the model input to a file
pub fn save(&self, path: std::path::PathBuf) -> Result<(), Box<dyn std::error::Error>> {
serde_json::to_writer(std::fs::File::create(path)?, &self).map_err(|e| e.into())
// buf writer
let writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, std::fs::File::create(path)?);
serde_json::to_writer(writer, self)?;
Ok(())
}
///
@@ -617,13 +624,13 @@ impl ToPyObject for DataSource {
}
#[cfg(feature = "python-bindings")]
use crate::pfsys::field_to_string_montgomery;
use crate::pfsys::field_to_string;
#[cfg(feature = "python-bindings")]
impl ToPyObject for FileSourceInner {
fn to_object(&self, py: Python) -> PyObject {
match self {
FileSourceInner::Field(data) => field_to_string_montgomery(data).to_object(py),
FileSourceInner::Field(data) => field_to_string(data).to_object(py),
FileSourceInner::Bool(data) => data.to_object(py),
FileSourceInner::Float(data) => data.to_object(py),
}

View File

@@ -12,10 +12,13 @@ pub mod utilities;
pub mod vars;
#[cfg(not(target_arch = "wasm32"))]
use colored_json::ToColoredJson;
#[cfg(unix)]
use gag::Gag;
use halo2_proofs::plonk::VerifyingKey;
use halo2_proofs::poly::kzg::commitment::ParamsKZG;
pub use input::DataSource;
use itertools::Itertools;
use tosubcommand::ToFlags;
#[cfg(not(target_arch = "wasm32"))]
use self::input::OnChainSource;
@@ -23,19 +26,22 @@ use self::input::{FileSource, GraphData};
use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSizes};
use crate::circuit::lookup::LookupOp;
use crate::circuit::modules::ModulePlanner;
use crate::circuit::table::{Range, Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::table::{num_cols_required, Range, Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::{CheckMode, InputType};
use crate::fieldutils::felt_to_f64;
use crate::pfsys::PrettyElements;
use crate::tensor::{Tensor, ValTensor};
use crate::RunArgs;
use crate::{RunArgs, EZKL_BUF_CAPACITY};
use halo2_proofs::{
circuit::Layouter,
plonk::{Circuit, ConstraintSystem, Error as PlonkError},
};
use halo2curves::bn256::{self, Bn256, Fr as Fp, G1Affine};
use halo2curves::ff::PrimeField;
use log::{debug, error, info, trace, warn};
#[cfg(not(target_arch = "wasm32"))]
use lazy_static::lazy_static;
use log::{debug, error, trace, warn};
use maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
pub use model::*;
pub use node::*;
@@ -46,20 +52,36 @@ use pyo3::types::PyDict;
#[cfg(feature = "python-bindings")]
use pyo3::ToPyObject;
use serde::{Deserialize, Serialize};
use std::io::{Read, Write};
use std::ops::Deref;
use thiserror::Error;
pub use utilities::*;
pub use vars::*;
#[cfg(feature = "python-bindings")]
use crate::pfsys::field_to_string_montgomery;
use crate::pfsys::field_to_string;
/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: i128 = 2;
/// The maximum number of columns in a lookup table.
pub const MAX_NUM_LOOKUP_COLS: usize = 12;
/// Max representation of a lookup table input
pub const MAX_LOOKUP_ABS: i128 = 8 * 2_i128.pow(MAX_PUBLIC_SRS);
pub const MAX_LOOKUP_ABS: i128 = (MAX_NUM_LOOKUP_COLS as i128) * 2_i128.pow(MAX_PUBLIC_SRS);
#[cfg(not(target_arch = "wasm32"))]
lazy_static! {
/// Max circuit area
pub static ref EZKL_MAX_CIRCUIT_AREA: Option<usize> =
if let Ok(max_circuit_area) = std::env::var("EZKL_MAX_CIRCUIT_AREA") {
Some(max_circuit_area.parse().unwrap_or(0))
} else {
None
};
}
#[cfg(target_arch = "wasm32")]
const EZKL_MAX_CIRCUIT_AREA: Option<usize> = None;
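A minimal reproduction of the optional-cap pattern above, reading `EZKL_MAX_CIRCUIT_AREA` once via `lazy_static` as the diff does:

```rust
use lazy_static::lazy_static;

lazy_static! {
    // Optional circuit-area cap, read from the environment once.
    static ref MAX_CIRCUIT_AREA: Option<usize> = std::env::var("EZKL_MAX_CIRCUIT_AREA")
        .ok()
        .map(|v| v.parse().unwrap_or(0));
}

fn main() {
    match *MAX_CIRCUIT_AREA {
        Some(cap) => println!("circuit area capped at {}", cap),
        None => println!("no circuit area cap set"),
    }
}
```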
/// circuit related errors.
#[derive(Debug, Error)]
@@ -117,15 +139,16 @@ pub enum GraphError {
MissingResults,
}
const ASSUMED_BLINDING_FACTORS: usize = 5;
/// Number of blinding factors assumed to be added by the prover
pub const ASSUMED_BLINDING_FACTORS: usize = 5;
/// The minimum number of rows in the grid
pub const MIN_LOGROWS: u32 = 6;
/// Maximum logrows of the public SRS (bn256::Fr::S - 2 = 26)
pub const MAX_PUBLIC_SRS: u32 = bn256::Fr::S - 2;
/// Lookup deg
pub const LOOKUP_DEG: usize = 5;
/// Total number of reserved blinding rows
pub const RESERVED_BLINDING_ROWS: usize = ASSUMED_BLINDING_FACTORS + RESERVED_BLINDING_ROWS_PAD;
use std::cell::RefCell;
@@ -154,6 +177,8 @@ pub struct GraphWitness {
pub max_lookup_inputs: i128,
/// min lookup input
pub min_lookup_inputs: i128,
/// max range check size
pub max_range_size: i128,
}
impl GraphWitness {
@@ -181,6 +206,7 @@ impl GraphWitness {
processed_outputs: None,
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
}
}
@@ -191,42 +217,41 @@ impl GraphWitness {
output_scales: Vec<crate::Scale>,
visibility: VarVisibility,
) {
let mut pretty_elements = PrettyElements::default();
pretty_elements.rescaled_inputs = self
.inputs
.iter()
.enumerate()
.map(|(i, t)| {
let scale = input_scales[i];
t.iter()
.map(|x| dequantize(*x, scale, 0.).to_string())
.collect()
})
.collect();
pretty_elements.inputs = self
.inputs
.iter()
.map(|t| t.iter().map(|x| format!("{:?}", x)).collect())
.collect();
pretty_elements.rescaled_outputs = self
.outputs
.iter()
.enumerate()
.map(|(i, t)| {
let scale = output_scales[i];
t.iter()
.map(|x| dequantize(*x, scale, 0.).to_string())
.collect()
})
.collect();
pretty_elements.outputs = self
.outputs
.iter()
.map(|t| t.iter().map(|x| format!("{:?}", x)).collect())
.collect();
let mut pretty_elements = PrettyElements {
rescaled_inputs: self
.inputs
.iter()
.enumerate()
.map(|(i, t)| {
let scale = input_scales[i];
t.iter()
.map(|x| dequantize(*x, scale, 0.).to_string())
.collect()
})
.collect(),
inputs: self
.inputs
.iter()
.map(|t| t.iter().map(|x| format!("{:?}", x)).collect())
.collect(),
rescaled_outputs: self
.outputs
.iter()
.enumerate()
.map(|(i, t)| {
let scale = output_scales[i];
t.iter()
.map(|x| dequantize(*x, scale, 0.).to_string())
.collect()
})
.collect(),
outputs: self
.outputs
.iter()
.map(|t| t.iter().map(|x| format!("{:?}", x)).collect())
.collect(),
..Default::default()
};
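The rescaled elements above are produced with `dequantize`. A hedged sketch, assuming ezkl's usual fixed-point convention where an integer q at scale s represents q / 2^s (the real function also handles field-element conversion and a shift term, both omitted here):

```rust
// Assumed fixed-point convention: value = q / 2^scale.
fn dequantize(q: i128, scale: u32) -> f64 {
    q as f64 / (1u128 << scale) as f64
}

fn main() {
    // at scale 7, the integer 13 represents 13 / 128 = 0.1015625
    assert_eq!(dequantize(13, 7), 0.1015625);
}
```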
if let Some(processed_inputs) = self.processed_inputs.clone() {
pretty_elements.processed_inputs = processed_inputs
@@ -292,16 +317,20 @@ impl GraphWitness {
/// Load the model input from a file
pub fn from_path(path: std::path::PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
let mut file = std::fs::File::open(path.clone())
let file = std::fs::File::open(path.clone())
.map_err(|_| format!("failed to load model at {}", path.display()))?;
let mut data = String::new();
file.read_to_string(&mut data)?;
serde_json::from_str(&data).map_err(|e| e.into())
let reader = std::io::BufReader::with_capacity(*EZKL_BUF_CAPACITY, file);
serde_json::from_reader(reader).map_err(|e| e.into())
}
/// Save the model input to a file
pub fn save(&self, path: std::path::PathBuf) -> Result<(), Box<dyn std::error::Error>> {
serde_json::to_writer(std::fs::File::create(path)?, &self).map_err(|e| e.into())
// use buf writer
let writer =
std::io::BufWriter::with_capacity(*EZKL_BUF_CAPACITY, std::fs::File::create(path)?);
serde_json::to_writer(writer, &self).map_err(|e| e.into())
}
///
@@ -335,19 +364,23 @@ impl ToPyObject for GraphWitness {
let inputs: Vec<Vec<String>> = self
.inputs
.iter()
.map(|x| x.iter().map(field_to_string_montgomery).collect())
.map(|x| x.iter().map(field_to_string).collect())
.collect();
let outputs: Vec<Vec<String>> = self
.outputs
.iter()
.map(|x| x.iter().map(field_to_string_montgomery).collect())
.map(|x| x.iter().map(field_to_string).collect())
.collect();
dict.set_item("inputs", inputs).unwrap();
dict.set_item("outputs", outputs).unwrap();
dict.set_item("max_lookup_inputs", self.max_lookup_inputs)
.unwrap();
dict.set_item("min_lookup_inputs", self.min_lookup_inputs)
.unwrap();
dict.set_item("max_range_size", self.max_range_size)
.unwrap();
if let Some(processed_inputs) = &self.processed_inputs {
//poseidon_hash
@@ -389,10 +422,7 @@ impl ToPyObject for GraphWitness {
#[cfg(feature = "python-bindings")]
fn insert_poseidon_hash_pydict(pydict: &PyDict, poseidon_hash: &Vec<Fp>) -> Result<(), PyErr> {
let poseidon_hash: Vec<String> = poseidon_hash
.iter()
.map(field_to_string_montgomery)
.collect();
let poseidon_hash: Vec<String> = poseidon_hash.iter().map(field_to_string).collect();
pydict.set_item("poseidon_hash", poseidon_hash)?;
Ok(())
@@ -421,6 +451,14 @@ pub struct GraphSettings {
pub total_assignments: usize,
/// total const size
pub total_const_size: usize,
/// total dynamic column size
pub total_dynamic_col_size: usize,
/// number of dynamic lookups
pub num_dynamic_lookups: usize,
/// number of shuffles
pub num_shuffles: usize,
/// total shuffle column size
pub total_shuffle_col_size: usize,
/// the shape of public inputs to the model (in order of appearance)
pub model_instance_shapes: Vec<Vec<usize>>,
/// model output scales
@@ -444,6 +482,30 @@ pub struct GraphSettings {
}
impl GraphSettings {
fn model_constraint_logrows(&self) -> u32 {
(self.num_rows as f64 + RESERVED_BLINDING_ROWS as f64)
.log2()
.ceil() as u32
}
fn dynamic_lookup_and_shuffle_logrows(&self) -> u32 {
(self.total_dynamic_col_size as f64 + self.total_shuffle_col_size as f64)
.log2()
.ceil() as u32
}
fn dynamic_lookup_and_shuffle_col_size(&self) -> usize {
self.total_dynamic_col_size + self.total_shuffle_col_size
}
fn module_constraint_logrows(&self) -> u32 {
(self.module_sizes.max_constraints() as f64).log2().ceil() as u32
}
fn constants_logrows(&self) -> u32 {
(self.total_const_size as f64).log2().ceil() as u32
}
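Each of these helpers reduces a resource count to a minimum k via ceil(log2(count)). A worked std-only example:

```rust
// Minimum logrows needed to fit `count` rows in a 2^k-row grid.
fn logrows_for(count: usize) -> u32 {
    (count as f64).log2().ceil() as u32
}

fn main() {
    // 1000 model rows plus, say, ~10 reserved blinding rows (illustrative)
    // still fit in 2^10 = 1024 rows
    assert_eq!(logrows_for(1000 + 10), 10);
    // 70_000 assigned constants need 2^17 rows
    assert_eq!(logrows_for(70_000), 17);
}
```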
/// calculate the total number of instances
pub fn total_instances(&self) -> Vec<usize> {
let mut instances: Vec<usize> = self
@@ -456,22 +518,33 @@ impl GraphSettings {
instances
}
/// calculate the log2 of the total number of instances
pub fn log2_total_instances(&self) -> u32 {
let sum = self.total_instances().iter().sum::<usize>();
// max between 1 and the log2 of the sums
std::cmp::max((sum as f64).log2().ceil() as u32, 1)
}
/// save params to file
pub fn save(&self, path: &std::path::PathBuf) -> Result<(), std::io::Error> {
let encoded = serde_json::to_string(&self)?;
let mut file = std::fs::File::create(path)?;
file.write_all(encoded.as_bytes())
// buf writer
let writer =
std::io::BufWriter::with_capacity(*EZKL_BUF_CAPACITY, std::fs::File::create(path)?);
serde_json::to_writer(writer, &self).map_err(|e| {
error!("failed to save settings file at {}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
}
/// load params from file
pub fn load(path: &std::path::PathBuf) -> Result<Self, std::io::Error> {
let mut file = std::fs::File::open(path).map_err(|e| {
error!("failed to open settings file at {}", e);
e
})?;
let mut data = String::new();
file.read_to_string(&mut data)?;
let res = serde_json::from_str(&data)?;
Ok(res)
// buf reader
let reader =
std::io::BufReader::with_capacity(*EZKL_BUF_CAPACITY, std::fs::File::open(path)?);
serde_json::from_reader(reader).map_err(|e| {
error!("failed to load settings file at {}", e);
std::io::Error::new(std::io::ErrorKind::Other, e)
})
}
/// Export the ezkl configuration as json
@@ -517,6 +590,16 @@ impl GraphSettings {
|| self.run_args.param_visibility.is_hashed()
}
/// requires dynamic lookup
pub fn requires_dynamic_lookup(&self) -> bool {
self.num_dynamic_lookups > 0
}
/// requires dynamic shuffle
pub fn requires_shuffle(&self) -> bool {
self.num_shuffles > 0
}
/// any kzg visibility
pub fn module_requires_kzg(&self) -> bool {
self.run_args.input_visibility.is_kzgcommit()
@@ -530,6 +613,7 @@ impl GraphSettings {
pub struct GraphConfig {
model_config: ModelConfig,
module_configs: ModuleConfigs,
circuit_size: CircuitSize,
}
/// Defines the circuit for a computational graph / model loaded from a `.onnx` file.
@@ -566,7 +650,7 @@ impl GraphCircuit {
///
pub fn save(&self, path: std::path::PathBuf) -> Result<(), Box<dyn std::error::Error>> {
let f = std::fs::File::create(path)?;
let writer = std::io::BufWriter::new(f);
let writer = std::io::BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
bincode::serialize_into(writer, &self)?;
Ok(())
}
@@ -574,11 +658,10 @@ impl GraphCircuit {
///
pub fn load(path: std::path::PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
// read bytes from file
let mut f = std::fs::File::open(&path)?;
let metadata = std::fs::metadata(&path)?;
let mut buffer = vec![0; metadata.len() as usize];
f.read_exact(&mut buffer)?;
let result = bincode::deserialize(&buffer)?;
let f = std::fs::File::open(path)?;
let reader = std::io::BufReader::with_capacity(*EZKL_BUF_CAPACITY, f);
let result: GraphCircuit = bincode::deserialize_from(reader)?;
Ok(result)
}
}
@@ -593,6 +676,17 @@ pub enum TestDataSource {
OnChain,
}
impl std::fmt::Display for TestDataSource {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
TestDataSource::File => write!(f, "file"),
TestDataSource::OnChain => write!(f, "on-chain"),
}
}
}
impl ToFlags for TestDataSource {}
impl From<String> for TestDataSource {
fn from(value: String) -> Self {
match value.to_lowercase().as_str() {
@@ -809,7 +903,7 @@ impl GraphCircuit {
let shapes = self.model().graph.input_shapes()?;
let scales = self.model().graph.get_input_scales();
let input_types = self.model().graph.get_input_types()?;
info!("input scales: {:?}", scales);
debug!("input scales: {:?}", scales);
match &data.input_data {
DataSource::File(file_data) => {
@@ -828,7 +922,7 @@ impl GraphCircuit {
let shapes = self.model().graph.input_shapes()?;
let scales = self.model().graph.get_input_scales();
let input_types = self.model().graph.get_input_types()?;
info!("input scales: {:?}", scales);
debug!("input scales: {:?}", scales);
self.process_data_source(&data.input_data, shapes, scales, input_types)
.await
@@ -954,39 +1048,47 @@ impl GraphCircuit {
Ok(data)
}
fn reserved_blinding_rows() -> f64 {
(ASSUMED_BLINDING_FACTORS + RESERVED_BLINDING_ROWS_PAD) as f64
}
fn calc_safe_lookup_range(
min_lookup_inputs: i128,
max_lookup_inputs: i128,
lookup_safety_margin: i128,
) -> Range {
fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: i128) -> Range {
let mut margin = (
lookup_safety_margin * min_lookup_inputs,
lookup_safety_margin * max_lookup_inputs,
lookup_safety_margin * min_max_lookup.0,
lookup_safety_margin * min_max_lookup.1,
);
if lookup_safety_margin == 1 {
margin.0 += 1;
margin.1 += 1;
margin.0 += 4;
margin.1 += 4;
}
margin
}
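A std-only sketch of `calc_safe_lookup_range` as written above: the observed (min, max) lookup inputs are scaled by the safety margin, and when the margin is 1 both ends are nudged by 4:

```rust
type Range = (i128, i128);

// Mirrors the logic in the diff; the +4 nudge at margin 1 is kept as-is.
fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: i128) -> Range {
    let mut margin = (
        lookup_safety_margin * min_max_lookup.0,
        lookup_safety_margin * min_max_lookup.1,
    );
    if lookup_safety_margin == 1 {
        margin.0 += 4;
        margin.1 += 4;
    }
    margin
}

fn main() {
    assert_eq!(calc_safe_lookup_range((-100, 250), 2), (-200, 500));
    assert_eq!(calc_safe_lookup_range((-100, 250), 1), (-96, 254));
}
```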
fn calc_num_cols(safe_range: Range, max_logrows: u32) -> usize {
let max_col_size = Table::<Fp>::cal_col_size(
max_logrows as usize,
Self::reserved_blinding_rows() as usize,
);
Table::<Fp>::num_cols_required(safe_range, max_col_size)
fn calc_num_cols(range_len: i128, max_logrows: u32) -> usize {
let max_col_size = Table::<Fp>::cal_col_size(max_logrows as usize, RESERVED_BLINDING_ROWS);
num_cols_required(range_len, max_col_size)
}
fn calc_min_logrows(
fn table_size_logrows(
&self,
safe_lookup_range: Range,
max_range_size: i128,
) -> Result<u32, Box<dyn std::error::Error>> {
// pick the range with the largest absolute size safe_lookup_range or max_range_size
let safe_range = std::cmp::max(
(safe_lookup_range.1 - safe_lookup_range.0).abs(),
max_range_size,
);
let min_bits = (safe_range as f64 + RESERVED_BLINDING_ROWS as f64 + 1.)
.log2()
.ceil() as u32;
Ok(min_bits)
}
/// calculate the minimum logrows required for the circuit
pub fn calc_min_logrows(
&mut self,
min_lookup_inputs: i128,
max_lookup_inputs: i128,
min_max_lookup: Range,
max_range_size: i128,
max_logrows: Option<u32>,
lookup_safety_margin: i128,
) -> Result<(), Box<dyn std::error::Error>> {
@@ -996,125 +1098,91 @@ impl GraphCircuit {
let mut max_logrows = std::cmp::max(max_logrows, MIN_LOGROWS);
let mut min_logrows = MIN_LOGROWS;
let reserved_blinding_rows = Self::reserved_blinding_rows();
let safe_lookup_range = Self::calc_safe_lookup_range(min_max_lookup, lookup_safety_margin);
// check if has overflowed max lookup input
if max_lookup_inputs > MAX_LOOKUP_ABS / lookup_safety_margin
|| min_lookup_inputs < -MAX_LOOKUP_ABS / lookup_safety_margin
{
let err_string = format!("max lookup input ({}) is too large", max_lookup_inputs);
if (min_max_lookup.1 - min_max_lookup.0).abs() > MAX_LOOKUP_ABS / lookup_safety_margin {
let err_string = format!("max lookup input {:?} is too large", min_max_lookup);
return Err(err_string.into());
}
let safe_range = Self::calc_safe_lookup_range(
min_lookup_inputs,
max_lookup_inputs,
lookup_safety_margin,
if max_range_size.abs() > MAX_LOOKUP_ABS {
let err_string = format!("max range check size {:?} is too large", max_range_size);
return Err(err_string.into());
}
// These are hard lower limits; we can't overflow the instances or module constraints
let instance_logrows = self.settings().log2_total_instances();
let module_constraint_logrows = self.settings().module_constraint_logrows();
let dynamic_lookup_logrows = self.settings().dynamic_lookup_and_shuffle_logrows();
min_logrows = std::cmp::max(
min_logrows,
// max of the instance logrows and the module constraint logrows and the dynamic lookup logrows is the lower limit
*[
instance_logrows,
module_constraint_logrows,
dynamic_lookup_logrows,
]
.iter()
.max()
.unwrap(),
);
// These are upper limits; going above them is wasteful, but they are not hard limits
let model_constraint_logrows = self.settings().model_constraint_logrows();
let min_bits = self.table_size_logrows(safe_lookup_range, max_range_size)?;
let constants_logrows = self.settings().constants_logrows();
max_logrows = std::cmp::min(
max_logrows,
// max of the model constraint logrows, min_bits, and the constants logrows is the upper limit
*[model_constraint_logrows, min_bits, constants_logrows]
.iter()
.max()
.unwrap(),
);
// we now have a min and max logrows
max_logrows = std::cmp::max(min_logrows, max_logrows);
// degrade the max logrows until the extended k is small enough
while min_logrows < max_logrows
&& !self.extended_k_is_small_enough(
min_logrows,
Self::calc_num_cols(safe_range, min_logrows),
)
{
min_logrows += 1;
}
if !self
.extended_k_is_small_enough(min_logrows, Self::calc_num_cols(safe_range, min_logrows))
{
let err_string = format!(
"extended k is too large to accommodate the quotient polynomial with logrows {}",
min_logrows
);
error!("{}", err_string);
return Err(err_string.into());
}
while min_logrows < max_logrows
&& !self.extended_k_is_small_enough(
max_logrows,
Self::calc_num_cols(safe_range, max_logrows),
)
&& !self.extended_k_is_small_enough(max_logrows, safe_lookup_range, max_range_size)
{
max_logrows -= 1;
}
if !self
.extended_k_is_small_enough(max_logrows, Self::calc_num_cols(safe_range, max_logrows))
{
if !self.extended_k_is_small_enough(max_logrows, safe_lookup_range, max_range_size) {
let err_string = format!(
"extended k is too large to accommodate the quotient polynomial with logrows {}",
max_logrows
);
error!("{}", err_string);
debug!("{}", err_string);
return Err(err_string.into());
}
let min_bits = ((safe_range.1 - safe_range.0) as f64 + reserved_blinding_rows + 1.)
.log2()
.ceil() as usize;
let min_rows_from_constraints = (self.settings().num_rows as f64 + reserved_blinding_rows)
.log2()
.ceil() as usize;
let mut logrows = std::cmp::max(min_bits, min_rows_from_constraints);
// if public input then public inputs col will have public inputs len
if self.settings().run_args.input_visibility.is_public()
|| self.settings().run_args.output_visibility.is_public()
{
let mut max_instance_len = self
.model()
.instance_shapes()?
.iter()
.fold(0, |acc, x| std::cmp::max(acc, x.iter().product::<usize>()))
as f64
+ reserved_blinding_rows;
// if there are modules then we need to add the max module size
if self.settings().uses_modules() {
max_instance_len += self
.settings()
.module_sizes
.num_instances()
.iter()
.sum::<usize>() as f64;
}
let instance_len_logrows = (max_instance_len).log2().ceil() as usize;
logrows = std::cmp::max(logrows, instance_len_logrows);
// this is for fixed const columns
}
// ensure logrows is at least 4
logrows = std::cmp::max(logrows, min_logrows as usize);
logrows = std::cmp::min(logrows, max_logrows as usize);
let logrows = max_logrows;
let model = self.model().clone();
let settings_mut = self.settings_mut();
settings_mut.run_args.lookup_range = safe_range;
settings_mut.run_args.logrows = logrows as u32;
settings_mut.run_args.lookup_range = safe_lookup_range;
settings_mut.run_args.logrows = logrows;
*settings_mut = GraphCircuit::new(model, &settings_mut.run_args)?
.settings()
.clone();
// recalculate the total const size given the new logrows
let total_const_len = settings_mut.total_const_size;
let const_len_logrows = (total_const_len as f64).log2().ceil() as u32;
settings_mut.run_args.logrows =
std::cmp::max(settings_mut.run_args.logrows, const_len_logrows);
// recalculate the total number of constraints given the new logrows
let min_rows_from_constraints = (settings_mut.num_rows as f64 + reserved_blinding_rows)
.log2()
.ceil() as u32;
settings_mut.run_args.logrows =
std::cmp::max(settings_mut.run_args.logrows, min_rows_from_constraints);
// recalculate the logrows if there has been overflow on the constants
settings_mut.run_args.logrows = std::cmp::max(
settings_mut.run_args.logrows,
settings_mut.constants_logrows(),
);
// recalculate the logrows if there has been overflow for the model constraints
settings_mut.run_args.logrows = std::cmp::max(
settings_mut.run_args.logrows,
settings_mut.model_constraint_logrows(),
);
settings_mut.run_args.logrows = std::cmp::min(max_logrows, settings_mut.run_args.logrows);
info!(
debug!(
"setting lookup_range to: {:?}, setting logrows to: {}",
self.settings().run_args.lookup_range,
self.settings().run_args.logrows
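A worked numeric example of the clamping performed in `calc_min_logrows`: hard lower bounds come from instances, module constraints and dynamic lookups; soft upper bounds from model constraints, lookup-table bits and constants. All figures below are made up for illustration:

```rust
fn main() {
    // hard lower bounds (cannot go below any of these)
    let instance_logrows = 7u32;
    let module_constraint_logrows = 11u32;
    let dynamic_lookup_logrows = 9u32;
    let min_logrows = *[instance_logrows, module_constraint_logrows, dynamic_lookup_logrows]
        .iter()
        .max()
        .unwrap(); // 11

    // soft upper bounds (going above the largest of these is wasteful)
    let model_constraint_logrows = 15u32;
    let lookup_table_bits = 13u32;
    let constants_logrows = 10u32;
    let user_max_logrows = 17u32;
    let soft_cap = *[model_constraint_logrows, lookup_table_bits, constants_logrows]
        .iter()
        .max()
        .unwrap(); // 15
    let max_logrows = user_max_logrows.min(soft_cap); // min(17, 15) = 15

    let logrows = max_logrows.max(min_logrows);
    assert_eq!((min_logrows, max_logrows, logrows), (11, 15, 15));
}
```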
@@ -1123,12 +1191,48 @@ impl GraphCircuit {
Ok(())
}
fn extended_k_is_small_enough(&self, k: u32, num_lookup_cols: usize) -> bool {
let max_degree = self.settings().run_args.num_inner_cols + 2;
let max_lookup_degree = LOOKUP_DEG + num_lookup_cols - 1; // num_lookup_cols - 1 is the degree of the lookup synthetic selector
fn extended_k_is_small_enough(
&self,
k: u32,
safe_lookup_range: Range,
max_range_size: i128,
) -> bool {
// if num cols is too large then the extended k is too large
if Self::calc_num_cols(safe_lookup_range.1 - safe_lookup_range.0, k) > MAX_NUM_LOOKUP_COLS
|| Self::calc_num_cols(max_range_size, k) > MAX_NUM_LOOKUP_COLS
{
return false;
}
let max_degree = std::cmp::max(max_degree, max_lookup_degree);
let mut settings = self.settings().clone();
settings.run_args.lookup_range = safe_lookup_range;
settings.run_args.logrows = k;
settings.required_range_checks = vec![(0, max_range_size)];
let mut cs = ConstraintSystem::default();
// if unix get a gag
#[cfg(unix)]
let _r = match Gag::stdout() {
Ok(g) => Some(g),
_ => None,
};
#[cfg(unix)]
let _g = match Gag::stderr() {
Ok(g) => Some(g),
_ => None,
};
Self::configure_with_params(&mut cs, settings);
// drop the gag
#[cfg(unix)]
drop(_r);
#[cfg(unix)]
drop(_g);
#[cfg(feature = "mv-lookup")]
let cs = cs.chunk_lookups();
// quotient_poly_degree * params.n - 1 is the degree of the quotient polynomial
let max_degree = cs.degree();
let quotient_poly_degree = (max_degree - 1) as u64;
// n = 2^k
let n = 1u64 << k;
@@ -1143,29 +1247,13 @@ impl GraphCircuit {
true
}
/// Calibrate the circuit to the supplied data.
pub fn calibrate_from_min_max(
&mut self,
min_lookup_inputs: i128,
max_lookup_inputs: i128,
max_logrows: Option<u32>,
lookup_safety_margin: i128,
) -> Result<(), Box<dyn std::error::Error>> {
self.calc_min_logrows(
min_lookup_inputs,
max_lookup_inputs,
max_logrows,
lookup_safety_margin,
)?;
Ok(())
}
/// Runs the forward pass of the model / graph of computations and any associated hashing.
pub fn forward(
&self,
inputs: &mut [Tensor<Fp>],
vk: Option<&VerifyingKey<G1Affine>>,
srs: Option<&ParamsKZG<Bn256>>,
throw_range_check_error: bool,
) -> Result<GraphWitness, Box<dyn std::error::Error>> {
let original_inputs = inputs.to_vec();
@@ -1206,7 +1294,9 @@ impl GraphCircuit {
}
}
let mut model_results = self.model().forward(inputs)?;
let mut model_results =
self.model()
.forward(inputs, &self.settings().run_args, throw_range_check_error)?;
if visibility.output.requires_processing() {
let module_outlets = visibility.output.overwrites_inputs();
@@ -1249,6 +1339,7 @@ impl GraphCircuit {
processed_outputs,
max_lookup_inputs: model_results.max_lookup_inputs,
min_lookup_inputs: model_results.min_lookup_inputs,
max_range_size: model_results.max_range_size,
};
witness.generate_rescaled_elements(
@@ -1366,7 +1457,6 @@ impl GraphCircuit {
}
}
#[cfg(not(target_arch = "wasm32"))]
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
struct CircuitSize {
num_instances: usize,
@@ -1374,20 +1464,22 @@ struct CircuitSize {
num_fixed: usize,
num_challenges: usize,
num_selectors: usize,
logrows: u32,
}
#[cfg(not(target_arch = "wasm32"))]
impl CircuitSize {
pub fn from_cs(cs: &ConstraintSystem<Fp>) -> Self {
pub fn from_cs(cs: &ConstraintSystem<Fp>, logrows: u32) -> Self {
CircuitSize {
num_instances: cs.num_instance_columns(),
num_advice_columns: cs.num_advice_columns(),
num_fixed: cs.num_fixed_columns(),
num_challenges: cs.num_challenges(),
num_selectors: cs.num_selectors(),
logrows,
}
}
#[cfg(not(target_arch = "wasm32"))]
/// Export the ezkl configuration as json
pub fn as_json(&self) -> Result<String, Box<dyn std::error::Error>> {
let serialized = match serde_json::to_string(&self) {
@@ -1398,6 +1490,25 @@ impl CircuitSize {
};
Ok(serialized)
}
/// number of columns
pub fn num_columns(&self) -> usize {
self.num_instances + self.num_advice_columns + self.num_fixed
}
/// area of the circuit
pub fn area(&self) -> usize {
self.num_columns() * (1 << self.logrows)
}
/// area less than max
pub fn area_less_than_max(&self) -> bool {
if EZKL_MAX_CIRCUIT_AREA.is_some() {
self.area() < EZKL_MAX_CIRCUIT_AREA.unwrap()
} else {
true
}
}
}
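A std-only sketch of the area guard introduced with `CircuitSize`: the area is columns x 2^logrows, and synthesis should bail out when an optional cap is set and exceeded:

```rust
struct CircuitSize {
    num_instances: usize,
    num_advice_columns: usize,
    num_fixed: usize,
    logrows: u32,
}

impl CircuitSize {
    fn num_columns(&self) -> usize {
        self.num_instances + self.num_advice_columns + self.num_fixed
    }
    fn area(&self) -> usize {
        self.num_columns() * (1 << self.logrows)
    }
    // `cap` plays the role of EZKL_MAX_CIRCUIT_AREA in the diff.
    fn area_less_than_max(&self, cap: Option<usize>) -> bool {
        cap.map_or(true, |max| self.area() < max)
    }
}

fn main() {
    let size = CircuitSize { num_instances: 1, num_advice_columns: 6, num_fixed: 2, logrows: 17 };
    assert_eq!(size.area(), 9 * 131_072);
    assert!(size.area_less_than_max(None));
    assert!(!size.area_less_than_max(Some(1 << 20)));
}
```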
impl Circuit<Fp> for GraphCircuit {
@@ -1435,34 +1546,18 @@ impl Circuit<Fp> for GraphCircuit {
params.run_args.logrows as usize,
);
let mut vars = ModelVars::new(
cs,
params.run_args.logrows as usize,
params.total_assignments,
params.run_args.num_inner_cols,
params.total_const_size,
params.module_requires_fixed(),
);
let mut vars = ModelVars::new(cs, &params);
module_configs.configure_complex_modules(cs, visibility, params.module_sizes.clone());
vars.instantiate_instance(
cs,
params.model_instance_shapes,
params.model_instance_shapes.clone(),
params.run_args.input_scale,
module_configs.instance,
);
let base = Model::configure(
cs,
&vars,
params.run_args.lookup_range,
params.run_args.logrows as usize,
params.required_lookups,
params.required_range_checks,
params.check_mode,
)
.unwrap();
let base = Model::configure(cs, &vars, &params).unwrap();
let model_config = ModelConfig { base, vars };
@@ -1472,10 +1567,12 @@ impl Circuit<Fp> for GraphCircuit {
(cs.degree() as f32).log2().ceil()
);
let circuit_size = CircuitSize::from_cs(cs, params.run_args.logrows);
#[cfg(not(target_arch = "wasm32"))]
info!(
debug!(
"circuit size: \n {}",
CircuitSize::from_cs(cs)
circuit_size
.as_json()
.unwrap()
.to_colored_json_auto()
@@ -1485,6 +1582,7 @@ impl Circuit<Fp> for GraphCircuit {
GraphConfig {
model_config,
module_configs,
circuit_size,
}
}
@@ -1497,6 +1595,16 @@ impl Circuit<Fp> for GraphCircuit {
config: Self::Config,
mut layouter: impl Layouter<Fp>,
) -> Result<(), PlonkError> {
// check if the circuit area is less than the max
if !config.circuit_size.area_less_than_max() {
error!(
"circuit area {} is larger than the max allowed area {}",
config.circuit_size.area(),
EZKL_MAX_CIRCUIT_AREA.unwrap()
);
return Err(PlonkError::Synthesis);
}
trace!("Setting input in synthesize");
let input_vis = &self.settings().run_args.input_visibility;
let output_vis = &self.settings().run_args.output_visibility;

View File

@@ -10,7 +10,6 @@ use crate::circuit::table::Range;
use crate::circuit::Input;
use crate::circuit::InputType;
use crate::circuit::Unknown;
use crate::fieldutils::felt_to_i128;
use crate::tensor::ValType;
use crate::{
circuit::{lookup::LookupOp, BaseConfig as PolyConfig, CheckMode, Op},
@@ -57,6 +56,8 @@ use unzip_n::unzip_n;
unzip_n!(pub 3);
#[cfg(not(target_arch = "wasm32"))]
type TractResult = (Graph<TypedFact, Box<dyn TypedOp>>, SymbolValues);
/// The result of a forward pass.
#[derive(Clone, Debug)]
pub struct ForwardResult {
@@ -66,6 +67,19 @@ pub struct ForwardResult {
pub max_lookup_inputs: i128,
/// The minimum value of any input to a lookup operation.
pub min_lookup_inputs: i128,
/// The max range check size
pub max_range_size: i128,
}
impl From<DummyPassRes> for ForwardResult {
fn from(res: DummyPassRes) -> Self {
Self {
outputs: res.outputs,
max_lookup_inputs: res.max_lookup_inputs,
min_lookup_inputs: res.min_lookup_inputs,
max_range_size: res.max_range_size,
}
}
}
/// A circuit configuration for the entirety of a model loaded from an Onnx file.
@@ -80,6 +94,37 @@ pub struct ModelConfig {
/// Representation of execution graph
pub type NodeGraph = BTreeMap<usize, NodeType>;
/// The result of a dummy (non-proving) layout pass over the model graph.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct DummyPassRes {
/// number of rows used
pub num_rows: usize,
/// num dynamic lookups
pub num_dynamic_lookups: usize,
/// dynamic lookup col size
pub dynamic_lookup_col_coord: usize,
/// num shuffles
pub num_shuffles: usize,
/// shuffle col size
pub shuffle_col_coord: usize,
/// linear coordinate
pub linear_coord: usize,
/// total const size
pub total_const_size: usize,
/// lookup ops
pub lookup_ops: HashSet<LookupOp>,
/// range checks
pub range_checks: HashSet<Range>,
/// max lookup inputs
pub max_lookup_inputs: i128,
/// min lookup inputs
pub min_lookup_inputs: i128,
/// max range check size
pub max_range_size: i128,
/// outputs
pub outputs: Vec<Tensor<Fp>>,
}
/// A struct for loading from an Onnx file and converting a computational graph to a circuit.
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct Model {
@@ -234,20 +279,7 @@ impl NodeType {
NodeType::SubGraph { out_dims, .. } => out_dims.clone(),
}
}
/// Returns the lookups required by a graph
pub fn required_lookups(&self) -> Vec<LookupOp> {
match self {
NodeType::Node(n) => n.opkind.required_lookups(),
NodeType::SubGraph { model, .. } => model.required_lookups(),
}
}
/// Returns the range checks required by a graph
pub fn required_range_checks(&self) -> Vec<Range> {
match self {
NodeType::Node(n) => n.opkind.required_range_checks(),
NodeType::SubGraph { model, .. } => model.required_range_checks(),
}
}
/// Returns the scales of the node's output.
pub fn out_scales(&self) -> Vec<crate::Scale> {
match self {
@@ -432,23 +464,6 @@ impl ParsedNodes {
}
impl Model {
fn required_lookups(&self) -> Vec<LookupOp> {
self.graph
.nodes
.values()
.flat_map(|n| n.required_lookups())
.collect_vec()
}
///
fn required_range_checks(&self) -> Vec<Range> {
self.graph
.nodes
.values()
.flat_map(|n| n.required_range_checks())
.collect_vec()
}
/// Creates a `Model` from a specified path to an Onnx file.
/// # Arguments
/// * `reader` - A reader for an Onnx file.
@@ -493,7 +508,7 @@ impl Model {
) -> Result<GraphSettings, Box<dyn Error>> {
let instance_shapes = self.instance_shapes()?;
#[cfg(not(target_arch = "wasm32"))]
info!(
debug!(
"{} {} {}",
"model has".blue(),
instance_shapes.len().to_string().blue(),
@@ -501,42 +516,43 @@ impl Model {
);
// this is the total number of variables we will need to allocate
// for the circuit
let (num_rows, linear_coord, total_const_size) =
self.dummy_layout(run_args, &self.graph.input_shapes()?)?;
let default_value = if !self.visibility.input.is_fixed() {
ValType::Value(Value::<Fp>::unknown())
} else {
ValType::Constant(Fp::ONE)
};
// extract the requisite lookup ops from the model
let mut lookup_ops: Vec<LookupOp> = self.required_lookups();
// extract the requisite lookup ops from the model
let mut range_checks: Vec<Range> = self.required_range_checks();
let inputs: Vec<ValTensor<Fp>> = self
.graph
.input_shapes()?
.iter()
.map(|shape| {
let mut t: ValTensor<Fp> =
vec![default_value.clone(); shape.iter().product()].into();
t.reshape(shape)?;
Ok(t)
})
.collect::<Result<Vec<_>, Box<dyn Error>>>()?;
let res = self.dummy_layout(run_args, &inputs, false)?;
// if we're using percentage tolerance, we need to add the necessary range check ops for it.
if run_args.tolerance.val > 0.0 {
for scale in self.graph.get_output_scales()? {
let mut tolerance = run_args.tolerance;
tolerance.scale = scale_to_multiplier(scale).into();
let opkind: Box<dyn Op<Fp>> = Box::new(HybridOp::RangeCheck(tolerance));
lookup_ops.extend(opkind.required_lookups());
}
}
let set: HashSet<_> = lookup_ops.drain(..).collect(); // dedup
lookup_ops.extend(set.into_iter().sorted());
let set: HashSet<_> = range_checks.drain(..).collect(); // dedup
range_checks.extend(set.into_iter().sorted());
Ok(GraphSettings {
run_args: run_args.clone(),
model_instance_shapes: instance_shapes,
module_sizes: crate::graph::modules::ModuleSizes::default(),
num_rows,
total_assignments: linear_coord,
required_lookups: lookup_ops,
required_range_checks: range_checks,
num_rows: res.num_rows,
total_assignments: res.linear_coord,
required_lookups: res.lookup_ops.into_iter().collect(),
required_range_checks: res.range_checks.into_iter().collect(),
model_output_scales: self.graph.get_output_scales()?,
model_input_scales: self.graph.get_input_scales(),
total_const_size,
num_dynamic_lookups: res.num_dynamic_lookups,
total_dynamic_col_size: res.dynamic_lookup_col_coord,
num_shuffles: res.num_shuffles,
total_shuffle_col_size: res.shuffle_col_coord,
total_const_size: res.total_const_size,
check_mode,
version: env!("CARGO_PKG_VERSION").to_string(),
num_blinding_factors: None,
@@ -557,205 +573,18 @@ impl Model {
/// * `reader` - A reader for an Onnx file.
/// * `model_inputs` - A vector of [Tensor]s to use as inputs to the model.
/// * `run_args` - [RunArgs]
pub fn forward(&self, model_inputs: &[Tensor<Fp>]) -> Result<ForwardResult, Box<dyn Error>> {
let mut results: BTreeMap<&usize, Vec<Tensor<Fp>>> = BTreeMap::new();
let mut max_lookup_inputs = 0;
let mut min_lookup_inputs = 0;
let input_shapes = self.graph.input_shapes()?;
for (i, input_idx) in self.graph.inputs.iter().enumerate() {
let mut input = model_inputs[i].clone();
input.reshape(&input_shapes[i])?;
results.insert(input_idx, vec![input]);
}
for (idx, n) in self.graph.nodes.iter() {
let mut inputs = vec![];
if n.is_input() {
let t = results.get(idx).ok_or(GraphError::MissingResults)?[0].clone();
inputs.push(t);
} else {
for (idx, outlet) in n.inputs().iter() {
match results.get(&idx) {
Some(value) => inputs.push(value[*outlet].clone()),
None => return Err(Box::new(GraphError::MissingNode(*idx))),
}
}
};
debug!("executing {}: {}", idx, n.as_str());
debug!("dims: {:?}", n.out_dims());
debug!(
"input_dims: {:?}",
inputs.iter().map(|x| x.dims()).collect::<Vec<_>>()
);
if n.is_lookup() {
let (mut min, mut max) = (0, 0);
for i in &inputs {
max = max.max(
i.iter()
.map(|x| felt_to_i128(*x))
.max()
.ok_or("missing max")?,
);
min = min.min(
i.iter()
.map(|x| felt_to_i128(*x))
.min()
.ok_or("missing min")?,
);
}
max_lookup_inputs = max_lookup_inputs.max(max);
min_lookup_inputs = min_lookup_inputs.min(min);
debug!("max lookup inputs: {}", max);
debug!("min lookup inputs: {}", min);
}
match n {
NodeType::Node(n) => {
// execute the op
let start = instant::Instant::now();
let mut res = Op::<Fp>::f(&n.opkind, &inputs)?;
res.output.reshape(&n.out_dims)?;
let elapsed = start.elapsed();
trace!("op took: {:?}", elapsed);
// see if any of the intermediate lookup calcs are the max
if !res.intermediate_lookups.is_empty() {
let (mut min, mut max) = (0, 0);
for i in &res.intermediate_lookups {
max = max.max(i.clone().into_iter().max().ok_or("missing max")?);
min = min.min(i.clone().into_iter().min().ok_or("missing min")?);
}
max_lookup_inputs = max_lookup_inputs.max(max);
min_lookup_inputs = min_lookup_inputs.min(min);
debug!("intermediate max lookup inputs: {}", max);
debug!("intermediate min lookup inputs: {}", min);
}
debug!(
"------------ output node int {}: {} \n ------------ float: {} \n ------------ max: {} \n ------------ min: {} \n ------------ scale: {}",
idx,
res.output.map(crate::fieldutils::felt_to_i32).show(),
res.output
.map(|x| crate::fieldutils::felt_to_f64(x)
/ scale_to_multiplier(n.out_scale))
.show(),
res.output.clone().into_iter().map(crate::fieldutils::felt_to_i128).max().unwrap_or(0),
res.output.clone().into_iter().map(crate::fieldutils::felt_to_i128).min().unwrap_or(0),
n.out_scale
);
results.insert(idx, vec![res.output]);
}
NodeType::SubGraph {
model,
output_mappings,
input_mappings,
inputs: input_tuple,
..
} => {
let orig_inputs = inputs.clone();
let input_mappings = input_mappings.clone();
let input_dims = inputs.iter().map(|inp| inp.dims());
let num_iter = number_of_iterations(&input_mappings, input_dims.collect());
debug!(
"{} iteration(s) in a subgraph with inputs {:?} and sources {:?}",
num_iter, input_tuple, model.graph.inputs
);
debug!("input_mappings: {:?}", input_mappings);
let mut full_results: Vec<Tensor<Fp>> = vec![];
for i in 0..num_iter {
// replace the Stacked input with the current chunk iter
for ((mapping, inp), og_input) in
input_mappings.iter().zip(&mut inputs).zip(&orig_inputs)
{
if let InputMapping::Stacked { axis, chunk } = mapping {
let start = i * chunk;
let end = (i + 1) * chunk;
let t = crate::tensor::ops::slice(og_input, axis, &start, &end)?;
*inp = t;
}
}
let res = model.forward(&inputs)?;
// recursively get the max lookup inputs for subgraphs
max_lookup_inputs = max_lookup_inputs.max(res.max_lookup_inputs);
min_lookup_inputs = min_lookup_inputs.min(res.min_lookup_inputs);
let mut outlets = BTreeMap::new();
for (mappings, outlet_res) in output_mappings.iter().zip(res.outputs) {
for mapping in mappings {
match mapping {
OutputMapping::Single { outlet, .. } => {
outlets.insert(outlet, outlet_res.clone());
}
OutputMapping::Stacked { outlet, axis, .. } => {
if !full_results.is_empty() {
let stacked_res = crate::tensor::ops::concat(
&[&full_results[*outlet], &outlet_res],
*axis,
)?;
outlets.insert(outlet, stacked_res);
} else {
outlets.insert(outlet, outlet_res.clone());
}
}
}
}
}
full_results = outlets.into_values().collect_vec();
let output_states = output_state_idx(output_mappings);
let input_states = input_state_idx(&input_mappings);
assert_eq!(input_states.len(), output_states.len());
for (input_idx, output_idx) in input_states.iter().zip(output_states) {
inputs[*input_idx] = full_results[output_idx].clone();
}
}
trace!(
"------------ output subgraph node {}: {:?}",
idx,
full_results
.iter()
.map(|x|
// convert to tensor i32
x.map(crate::fieldutils::felt_to_i32).show())
.collect_vec()
);
results.insert(idx, full_results);
}
}
}
let output_nodes = self.graph.outputs.iter();
debug!(
"model outputs are nodes: {:?}",
output_nodes.clone().collect_vec()
);
let outputs = output_nodes
.map(|(idx, outlet)| {
Ok(results.get(&idx).ok_or(GraphError::MissingResults)?[*outlet].clone())
})
.collect::<Result<Vec<_>, GraphError>>()?;
let res = ForwardResult {
outputs,
max_lookup_inputs,
min_lookup_inputs,
};
Ok(res)
pub fn forward(
&self,
model_inputs: &[Tensor<Fp>],
run_args: &RunArgs,
throw_range_check_error: bool,
) -> Result<ForwardResult, Box<dyn Error>> {
let valtensor_inputs: Vec<ValTensor<Fp>> = model_inputs
.iter()
.map(|x| x.map(|elem| ValType::Value(Value::known(elem))).into())
.collect();
let res = self.dummy_layout(run_args, &valtensor_inputs, throw_range_check_error)?;
Ok(res.into())
}
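
The rewritten forward pass reuses dummy_layout by wrapping concrete field elements as known witness values. A minimal sketch of that wrapping using halo2's Value directly, assuming only the imports already used elsewhere in the crate; the real code then converts the result into a ValTensor:

use halo2_proofs::circuit::Value;
use halo2curves::bn256::Fr as Fp;

fn main() {
    // concrete inputs produced by quantization
    let concrete: Vec<Fp> = vec![Fp::from(1u64), Fp::from(2u64), Fp::from(3u64)];
    // each element becomes a known Value, so the same layout code can both
    // size the circuit and evaluate the model
    let known: Vec<Value<Fp>> = concrete.into_iter().map(Value::known).collect();
    assert_eq!(known.len(), 3);
}
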
/// Loads an Onnx model from a specified path.
@@ -767,7 +596,7 @@ impl Model {
fn load_onnx_using_tract(
reader: &mut dyn std::io::Read,
run_args: &RunArgs,
) -> Result<(Graph<TypedFact, Box<dyn TypedOp>>, SymbolValues), Box<dyn Error>> {
) -> Result<TractResult, Box<dyn Error>> {
use tract_onnx::{
tract_core::internal::IntoArcTensor, tract_hir::internal::GenericFactoid,
};
@@ -806,7 +635,7 @@ impl Model {
for (symbol, value) in run_args.variables.iter() {
let symbol = model.symbol_table.sym(symbol);
symbol_values = symbol_values.with(&symbol, *value as i64);
info!("set {} to {}", symbol, value);
debug!("set {} to {}", symbol, value);
}
// Note: do not optimize the model, as the layout will depend on underlying hardware
@@ -1066,6 +895,7 @@ impl Model {
i,
symbol_values,
run_args.div_rebasing,
run_args.rebase_frac_zero_constants,
)?;
if let Some(ref scales) = override_input_scales {
if let Some(inp) = n.opkind.get_input() {
@@ -1185,35 +1015,51 @@ impl Model {
/// # Arguments
/// * `meta` - The constraint system.
/// * `vars` - The variables for the circuit.
/// * `run_args` - [RunArgs]
/// * `required_lookups` - The required lookup operations for the circuit.
/// * `settings` - [GraphSettings]
pub fn configure(
meta: &mut ConstraintSystem<Fp>,
vars: &ModelVars<Fp>,
lookup_range: Range,
logrows: usize,
required_lookups: Vec<LookupOp>,
required_range_checks: Vec<Range>,
check_mode: CheckMode,
settings: &GraphSettings,
) -> Result<PolyConfig<Fp>, Box<dyn Error>> {
info!("configuring model");
debug!("configuring model");
let lookup_range = settings.run_args.lookup_range;
let logrows = settings.run_args.logrows as usize;
let required_lookups = settings.required_lookups.clone();
let required_range_checks = settings.required_range_checks.clone();
let mut base_gate = PolyConfig::configure(
meta,
vars.advices[0..2].try_into()?,
&vars.advices[2],
check_mode,
settings.check_mode,
);
// set scale for HybridOp::RangeCheck and call self.conf_lookup on that op for percentage tolerance case
let input = &vars.advices[0];
let output = &vars.advices[1];
let index = &vars.advices[2];
let output = &vars.advices[2];
let index = &vars.advices[1];
for op in required_lookups {
base_gate.configure_lookup(meta, input, output, index, lookup_range, logrows, &op)?;
}
for range in required_range_checks {
base_gate.configure_range_check(meta, input, range)?;
base_gate.configure_range_check(meta, input, index, range, logrows)?;
}
if settings.requires_dynamic_lookup() {
base_gate.configure_dynamic_lookup(
meta,
vars.advices[0..3].try_into()?,
vars.advices[3..6].try_into()?,
)?;
}
if settings.requires_shuffle() {
base_gate.configure_shuffles(
meta,
vars.advices[0..2].try_into()?,
vars.advices[3..5].try_into()?,
)?;
}
Ok(base_gate)
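
The dynamic-lookup and shuffle configurations above hand fixed windows of the advice columns to each sub-config via slice-to-array conversions. A standalone illustration of that pattern, with plain integers standing in for advice columns:

fn main() {
    // stand-ins for the six advice columns allocated when dynamic lookups are enabled
    let advices: Vec<u32> = (0..6).collect();
    // one window for the lookup queries, one for the dynamic tables,
    // mirroring advices[0..3] and advices[3..6] above
    let query_cols: [u32; 3] = advices[0..3].try_into().unwrap();
    let table_cols: [u32; 3] = advices[3..6].try_into().unwrap();
    println!("queries: {:?}, tables: {:?}", query_cols, table_cols);
}
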
@@ -1326,7 +1172,7 @@ impl Model {
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
info!(
debug!(
"{} {} {} (coord={}, constants={})",
"model uses".blue(),
num_rows.to_string().blue(),
@@ -1368,18 +1214,29 @@ impl Model {
};
debug!(
"laying out {}: {}, row:{}, coord:{}, total_constants: {}",
"laying out {}: {}, row:{}, coord:{}, total_constants: {}, max_lookup_inputs: {}, min_lookup_inputs: {}",
idx,
node.as_str(),
region.row(),
region.linear_coord(),
region.total_constants()
region.total_constants(),
region.max_lookup_inputs(),
region.min_lookup_inputs()
);
debug!("dims: {:?}", node.out_dims());
debug!(
"input_dims {:?}",
values.iter().map(|v| v.dims()).collect_vec()
);
debug!("output scales: {:?}", node.out_scales());
debug!("input indices: {:?}", node.inputs());
debug!(
"input scales: {:?}",
node.inputs()
.iter()
.map(|(idx, outlet)| self.graph.nodes[idx].out_scales()[*outlet])
.collect_vec()
);
match &node {
NodeType::Node(n) => {
@@ -1522,28 +1379,14 @@ impl Model {
pub fn dummy_layout(
&self,
run_args: &RunArgs,
input_shapes: &[Vec<usize>],
) -> Result<(usize, usize, usize), Box<dyn Error>> {
info!("calculating num of constraints using dummy model layout...");
inputs: &[ValTensor<Fp>],
throw_range_check_error: bool,
) -> Result<DummyPassRes, Box<dyn Error>> {
debug!("calculating num of constraints using dummy model layout...");
let start_time = instant::Instant::now();
let mut results = BTreeMap::<usize, Vec<ValTensor<Fp>>>::new();
let default_value = if !self.visibility.input.is_fixed() {
ValType::Value(Value::<Fp>::unknown())
} else {
ValType::Constant(Fp::ONE)
};
let inputs: Vec<ValTensor<Fp>> = input_shapes
.iter()
.map(|shape| {
let mut t: ValTensor<Fp> =
vec![default_value.clone(); shape.iter().product()].into();
t.reshape(shape)?;
Ok(t)
})
.collect::<Result<Vec<_>, Box<dyn Error>>>()?;
for (i, input_idx) in self.graph.inputs.iter().enumerate() {
results.insert(*input_idx, vec![inputs[i].clone()]);
@@ -1556,7 +1399,7 @@ impl Model {
vars: ModelVars::new_dummy(),
};
let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols);
let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols, throw_range_check_error);
let outputs = self.layout_nodes(&mut model_config, &mut region, &mut results)?;
@@ -1567,27 +1410,26 @@ impl Model {
ValType::Constant(Fp::ONE)
};
let comparator = outputs
let output_scales = self.graph.get_output_scales()?;
let res = outputs
.iter()
.map(|x| {
let mut v: ValTensor<Fp> =
vec![default_value.clone(); x.dims().iter().product::<usize>()].into();
v.reshape(x.dims())?;
Ok(v)
})
.collect::<Result<Vec<_>, Box<dyn Error>>>()?;
.enumerate()
.map(|(i, output)| {
let mut tolerance = run_args.tolerance;
tolerance.scale = scale_to_multiplier(output_scales[i]).into();
let mut comparator: ValTensor<Fp> =
vec![default_value.clone(); output.dims().iter().product::<usize>()].into();
comparator.reshape(output.dims())?;
let _ = outputs
.into_iter()
.zip(comparator)
.map(|(o, c)| {
dummy_config.layout(
&mut region,
&[o, c],
Box::new(HybridOp::RangeCheck(run_args.tolerance)),
&[output.clone(), comparator],
Box::new(HybridOp::RangeCheck(tolerance)),
)
})
.collect::<Result<Vec<_>, _>>()?;
.collect::<Result<Vec<_>, _>>();
res?;
} else if !self.visibility.output.is_private() {
for output in &outputs {
region.increment_total_constants(output.num_constants());
@@ -1599,7 +1441,7 @@ impl Model {
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
info!(
debug!(
"{} {} {} (coord={}, constants={})",
"model uses".blue(),
region.row().to_string().blue(),
@@ -1608,11 +1450,31 @@ impl Model {
region.total_constants().to_string().red()
);
Ok((
region.row(),
region.linear_coord(),
region.total_constants(),
))
let outputs = outputs
.iter()
.map(|x| {
x.get_felt_evals()
.unwrap_or(Tensor::new(Some(&[Fp::ZERO]), &[1]).unwrap())
})
.collect();
let res = DummyPassRes {
num_rows: region.row(),
linear_coord: region.linear_coord(),
total_const_size: region.total_constants(),
lookup_ops: region.used_lookups(),
range_checks: region.used_range_checks(),
max_lookup_inputs: region.max_lookup_inputs(),
min_lookup_inputs: region.min_lookup_inputs(),
max_range_size: region.max_range_size(),
num_dynamic_lookups: region.dynamic_lookup_index(),
dynamic_lookup_col_coord: region.dynamic_lookup_col_coord(),
num_shuffles: region.shuffle_index(),
shuffle_col_coord: region.shuffle_col_coord(),
outputs,
};
Ok(res)
}
/// Retrieves all constants from the model.

View File

@@ -12,16 +12,12 @@ use crate::circuit::Constant;
use crate::circuit::Input;
use crate::circuit::Op;
use crate::circuit::Unknown;
use crate::fieldutils::felt_to_i128;
use crate::fieldutils::i128_to_felt;
#[cfg(not(target_arch = "wasm32"))]
use crate::graph::new_op_from_onnx;
use crate::tensor::Tensor;
use crate::tensor::TensorError;
use halo2curves::bn256::Fr as Fp;
#[cfg(not(target_arch = "wasm32"))]
use itertools::Itertools;
#[cfg(not(target_arch = "wasm32"))]
use log::trace;
use serde::Deserialize;
use serde::Serialize;
@@ -94,10 +90,6 @@ impl Op<Fp> for Rescaled {
Op::<Fp>::out_scale(&*self.inner, in_scales)
}
fn required_lookups(&self) -> Vec<LookupOp> {
self.inner.required_lookups()
}
fn layout(
&self,
config: &mut crate::circuit::BaseConfig<Fp>,
@@ -126,14 +118,14 @@ impl Op<Fp> for Rescaled {
pub struct RebaseScale {
/// The operation that has to be rescaled.
pub inner: Box<SupportedOp>,
/// the multiplier applied to the node output
pub multiplier: f64,
/// rebase op
pub rebase_op: HybridOp,
/// scale being rebased to
pub target_scale: i32,
/// The original scale of the operation's inputs.
pub original_scale: i32,
/// if true then the operation is a multiplicative division
pub div_rebasing: bool,
/// the multiplier applied to the node output
pub multiplier: f64,
}
impl RebaseScale {
@@ -152,20 +144,27 @@ impl RebaseScale {
let multiplier =
scale_to_multiplier(op_out_scale - global_scale * scale_rebase_multiplier as i32);
if let Some(op) = inner.get_rebased() {
let multiplier = op.multiplier * multiplier;
SupportedOp::RebaseScale(RebaseScale {
inner: op.inner.clone(),
target_scale: op.target_scale,
multiplier: op.multiplier * multiplier,
multiplier,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32((multiplier) as f32),
use_range_check_for_int: !div_rebasing,
},
original_scale: op.original_scale,
div_rebasing,
})
} else {
SupportedOp::RebaseScale(RebaseScale {
inner: Box::new(inner),
target_scale: global_scale * scale_rebase_multiplier as i32,
multiplier,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32(multiplier as f32),
use_range_check_for_int: !div_rebasing,
},
original_scale: op_out_scale,
div_rebasing,
})
}
} else {
@@ -183,12 +182,16 @@ impl RebaseScale {
if (op_out_scale < (target_scale)) && !inner.is_constant() && !inner.is_input() {
let multiplier = scale_to_multiplier(op_out_scale - target_scale);
if let Some(op) = inner.get_rebased() {
let multiplier = op.multiplier * multiplier;
SupportedOp::RebaseScale(RebaseScale {
inner: op.inner.clone(),
target_scale: op.target_scale,
multiplier: op.multiplier * multiplier,
multiplier,
original_scale: op.original_scale,
div_rebasing,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32((multiplier) as f32),
use_range_check_for_int: !div_rebasing,
},
})
} else {
SupportedOp::RebaseScale(RebaseScale {
@@ -196,22 +199,16 @@ impl RebaseScale {
target_scale,
multiplier,
original_scale: op_out_scale,
div_rebasing,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32(multiplier as f32),
use_range_check_for_int: !div_rebasing,
},
})
}
} else {
inner
}
}
/// Calculate the require range bracket for the operation
fn range_bracket(&self) -> i128 {
if self.div_rebasing {
0
} else {
self.multiplier as i128 - 1
}
}
}
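
When an op that is already wrapped in a RebaseScale is rebased again, the two factors fold into a single division rather than two stacked ops. A trivial standalone sketch of that composition; the numbers are illustrative:

fn compose_rebase(existing_multiplier: f64, new_multiplier: f64) -> f64 {
    // mirrors `op.multiplier * multiplier` above: one combined divisor
    existing_multiplier * new_multiplier
}

fn main() {
    // an op already rebased by 8 that must now also be rebased by 4
    assert_eq!(compose_rebase(8.0, 4.0), 32.0);
}
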
impl Op<Fp> for RebaseScale {
@@ -220,28 +217,17 @@ impl Op<Fp> for RebaseScale {
}
fn f(&self, x: &[Tensor<Fp>]) -> Result<crate::circuit::ForwardResult<Fp>, TensorError> {
let mut res = Op::<Fp>::f(&*self.inner, x)?;
if self.div_rebasing {
let ri = res.output.map(felt_to_i128);
let rescaled = crate::tensor::ops::nonlinearities::const_div(&ri, self.multiplier);
res.output = rescaled.map(i128_to_felt);
res.intermediate_lookups.push(ri);
} else {
let ri = res.output.map(felt_to_i128);
let divisor = Tensor::from(vec![self.multiplier as i128].into_iter());
let rescaled = crate::tensor::ops::div(&[ri, divisor.clone()])?;
res.output = rescaled.map(i128_to_felt);
res.intermediate_lookups.extend([-divisor.clone(), divisor]);
}
let rebase_res = Op::<Fp>::f(&self.rebase_op, &[res.output])?;
res.output = rebase_res.output;
Ok(res)
}
fn as_string(&self) -> String {
format!(
"REBASED (div={:?}, div_r={}) ({})",
"REBASED (div={:?}, rebasing_op={}) ({})",
self.multiplier,
self.div_rebasing,
<HybridOp as Op<Fp>>::as_string(&self.rebase_op),
self.inner.as_string()
)
}
@@ -250,25 +236,6 @@ impl Op<Fp> for RebaseScale {
Ok(self.target_scale)
}
fn required_lookups(&self) -> Vec<LookupOp> {
let mut lookups: Vec<LookupOp> = self.inner.required_lookups();
if self.div_rebasing {
lookups.push(LookupOp::Div {
denom: crate::circuit::utils::F32(self.multiplier as f32),
});
}
lookups
}
fn required_range_checks(&self) -> Vec<crate::circuit::table::Range> {
let mut range_checks = self.inner.required_range_checks();
if !self.div_rebasing {
let bracket = self.range_bracket();
range_checks.push((-bracket, bracket));
}
range_checks
}
fn layout(
&self,
config: &mut crate::circuit::BaseConfig<Fp>,
@@ -278,25 +245,8 @@ impl Op<Fp> for RebaseScale {
let original_res = self
.inner
.layout(config, region, values)?
.ok_or("no layout")?;
if !self.div_rebasing {
Ok(Some(crate::circuit::layouts::div(
config,
region,
&[original_res],
Fp::from(self.multiplier as u64),
)?))
} else {
Ok(Some(crate::circuit::layouts::nonlinearity(
config,
region,
&[original_res],
&LookupOp::Div {
denom: crate::circuit::utils::F32(self.multiplier as f32),
},
)?))
}
.ok_or("no inner layout")?;
self.rebase_op.layout(config, region, &[original_res])
}
fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
@@ -479,14 +429,6 @@ impl Op<Fp> for SupportedOp {
self
}
fn required_lookups(&self) -> Vec<LookupOp> {
self.as_op().required_lookups()
}
fn required_range_checks(&self) -> Vec<crate::circuit::table::Range> {
self.as_op().required_range_checks()
}
fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
self.as_op().out_scale(in_scales)
}
@@ -520,15 +462,7 @@ impl Tabled for Node {
fn headers() -> Vec<std::borrow::Cow<'static, str>> {
let mut headers = Vec::with_capacity(Self::LENGTH);
for i in [
"idx",
"opkind",
"out_scale",
"inputs",
"out_dims",
"required_lookups",
"required_range_checks",
] {
for i in ["idx", "opkind", "out_scale", "inputs", "out_dims"] {
headers.push(std::borrow::Cow::Borrowed(i));
}
headers
@@ -541,18 +475,6 @@ impl Tabled for Node {
fields.push(std::borrow::Cow::Owned(self.out_scale.to_string()));
fields.push(std::borrow::Cow::Owned(display_vector(&self.inputs)));
fields.push(std::borrow::Cow::Owned(display_vector(&self.out_dims)));
fields.push(std::borrow::Cow::Owned(format!(
"{:?}",
self.opkind
.required_lookups()
.iter()
.map(<LookupOp as Op<Fp>>::as_string)
.collect_vec()
)));
fields.push(std::borrow::Cow::Owned(format!(
"{:?}",
self.opkind.required_range_checks()
)));
fields
}
}
@@ -575,6 +497,7 @@ impl Node {
/// * `public_params` - flag if parameters of model are public
/// * `idx` - The node's unique identifier.
#[cfg(not(target_arch = "wasm32"))]
#[allow(clippy::too_many_arguments)]
pub fn new(
node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
other_nodes: &mut BTreeMap<usize, super::NodeType>,
@@ -583,9 +506,8 @@ impl Node {
idx: usize,
symbol_values: &SymbolValues,
div_rebasing: bool,
rebase_frac_zero_constants: bool,
) -> Result<Self, Box<dyn Error>> {
use log::warn;
trace!("Create {:?}", node);
trace!("Create op {:?}", node.op);
@@ -623,6 +545,7 @@ impl Node {
node.clone(),
&mut inputs,
symbol_values,
rebase_frac_zero_constants,
)?; // parses the op name
// we can only take the inputs as mutable once -- so we need to collect them first
@@ -678,8 +601,6 @@ impl Node {
input_node.bump_scale(out_scale);
in_scales[input] = out_scale;
}
} else {
warn!("input {} not found for rescaling, skipping ...", input);
}
}

View File

@@ -243,6 +243,7 @@ pub fn new_op_from_onnx(
node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
inputs: &mut [super::NodeType],
symbol_values: &SymbolValues,
rebase_frac_zero_constants: bool,
) -> Result<(SupportedOp, Vec<usize>), Box<dyn std::error::Error>> {
use crate::circuit::InputType;
@@ -261,7 +262,9 @@ pub fn new_op_from_onnx(
inputs[index].bump_scale(scale);
c.rebase_scale(scale)?;
inputs[index].replace_opkind(SupportedOp::Constant(c.clone()));
Ok(SupportedOp::Linear(PolyOp::Identity))
Ok(SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(scale),
}))
} else {
Ok(default_op)
}
@@ -282,8 +285,8 @@ pub fn new_op_from_onnx(
"shift left".to_string(),
)));
}
SupportedOp::Nonlinear(LookupOp::Div {
denom: crate::circuit::utils::F32(1.0 / 2.0f32.powf(raw_values[0])),
SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(input_scales[0] - raw_values[0] as i32),
})
} else {
return Err(Box::new(GraphError::OpMismatch(
@@ -304,8 +307,8 @@ pub fn new_op_from_onnx(
"shift right".to_string(),
)));
}
SupportedOp::Nonlinear(LookupOp::Div {
denom: crate::circuit::utils::F32(2.0f32.powf(raw_values[0])),
SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(input_scales[0] + raw_values[0] as i32),
})
} else {
return Err(Box::new(GraphError::OpMismatch(
@@ -436,17 +439,16 @@ pub fn new_op_from_onnx(
let op = load_op::<ScatterElements>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
let mut op =
SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::ScatterElements {
dim: axis,
constant_idx: None,
});
let mut op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterElements {
dim: axis,
constant_idx: None,
});
// if param_visibility.is_public() {
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
op = SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::ScatterElements {
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterElements {
dim: axis,
constant_idx: Some(c.raw_values.map(|x| x as usize)),
})
@@ -475,17 +477,16 @@ pub fn new_op_from_onnx(
let op = load_op::<GatherElements>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
let mut op =
SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::GatherElements {
dim: axis,
constant_idx: None,
});
let mut op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherElements {
dim: axis,
constant_idx: None,
});
// if param_visibility.is_public() {
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(inputs.len() - 1);
op = SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::GatherElements {
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherElements {
dim: axis,
constant_idx: Some(c.raw_values.map(|x| x as usize)),
})
@@ -544,7 +545,7 @@ pub fn new_op_from_onnx(
// Raw values are always f32
let raw_value = extract_tensor_value(op.0)?;
// If bool or a tensor dimension then don't scale
let constant_scale = match dt {
let mut constant_scale = match dt {
DatumType::Bool
| DatumType::TDim
| DatumType::I64
@@ -559,6 +560,12 @@ pub fn new_op_from_onnx(
_ => return Err(Box::new(GraphError::UnsupportedDataType)),
};
// if all raw_values are round then set scale to 0
let all_round = raw_value.iter().all(|x| (x).fract() == 0.0);
if all_round && rebase_frac_zero_constants {
constant_scale = 0;
}
// Quantize the raw value
let quantized_value =
quantize_tensor(raw_value.clone(), constant_scale, param_visibility)?;
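
A standalone sketch of the fraction check above, under the assumption that integral constants quantize losslessly at scale 0 and can therefore be rebased when the flag is set; the helper name is hypothetical:

fn rebased_constant_scale(raw: &[f32], default_scale: i32, rebase_frac_zero_constants: bool) -> i32 {
    // mirrors the `all_round` check: every raw value has a zero fractional part
    let all_round = raw.iter().all(|x| x.fract() == 0.0);
    if all_round && rebase_frac_zero_constants {
        0
    } else {
        default_scale
    }
}

fn main() {
    assert_eq!(rebased_constant_scale(&[1.0, -3.0, 42.0], 7, true), 0);
    assert_eq!(rebased_constant_scale(&[0.5, 2.0], 7, true), 7);
    assert_eq!(rebased_constant_scale(&[1.0, -3.0], 7, false), 7);
}
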
@@ -665,8 +672,10 @@ pub fn new_op_from_onnx(
if unit == 0. {
SupportedOp::Nonlinear(LookupOp::ReLU)
} else {
// get the non-constant index
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
SupportedOp::Nonlinear(LookupOp::Max {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
scale: scale_to_multiplier(inputs[non_const_idx].out_scales()[0]).into(),
a: crate::circuit::utils::F32(unit),
})
}
@@ -707,8 +716,11 @@ pub fn new_op_from_onnx(
deleted_indices.push(const_idx);
}
// get the non-constant index
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
SupportedOp::Nonlinear(LookupOp::Min {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
scale: scale_to_multiplier(inputs[non_const_idx].out_scales()[0]).into(),
a: crate::circuit::utils::F32(unit),
})
} else {
@@ -717,16 +729,12 @@ pub fn new_op_from_onnx(
}
"Recip" => {
let in_scale = inputs[0].out_scales()[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
// If the input scale is larger than the params scale
let scale_diff = std::cmp::max(scales.input, scales.params) - inputs[0].out_scales()[0];
let additional_scale = if scale_diff > 0 {
scale_to_multiplier(scale_diff)
} else {
1.0
};
SupportedOp::Nonlinear(LookupOp::Recip {
scale: (scale_to_multiplier(in_scale).powf(2.0) * additional_scale).into(),
SupportedOp::Hybrid(HybridOp::Recip {
input_scale: (scale_to_multiplier(in_scale) as f32).into(),
output_scale: (scale_to_multiplier(max_scale) as f32).into(),
use_range_check_for_int: true,
})
}
@@ -751,7 +759,9 @@ pub fn new_op_from_onnx(
"Scan" => {
return Err("scan should never be analyzed explicitly".into());
}
"QuantizeLinearU8" | "DequantizeLinearF32" => SupportedOp::Linear(PolyOp::Identity),
"QuantizeLinearU8" | "DequantizeLinearF32" => {
SupportedOp::Linear(PolyOp::Identity { out_scale: None })
}
"Abs" => SupportedOp::Nonlinear(LookupOp::Abs),
"Neg" => SupportedOp::Linear(PolyOp::Neg),
"Sigmoid" => SupportedOp::Nonlinear(LookupOp::Sigmoid {
@@ -856,11 +866,11 @@ pub fn new_op_from_onnx(
}),
)?
} else {
SupportedOp::Linear(PolyOp::Identity)
SupportedOp::Linear(PolyOp::Identity { out_scale: None })
}
}
DatumType::F16 | DatumType::F32 | DatumType::F64 => {
SupportedOp::Linear(PolyOp::Identity)
SupportedOp::Linear(PolyOp::Identity { out_scale: None })
}
_ => return Err(Box::new(GraphError::UnsupportedDataType)),
}
@@ -885,12 +895,15 @@ pub fn new_op_from_onnx(
let const_idx = const_idx[0];
if let Some(c) = inputs[const_idx].opkind().get_mutable_constant() {
if c.raw_values.len() == 1 && c.raw_values[0] < 1. {
inputs[const_idx].decrement_use();
deleted_indices.push(const_idx);
op = SupportedOp::Nonlinear(LookupOp::Div {
// we invert the constant for division
denom: crate::circuit::utils::F32(1. / c.raw_values[0]),
})
// if the inverted constant is not a power of two then we need to add a range check
let raw_values = 1.0 / c.raw_values[0];
if raw_values.log2().fract() == 0.0 {
inputs[const_idx].decrement_use();
deleted_indices.push(const_idx);
op = SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(input_scales[0] + raw_values.log2() as i32),
});
}
}
}
}

View File

@@ -1,4 +1,5 @@
use std::error::Error;
use std::fmt::Display;
use crate::tensor::TensorType;
use crate::tensor::{ValTensor, VarTensor};
@@ -14,6 +15,7 @@ use pyo3::{
};
use serde::{Deserialize, Serialize};
use tosubcommand::ToFlags;
use super::*;
@@ -40,6 +42,33 @@ pub enum Visibility {
Fixed,
}
impl Display for Visibility {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Visibility::KZGCommit => write!(f, "kzgcommit"),
Visibility::Private => write!(f, "private"),
Visibility::Public => write!(f, "public"),
Visibility::Fixed => write!(f, "fixed"),
Visibility::Hashed {
hash_is_public,
outlets,
} => {
if *hash_is_public {
write!(f, "hashed/public")
} else {
write!(f, "hashed/private/{}", outlets.iter().join(","))
}
}
}
}
}
impl ToFlags for Visibility {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
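
A standalone restatement of the hashed-visibility rendering above, showing the strings a flag would carry; the function name is hypothetical, while the public/private split and comma-joined outlets come from the Display impl:

fn render_hashed(hash_is_public: bool, outlets: &[usize]) -> String {
    if hash_is_public {
        "hashed/public".to_string()
    } else {
        let outlets = outlets
            .iter()
            .map(|o| o.to_string())
            .collect::<Vec<_>>()
            .join(",");
        format!("hashed/private/{}", outlets)
    }
}

fn main() {
    assert_eq!(render_hashed(true, &[]), "hashed/public");
    assert_eq!(render_hashed(false, &[0, 2]), "hashed/private/0,2");
}
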
impl<'a> From<&'a str> for Visibility {
fn from(s: &'a str) -> Self {
if s.contains("hashed/private") {
@@ -202,17 +231,6 @@ impl Visibility {
vec![]
}
}
impl std::fmt::Display for Visibility {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Visibility::KZGCommit => write!(f, "kzgcommit"),
Visibility::Private => write!(f, "private"),
Visibility::Public => write!(f, "public"),
Visibility::Fixed => write!(f, "fixed"),
Visibility::Hashed { .. } => write!(f, "hashed"),
}
}
}
/// Represents the scale of the model input, model parameters.
#[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq, PartialOrd)]
@@ -237,6 +255,11 @@ impl VarScales {
std::cmp::max(self.input, self.params)
}
/// Returns the minimum of the input and params scales.
pub fn get_min(&self) -> crate::Scale {
std::cmp::min(self.input, self.params)
}
/// Place in [VarScales] struct.
pub fn from_args(args: &RunArgs) -> Result<Self, Box<dyn Error>> {
Ok(Self {
@@ -397,20 +420,34 @@ impl<F: PrimeField + TensorType + PartialOrd> ModelVars<F> {
}
/// Allocate all columns that will be assigned to by a model.
pub fn new(
cs: &mut ConstraintSystem<F>,
logrows: usize,
var_len: usize,
num_inner_cols: usize,
num_constants: usize,
module_requires_fixed: bool,
) -> Self {
info!("number of blinding factors: {}", cs.blinding_factors());
pub fn new(cs: &mut ConstraintSystem<F>, params: &GraphSettings) -> Self {
debug!("number of blinding factors: {}", cs.blinding_factors());
let advices = (0..3)
let logrows = params.run_args.logrows as usize;
let var_len = params.total_assignments;
let num_inner_cols = params.run_args.num_inner_cols;
let num_constants = params.total_const_size;
let module_requires_fixed = params.module_requires_fixed();
let requires_dynamic_lookup = params.requires_dynamic_lookup();
let requires_shuffle = params.requires_shuffle();
let dynamic_lookup_and_shuffle_size = params.dynamic_lookup_and_shuffle_col_size();
let mut advices = (0..3)
.map(|_| VarTensor::new_advice(cs, logrows, num_inner_cols, var_len))
.collect_vec();
if requires_dynamic_lookup || requires_shuffle {
let num_cols = if requires_dynamic_lookup { 3 } else { 2 };
for _ in 0..num_cols {
let dynamic_lookup =
VarTensor::new_advice(cs, logrows, 1, dynamic_lookup_and_shuffle_size);
if dynamic_lookup.num_blocks() > 1 {
panic!("dynamic lookup or shuffle should only have one block");
};
advices.push(dynamic_lookup);
}
}
debug!(
"model uses {} advice blocks (size={})",
advices.iter().map(|v| v.num_blocks()).sum::<usize>(),

View File

@@ -7,7 +7,6 @@
overflowing_literals,
path_statements,
patterns_in_fns_without_body,
private_in_public,
unconditional_recursion,
unused,
unused_allocation,
@@ -33,6 +32,7 @@ use circuit::{table::Range, CheckMode, Tolerance};
use clap::Args;
use graph::Visibility;
use serde::{Deserialize, Serialize};
use tosubcommand::ToFlags;
/// Methods for configuring tensor operations and assigning values to them in a Halo2 circuit.
pub mod circuit;
@@ -71,11 +71,34 @@ pub mod tensor;
#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
pub mod wasm;
#[cfg(not(target_arch = "wasm32"))]
use lazy_static::lazy_static;
/// The log2 of the denominator in the fixed point representation used when quantizing inputs
pub type Scale = i32;
#[cfg(not(target_arch = "wasm32"))]
// Buf writer capacity
lazy_static! {
/// The capacity of the buffer used for writing to disk
pub static ref EZKL_BUF_CAPACITY: usize = std::env::var("EZKL_BUF_CAPACITY")
.unwrap_or("8000".to_string())
.parse()
.unwrap();
/// The serialization format for the keys
pub static ref EZKL_KEY_FORMAT: String = std::env::var("EZKL_KEY_FORMAT")
.unwrap_or("raw-bytes".to_string());
}
#[cfg(target_arch = "wasm32")]
const EZKL_KEY_FORMAT: &str = "raw-bytes";
#[cfg(target_arch = "wasm32")]
const EZKL_BUF_CAPACITY: &usize = &8000;
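
A std-only sketch of the pattern these constants enable: read the capacity from the environment once and hand it to a buffered writer. The file name is illustrative, and this sketch falls back to the default instead of panicking on a malformed value as the lazy_static above does:

use std::env;
use std::fs::File;
use std::io::{BufWriter, Write};

fn buf_capacity() -> usize {
    env::var("EZKL_BUF_CAPACITY")
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(8000)
}

fn main() -> std::io::Result<()> {
    let file = File::create("example_output.json")?;
    let mut writer = BufWriter::with_capacity(buf_capacity(), file);
    writer.write_all(b"{}")?;
    writer.flush()
}
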
/// Parameters specific to a proving run
#[derive(Debug, Args, Deserialize, Serialize, Clone, PartialEq, PartialOrd)]
#[derive(Debug, Args, Deserialize, Serialize, Clone, PartialEq, PartialOrd, ToFlags)]
pub struct RunArgs {
/// The tolerance for error on model outputs
#[arg(short = 'T', long, default_value = "0")]
@@ -90,7 +113,7 @@ pub struct RunArgs {
#[arg(long, default_value = "1")]
pub scale_rebase_multiplier: u32,
/// The min and max elements in the lookup table input column
#[arg(short = 'B', long, value_parser = parse_tuple::<i128>, default_value = "(-32768,32768)")]
#[arg(short = 'B', long, value_parser = parse_key_val::<i128, i128>, default_value = "-32768->32768")]
pub lookup_range: Range,
/// The log_2 number of rows
#[arg(short = 'K', long, default_value = "17")]
@@ -99,7 +122,7 @@ pub struct RunArgs {
#[arg(short = 'N', long, default_value = "2")]
pub num_inner_cols: usize,
/// Hand-written parser for graph variables, eg. batch_size=1
#[arg(short = 'V', long, value_parser = parse_key_val::<String, usize>, default_value = "batch_size=1", value_delimiter = ',')]
#[arg(short = 'V', long, value_parser = parse_key_val::<String, usize>, default_value = "batch_size->1", value_delimiter = ',')]
pub variables: Vec<(String, usize)>,
/// Flags whether inputs are public, private, hashed
#[arg(long, default_value = "private")]
@@ -111,8 +134,11 @@ pub struct RunArgs {
#[arg(long, default_value = "private")]
pub param_visibility: Visibility,
#[arg(long, default_value = "false")]
/// Multiplicative division
/// Rebase the scale using a lookup table for division instead of using a range check
pub div_rebasing: bool,
/// Should constants with 0.0 fraction be rebased to scale 0
#[arg(long, default_value = "false")]
pub rebase_frac_zero_constants: bool,
/// check mode (safe, unsafe, etc)
#[arg(long, default_value = "unsafe")]
pub check_mode: CheckMode,
@@ -133,6 +159,7 @@ impl Default for RunArgs {
output_visibility: Visibility::Public,
param_visibility: Visibility::Private,
div_rebasing: false,
rebase_frac_zero_constants: false,
check_mode: CheckMode::UNSAFE,
}
}
@@ -153,6 +180,9 @@ impl RunArgs {
if self.num_inner_cols < 1 {
return Err("num_inner_cols must be >= 1".into());
}
if self.tolerance.val > 0.0 && self.output_visibility != Visibility::Public {
return Err("tolerance > 0.0 requires output_visibility to be public".into());
}
Ok(())
}
@@ -177,34 +207,15 @@ fn parse_key_val<T, U>(
s: &str,
) -> Result<(T, U), Box<dyn std::error::Error + Send + Sync + 'static>>
where
T: std::str::FromStr,
T: std::str::FromStr + std::fmt::Debug,
T::Err: std::error::Error + Send + Sync + 'static,
U: std::str::FromStr,
U: std::str::FromStr + std::fmt::Debug,
U::Err: std::error::Error + Send + Sync + 'static,
{
let pos = s
.find('=')
.ok_or_else(|| format!("invalid KEY=value: no `=` found in `{s}`"))?;
Ok((s[..pos].parse()?, s[pos + 1..].parse()?))
}
/// Parse a tuple
fn parse_tuple<T>(s: &str) -> Result<(T, T), Box<dyn std::error::Error + Send + Sync + 'static>>
where
T: std::str::FromStr + Clone,
T::Err: std::error::Error + Send + Sync + 'static,
{
let res = s.trim_matches(|p| p == '(' || p == ')').split(',');
let res = res
.map(|x| {
// remove blank space
let x = x.trim();
x.parse::<T>()
})
.collect::<Result<Vec<_>, _>>()?;
if res.len() != 2 {
return Err("invalid tuple".into());
}
Ok((res[0].clone(), res[1].clone()))
.find("->")
.ok_or_else(|| format!("invalid x->y: no `->` found in `{s}`"))?;
let a = s[..pos].parse()?;
let b = s[pos + 2..].parse()?;
Ok((a, b))
}
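
A standalone restatement of the parser above for illustration; the `->` delimiter keeps leading minus signs unambiguous, so lookup ranges and graph variables can share one syntax:

fn parse_key_val<T, U>(s: &str) -> Result<(T, U), String>
where
    T: std::str::FromStr,
    U: std::str::FromStr,
    T::Err: std::fmt::Display,
    U::Err: std::fmt::Display,
{
    let pos = s.find("->").ok_or_else(|| format!("no `->` found in `{s}`"))?;
    let a = s[..pos].parse().map_err(|e: T::Err| e.to_string())?;
    let b = s[pos + 2..].parse().map_err(|e: U::Err| e.to_string())?;
    Ok((a, b))
}

fn main() {
    let range: (i128, i128) = parse_key_val("-32768->32768").unwrap();
    assert_eq!(range, (-32768, 32768));
    let var: (String, usize) = parse_key_val("batch_size->1").unwrap();
    assert_eq!(var, ("batch_size".to_string(), 1));
}
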

View File

@@ -8,6 +8,7 @@ use crate::circuit::CheckMode;
use crate::graph::GraphWitness;
use crate::pfsys::evm::aggregation::PoseidonTranscript;
use crate::tensor::TensorType;
use crate::{EZKL_BUF_CAPACITY, EZKL_KEY_FORMAT};
use clap::ValueEnum;
use halo2_proofs::circuit::Value;
use halo2_proofs::plonk::{
@@ -39,9 +40,19 @@ use std::io::{self, BufReader, BufWriter, Cursor, Write};
use std::ops::Deref;
use std::path::PathBuf;
use thiserror::Error as thisError;
use tosubcommand::ToFlags;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
match s {
"processed" => halo2_proofs::SerdeFormat::Processed,
"raw-bytes-unchecked" => halo2_proofs::SerdeFormat::RawBytesUnchecked,
"raw-bytes" => halo2_proofs::SerdeFormat::RawBytes,
_ => panic!("invalid serde format"),
}
}
#[allow(missing_docs)]
#[derive(
ValueEnum, Copy, Clone, Default, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd,
@@ -52,6 +63,25 @@ pub enum ProofType {
ForAggr,
}
impl std::fmt::Display for ProofType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
ProofType::Single => "single",
ProofType::ForAggr => "for-aggr",
}
)
}
}
impl ToFlags for ProofType {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl From<ProofType> for TranscriptType {
fn from(val: ProofType) -> Self {
match val {
@@ -154,6 +184,25 @@ pub enum TranscriptType {
EVM,
}
impl std::fmt::Display for TranscriptType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
TranscriptType::Poseidon => "poseidon",
TranscriptType::EVM => "evm",
}
)
}
}
impl ToFlags for TranscriptType {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
#[cfg(feature = "python-bindings")]
impl ToPyObject for TranscriptType {
fn to_object(&self, py: Python) -> PyObject {
@@ -167,8 +216,8 @@ impl ToPyObject for TranscriptType {
#[cfg(feature = "python-bindings")]
///
pub fn g1affine_to_pydict(g1affine_dict: &PyDict, g1affine: &G1Affine) {
let g1affine_x = field_to_string_montgomery(&g1affine.x);
let g1affine_y = field_to_string_montgomery(&g1affine.y);
let g1affine_x = field_to_string(&g1affine.x);
let g1affine_y = field_to_string(&g1affine.y);
g1affine_dict.set_item("x", g1affine_x).unwrap();
g1affine_dict.set_item("y", g1affine_y).unwrap();
}
@@ -178,23 +227,23 @@ use halo2curves::bn256::G1;
#[cfg(feature = "python-bindings")]
///
pub fn g1_to_pydict(g1_dict: &PyDict, g1: &G1) {
let g1_x = field_to_string_montgomery(&g1.x);
let g1_y = field_to_string_montgomery(&g1.y);
let g1_z = field_to_string_montgomery(&g1.z);
let g1_x = field_to_string(&g1.x);
let g1_y = field_to_string(&g1.y);
let g1_z = field_to_string(&g1.z);
g1_dict.set_item("x", g1_x).unwrap();
g1_dict.set_item("y", g1_y).unwrap();
g1_dict.set_item("z", g1_z).unwrap();
}
/// converts fp into `Vec<u64>` in Montgomery form
pub fn field_to_string_montgomery<F: PrimeField + SerdeObject + Serialize>(fp: &F) -> String {
/// converts fp into a little endian Hex string
pub fn field_to_string<F: PrimeField + SerdeObject + Serialize>(fp: &F) -> String {
let repr = serde_json::to_string(&fp).unwrap();
let b: String = serde_json::from_str(&repr).unwrap();
b
}
/// converts `Vec<u64>` in Montgomery form into fp
pub fn string_to_field_montgomery<F: PrimeField + SerdeObject + Serialize + DeserializeOwned>(
/// converts a little endian Hex string into a field element
pub fn string_to_field<F: PrimeField + SerdeObject + Serialize + DeserializeOwned>(
b: &String,
) -> F {
let repr = serde_json::to_string(&b).unwrap();
@@ -259,7 +308,7 @@ where
let field_elems: Vec<Vec<String>> = self
.instances
.iter()
.map(|x| x.iter().map(|fp| field_to_string_montgomery(fp)).collect())
.map(|x| x.iter().map(|fp| field_to_string(fp)).collect())
.collect::<Vec<_>>();
dict.set_item("instances", field_elems).unwrap();
let hex_proof = hex::encode(&self.proof);
@@ -315,7 +364,7 @@ where
/// Saves the Proof to a specified `proof_path`.
pub fn save(&self, proof_path: &PathBuf) -> Result<(), Box<dyn Error>> {
let file = std::fs::File::create(proof_path)?;
let mut writer = BufWriter::new(file);
let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, file);
serde_json::to_writer(&mut writer, &self)?;
Ok(())
}
@@ -328,8 +377,10 @@ where
<C as CurveAffine>::ScalarExt: FromUniformBytes<64>,
{
trace!("reading proof");
let data = std::fs::read_to_string(proof_path)?;
serde_json::from_str(&data).map_err(|e| e.into())
let file = std::fs::File::open(proof_path)?;
let reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, file);
let proof: Self = serde_json::from_reader(reader)?;
Ok(proof)
}
}
@@ -433,7 +484,7 @@ where
pub fn create_keys<Scheme: CommitmentScheme, F: PrimeField + TensorType, C: Circuit<F>>(
circuit: &C,
params: &'_ Scheme::ParamsProver,
compress_selectors: bool,
disable_selector_compression: bool,
) -> Result<ProvingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
@@ -445,7 +496,7 @@ where
// Initialize verifying key
let now = Instant::now();
trace!("preparing VK");
let vk = keygen_vk_custom(params, &empty_circuit, compress_selectors)?;
let vk = keygen_vk_custom(params, &empty_circuit, !disable_selector_compression)?;
let elapsed = now.elapsed();
info!("VK took {}.{}", elapsed.as_secs(), elapsed.subsec_millis());
@@ -555,6 +606,7 @@ where
verifier_params,
pk.get_vk(),
strategy,
verifier_params.n(),
)?;
}
let elapsed = now.elapsed();
@@ -642,6 +694,7 @@ pub fn verify_proof_circuit<
params: &'params Scheme::ParamsVerifier,
vk: &VerifyingKey<Scheme::Curve>,
strategy: Strategy,
orig_n: u64,
) -> Result<Strategy::Output, halo2_proofs::plonk::Error>
where
Scheme::Scalar: SerdeObject
@@ -662,7 +715,7 @@ where
trace!("instances {:?}", instances);
let mut transcript = TranscriptReadBuffer::init(Cursor::new(snark.proof.clone()));
verify_proof::<Scheme, V, _, TR, _>(params, vk, strategy, instances, &mut transcript)
verify_proof::<Scheme, V, _, TR, _>(params, vk, strategy, instances, &mut transcript, orig_n)
}
/// Loads a [VerifyingKey] at `path`.
@@ -678,13 +731,14 @@ where
info!("loading verification key from {:?}", path);
let f =
File::open(path.clone()).map_err(|_| format!("failed to load vk at {}", path.display()))?;
let mut reader = BufReader::new(f);
VerifyingKey::<Scheme::Curve>::read::<_, C>(
let mut reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, f);
let vk = VerifyingKey::<Scheme::Curve>::read::<_, C>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
serde_format_from_str(&EZKL_KEY_FORMAT),
params,
)
.map_err(Box::<dyn Error>::from)
)?;
info!("done loading verification key ✅");
Ok(vk)
}
/// Loads a [ProvingKey] at `path`.
@@ -700,19 +754,20 @@ where
info!("loading proving key from {:?}", path);
let f =
File::open(path.clone()).map_err(|_| format!("failed to load pk at {}", path.display()))?;
let mut reader = BufReader::new(f);
ProvingKey::<Scheme::Curve>::read::<_, C>(
let mut reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, f);
let pk = ProvingKey::<Scheme::Curve>::read::<_, C>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
serde_format_from_str(&EZKL_KEY_FORMAT),
params,
)
.map_err(Box::<dyn Error>::from)
)?;
info!("done loading proving key ✅");
Ok(pk)
}
/// Saves a [ProvingKey] to `path`.
pub fn save_pk<Scheme: CommitmentScheme>(
path: &PathBuf,
vk: &ProvingKey<Scheme::Curve>,
pk: &ProvingKey<Scheme::Curve>,
) -> Result<(), io::Error>
where
Scheme::Curve: SerdeObject + CurveAffine,
@@ -720,9 +775,10 @@ where
{
info!("saving proving key 💾");
let f = File::create(path)?;
let mut writer = BufWriter::new(f);
vk.write(&mut writer, halo2_proofs::SerdeFormat::RawBytes)?;
let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
pk.write(&mut writer, serde_format_from_str(&EZKL_KEY_FORMAT))?;
writer.flush()?;
info!("done saving proving key ✅");
Ok(())
}
@@ -737,9 +793,10 @@ where
{
info!("saving verification key 💾");
let f = File::create(path)?;
let mut writer = BufWriter::new(f);
vk.write(&mut writer, halo2_proofs::SerdeFormat::RawBytes)?;
let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
vk.write(&mut writer, serde_format_from_str(&EZKL_KEY_FORMAT))?;
writer.flush()?;
info!("done saving verification key ✅");
Ok(())
}
@@ -750,7 +807,7 @@ pub fn save_params<Scheme: CommitmentScheme>(
) -> Result<(), io::Error> {
info!("saving parameters 💾");
let f = File::create(path)?;
let mut writer = BufWriter::new(f);
let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
params.write(&mut writer)?;
writer.flush()?;
Ok(())
@@ -840,6 +897,7 @@ pub(crate) fn verify_proof_circuit_kzg<
proof: Snark<Fr, G1Affine>,
vk: &VerifyingKey<G1Affine>,
strategy: Strategy,
orig_n: u64,
) -> Result<Strategy::Output, halo2_proofs::plonk::Error> {
match proof.transcript_type {
TranscriptType::EVM => verify_proof_circuit::<
@@ -849,7 +907,7 @@ pub(crate) fn verify_proof_circuit_kzg<
_,
_,
EvmTranscript<G1Affine, _, _, _>,
>(&proof, params, vk, strategy),
>(&proof, params, vk, strategy, orig_n),
TranscriptType::Poseidon => verify_proof_circuit::<
Fr,
VerifierSHPLONK<'_, Bn256>,
@@ -857,7 +915,7 @@ pub(crate) fn verify_proof_circuit_kzg<
_,
_,
PoseidonTranscript<NativeLoader, _>,
>(&proof, params, vk, strategy),
>(&proof, params, vk, strategy, orig_n),
}
}

View File

@@ -15,10 +15,9 @@ use crate::graph::{
use crate::pfsys::evm::aggregation::AggregationCircuit;
use crate::pfsys::{
load_pk, load_vk, save_params, save_vk, srs::gen_srs as ezkl_gen_srs, srs::load_srs, ProofType,
Snark, TranscriptType,
TranscriptType,
};
use crate::RunArgs;
use ethers::types::H160;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2curves::bn256::{Bn256, Fq, Fr, G1Affine, G1};
use pyo3::exceptions::{PyIOError, PyRuntimeError};
@@ -26,7 +25,6 @@ use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
use pyo3_log;
use snark_verifier::util::arithmetic::PrimeField;
use std::str::FromStr;
use std::{fs::File, path::PathBuf};
use tokio::runtime::Runtime;
@@ -65,9 +63,9 @@ struct PyG1 {
impl From<G1> for PyG1 {
fn from(g1: G1) -> Self {
PyG1 {
x: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.x),
y: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.y),
z: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.z),
x: crate::pfsys::field_to_string::<Fq>(&g1.x),
y: crate::pfsys::field_to_string::<Fq>(&g1.y),
z: crate::pfsys::field_to_string::<Fq>(&g1.z),
}
}
}
@@ -75,9 +73,9 @@ impl From<G1> for PyG1 {
impl From<PyG1> for G1 {
fn from(val: PyG1) -> Self {
G1 {
x: crate::pfsys::string_to_field_montgomery::<Fq>(&val.x),
y: crate::pfsys::string_to_field_montgomery::<Fq>(&val.y),
z: crate::pfsys::string_to_field_montgomery::<Fq>(&val.z),
x: crate::pfsys::string_to_field::<Fq>(&val.x),
y: crate::pfsys::string_to_field::<Fq>(&val.y),
z: crate::pfsys::string_to_field::<Fq>(&val.z),
}
}
}
@@ -108,8 +106,8 @@ pub struct PyG1Affine {
impl From<G1Affine> for PyG1Affine {
fn from(g1: G1Affine) -> Self {
PyG1Affine {
x: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.x),
y: crate::pfsys::field_to_string_montgomery::<Fq>(&g1.y),
x: crate::pfsys::field_to_string::<Fq>(&g1.x),
y: crate::pfsys::field_to_string::<Fq>(&g1.y),
}
}
}
@@ -117,8 +115,8 @@ impl From<G1Affine> for PyG1Affine {
impl From<PyG1Affine> for G1Affine {
fn from(val: PyG1Affine) -> Self {
G1Affine {
x: crate::pfsys::string_to_field_montgomery::<Fq>(&val.x),
y: crate::pfsys::string_to_field_montgomery::<Fq>(&val.y),
x: crate::pfsys::string_to_field::<Fq>(&val.x),
y: crate::pfsys::string_to_field::<Fq>(&val.y),
}
}
}
@@ -162,6 +160,8 @@ struct PyRunArgs {
#[pyo3(get, set)]
pub div_rebasing: bool,
#[pyo3(get, set)]
pub rebase_frac_zero_constants: bool,
#[pyo3(get, set)]
pub check_mode: CheckMode,
}
@@ -190,6 +190,7 @@ impl From<PyRunArgs> for RunArgs {
param_visibility: py_run_args.param_visibility,
variables: py_run_args.variables,
div_rebasing: py_run_args.div_rebasing,
rebase_frac_zero_constants: py_run_args.rebase_frac_zero_constants,
check_mode: py_run_args.check_mode,
}
}
@@ -210,58 +211,57 @@ impl Into<PyRunArgs> for RunArgs {
param_visibility: self.param_visibility,
variables: self.variables,
div_rebasing: self.div_rebasing,
rebase_frac_zero_constants: self.rebase_frac_zero_constants,
check_mode: self.check_mode,
}
}
}
/// Converts 4 u64s to a field element
/// Converts a felt to a big-endian string
#[pyfunction(signature = (
array,
felt,
))]
fn string_to_felt(array: PyFelt) -> PyResult<String> {
Ok(format!(
"{:?}",
crate::pfsys::string_to_field_montgomery::<Fr>(&array)
))
fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
Ok(format!("{:?}", felt))
}
/// Converts 4 u64s representing a field element directly to an integer
/// Converts a field element hex string to an integer
#[pyfunction(signature = (
array,
))]
fn string_to_int(array: PyFelt) -> PyResult<i128> {
let felt = crate::pfsys::string_to_field_montgomery::<Fr>(&array);
fn felt_to_int(array: PyFelt) -> PyResult<i128> {
let felt = crate::pfsys::string_to_field::<Fr>(&array);
let int_rep = felt_to_i128(felt);
Ok(int_rep)
}
/// Converts 4 u64s representing a field element directly to a (rescaled from fixed point scaling) floating point
/// Converts a field element hex string to a floating point number
#[pyfunction(signature = (
array,
scale
))]
fn string_to_float(array: PyFelt, scale: crate::Scale) -> PyResult<f64> {
let felt = crate::pfsys::string_to_field_montgomery::<Fr>(&array);
fn felt_to_float(array: PyFelt, scale: crate::Scale) -> PyResult<f64> {
let felt = crate::pfsys::string_to_field::<Fr>(&array);
let int_rep = felt_to_i128(felt);
let multiplier = scale_to_multiplier(scale);
let float_rep = int_rep as f64 / multiplier;
Ok(float_rep)
}
/// Converts a floating point element to 4 u64s representing a fixed point field element
/// Converts a floating point element to a field element hex string
#[pyfunction(signature = (
input,
scale
))]
fn float_to_string(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
let int_rep = quantize_float(&input, 0.0, scale)
.map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
let felt = i128_to_felt(int_rep);
Ok(crate::pfsys::field_to_string_montgomery::<Fr>(&felt))
Ok(crate::pfsys::field_to_string::<Fr>(&felt))
}
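
A plain-integer sketch of the fixed-point round trip behind felt_to_float and float_to_felt; the real helpers go through field elements and the crate's quantize_float, so this only illustrates the scaling arithmetic and the names are illustrative:

fn scale_to_multiplier(scale: i32) -> f64 {
    2f64.powi(scale)
}

fn quantize(x: f64, scale: i32) -> i128 {
    (x * scale_to_multiplier(scale)).round() as i128
}

fn dequantize(v: i128, scale: i32) -> f64 {
    v as f64 / scale_to_multiplier(scale)
}

fn main() {
    let scale = 7;
    let v = quantize(0.3, scale); // 0.3 * 128 = 38.4 -> 38
    let back = dequantize(v, scale); // 38 / 128 = 0.296875
    assert!((back - 0.3).abs() < 1.0 / scale_to_multiplier(scale));
}
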
/// Converts a buffer to vector of 4 u64s representing a fixed point field element
/// Converts a buffer to a vector of field elements
#[pyfunction(signature = (
buffer
))]
@@ -314,7 +314,10 @@ fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
.map(|x| PrimeField::from_u128(u8_array_to_u128_le(*x)))
.collect();
let field_elements: Vec<String> = field_elements.iter().map(|x| format!("{:?}", x)).collect();
let field_elements: Vec<String> = field_elements
.iter()
.map(|x| crate::pfsys::field_to_string::<Fr>(x))
.collect();
Ok(field_elements)
}
@@ -326,7 +329,7 @@ fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
let message: Vec<Fr> = message
.iter()
.map(crate::pfsys::string_to_field_montgomery::<Fr>)
.map(crate::pfsys::string_to_field::<Fr>)
.collect::<Vec<_>>();
let output =
@@ -337,7 +340,7 @@ fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
let hash = output[0]
.iter()
.map(crate::pfsys::field_to_string_montgomery::<Fr>)
.map(crate::pfsys::field_to_string::<Fr>)
.collect::<Vec<_>>();
Ok(hash)
}
@@ -357,7 +360,7 @@ fn kzg_commit(
) -> PyResult<Vec<PyG1Affine>> {
let message: Vec<Fr> = message
.iter()
.map(crate::pfsys::string_to_field_montgomery::<Fr>)
.map(crate::pfsys::string_to_field::<Fr>)
.collect::<Vec<_>>();
let settings = GraphSettings::load(&settings_path)
@@ -521,7 +524,7 @@ fn gen_settings(
scales = None,
scale_rebase_multiplier = DEFAULT_SCALE_REBASE_MULTIPLIERS.split(",").map(|x| x.parse().unwrap()).collect(),
max_logrows = None,
div_rebasing = None,
only_range_check_rebase = DEFAULT_ONLY_RANGE_CHECK_REBASE.parse().unwrap(),
))]
fn calibrate_settings(
data: PathBuf,
@@ -532,7 +535,7 @@ fn calibrate_settings(
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
max_logrows: Option<u32>,
div_rebasing: Option<bool>,
only_range_check_rebase: bool,
) -> Result<bool, PyErr> {
crate::execute::calibrate(
model,
@@ -542,7 +545,7 @@ fn calibrate_settings(
lookup_safety_margin,
scales,
scale_rebase_multiplier,
div_rebasing,
only_range_check_rebase,
max_logrows,
)
.map_err(|e| {
@@ -619,7 +622,7 @@ fn mock_aggregate(
pk_path=PathBuf::from(DEFAULT_PK),
srs_path=None,
witness_path = None,
compress_selectors=DEFAULT_COMPRESS_SELECTORS.parse().unwrap(),
disable_selector_compression=DEFAULT_DISABLE_SELECTOR_COMPRESSION.parse().unwrap(),
))]
fn setup(
model: PathBuf,
@@ -627,7 +630,7 @@ fn setup(
pk_path: PathBuf,
srs_path: Option<PathBuf>,
witness_path: Option<PathBuf>,
compress_selectors: bool,
disable_selector_compression: bool,
) -> Result<bool, PyErr> {
crate::execute::setup(
model,
@@ -635,7 +638,7 @@ fn setup(
vk_path,
pk_path,
witness_path,
compress_selectors,
disable_selector_compression,
)
.map_err(|e| {
let err_str = format!("Failed to run setup: {}", e);
@@ -685,14 +688,23 @@ fn prove(
settings_path=PathBuf::from(DEFAULT_SETTINGS),
vk_path=PathBuf::from(DEFAULT_VK),
srs_path=None,
non_reduced_srs=DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION.parse::<bool>().unwrap(),
))]
fn verify(
proof_path: PathBuf,
settings_path: PathBuf,
vk_path: PathBuf,
srs_path: Option<PathBuf>,
non_reduced_srs: bool,
) -> Result<bool, PyErr> {
crate::execute::verify(proof_path, settings_path, vk_path, srs_path).map_err(|e| {
crate::execute::verify(
proof_path,
settings_path,
vk_path,
srs_path,
non_reduced_srs,
)
.map_err(|e| {
let err_str = format!("Failed to run verify: {}", e);
PyRuntimeError::new_err(err_str)
})?;
@@ -707,7 +719,7 @@ fn verify(
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
split_proofs = false,
srs_path = None,
compress_selectors=DEFAULT_COMPRESS_SELECTORS.parse().unwrap(),
disable_selector_compression=DEFAULT_DISABLE_SELECTOR_COMPRESSION.parse().unwrap(),
))]
fn setup_aggregate(
sample_snarks: Vec<PathBuf>,
@@ -716,7 +728,7 @@ fn setup_aggregate(
logrows: u32,
split_proofs: bool,
srs_path: Option<PathBuf>,
compress_selectors: bool,
disable_selector_compression: bool,
) -> Result<bool, PyErr> {
crate::execute::setup_aggregate(
sample_snarks,
@@ -725,7 +737,7 @@ fn setup_aggregate(
srs_path,
logrows,
split_proofs,
compress_selectors,
disable_selector_compression,
)
.map_err(|e| {
let err_str = format!("Failed to setup aggregate: {}", e);
@@ -1018,24 +1030,15 @@ fn verify_evm(
addr_da: Option<&str>,
addr_vk: Option<&str>,
) -> Result<bool, PyErr> {
let addr_verifier = H160::from_str(addr_verifier).map_err(|e| {
let err_str = format!("address is invalid: {}", e);
PyRuntimeError::new_err(err_str)
})?;
let addr_verifier = H160Flag::from(addr_verifier);
let addr_da = if let Some(addr_da) = addr_da {
let addr_da = H160::from_str(addr_da).map_err(|e| {
let err_str = format!("address is invalid: {}", e);
PyRuntimeError::new_err(err_str)
})?;
let addr_da = H160Flag::from(addr_da);
Some(addr_da)
} else {
None
};
let addr_vk = if let Some(addr_vk) = addr_vk {
let addr_vk = H160::from_str(addr_vk).map_err(|e| {
let err_str = format!("address is invalid: {}", e);
PyRuntimeError::new_err(err_str)
})?;
let addr_vk = H160Flag::from(addr_vk);
Some(addr_vk)
} else {
None
@@ -1093,16 +1096,6 @@ fn create_evm_verifier_aggr(
Ok(true)
}
/// print hex representation of a proof
#[pyfunction(signature = (proof_path))]
fn print_proof_hex(proof_path: PathBuf) -> Result<String, PyErr> {
let proof = Snark::load::<KZGCommitmentScheme<Bn256>>(&proof_path)
.map_err(|_| PyIOError::new_err("Failed to load proof"))?;
let hex_str = hex::encode(proof.proof);
Ok(format!("0x{}", hex_str))
}
// Python Module
#[pymodule]
fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
@@ -1111,13 +1104,13 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_class::<PyG1Affine>()?;
m.add_class::<PyG1>()?;
m.add_class::<PyTestDataSource>()?;
m.add_function(wrap_pyfunction!(string_to_felt, m)?)?;
m.add_function(wrap_pyfunction!(string_to_int, m)?)?;
m.add_function(wrap_pyfunction!(string_to_float, m)?)?;
m.add_function(wrap_pyfunction!(felt_to_big_endian, m)?)?;
m.add_function(wrap_pyfunction!(felt_to_int, m)?)?;
m.add_function(wrap_pyfunction!(felt_to_float, m)?)?;
m.add_function(wrap_pyfunction!(kzg_commit, m)?)?;
m.add_function(wrap_pyfunction!(swap_proof_commitments, m)?)?;
m.add_function(wrap_pyfunction!(poseidon_hash, m)?)?;
m.add_function(wrap_pyfunction!(float_to_string, m)?)?;
m.add_function(wrap_pyfunction!(float_to_felt, m)?)?;
m.add_function(wrap_pyfunction!(buffer_to_felts, m)?)?;
m.add_function(wrap_pyfunction!(gen_vk_from_pk_aggr, m)?)?;
m.add_function(wrap_pyfunction!(gen_vk_from_pk_single, m)?)?;
@@ -1141,7 +1134,6 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(deploy_vk_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
m.add_function(wrap_pyfunction!(print_proof_hex, m)?)?;
m.add_function(wrap_pyfunction!(setup_test_evm_witness, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_verifier_aggr, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_data_attestation, m)?)?;

View File

@@ -580,16 +580,16 @@ impl<T: Clone + TensorType> Tensor<T> {
/// use ezkl::tensor::Tensor;
/// let mut a = Tensor::<i32>::new(Some(&[1,2,3,4,5,6]), &[2, 3]).unwrap();
/// let expected = Tensor::<i32>::new(Some(&[1, 2, 3, 4, 5, 6, 0, 0]), &[8]).unwrap();
/// assert_eq!(a.pad_to_zero_rem(4).unwrap(), expected);
/// assert_eq!(a.pad_to_zero_rem(4, 0).unwrap(), expected);
///
/// let expected = Tensor::<i32>::new(Some(&[1, 2, 3, 4, 5, 6, 0, 0, 0]), &[9]).unwrap();
/// assert_eq!(a.pad_to_zero_rem(9).unwrap(), expected);
/// assert_eq!(a.pad_to_zero_rem(9, 0).unwrap(), expected);
/// ```
pub fn pad_to_zero_rem(&self, n: usize) -> Result<Tensor<T>, TensorError> {
pub fn pad_to_zero_rem(&self, n: usize, pad: T) -> Result<Tensor<T>, TensorError> {
let mut inner = self.inner.clone();
let remainder = self.len() % n;
if remainder != 0 {
inner.resize(self.len() + n - remainder, T::zero().unwrap());
inner.resize(self.len() + n - remainder, pad);
}
Tensor::new(Some(&inner), &[inner.len()])
}
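A minimal doctest-style sketch of the generalized signature above, using an illustrative non-zero pad value (the tensor values here are made up for illustration, not taken from the crate's tests):

use ezkl::tensor::Tensor;
// pad a length-6 tensor up to the next multiple of 4, filling with -1 instead of zero
let a = Tensor::<i32>::new(Some(&[1, 2, 3, 4, 5, 6]), &[2, 3]).unwrap();
let padded = a.pad_to_zero_rem(4, -1).unwrap();
let expected = Tensor::<i32>::new(Some(&[1, 2, 3, 4, 5, 6, -1, -1]), &[8]).unwrap();
assert_eq!(padded, expected);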
@@ -1526,18 +1526,20 @@ pub fn get_broadcasted_shape(
let num_dims_a = shape_a.len();
let num_dims_b = shape_b.len();
// rewrite the below using match
if num_dims_a == num_dims_b {
let mut broadcasted_shape = Vec::with_capacity(num_dims_a);
for (dim_a, dim_b) in shape_a.iter().zip(shape_b.iter()) {
let max_dim = dim_a.max(dim_b);
broadcasted_shape.push(*max_dim);
match (num_dims_a, num_dims_b) {
(a, b) if a == b => {
let mut broadcasted_shape = Vec::with_capacity(num_dims_a);
for (dim_a, dim_b) in shape_a.iter().zip(shape_b.iter()) {
let max_dim = dim_a.max(dim_b);
broadcasted_shape.push(*max_dim);
}
Ok(broadcasted_shape)
}
Ok(broadcasted_shape)
} else if num_dims_a < num_dims_b {
Ok(shape_b.to_vec())
} else {
Ok(shape_a.to_vec())
(a, b) if a < b => Ok(shape_b.to_vec()),
(a, b) if a > b => Ok(shape_a.to_vec()),
_ => Err(Box::new(TensorError::DimError(
"Unknown condition for broadcasting".to_string(),
))),
}
}
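The refactored broadcasting rule reads as: equal ranks take the element-wise maximum of each dimension, otherwise the higher-rank shape wins wholesale. A standalone sketch of that rule, independent of the ezkl types and shown only for illustration:

fn broadcast_shape(a: &[usize], b: &[usize]) -> Vec<usize> {
    if a.len() == b.len() {
        // equal rank: element-wise max of the two shapes
        a.iter().zip(b.iter()).map(|(x, y)| *x.max(y)).collect()
    } else if a.len() < b.len() {
        b.to_vec()
    } else {
        a.to_vec()
    }
}

assert_eq!(broadcast_shape(&[2, 1], &[2, 3]), vec![2, 3]);
assert_eq!(broadcast_shape(&[3], &[2, 3]), vec![2, 3]);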
////////////////////////

View File

@@ -243,7 +243,7 @@ pub fn and<
/// Some(&[1, 0, 1, 0, 1, 0]),
/// &[2, 3],
/// ).unwrap();
/// let result = equals(&a, &b).unwrap().0;
/// let result = equals(&a, &b).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 0, 1, 0, 1, 1]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
@@ -260,7 +260,7 @@ pub fn equals<
>(
a: &Tensor<T>,
b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
let a = a.clone();
let b = b.clone();
@@ -268,7 +268,7 @@ pub fn equals<
let result = nonlinearities::kronecker_delta(&diff);
Ok((result, vec![diff]))
Ok(result)
}
/// Greater than operation.
@@ -289,7 +289,7 @@ pub fn equals<
/// ).unwrap();
/// let result = greater(&a, &b).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[0, 1, 1, 0, 0, 0]), &[2, 3]).unwrap();
/// assert_eq!(result.0, expected);
/// assert_eq!(result, expected);
/// ```
pub fn greater<
T: TensorType
@@ -302,7 +302,7 @@ pub fn greater<
>(
a: &Tensor<T>,
b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
let mask_inter = (a.clone() - b.clone())?;
let mask = mask_inter.map(|x| {
if x > T::zero().ok_or(TensorError::Unsupported).unwrap() {
@@ -311,7 +311,7 @@ pub fn greater<
T::zero().ok_or(TensorError::Unsupported).unwrap()
}
});
Ok((mask, vec![mask_inter]))
Ok(mask)
}
/// Greater equals than operation.
@@ -332,7 +332,7 @@ pub fn greater<
/// ).unwrap();
/// let result = greater_equal(&a, &b).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 1, 1, 1, 0, 0]), &[2, 3]).unwrap();
/// assert_eq!(result.0, expected);
/// assert_eq!(result, expected);
/// ```
pub fn greater_equal<
T: TensorType
@@ -345,7 +345,7 @@ pub fn greater_equal<
>(
a: &Tensor<T>,
b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
let mask_inter = (a.clone() - b.clone())?;
let mask = mask_inter.map(|x| {
if x >= T::zero().ok_or(TensorError::Unsupported).unwrap() {
@@ -354,7 +354,7 @@ pub fn greater_equal<
T::zero().ok_or(TensorError::Unsupported).unwrap()
}
});
Ok((mask, vec![mask_inter]))
Ok(mask)
}
/// Less than to operation.
@@ -375,7 +375,7 @@ pub fn greater_equal<
/// ).unwrap();
/// let result = less(&a, &b).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[0, 1, 0, 0, 0, 1]), &[2, 3]).unwrap();
/// assert_eq!(result.0, expected);
/// assert_eq!(result, expected);
/// ```
///
pub fn less<
@@ -389,7 +389,7 @@ pub fn less<
>(
a: &Tensor<T>,
b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
// a < b <=> b > a
greater(b, a)
}
@@ -412,7 +412,7 @@ pub fn less<
/// ).unwrap();
/// let result = less_equal(&a, &b).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 1, 0, 1, 1, 1]), &[2, 3]).unwrap();
/// assert_eq!(result.0, expected);
/// assert_eq!(result, expected);
/// ```
///
pub fn less_equal<
@@ -426,7 +426,7 @@ pub fn less_equal<
>(
a: &Tensor<T>,
b: &Tensor<T>,
) -> Result<(Tensor<T>, Vec<Tensor<T>>), TensorError> {
) -> Result<Tensor<T>, TensorError> {
// a < b <=> b > a
greater_equal(b, a)
}
@@ -992,45 +992,6 @@ pub fn mult<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::S
Ok(output)
}
/// Divides multiple tensors.
/// # Arguments
/// * `t` - Tensors
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::tensor::ops::div;
/// let x = Tensor::<i128>::new(
/// Some(&[2, 1, 2, 1, 1, 1]),
/// &[2, 3],
/// ).unwrap();
/// let k = Tensor::<i128>::new(
/// Some(&[2, 3, 2, 1, 1, 1]),
/// &[2, 3],
/// ).unwrap();
/// let result = div(&[x, k]).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 0, 1, 1, 1, 1]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn div<
T: TensorType
+ Div<Output = T>
+ Mul<Output = T>
+ From<u64>
+ std::marker::Send
+ std::marker::Sync,
>(
t: &[Tensor<T>],
) -> Result<Tensor<T>, TensorError> {
// calculate value of output
let mut output: Tensor<T> = t[0].clone();
for e in t[1..].iter() {
output = (output / e.clone())?;
}
Ok(output)
}
/// Rescale a tensor with a const integer (similar to const_mult).
/// # Arguments
///
@@ -2339,12 +2300,12 @@ pub fn deconv<
/// Some(&[5, 2, 3, 0, 4, -1, 3, 1, 6]),
/// &[1, 1, 3, 3],
/// ).unwrap();
/// let pooled = sumpool(&x, [(0, 0); 2], (1, 1), (2, 2), false).unwrap().0;
/// let pooled = sumpool(&x, [(0, 0); 2], (1, 1), (2, 2), false).unwrap();
/// let expected: Tensor<i128> = Tensor::<i128>::new(Some(&[11, 8, 8, 10]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(pooled, expected);
///
/// // This time with normalization
/// let pooled = sumpool(&x, [(0, 0); 2], (1, 1), (2, 2), true).unwrap().0;
/// let pooled = sumpool(&x, [(0, 0); 2], (1, 1), (2, 2), true).unwrap();
/// let expected: Tensor<i128> = Tensor::<i128>::new(Some(&[3, 2, 2, 3]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(pooled, expected);
/// ```
@@ -2354,7 +2315,7 @@ pub fn sumpool(
stride: (usize, usize),
kernel_shape: (usize, usize),
normalize: bool,
) -> Result<(Tensor<i128>, Vec<Tensor<i128>>), TensorError> {
) -> Result<Tensor<i128>, TensorError> {
let image_dims = image.dims();
let batch_size = image_dims[0];
let image_channels = image_dims[1];
@@ -2384,15 +2345,12 @@ pub fn sumpool(
let mut combined = res.combine()?;
combined.reshape(&[&[batch_size, image_channels], shape].concat())?;
let mut inter = vec![];
if normalize {
inter.push(combined.clone());
let norm = kernel.len();
combined = nonlinearities::const_div(&combined, norm as f64);
}
Ok((combined, inter))
Ok(combined)
}
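When normalize is true, sumpool divides each pooled sum by the kernel area via const_div; for the 2x2 kernel in the doctest above that maps [11, 8, 8, 10] to [3, 2, 2, 3]. A standalone sketch of that normalization step, assuming the round-half-away-from-zero behaviour the doctest implies (illustrative only, not the crate's internal code path):

fn normalize_sumpool(pooled: &[i128], kernel_area: usize) -> Vec<i128> {
    pooled
        .iter()
        .map(|x| (*x as f64 / kernel_area as f64).round() as i128)
        .collect()
}

assert_eq!(normalize_sumpool(&[11, 8, 8, 10], 4), vec![3, 2, 2, 3]);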
/// Applies 2D max pooling over a 4D tensor of shape B x C x H x W.
@@ -3087,11 +3045,7 @@ pub mod nonlinearities {
}
/// softmax layout
pub fn softmax_axes(
a: &Tensor<i128>,
scale: f64,
axes: &[usize],
) -> (Tensor<i128>, Vec<Tensor<i128>>) {
pub fn softmax_axes(a: &Tensor<i128>, scale: f64, axes: &[usize]) -> Tensor<i128> {
// we want this to be as small as possible so we set the output scale to 1
let dims = a.dims();
@@ -3099,8 +3053,6 @@ pub mod nonlinearities {
return softmax(a, scale);
}
let mut intermediate_values = vec![];
let cartesian_coord = dims[..dims.len() - 1]
.iter()
.map(|x| 0..*x)
@@ -3123,8 +3075,7 @@ pub mod nonlinearities {
let res = softmax(&softmax_input, scale);
outputs.push(res.0);
intermediate_values.extend(res.1);
outputs.push(res);
}
let mut res = Tensor::new(Some(&outputs), &[outputs.len()])
@@ -3132,7 +3083,7 @@ pub mod nonlinearities {
.combine()
.unwrap();
res.reshape(dims).unwrap();
(res, intermediate_values)
res
}
/// Applies softmax
@@ -3149,24 +3100,20 @@ pub mod nonlinearities {
/// Some(&[2, 2, 3, 2, 2, 0]),
/// &[2, 3],
/// ).unwrap();
/// let result = softmax(&x, 128.0).0;
/// let result = softmax(&x, 128.0);
/// // doubles the scale of the input
/// let expected = Tensor::<i128>::new(Some(&[2730, 2730, 2751, 2730, 2730, 2688]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn softmax(a: &Tensor<i128>, scale: f64) -> (Tensor<i128>, Vec<Tensor<i128>>) {
pub fn softmax(a: &Tensor<i128>, scale: f64) -> Tensor<i128> {
// the more accurate calculation is commented out and we implement as below so it matches the steps in layout
let mut intermediate_values = vec![];
intermediate_values.push(a.clone());
let exp = exp(a, scale);
let sum = sum(&exp).unwrap();
intermediate_values.push(sum.clone());
let inv_denom = recip(&sum, scale.powf(2.0));
let inv_denom = recip(&sum, scale, scale);
((exp * inv_denom).unwrap(), intermediate_values)
(exp * inv_denom).unwrap()
}
/// Applies range_check_percent
@@ -3201,7 +3148,7 @@ pub mod nonlinearities {
// the more accurate calculation is commented out and we implement as below so it matches the steps in layout
let scale = input_scale * output_scale;
let diff: Tensor<i128> = sub(t).unwrap();
let recip = recip(&t[0], scale as f64);
let recip = recip(&t[0], input_scale as f64, output_scale as f64);
let product = mult(&[diff, recip]).unwrap();
let _tol = ((tol / 100.0) * scale as f32).round() as f64;
let upper_bound = greater_than(&product, _tol);
@@ -3812,14 +3759,39 @@ pub mod nonlinearities {
/// &[2, 3],
/// ).unwrap();
/// let k = 2_f64;
/// let result = recip(&x, k);
/// let result = recip(&x, 1.0, k);
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 1, 0, 2, 2]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn recip(a: &Tensor<i128>, scale: f64) -> Tensor<i128> {
pub fn recip(a: &Tensor<i128>, input_scale: f64, out_scale: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let denom = (1_f64) / (a_i as f64 + f64::EPSILON);
let d_inv_x = scale * denom;
let rescaled = (a_i as f64) / input_scale;
let denom = (1_f64) / (rescaled + f64::EPSILON);
let d_inv_x = out_scale * denom;
Ok::<_, TensorError>(d_inv_x.round() as i128)
})
.unwrap()
}
/// Elementwise inverse.
/// # Arguments
/// * `out_scale` - Single value
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::tensor::ops::nonlinearities::zero_recip;
/// let k = 2_f64;
/// let result = zero_recip(1.0);
/// let expected = Tensor::<i128>::new(Some(&[4503599627370496]), &[1]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn zero_recip(out_scale: f64) -> Tensor<i128> {
let a = Tensor::<i128>::new(Some(&[0]), &[1]).unwrap();
a.par_enum_map(|_, a_i| {
let rescaled = a_i as f64;
let denom = (1_f64) / (rescaled + f64::EPSILON);
let d_inv_x = out_scale * denom;
Ok::<_, TensorError>(d_inv_x.round() as i128)
})
.unwrap()
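The reworked reciprocal first rescales the integer input by input_scale, inverts with an epsilon guard, and then quantizes the result at out_scale; zero_recip pins the zero case to out_scale / f64::EPSILON, which is why the doctest expects 4503599627370496 (2^52) for out_scale = 1.0. A standalone sketch of the same arithmetic on a single value (illustrative only, not the ezkl API):

fn recip_scalar(x: i128, input_scale: f64, out_scale: f64) -> i128 {
    // mirror recip/zero_recip: rescale, invert with an epsilon guard, then quantize
    let rescaled = x as f64 / input_scale;
    (out_scale / (rescaled + f64::EPSILON)).round() as i128
}

assert_eq!(recip_scalar(2, 1.0, 2.0), 1);
assert_eq!(recip_scalar(1, 1.0, 2.0), 2);
// zero input saturates to out_scale / EPSILON, matching zero_recip(1.0)
assert_eq!(recip_scalar(0, 1.0, 1.0), 4503599627370496);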

View File

@@ -4,6 +4,37 @@ use super::{
};
use halo2_proofs::{arithmetic::Field, plonk::Instance};
pub(crate) fn create_constant_tensor<
F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
>(
val: F,
len: usize,
) -> ValTensor<F> {
let mut constant = Tensor::from(vec![ValType::Constant(val); len].into_iter());
constant.set_visibility(&crate::graph::Visibility::Fixed);
ValTensor::from(constant)
}
pub(crate) fn create_unit_tensor<
F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
>(
len: usize,
) -> ValTensor<F> {
let mut unit = Tensor::from(vec![ValType::Constant(F::ONE); len].into_iter());
unit.set_visibility(&crate::graph::Visibility::Fixed);
ValTensor::from(unit)
}
pub(crate) fn create_zero_tensor<
F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
>(
len: usize,
) -> ValTensor<F> {
let mut zero = Tensor::from(vec![ValType::Constant(F::ZERO); len].into_iter());
zero.set_visibility(&crate::graph::Visibility::Fixed);
ValTensor::from(zero)
}
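The three constructors above differ only in the constant they broadcast, so create_unit_tensor and create_zero_tensor are effectively create_constant_tensor specialised to F::ONE and F::ZERO. A hedged sketch of that equivalence (the _alt name is hypothetical; this shows how one could delegate, not necessarily how the crate chooses to factor it):

pub(crate) fn create_unit_tensor_alt<
    F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
>(
    len: usize,
) -> ValTensor<F> {
    // same behaviour as create_unit_tensor: a fixed-visibility tensor of len ones
    create_constant_tensor(F::ONE, len)
}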
#[derive(Debug, Clone)]
/// A [ValType] is a wrapper around Halo2 value(s).
pub enum ValType<F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd> {
@@ -318,6 +349,19 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
matches!(self, ValTensor::Instance { .. })
}
/// reverse order of elements whilst preserving the shape
pub fn reverse(&mut self) -> Result<(), Box<dyn Error>> {
match self {
ValTensor::Value { inner: v, .. } => {
v.reverse();
}
ValTensor::Instance { .. } => {
return Err(Box::new(TensorError::WrongMethod));
}
};
Ok(())
}
///
pub fn set_initial_instance_offset(&mut self, offset: usize) {
if let ValTensor::Instance { initial_offset, .. } = self {
@@ -454,12 +498,12 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
}
/// Calls `pad_to_zero_rem` on the inner tensor.
pub fn pad_to_zero_rem(&mut self, n: usize) -> Result<(), Box<dyn Error>> {
pub fn pad_to_zero_rem(&mut self, n: usize, pad: ValType<F>) -> Result<(), Box<dyn Error>> {
match self {
ValTensor::Value {
inner: v, dims: d, ..
} => {
*v = v.pad_to_zero_rem(n)?;
*v = v.pad_to_zero_rem(n, pad)?;
*d = v.dims().to_vec();
}
ValTensor::Instance { .. } => {
@@ -672,7 +716,7 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
}
Ok(indices)
}
ValTensor::Instance { .. } => Err(TensorError::WrongMethod),
ValTensor::Instance { .. } => Ok(vec![]),
}
}
@@ -690,7 +734,7 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
}
Ok(indices)
}
ValTensor::Instance { .. } => Err(TensorError::WrongMethod),
ValTensor::Instance { .. } => Ok(vec![]),
}
}
@@ -709,7 +753,11 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
*d = v.dims().to_vec();
}
ValTensor::Instance { .. } => {
return Err(TensorError::WrongMethod);
if indices.is_empty() {
return Ok(());
} else {
return Err(TensorError::WrongMethod);
}
}
}
Ok(())
@@ -871,3 +919,30 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
}
}
}
impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
/// inverts the inner values
pub fn inverse(&self) -> Result<ValTensor<F>, Box<dyn Error>> {
let mut cloned_self = self.clone();
match &mut cloned_self {
ValTensor::Value {
inner: v, dims: d, ..
} => {
*v = v.map(|x| match x {
ValType::AssignedValue(v) => ValType::AssignedValue(v.invert()),
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
ValType::AssignedValue(v.value_field().invert())
}
ValType::Value(v) => ValType::Value(v.map(|x| x.invert().unwrap_or(F::ZERO))),
ValType::Constant(v) => ValType::Constant(v.invert().unwrap_or(F::ZERO)),
});
*d = v.dims().to_vec();
}
ValTensor::Instance { .. } => {
return Err(Box::new(TensorError::WrongMethod));
}
};
Ok(cloned_self)
}
}
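Note that inverse falls back to F::ZERO whenever a value has no multiplicative inverse (i.e. the value is zero) instead of erroring. A doctest-style sketch of that convention on bare field elements, using the same halo2curves Fr type that appears elsewhere in this diff (illustrative only):

use halo2_proofs::arithmetic::Field;
use halo2curves::bn256::Fr;

fn invert_or_zero(x: Fr) -> Fr {
    // zero has no inverse, so map it to zero, as ValTensor::inverse does
    x.invert().unwrap_or(Fr::ZERO)
}

assert_eq!(invert_or_zero(Fr::ZERO), Fr::ZERO);
assert_eq!(invert_or_zero(Fr::from(2)) * Fr::from(2), Fr::ONE);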

View File

@@ -33,10 +33,7 @@ pub enum VarTensor {
impl VarTensor {
///
pub fn is_advice(&self) -> bool {
match self {
VarTensor::Advice { .. } => true,
_ => false,
}
matches!(self, VarTensor::Advice { .. })
}
///

View File

@@ -69,19 +69,30 @@ pub fn encodeVerifierCalldata(
Ok(encoded)
}
/// Converts 4 u64s to a field element
/// Converts a felt to a big-endian hex string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn stringToFelt(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
pub fn feltToBigEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(format!("{:?}", felt))
}
/// Converts 4 u64s representing a field element directly to an integer
/// Converts a felt to a little endian string
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn stringToInt(
pub fn feltToLittleEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
let repr = serde_json::to_string(&felt).unwrap();
let b: String = serde_json::from_str(&repr).unwrap();
Ok(b)
}
/// Converts a felt directly to an integer
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn feltToInt(
array: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
@@ -92,10 +103,10 @@ pub fn stringToInt(
))
}
/// Converts 4 u64s representing a field element directly to a (rescaled from fixed point scaling) floating point
/// Converts a felt to a (rescaled from fixed point) floating point number
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn stringToFloat(
pub fn feltToFloat(
array: wasm_bindgen::Clamped<Vec<u8>>,
scale: crate::Scale,
) -> Result<f64, JsError> {
@@ -106,26 +117,26 @@ pub fn stringToFloat(
Ok(int_rep as f64 / multiplier)
}
/// Converts a floating point element to 4 u64s representing a fixed point field element
/// Converts a floating point number to a hex string representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn floatTostring(
pub fn floatToFelt(
input: f64,
scale: crate::Scale,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let int_rep =
quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
let felt = i128_to_felt(int_rep);
let vec = crate::pfsys::field_to_string_montgomery::<halo2curves::bn256::Fr>(&felt);
let vec = crate::pfsys::field_to_string::<halo2curves::bn256::Fr>(&felt);
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&vec).map_err(
|e| JsError::new(&format!("Failed to serialize string_montgomery{}", e)),
|e| JsError::new(&format!("Failed to serialize a float to felt{}", e)),
)?))
}
/// Converts a buffer to vector of 4 u64s representing a fixed point field element
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn bufferToVecOfstring(
pub fn bufferToVecOfFelt(
buffer: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
// Convert the buffer to a slice
@@ -211,7 +222,7 @@ pub fn genWitness(
.map_err(|e| JsError::new(&format!("{}", e)))?;
let witness = circuit
.forward(&mut input, None, None)
.forward(&mut input, None, None, false)
.map_err(|e| JsError::new(&format!("{}", e)))?;
serde_json::to_vec(&witness)
@@ -311,13 +322,19 @@ pub fn verify(
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit_settings,
circuit_settings.clone(),
)
.map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
let strategy = KZGSingleStrategy::new(params.verifier_params());
let result = verify_proof_circuit_kzg(params.verifier_params(), snark, &vk, strategy);
let result = verify_proof_circuit_kzg(
params.verifier_params(),
snark,
&vk,
strategy,
1 << circuit_settings.run_args.logrows,
);
match result {
Ok(_) => Ok(true),
@@ -387,15 +404,6 @@ pub fn prove(
.into_bytes())
}
/// print hex representation of a proof
#[wasm_bindgen]
#[allow(non_snake_case)]
pub fn printProofHex(proof: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
let hex_str = hex::encode(proof.proof);
Ok(format!("0x{}", hex_str))
}
// VALIDATION FUNCTIONS
/// Witness file validation

View File

@@ -2,6 +2,8 @@
#[cfg(test)]
mod native_tests {
use ezkl::circuit::Tolerance;
use ezkl::fieldutils::{felt_to_i128, i128_to_felt};
// use ezkl::circuit::table::RESERVED_BLINDING_ROWS_PAD;
use ezkl::graph::input::{FileSource, FileSourceInner, GraphData};
use ezkl::graph::{DataSource, GraphSettings, GraphWitness};
@@ -182,99 +184,100 @@ mod native_tests {
"mnist_gan",
];
const ACCURACY_CAL_TESTS: [&str; 5] = [
const ACCURACY_CAL_TESTS: [&str; 6] = [
"accuracy",
"1l_mlp",
"4l_relu_conv_fc",
"1l_elu",
"1l_prelu",
"1l_tiny_div",
];
const TESTS: [&str; 77] = [
"1l_mlp",
"1l_mlp", //0
"1l_slice",
"1l_concat",
"1l_flatten",
// "1l_average",
"1l_div",
"1l_pad",
"1l_pad", // 5
"1l_reshape",
"1l_eltwise_div",
"1l_sigmoid",
"1l_sqrt",
"1l_softmax",
"1l_softmax", //10
// "1l_instance_norm",
"1l_batch_norm",
"1l_prelu",
"1l_leakyrelu",
"1l_gelu_noappx",
// "1l_gelu_tanh_appx",
"1l_relu",
"1l_relu", //15
"1l_downsample",
"1l_tanh",
"2l_relu_sigmoid_small",
"2l_relu_fc",
"2l_relu_small",
"2l_relu_small", //20
"2l_relu_sigmoid",
"1l_conv",
"2l_sigmoid_small",
"2l_relu_sigmoid_conv",
"3l_relu_conv_fc",
"3l_relu_conv_fc", //25
"4l_relu_conv_fc",
"1l_erf",
"1l_var",
"1l_elu", //30
"min",
"1l_elu",
"min", //30
"max",
"1l_max_pool",
"1l_conv_transpose",
"1l_upsample", //35
"1l_identity",
"1l_upsample",
"1l_identity", //35
"idolmodel",
"trig",
"prelu_gmm",
"lstm", //40
"rnn",
"lstm",
"rnn", //40
"quantize_dequantize",
"1l_where",
"boolean",
"boolean_identity",
"decision_tree", // "variable_cnn",
"decision_tree", // 45
"random_forest",
"gradient_boosted_trees",
"1l_topk",
"xgboost", //50
"lightgbm",
"xgboost",
"lightgbm", //50
"hummingbird_decision_tree",
"oh_decision_tree",
"linear_svc",
"gather_elements",
"less",
"less", //55
"xgboost_reg",
"1l_powf",
"scatter_elements",
"1l_linear", //60
"linear_regression",
"1l_linear",
"linear_regression", //60
"sklearn_mlp",
"1l_mean",
"rounding_ops",
// "mean_as_constrain",
"arange",
"layernorm",
"layernorm", //65
"bitwise_ops",
"blackman_window",
"softsign", //70
"softsign", //68
"softplus",
"selu",
"selu", //70
"hard_sigmoid",
"log_softmax",
"eye",
"ltsf",
"remainder",
"remainder", //75
"bitshift",
];
const WASM_TESTS: [&str; 48] = [
const WASM_TESTS: [&str; 46] = [
"1l_mlp",
"1l_slice",
"1l_concat",
@@ -323,8 +326,6 @@ mod native_tests {
"1l_where",
"boolean",
"boolean_identity",
"decision_tree", // "variable_cnn",
"random_forest",
"gradient_boosted_trees",
"1l_topk",
// "xgboost",
@@ -476,6 +477,7 @@ mod native_tests {
use crate::native_tests::kzg_fuzz;
use crate::native_tests::render_circuit;
use crate::native_tests::model_serialization_different_binaries;
use rand::Rng;
use tempdir::TempDir;
#[test]
@@ -489,13 +491,13 @@ mod native_tests {
test_dir.close().unwrap();
}
seq!(N in 0..=4 {
seq!(N in 0..=5 {
#(#[test_case(ACCURACY_CAL_TESTS[N])])*
fn mock_accuracy_cal_tests(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "fixed", "public", 1, "accuracy", None);
mock(path, test.to_string(), "public", "fixed", "public", 1, "accuracy", None, 0.0);
test_dir.close().unwrap();
}
});
@@ -559,7 +561,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 18.0, false);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 3.1, false);
test_dir.close().unwrap();
}
@@ -568,10 +570,23 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None);
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn mock_tolerance_public_outputs_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// gen a random tolerance between 0.0 and 100.0 percent
let tolerance = rand::thread_rng().gen_range(0.0..1.0) * 100.0;
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, tolerance);
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn mock_large_batch_public_outputs_(test: &str) {
crate::native_tests::init_binary();
@@ -579,7 +594,7 @@ mod native_tests {
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let large_batch_dir = &format!("large_batches_{}", test);
crate::native_tests::mk_data_batches_(path, test, &large_batch_dir, 10);
mock(path, large_batch_dir.to_string(), "private", "private", "public", 10, "resources", None);
mock(path, large_batch_dir.to_string(), "private", "private", "public", 10, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -588,7 +603,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "private", "private", 1, "resources", None);
mock(path, test.to_string(), "public", "private", "private", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -597,7 +612,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "fixed", "private", "private", 1, "resources", None);
mock(path, test.to_string(), "fixed", "private", "private", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -606,7 +621,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "private", "fixed", 1, "resources", None);
mock(path, test.to_string(), "private", "private", "fixed", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -615,7 +630,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "fixed", "private", 1, "resources", None);
mock(path, test.to_string(), "private", "fixed", "private", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -624,7 +639,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "hashed", "private", "public", 1, "resources", None);
mock(path, test.to_string(), "hashed", "private", "public", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -633,7 +648,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "kzgcommit", "private", "public", 1, "resources", None);
mock(path, test.to_string(), "kzgcommit", "private", "public", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -643,7 +658,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "hashed", "public", 1, "resources", None);
mock(path, test.to_string(), "private", "hashed", "public", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -653,7 +668,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "kzgcommit", "public", 1, "resources", None);
mock(path, test.to_string(), "private", "kzgcommit", "public", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -662,7 +677,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "private", "hashed", 1, "resources", None);
mock(path, test.to_string(), "public", "private", "hashed", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -672,7 +687,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "private", "kzgcommit", 1, "resources", None);
mock(path, test.to_string(), "public", "private", "kzgcommit", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -681,7 +696,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "fixed", "hashed", 1, "resources", None);
mock(path, test.to_string(), "public", "fixed", "hashed", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -691,7 +706,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "kzgcommit", "hashed", 1, "resources", None);
mock(path, test.to_string(), "public", "kzgcommit", "hashed", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -701,7 +716,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "kzgcommit", "kzgcommit", "kzgcommit", 1, "resources", None);
mock(path, test.to_string(), "kzgcommit", "kzgcommit", "kzgcommit", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -711,7 +726,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "hashed", "private", "hashed", 1, "resources", None);
mock(path, test.to_string(), "hashed", "private", "hashed", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -721,7 +736,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// needs an extra row for the large model
mock(path, test.to_string(),"hashed", "hashed", "public", 1, "resources", None);
mock(path, test.to_string(),"hashed", "hashed", "public", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -731,7 +746,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// needs an extra row for the large model
mock(path, test.to_string(),"hashed", "hashed", "hashed", 1, "resources", None);
mock(path, test.to_string(),"hashed", "hashed", "hashed", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
@@ -827,7 +842,7 @@ mod native_tests {
});
seq!(N in 0..=47 {
seq!(N in 0..=45 {
#(#[test_case(WASM_TESTS[N])])*
fn kzg_prove_and_verify_with_overflow_(test: &str) {
@@ -875,7 +890,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
});
@@ -1272,7 +1287,9 @@ mod native_tests {
batch_size: usize,
cal_target: &str,
scales_to_use: Option<Vec<u32>>,
tolerance: f32,
) {
let mut tolerance = tolerance;
gen_circuit_settings_and_witness(
test_dir,
example_name.clone(),
@@ -1284,19 +1301,131 @@ mod native_tests {
scales_to_use,
2,
false,
&mut tolerance,
);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"mock",
"-W",
format!("{}/{}/witness.json", test_dir, example_name).as_str(),
"-M",
format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
])
.status()
.expect("failed to execute process");
assert!(status.success());
if tolerance > 0.0 {
// load witness and shift the output by a small amount that is less than tolerance percent
let witness = GraphWitness::from_path(
format!("{}/{}/witness.json", test_dir, example_name).into(),
)
.unwrap();
let witness = witness.clone();
let outputs = witness.outputs.clone();
// get values as i128
let output_perturbed_safe: Vec<Vec<halo2curves::bn256::Fr>> = outputs
.iter()
.map(|sv| {
sv.iter()
.map(|v| {
// randomly perturb by a small amount less than tolerance
let perturbation = if v == &halo2curves::bn256::Fr::zero() {
halo2curves::bn256::Fr::zero()
} else {
i128_to_felt(
(felt_to_i128(*v) as f32
* (rand::thread_rng().gen_range(-0.01..0.01) * tolerance))
as i128,
)
};
*v + perturbation
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
// get values as i128
let output_perturbed_bad: Vec<Vec<halo2curves::bn256::Fr>> = outputs
.iter()
.map(|sv| {
sv.iter()
.map(|v| {
// randomly perturb by a small amount less than tolerance
let perturbation = if v == &halo2curves::bn256::Fr::zero() {
halo2curves::bn256::Fr::from(2)
} else {
i128_to_felt(
(felt_to_i128(*v) as f32
* (rand::thread_rng().gen_range(0.02..0.1) * tolerance))
as i128,
)
};
*v + perturbation
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let good_witness = GraphWitness {
outputs: output_perturbed_safe,
..witness.clone()
};
// save
good_witness
.save(format!("{}/{}/witness_ok.json", test_dir, example_name).into())
.unwrap();
let bad_witness = GraphWitness {
outputs: output_perturbed_bad,
..witness.clone()
};
// save
bad_witness
.save(format!("{}/{}/witness_bad.json", test_dir, example_name).into())
.unwrap();
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"mock",
"-W",
format!("{}/{}/witness.json", test_dir, example_name).as_str(),
"-M",
format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
])
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"mock",
"-W",
format!("{}/{}/witness_ok.json", test_dir, example_name).as_str(),
"-M",
format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
])
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"mock",
"-W",
format!("{}/{}/witness_bad.json", test_dir, example_name).as_str(),
"-M",
format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
])
.status()
.expect("failed to execute process");
assert!(!status.success());
} else {
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"mock",
"-W",
format!("{}/{}/witness.json", test_dir, example_name).as_str(),
"-M",
format!("{}/{}/network.compiled", test_dir, example_name).as_str(),
])
.status()
.expect("failed to execute process");
assert!(status.success());
}
}
#[allow(clippy::too_many_arguments)]
@@ -1311,6 +1440,7 @@ mod native_tests {
scales_to_use: Option<Vec<u32>>,
num_inner_columns: usize,
div_rebasing: bool,
tolerance: &mut f32,
) {
let mut args = vec![
"gen-settings".to_string(),
@@ -1320,11 +1450,12 @@ mod native_tests {
"--settings-path={}/{}/settings.json",
test_dir, example_name
),
format!("--variables=batch_size={}", batch_size),
format!("--variables=batch_size->{}", batch_size),
format!("--input-visibility={}", input_visibility),
format!("--param-visibility={}", param_visibility),
format!("--output-visibility={}", output_visibility),
format!("--num-inner-cols={}", num_inner_columns),
format!("--tolerance={}", tolerance),
];
if div_rebasing {
@@ -1367,6 +1498,24 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
let mut settings =
GraphSettings::load(&format!("{}/{}/settings.json", test_dir, example_name).into())
.unwrap();
let any_output_scales_smol = settings.model_output_scales.iter().any(|s| *s <= 0);
if any_output_scales_smol {
// set the tolerance to 0.0
settings.run_args.tolerance = Tolerance {
val: 0.0,
scale: 0.0.into(),
};
settings
.save(&format!("{}/{}/settings.json", test_dir, example_name).into())
.unwrap();
*tolerance = 0.0;
}
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"compile-circuit",
@@ -1401,6 +1550,7 @@ mod native_tests {
}
// Mock prove (fast, but does not cover some potential issues)
#[allow(clippy::too_many_arguments)]
fn accuracy_measurement(
test_dir: &str,
example_name: String,
@@ -1423,6 +1573,7 @@ mod native_tests {
None,
2,
div_rebasing,
&mut 0.0,
);
println!(
@@ -1454,7 +1605,7 @@ mod native_tests {
format!("{}/{}/network.onnx", test_dir, example_name).as_str(),
"-O",
format!("{}/{}/render.png", test_dir, example_name).as_str(),
"--lookup-range=(-32768,32768)",
"--lookup-range=-32768->32768",
"-K=17",
])
.status()
@@ -1682,6 +1833,7 @@ mod native_tests {
scales_to_use,
num_inner_columns,
false,
&mut 0.0,
);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
@@ -1697,6 +1849,7 @@ mod native_tests {
&format!("{}/{}/key.pk", test_dir, example_name),
"--vk-path",
&format!("{}/{}/key.vk", test_dir, example_name),
"--disable-selector-compression",
])
.status()
.expect("failed to execute process");
@@ -1744,6 +1897,30 @@ mod native_tests {
.status()
.expect("failed to execute process");
assert!(status.success());
// load settings file
let settings =
std::fs::read_to_string(settings_path.clone()).expect("failed to read settings file");
let graph_settings = serde_json::from_str::<GraphSettings>(&settings)
.expect("failed to parse settings file");
// get_srs for the graph_settings_num_instances
download_srs(graph_settings.log2_total_instances());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"verify",
format!("--settings-path={}", settings_path).as_str(),
"--proof-path",
&format!("{}/{}/proof.pf", test_dir, example_name),
"--vk-path",
&format!("{}/{}/key.vk", test_dir, example_name),
"--reduced-srs",
])
.status()
.expect("failed to execute process");
assert!(status.success());
}
// prove-serialize-verify, the usual full path
@@ -1759,6 +1936,7 @@ mod native_tests {
None,
2,
false,
&mut 0.0,
);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
@@ -2032,9 +2210,10 @@ mod native_tests {
1,
"resources",
// we need the accuracy
Some(vec![7, 8]),
Some(vec![4]),
1,
false,
&mut 0.0,
);
let model_path = format!("{}/{}/network.compiled", test_dir, example_name);

View File

@@ -12,7 +12,7 @@ def get_ezkl_output(witness_file, settings_file):
outputs = witness_output['outputs']
with open(settings_file) as f:
settings = json.load(f)
ezkl_outputs = [[ezkl.string_to_float(
ezkl_outputs = [[ezkl.felt_to_float(
outputs[i][j], settings['model_output_scales'][i]) for j in range(len(outputs[i]))] for i in range(len(outputs))]
return ezkl_outputs
@@ -78,16 +78,20 @@ def compare_outputs(zk_output, onnx_output):
zip_object = zip(np.array(zk_output).flatten(),
np.array(onnx_output).flatten())
for list1_i, list2_i in zip_object:
for (i, (list1_i, list2_i)) in enumerate(zip_object):
if list1_i == 0.0 and list2_i == 0.0:
res.append(0)
else:
diff = list1_i - list2_i
res.append(100 * (diff) / (list2_i))
# iterate and print the diffs if they are greater than 0.0
if abs(diff) > 0.0:
print("------- index: ", i)
print("------- diff: ", diff)
print("------- zk_output: ", list1_i)
print("------- onnx_output: ", list2_i)
print("res: ", res)
return np.mean(np.abs(res))
return res
if __name__ == '__main__':
@@ -107,6 +111,9 @@ if __name__ == '__main__':
onnx_output = get_onnx_output(model_file, input_file)
# compare the outputs
percentage_difference = compare_outputs(ezkl_output, onnx_output)
mean_percentage_difference = np.mean(np.abs(percentage_difference))
max_percentage_difference = np.max(np.abs(percentage_difference))
# print the percentage difference
print("mean percent diff: ", percentage_difference)
assert percentage_difference < target, "Percentage difference is too high"
print("mean percent diff: ", mean_percentage_difference)
print("max percent diff: ", max_percentage_difference)
assert mean_percentage_difference < target, "Percentage difference is too high"

View File

@@ -131,7 +131,7 @@ mod py_tests {
"simple_demo_aggregated_proofs.ipynb",
"ezkl_demo.ipynb", // 10
"lstm.ipynb",
"set_membership.ipynb",
"set_membership.ipynb", // 12
"decision_tree.ipynb",
"random_forest.ipynb",
"gradient_boosted_trees.ipynb", // 15

View File

@@ -56,9 +56,9 @@ def test_poseidon_hash():
Test for poseidon_hash
"""
message = [1.0, 2.0, 3.0, 4.0]
message = [ezkl.float_to_string(x, 7) for x in message]
message = [ezkl.float_to_felt(x, 7) for x in message]
res = ezkl.poseidon_hash(message)
assert ezkl.string_to_felt(
assert ezkl.felt_to_big_endian(
res[0]) == "0x0da7e5e5c8877242fa699f586baf770d731defd54f952d4adeb85047a0e32f45"
@@ -70,14 +70,14 @@ def test_field_serialization():
input = 890
scale = 7
felt = ezkl.float_to_string(input, scale)
roundtrip_input = ezkl.string_to_float(felt, scale)
felt = ezkl.float_to_felt(input, scale)
roundtrip_input = ezkl.felt_to_float(felt, scale)
assert input == roundtrip_input
input = -700
scale = 7
felt = ezkl.float_to_string(input, scale)
roundtrip_input = ezkl.string_to_float(felt, scale)
felt = ezkl.float_to_felt(input, scale)
roundtrip_input = ezkl.felt_to_float(felt, scale)
assert input == roundtrip_input
@@ -88,12 +88,12 @@ def test_buffer_to_felts():
buffer = bytearray("a sample string!", 'utf-8')
felts = ezkl.buffer_to_felts(buffer)
ref_felt_1 = "0x0000000000000000000000000000000021676e6972747320656c706d61732061"
assert felts == [ref_felt_1]
assert ezkl.felt_to_big_endian(felts[0]) == ref_felt_1
buffer = bytearray("a sample string!"+"high", 'utf-8')
felts = ezkl.buffer_to_felts(buffer)
ref_felt_2 = "0x0000000000000000000000000000000000000000000000000000000068676968"
assert felts == [ref_felt_1, ref_felt_2]
assert [ezkl.felt_to_big_endian(felts[0]), ezkl.felt_to_big_endian(felts[1])] == [ref_felt_1, ref_felt_2]
def test_gen_srs():
@@ -113,13 +113,13 @@ def test_calibrate_over_user_range():
data_path = os.path.join(
examples_path,
'onnx',
'1l_average',
'1l_relu',
'input.json'
)
model_path = os.path.join(
examples_path,
'onnx',
'1l_average',
'1l_relu',
'network.onnx'
)
output_path = os.path.join(
@@ -147,13 +147,13 @@ def test_calibrate():
data_path = os.path.join(
examples_path,
'onnx',
'1l_average',
'1l_relu',
'input.json'
)
model_path = os.path.join(
examples_path,
'onnx',
'1l_average',
'1l_relu',
'network.onnx'
)
output_path = os.path.join(
@@ -183,7 +183,7 @@ def test_model_compile():
model_path = os.path.join(
examples_path,
'onnx',
'1l_average',
'1l_relu',
'network.onnx'
)
compiled_model_path = os.path.join(
@@ -205,7 +205,7 @@ def test_forward():
data_path = os.path.join(
examples_path,
'onnx',
'1l_average',
'1l_relu',
'input.json'
)
model_path = os.path.join(
@@ -392,9 +392,7 @@ def test_prove_evm():
assert res['transcript_type'] == 'EVM'
assert os.path.isfile(proof_path)
res = ezkl.print_proof_hex(proof_path)
# to figure out a better way of testing print_proof_hex
assert type(res) == str
def test_create_evm_verifier():

View File

@@ -8,9 +8,9 @@ mod wasm32 {
use ezkl::graph::GraphWitness;
use ezkl::pfsys;
use ezkl::wasm::{
bufferToVecOfstring, compiledCircuitValidation, encodeVerifierCalldata, genPk, genVk,
genWitness, inputValidation, pkValidation, poseidonHash, printProofHex, proofValidation,
prove, settingsValidation, srsValidation, stringToFelt, stringToFloat, stringToInt,
bufferToVecOfFelt, compiledCircuitValidation, encodeVerifierCalldata, feltToBigEndian,
feltToFloat, feltToInt, feltToLittleEndian, genPk, genVk, genWitness, inputValidation,
pkValidation, poseidonHash, proofValidation, prove, settingsValidation, srsValidation,
u8_array_to_u128_le, verify, vkValidation, witnessValidation,
};
use halo2_solidity_verifier::encode_calldata;
@@ -76,22 +76,29 @@ mod wasm32 {
for i in 0..32 {
let field_element = Fr::from(i);
let serialized = serde_json::to_vec(&field_element).unwrap();
let clamped = wasm_bindgen::Clamped(serialized);
let scale = 2;
let floating_point = stringToFloat(clamped.clone(), scale)
let floating_point = feltToFloat(clamped.clone(), scale)
.map_err(|_| "failed")
.unwrap();
assert_eq!(floating_point, (i as f64) / 4.0);
let integer: i128 = serde_json::from_slice(
&stringToInt(clamped.clone()).map_err(|_| "failed").unwrap(),
)
.unwrap();
let integer: i128 =
serde_json::from_slice(&feltToInt(clamped.clone()).map_err(|_| "failed").unwrap())
.unwrap();
assert_eq!(integer, i as i128);
let hex_string = format!("{:?}", field_element);
let returned_string = stringToFelt(clamped).map_err(|_| "failed").unwrap();
let hex_string = format!("{:?}", field_element.clone());
let returned_string: String = feltToBigEndian(clamped.clone())
.map_err(|_| "failed")
.unwrap();
assert_eq!(hex_string, returned_string);
let repr = serde_json::to_string(&field_element).unwrap();
let little_endian_string: String = serde_json::from_str(&repr).unwrap();
let returned_string: String =
feltToLittleEndian(clamped).map_err(|_| "failed").unwrap();
assert_eq!(little_endian_string, returned_string);
}
}
@@ -101,7 +108,7 @@ mod wasm32 {
let mut buffer = string_high.clone().into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -118,7 +125,7 @@ mod wasm32 {
let buffer = string_sample.clone().into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -133,7 +140,7 @@ mod wasm32 {
let buffer = string_concat.into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfstring(clamped).map_err(|_| "failed").unwrap();
let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
@@ -258,15 +265,6 @@ mod wasm32 {
assert!(value);
}
#[wasm_bindgen_test]
async fn print_proof_hex_test() {
let proof = printProofHex(wasm_bindgen::Clamped(PROOF.to_vec()))
.map_err(|_| "failed")
.unwrap();
assert!(proof.len() > 0);
}
#[wasm_bindgen_test]
async fn verify_validations() {
// Run witness validation on network (should fail)

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@@ -23,9 +23,14 @@
"output_visibility": "Public",
"param_visibility": "Private",
"div_rebasing": false,
"rebase_frac_zero_constants": false,
"check_mode": "UNSAFE"
},
"num_rows": 16,
"total_dynamic_col_size": 0,
"num_dynamic_lookups": 0,
"num_shuffles": 0,
"total_shuffle_col_size": 0,
"total_assignments": 32,
"total_const_size": 8,
"model_instance_shapes": [

View File

@@ -38,7 +38,10 @@ describe('Generate witness, prove and verify', () => {
let pk = await readEzklArtifactsFile(path, example, 'key.pk');
let circuit_ser = await readEzklArtifactsFile(path, example, 'network.compiled');
circuit_settings_ser = await readEzklArtifactsFile(path, example, 'settings.json');
params_ser = await readEzklSrsFile(path, example);
// get the log rows from the circuit settings
const circuit_settings = deserialize(circuit_settings_ser) as any;
const logrows = circuit_settings.run_args.logrows as string;
params_ser = await readEzklSrsFile(logrows);
const startTimeProve = Date.now();
result = wasmFunctions.prove(witness, pk, circuit_ser, params_ser);
const endTimeProve = Date.now();
@@ -54,6 +57,7 @@ describe('Generate witness, prove and verify', () => {
let result
const vk = await readEzklArtifactsFile(path, example, 'key.vk');
const startTimeVerify = Date.now();
params_ser = await readEzklSrsFile("1");
result = wasmFunctions.verify(proof_ser, vk, circuit_settings_ser, params_ser);
const result_ref = wasmFunctions.verify(proof_ser_ref, vk, circuit_settings_ser, params_ser);
const endTimeVerify = Date.now();

View File

@@ -16,15 +16,7 @@ export async function readEzklArtifactsFile(path: string, example: string, filen
return new Uint8ClampedArray(buffer.buffer);
}
export async function readEzklSrsFile(path: string, example: string): Promise<Uint8ClampedArray> {
// const settingsPath = path.join(__dirname, '..', '..', 'ezkl', 'examples', 'onnx', example, 'settings.json');
const settingsPath = `${path}/${example}/settings.json`
const settingsBuffer = await fs.readFile(settingsPath, { encoding: 'utf-8' });
const settings = JSONBig.parse(settingsBuffer);
const logrows = settings.run_args.logrows;
// const filePath = path.join(__dirname, '..', '..', 'ezkl', 'examples', 'onnx', `kzg${logrows}.srs`);
// srs path is at $HOME/.ezkl/srs
export async function readEzklSrsFile(logrows: string): Promise<Uint8ClampedArray> {
const filePath = `${userHomeDir}/.ezkl/srs/kzg${logrows}.srs`
const buffer = await fs.readFile(filePath);
return new Uint8ClampedArray(buffer.buffer);

Binary file not shown.

View File

@@ -1 +1 @@
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":-1}
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":-1,"max_range_size":0}