Compare commits


9 Commits

76 changed files with 2455 additions and 1386 deletions


@@ -11,7 +11,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: nanoGPT Mock


@@ -22,7 +22,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
@@ -30,13 +30,13 @@ jobs:
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- name: Install binaryen
run: |
set -e
curl -L https://github.com/WebAssembly/binaryen/releases/download/version_116/binaryen-version_116-x86_64-linux.tar.gz | tar xzf -
export PATH=$PATH:$PWD/binaryen-version_116/bin
wasm-opt --version
- name: Build wasm files for both web and nodejs compilation targets
run: |
wasm-pack build --release --target nodejs --out-dir ./pkg/nodejs . -- -Z build-std="panic_abort,std"
@@ -92,7 +92,7 @@ jobs:
const jsonObject = JSONBig.parse(string);
return jsonObject;
}
function serialize(data) { // data is an object // return a Uint8ClampedArray
// Step 1: Stringify the Object with BigInt support
if (typeof data === "object") {
@@ -100,11 +100,11 @@ jobs:
}
// Step 2: Encode the JSON String
const uint8Array = new TextEncoder().encode(data);
// Step 3: Convert to Uint8ClampedArray
return new Uint8ClampedArray(uint8Array.buffer);
}
module.exports = {
deserialize,
serialize
@@ -123,7 +123,7 @@ jobs:
const jsonObject = parse(string);
return jsonObject;
}
export function serialize(data) { // data is an object // return a Uint8ClampedArray
// Step 1: Stringify the Object with BigInt support
if (typeof data === "object") {
@@ -131,7 +131,7 @@ jobs:
}
// Step 2: Encode the JSON String
const uint8Array = new TextEncoder().encode(data);
// Step 3: Convert to Uint8ClampedArray
return new Uint8ClampedArray(uint8Array.buffer);
}


@@ -139,6 +139,20 @@ jobs:
target: ${{ matrix.target }}
manylinux: auto
args: --release --out dist --features python-bindings
before-script-linux: |
# If we're running on rhel centos, install needed packages.
if command -v yum &> /dev/null; then
yum update -y && yum install -y perl-core openssl openssl-devel pkgconfig libatomic
# If we're running on i686 we need to symlink libatomic
# in order to build openssl with -latomic flag.
if [[ ! -d "/usr/lib64" ]]; then
ln -s /usr/lib/libatomic.so.1 /usr/lib/libatomic.so
fi
else
# If we're running on debian-based system.
apt update -y && apt-get install -y libssl-dev openssl pkg-config
fi
- name: Install built wheel
if: matrix.target == 'x86_64'


@@ -32,7 +32,7 @@ jobs:
token: ${{ secrets.RELEASE_TOKEN }}
tag_name: ${{ env.EZKL_VERSION }}
build-release-gpu:
name: build-release-gpu
needs: ["create-release"]
runs-on: GPU
@@ -45,7 +45,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Checkout repo
@@ -60,16 +60,15 @@ jobs:
- name: Set Cargo.toml version to match github tag
shell: bash
run: |
mv Cargo.toml Cargo.toml.orig
sed "s/0\\.0\\.0/${EZKL_VERSION//v}/" Cargo.toml.orig >Cargo.toml
mv Cargo.lock Cargo.lock.orig
sed "s/0\\.0\\.0/${EZKL_VERSION//v}/" Cargo.lock.orig >Cargo.lock
- name: Install dependencies
shell: bash
run: |
sudo apt-get update
- name: Build release binary
run: cargo build --release -Z sparse-registry --features icicle
@@ -91,7 +90,6 @@ jobs:
asset_name: ${{ env.ASSET }}
asset_content_type: application/octet-stream
build-release:
name: build-release
needs: ["create-release"]


@@ -26,7 +26,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Build
@@ -38,7 +38,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Docs
@@ -50,7 +50,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -73,7 +73,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -106,7 +106,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -139,7 +139,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -172,7 +172,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -189,7 +189,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
@@ -199,7 +199,7 @@ jobs:
- name: Install wasm32-unknown-unknown
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- name: Run wasm verifier tests
# on mac:
# AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
@@ -212,7 +212,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -229,7 +229,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -286,7 +286,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -303,6 +303,8 @@ jobs:
with:
node-version: "18.12.1"
cache: "pnpm"
- name: "Add rust-src"
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- name: Install dependencies for js tests and in-browser-evm-verifier package
run: |
pnpm install --no-frozen-lockfile
@@ -324,7 +326,7 @@ jobs:
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: KZG prove and verify tests (EVM + VK rendered seperately)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_render_seperately_ --test-threads 1
- name: KZG prove and verify tests (EVM + kzg all)
@@ -357,7 +359,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
@@ -365,7 +367,7 @@ jobs:
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- name: Use pnpm 8
uses: pnpm/action-setup@v2
@@ -392,6 +394,8 @@ jobs:
- name: Replace memory definition in nodejs
run: |
sed -i "3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|" tests/wasm/nodejs/ezkl.js
- name: KZG prove and verify tests (public outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_tight_lookup_::t
- name: IPA prove and verify tests
run: cargo nextest run --release --verbose tests::ipa_prove_and_verify_::t --test-threads 1
- name: IPA prove and verify tests (ipa outputs)
@@ -412,8 +416,6 @@ jobs:
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_
- name: KZG prove and verify tests (public outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t
- name: KZG prove and verify tests (public outputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t
- name: KZG prove and verify tests (public inputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input
- name: KZG prove and verify tests (fixed params)
@@ -429,11 +431,11 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-01-04-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- uses: baptiste0928/cargo-install@v1
with:
@@ -456,7 +458,6 @@ jobs:
- name: KZG prove and verify tests (hashed outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
prove-and-verify-mock-aggr-tests:
runs-on: self-hosted
needs: [build, library-tests]
@@ -464,7 +465,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -482,7 +483,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -499,7 +500,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -509,8 +510,6 @@ jobs:
- name: KZG tests
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 4 -- --include-ignored
prove-and-verify-aggr-evm-tests:
runs-on: large-self-hosted
needs: [build, library-tests]
@@ -518,7 +517,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -528,7 +527,7 @@ jobs:
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: KZG prove and verify aggr tests
run: cargo nextest run --release --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored
@@ -539,7 +538,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -561,7 +560,7 @@ jobs:
python-version: "3.7"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Install solc
@@ -569,7 +568,7 @@ jobs:
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: Build python ezkl
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
- name: Run pytest
@@ -585,7 +584,7 @@ jobs:
python-version: "3.7"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -616,7 +615,7 @@ jobs:
python-version: "3.10"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-01-04
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -626,7 +625,7 @@ jobs:
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev b320f350156a0fb15c2eb13dc380deb2367c4474 --profile local --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
- name: Build python ezkl

Cargo.lock (generated, 1663 lines changed)

File diff suppressed because it is too large.


@@ -15,14 +15,14 @@ crate-type = ["cdylib", "rlib"]
[dependencies]
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch = "main" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch = "main" }
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "9fff22c", features = [
"derive_serde",
] }
rand = { version = "0.8", default_features = false }
itertools = { version = "0.10.3", default_features = false }
clap = { version = "4.3.3", features = ["derive"] }
clap = { version = "4.5.3", features = ["derive"] }
serde = { version = "1.0.126", features = ["derive"], optional = true }
serde_json = { version = "1.0.97", default_features = false, features = [
"float_roundtrip",
@@ -203,5 +203,9 @@ no-banner = []
[patch.'https://github.com/ingonyama-zk/icicle']
icicle = { git = "https://github.com/ingonyama-zk/icicle?rev=45b00fb", package = "icicle", branch = "fix/vhnat/ezkl-build-fix" }
[patch.'https://github.com/zkonduit/halo2']
halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/optional-selector-poly#54f54453cf186aa5d89579c4e7663f9a27cfb89a", package = "halo2_proofs", branch = "ac/optional-selector-poly" }
[profile.release]
rustflags = ["-C", "relocation-model=pic"]


@@ -31,9 +31,9 @@ EZKL
[![Notebook](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/zkonduit/ezkl/blob/main/examples/notebooks/simple_demo_all_public.ipynb)
In the backend we use [Halo2](https://github.com/privacy-scaling-explorations/halo2) as a proof system.
In the backend we use the collaboratively-developed [Halo2](https://github.com/privacy-scaling-explorations/halo2) as a proof system.
The generated proofs can then be used on-chain to verify computation, only the Ethereum Virtual Machine (EVM) is supported at the moment.
The generated proofs can then be verified with much less computational resources, including on-chain (with the Ethereum Virtual Machine), in a browser, or on a device.
- If you have any questions, we'd love for you to open up a discussion topic in [Discussions](https://github.com/zkonduit/ezkl/discussions). Alternatively, you can join the ✨[EZKL Community Telegram Group](https://t.me/+QRzaRvTPIthlYWMx)💫.
@@ -45,6 +45,8 @@ The generated proofs can then be used on-chain to verify computation, only the E
### getting started ⚙️
The easiest way to get started is to try out a notebook.
#### Python
Install the python bindings by calling.
@@ -70,7 +72,7 @@ curl https://raw.githubusercontent.com/zkonduit/ezkl/main/install_ezkl_cli.sh |
https://user-images.githubusercontent.com/45801863/236771676-5bbbbfd1-ba6f-418a-902e-20738ce0e9f0.mp4
For more details visit the [docs](https://docs.ezkl.xyz).
For more details visit the [docs](https://docs.ezkl.xyz). The CLI is faster than Python, as it has less overhead. For even more speed and convenience, check out the [remote proving service](https://ei40vx5x6j0.typeform.com/to/sFv1oxvb), which feels like the CLI but is backed by a tuned cluster.
Build the auto-generated rust documentation and open the docs in your browser locally. `cargo doc --open`
@@ -124,17 +126,6 @@ unset ENABLE_ICICLE_GPU
**NOTE:** Even with the above environment variable set, icicle is disabled for circuits where k <= 8. To change the value of `k` where icicle is enabled, you can set the environment variable `ICICLE_SMALL_K`.
### repos
The EZKL project has several libraries and repos.
| Repo | Description |
| --- | --- |
| [@zkonduit/ezkl](https://github.com/zkonduit/ezkl) | the main ezkl repo in rust with wasm and python bindings |
| [@zkonduit/ezkljs](https://github.com/zkonduit/ezkljs) | typescript and javascript tooling to help integrate ezkl into web apps |
----------------------
### contributing 🌎
If you're interested in contributing and are unsure where to start, reach out to one of the maintainers:
@@ -151,7 +142,7 @@ More broadly:
- To report bugs or request new features [create a new issue within Issues](https://github.com/zkonduit/ezkl/issues) to inform the greater community.
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you shall be licensed to Zkonduit Inc. under the terms and conditions specified in the [CLA](https://github.com/zkonduit/ezkl/blob/main/cla.md), which you agree to by intentionally submitting a contribution. In particular, you have the right to submit the contribution and we can distribute it under the Apache 2.0 license, among other terms and conditions.
Any contribution intentionally submitted for inclusion in the work by you shall be licensed to Zkonduit Inc. under the terms and conditions specified in the [CLA](https://github.com/zkonduit/ezkl/blob/main/cla.md), which you agree to by intentionally submitting a contribution. In particular, you have the right to submit the contribution and we can distribute it, among other terms and conditions.
### no security guarantees
@@ -159,4 +150,7 @@ Ezkl is unaudited, beta software undergoing rapid development. There may be bugs
> NOTE: Because operations are quantized when they are converted from an onnx file to a zk-circuit, outputs in python and ezkl may differ slightly.
### no warranty
Copyright (c) 2024 Zkonduit Inc. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,3 +1,5 @@
use std::collections::HashMap;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ezkl::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use ezkl::circuit::modules::poseidon::{PoseidonChip, PoseidonConfig};
@@ -48,7 +50,7 @@ impl Circuit<Fr> for MyCircuit {
) -> Result<(), Error> {
let chip: PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, L> =
PoseidonChip::new(config);
chip.layout(&mut layouter, &[self.image.clone()], 0)?;
chip.layout(&mut layouter, &[self.image.clone()], 0, &mut HashMap::new())?;
Ok(())
}
}


@@ -696,10 +696,12 @@
"for i, value in enumerate(proof[\"instances\"]):\n",
" for j, field_element in enumerate(value):\n",
" onchain_input_array.append(ezkl.felt_to_big_endian(field_element))\n",
" formatted_output += str(onchain_input_array[-1])\n",
" formatted_output += '\"' + str(onchain_input_array[-1]) + '\"'\n",
" if j != len(value) - 1:\n",
" formatted_output += \", \"\n",
" formatted_output += \"]\"\n",
" if i != len(proof[\"instances\"]) - 1:\n",
" formatted_output += \", \"\n",
"formatted_output += \"]\"\n",
"\n",
"# This will be the values you use onchain\n",
"# copy them over to remix and see if they verify\n",


@@ -0,0 +1,40 @@
from torch import nn
import torch
import json


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.layer = nn.LPPool2d(2, 1, (1, 1))

    def forward(self, x):
        return self.layer(x)[0]


circuit = Model()
x = torch.empty(1, 3, 2, 2).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.7549541592597961, 0.990360677242279, 0.9473411440849304, 0.3951031565666199, 0.8500555753707886, 0.9352139830589294, 0.11867779493331909, 0.9493132829666138, 0.6588345766067505, 0.1933223009109497, 0.12139874696731567, 0.8547163605690002]]}

Binary file not shown.

examples/onnx/celu/gen.py (new file, 42 lines)

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = nn.CELU()(x)
        return m


circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.35387128591537476, 0.030473172664642334, 0.08707714080810547, 0.2429301142692566, 0.45228832960128784, 0.496021032333374, 0.13245105743408203, 0.8497090339660645]]}

Binary file not shown.

examples/onnx/clip/gen.py (new file, 41 lines)

@@ -0,0 +1,41 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.clamp(x, min=0.4, max=0.8)
        return m


circuit = MyModel()
x = torch.empty(1, 2, 2, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.03297048807144165, 0.46362626552581787, 0.6044231057167053, 0.4949902892112732, 0.48823297023773193, 0.6798646450042725, 0.6824942231178284, 0.03491640090942383, 0.19608813524246216, 0.24129939079284668, 0.9769315123558044, 0.6306831240653992, 0.7690497636795044, 0.252221941947937, 0.9167693853378296, 0.3882681131362915, 0.9307044148445129, 0.33559417724609375, 0.7815426588058472, 0.3435332179069519, 0.7871478796005249, 0.12240773439407349, 0.5295405983924866, 0.4874419569969177, 0.08262640237808228, 0.1124718189239502, 0.5834914445877075, 0.30927878618240356, 0.48899340629577637, 0.9376634955406189, 0.21893149614334106, 0.526070773601532]]}


@@ -0,0 +1,24 @@
(binary ONNX graph, pytorch 2.2.1 export: two Constant nodes feeding a Clip op, input and output with a dynamic batch_size axis; raw protobuf bytes not human-readable)
examples/onnx/gru/gen.py (new file, 41 lines)

@@ -0,0 +1,41 @@
import random
import math
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import json

model = nn.GRU(3, 3)   # Input dim is 3, output dim is 3
x = torch.randn(1, 3)  # make a sequence of length 5
print(x)

# Flips the neural net into inference mode
model.eval()
model.to('cpu')

# Export the model
torch.onnx.export(model,  # model being run
                  # model input (or a tuple for multiple inputs)
                  x,
                  # where to save the model (can be a file or file-like object)
                  "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=10,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

data_array = ((x).detach().numpy()).reshape([-1]).tolist()
data_json = dict(input_data=[data_array])
print(data_json)

# Serialize data into file:
json.dump(data_json, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.4145222008228302, -0.4043896496295929, 0.7545749545097351]]}

Binary file not shown.


@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.argmax(x)
        return m


circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.5505883693695068, 0.0766521692276001, 0.12006187438964844, 0.9497959017753601, 0.9100563526153564, 0.968717098236084, 0.5978299379348755, 0.9419963359832764]]}

Binary file not shown.


@@ -9,7 +9,7 @@ class MyModel(nn.Module):
super(MyModel, self).__init__()
def forward(self, x):
m = nn.Logsoftmax()(x)
m = nn.Hardsigmoid()(x)
return m


@@ -1 +1 @@
{"input_data": [[0.2971532940864563, 0.3465197682380676, 0.05381882190704346, 0.058654189109802246, 0.014198064804077148, 0.06088751554489136, 0.1723427176475525, 0.5115123987197876]]}
{"input_data": [[0.8326942324638367, 0.2796096205711365, 0.600328266620636, 0.3701696991920471, 0.17832040786743164, 0.6247223019599915, 0.501872718334198, 0.6961578726768494]]}


@@ -1,4 +1,4 @@
pytorch2.1.0:
pytorch2.2.1:
(exporter version bump inside the binary ONNX HardSigmoid graph; remaining protobuf bytes not human-readable)


@@ -0,0 +1,41 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = nn.Hardswish()(x)
        return m


circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.6996762752532959, 0.42992985248565674, 0.5102168321609497, 0.5540387630462646, 0.8489438891410828, 0.8533616065979004, 0.36736780405044556, 0.5859147310256958]]}


@@ -0,0 +1,15 @@
(binary ONNX graph, pytorch 2.2.1 export: input → HardSwish → output with a dynamic batch_size axis; raw protobuf bytes not human-readable)


@@ -9,7 +9,7 @@ class MyModel(nn.Module):
super(MyModel, self).__init__()
def forward(self, x):
m = nn.Hardsigmoid()(x)
m = nn.LogSoftmax()(x)
return m


@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.logsumexp(x, dim=1)
        return m


circuit = MyModel()
x = torch.empty(1, 2, 2, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.7973018884658813, 0.5245689153671265, 0.34149593114852905, 0.1455438733100891, 0.9482707381248474, 0.4221445322036743, 0.001363217830657959, 0.8736765384674072, 0.42954301834106445, 0.7199509739875793, 0.37641745805740356, 0.5920265316963196, 0.42270803451538086, 0.41761744022369385, 0.603948712348938, 0.7250819802284241, 0.047173500061035156, 0.5115441679954529, 0.3743387460708618, 0.16794061660766602, 0.5352339148521423, 0.037976861000061035, 0.65323406457901, 0.5585184097290039, 0.10559147596359253, 0.07827490568161011, 0.6717077493667603, 0.6480781435966492, 0.9780838489532471, 0.8353415131568909, 0.6491701006889343, 0.6573048233985901]]}

Binary file not shown.

examples/onnx/mish/gen.py (new file, 42 lines)

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = nn.Mish()(x)
        return m


circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.18563222885131836, 0.4843214750289917, 0.9991059899330139, 0.02534431219100952, 0.8105666041374207, 0.9658406376838684, 0.681107759475708, 0.5365872979164124]]}


@@ -0,0 +1,19 @@
(binary ONNX graph, pytorch 2.2.1 export: Mish lowered to Softplus → Tanh → Mul, input and output with a dynamic batch_size axis; raw protobuf bytes not human-readable)


@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.norm(x, p=1, dim=1)
        return m


circuit = MyModel()
x = torch.empty(1, 2, 2, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.02284395694732666, 0.7941043376922607, 0.07971876859664917, 0.8898420929908752, 0.8233054280281067, 0.11066079139709473, 0.4424799084663391, 0.4355071783065796, 0.6723723411560059, 0.6818525195121765, 0.8726171851158142, 0.17742449045181274, 0.054257750511169434, 0.5775953531265259, 0.7758923172950745, 0.8431423306465149, 0.7602444887161255, 0.29686522483825684, 0.22489851713180542, 0.0675363540649414, 0.981339693069458, 0.15771394968032837, 0.5801441669464111, 0.9044001698493958, 0.49266451597213745, 0.42621421813964844, 0.35345613956451416, 0.042848050594329834, 0.6908614039421082, 0.5422852039337158, 0.01975083351135254, 0.5772860050201416]]}

Binary file not shown.


@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.norm(x, p=2, dim=1)
        return m


circuit = MyModel()
x = torch.empty(1, 2, 2, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.8709188103675842, 0.11553549766540527, 0.27376580238342285, 0.7518517971038818, 0.7879393100738525, 0.8765475749969482, 0.14315760135650635, 0.8982420563697815, 0.7274006605148315, 0.39007169008255005, 0.729040801525116, 0.11306107044219971, 0.658822774887085, 0.666404664516449, 0.3001367449760437, 0.45343858003616333, 0.7460223436355591, 0.7423691749572754, 0.7544230818748474, 0.5674425959587097, 0.8728761672973633, 0.27062875032424927, 0.1595977544784546, 0.22975260019302368, 0.6711723208427429, 0.8265992403030396, 0.48679041862487793, 0.689740777015686, 0.330846905708313, 0.5630669593811035, 0.8058932423591614, 0.5802426338195801]]}

Binary file not shown.

examples/onnx/tril/gen.py (new file, 42 lines)

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.triu(x)
        return m


circuit = MyModel()
x = torch.empty(1, 3, 3).uniform_(0, 5)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.4870188236236572, 2.275230646133423, 3.126268148422241, 0.6412187218666077, 0.9967470169067383, 1.9814395904541016, 1.6355383396148682, 0.6397527456283569, 0.7825168967247009]]}

Binary file not shown.

examples/onnx/triu/gen.py (new file, 42 lines)

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.tril(x)
        return m


circuit = MyModel()
x = torch.empty(1, 3, 3).uniform_(0, 5)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[0.2898547053337097, 1.8070811033248901, 0.30266255140304565, 3.00581955909729, 0.5379888415336609, 1.7057424783706665, 2.415961265563965, 0.589233934879303, 0.03824889659881592]]}

Binary file not shown.


@@ -17,7 +17,7 @@
"clean": "rm -r dist || true",
"build:commonjs": "tsc --project tsconfig.commonjs.json && resolve-tspaths -p tsconfig.commonjs.json",
"build:esm": "tsc --project tsconfig.esm.json && resolve-tspaths -p tsconfig.esm.json",
"build": "pnpm run clean && pnpm run build:commonjs && pnpm run build:esm"
"build": "npm run clean && npm run build:commonjs && npm run build:esm"
},
"dependencies": {
"@ethereumjs/common": "^4.0.0",


@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2023-08-24"
channel = "nightly-2024-02-06"
components = ["rustfmt", "clippy"]


@@ -15,6 +15,8 @@ pub use planner::*;
use crate::tensor::{TensorType, ValTensor};
use super::region::ConstantsMap;
/// Module trait used to extend ezkl functionality
pub trait Module<F: PrimeField + TensorType + PartialOrd> {
/// Config
@@ -39,6 +41,7 @@ pub trait Module<F: PrimeField + TensorType + PartialOrd> {
&self,
layouter: &mut impl Layouter<F>,
input: &[ValTensor<F>],
constants: &mut ConstantsMap<F>,
) -> Result<Self::InputAssignments, Error>;
/// Layout
fn layout(
@@ -46,6 +49,7 @@ pub trait Module<F: PrimeField + TensorType + PartialOrd> {
layouter: &mut impl Layouter<F>,
input: &[ValTensor<F>],
row_offset: usize,
constants: &mut ConstantsMap<F>,
) -> Result<ValTensor<F>, Error>;
/// Number of instance values the module uses every time it is applied
fn instance_increment_input(&self) -> Vec<usize>;
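Both trait methods now thread a `constants: &mut ConstantsMap<F>` through layout. A minimal sketch of the calling convention, assuming `ConstantsMap` is a map keyed by the field element (which is what the new `std::hash::Hash` bounds on `F` elsewhere in this diff suggest); all names here are illustrative stand-ins, not ezkl's real types:

use std::collections::HashMap;

// Illustrative stand-ins; the real ValType/ConstantsMap live in ezkl's tensor
// and circuit::region modules.
#[derive(Clone, PartialEq, Eq, Hash)]
struct Felt(u64);
#[derive(Clone)]
enum Val {
    Constant(Felt),
    AssignedConstant(/* assigned cell id */ u32, Felt),
}
type ConstantsMap = HashMap<Felt, Val>;

fn synthesize_sketch() {
    // One map per synthesis, shared by every module call, e.g.:
    //   module_a.layout(&mut layouter, &inputs_a, 0, &mut constants)?;
    //   module_b.layout(&mut layouter, &inputs_b, 0, &mut constants)?;
    // so a constant assigned by one module is reused, not re-assigned, by the next.
    let mut constants = ConstantsMap::new();
    constants.insert(Felt(0), Val::Constant(Felt(0)));
}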


@@ -4,6 +4,8 @@ is already implemented in halo2_gadgets, there is no wrapper chip that makes it
Thanks to https://github.com/summa-dev/summa-solvency/blob/master/src/chips/poseidon/hash.rs for the inspiration (and also helping us understand how to use this).
*/
use std::collections::HashMap;
// This chip adds a set of advice columns to the gadget Chip to store the inputs of the hash
use halo2_proofs::halo2curves::bn256::Fr as Fp;
use halo2_proofs::poly::commitment::{Blind, CommitmentScheme, Params};
@@ -13,6 +15,7 @@ use halo2curves::group::prime::PrimeCurveAffine;
use halo2curves::group::Curve;
use halo2curves::CurveAffine;
use crate::circuit::region::ConstantsMap;
use crate::tensor::{Tensor, ValTensor, ValType, VarTensor};
use super::Module;
@@ -107,6 +110,7 @@ impl Module<Fp> for PolyCommitChip {
&self,
_: &mut impl Layouter<Fp>,
_: &[ValTensor<Fp>],
_: &mut ConstantsMap<Fp>,
) -> Result<Self::InputAssignments, Error> {
Ok(())
}
@@ -119,11 +123,24 @@ impl Module<Fp> for PolyCommitChip {
layouter: &mut impl Layouter<Fp>,
input: &[ValTensor<Fp>],
_: usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<ValTensor<Fp>, Error> {
assert_eq!(input.len(), 1);
let local_constants = constants.clone();
layouter.assign_region(
|| "PolyCommit",
|mut region| self.config.inputs.assign(&mut region, 0, &input[0]),
|mut region| {
let mut local_inner_constants = local_constants.clone();
let res = self.config.inputs.assign(
&mut region,
0,
&input[0],
&mut local_inner_constants,
)?;
*constants = local_inner_constants;
Ok(res)
},
)
}
@@ -184,7 +201,12 @@ mod tests {
mut layouter: impl Layouter<Fp>,
) -> Result<(), Error> {
let polycommit_chip = PolyCommitChip::new(config);
polycommit_chip.layout(&mut layouter, &[self.message.clone()], 0);
polycommit_chip.layout(
&mut layouter,
&[self.message.clone()],
0,
&mut HashMap::new(),
);
Ok(())
}
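One subtlety in the PolyCommit layout above worth spelling out: halo2's `assign_region` may invoke its closure more than once (a shape-only pass, then the real assignment pass), so the constants map is snapshotted outside the closure, each invocation works on a fresh copy, and results are published back only when an invocation completes. A self-contained sketch of that clone-in/write-back pattern, under those assumptions and with plain `HashMap<u64, u64>` standing in for the real types:

use std::collections::HashMap;

// Stand-in for Layouter::assign_region, which may run its closure several times.
fn assign_region(mut f: impl FnMut() -> Result<(), ()>) -> Result<(), ()> {
    f()?; // e.g. a shape-only layout pass
    f()   // e.g. the real assignment pass
}

fn layout_with_constants(constants: &mut HashMap<u64, u64>) -> Result<(), ()> {
    let snapshot = constants.clone(); // clone-in: taken once, outside the closure
    assign_region(|| {
        let mut local = snapshot.clone(); // every invocation starts from the same snapshot
        local.insert(42, 7);              // ...performs its constant assignments...
        *constants = local;               // ...and publishes them (the last pass wins)
        Ok(())
    })
}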


@@ -18,6 +18,7 @@ use maybe_rayon::slice::ParallelSlice;
use std::marker::PhantomData;
use crate::circuit::region::ConstantsMap;
use crate::tensor::{Tensor, ValTensor, ValType};
use super::Module;
@@ -172,12 +173,15 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
&self,
layouter: &mut impl Layouter<Fp>,
message: &[ValTensor<Fp>],
constants: &mut ConstantsMap<Fp>,
) -> Result<Self::InputAssignments, Error> {
assert_eq!(message.len(), 1);
let message = message[0].clone();
let start_time = instant::Instant::now();
let local_constants = constants.clone();
let res = layouter.assign_region(
|| "load message",
|mut region| {
@@ -199,12 +203,26 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
Ok(v.clone())
}
ValType::Constant(f) => region.assign_advice_from_constant(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
*f,
),
ValType::Constant(f) => {
if local_constants.contains_key(f) {
Ok(constants.get(f).unwrap().assigned_cell().ok_or({
log::error!("constant not previously assigned");
Error::Synthesis
})?)
} else {
let res = region.assign_advice_from_constant(
|| format!("load message_{}", i),
self.config.hash_inputs[x],
y,
*f,
)?;
constants
.insert(*f, ValType::AssignedConstant(res.clone(), *f));
Ok(res)
}
}
e => {
log::error!(
"wrong input type {:?}, must be previously assigned",
@@ -270,8 +288,9 @@ impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, con
layouter: &mut impl Layouter<Fp>,
input: &[ValTensor<Fp>],
row_offset: usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<ValTensor<Fp>, Error> {
let (mut input_cells, zero_val) = self.layout_inputs(layouter, input)?;
let (mut input_cells, zero_val) = self.layout_inputs(layouter, input, constants)?;
// extract the values from the input cells
let mut assigned_input: Tensor<ValType<Fp>> =
input_cells.iter().map(|e| ValType::from(e.clone())).into();
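The new `ValType::Constant` arm in `layout_inputs` above is a memoization: the first occurrence of a constant is assigned with `assign_advice_from_constant` and recorded in the map; later occurrences reuse the recorded cell instead of assigning a duplicate. A compilable sketch of the pattern, with `u64` standing in for the field element and cell types:

use std::collections::HashMap;

#[derive(Clone)]
struct AssignedCell(u64); // stand-in for halo2's AssignedCell

fn assign_advice_from_constant(f: u64) -> AssignedCell {
    AssignedCell(f) // stand-in for the real region call
}

/// Assign a constant, reusing the existing cell if this value was seen before.
fn assign_constant_memoized(f: u64, constants: &mut HashMap<u64, AssignedCell>) -> AssignedCell {
    if let Some(prev) = constants.get(&f) {
        prev.clone() // previously assigned: reuse (and copy-constrain) that cell
    } else {
        let cell = assign_advice_from_constant(f);
        constants.insert(f, cell.clone()); // first sighting: assign and record
        cell
    }
}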
@@ -434,7 +453,7 @@ mod tests {
*,
};
use std::marker::PhantomData;
use std::{collections::HashMap, marker::PhantomData};
use halo2_gadgets::poseidon::primitives::Spec;
use halo2_proofs::{
@@ -477,7 +496,12 @@ mod tests {
mut layouter: impl Layouter<Fp>,
) -> Result<(), Error> {
let chip: PoseidonChip<PoseidonSpec, WIDTH, RATE, L> = PoseidonChip::new(config);
chip.layout(&mut layouter, &[self.message.clone()], 0)?;
chip.layout(
&mut layouter,
&[self.message.clone()],
0,
&mut HashMap::new(),
)?;
Ok(())
}


@@ -345,7 +345,7 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
/// Returns a new [BaseConfig] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
Self {


@@ -46,7 +46,8 @@ pub enum HybridOp {
dim: usize,
},
Softmax {
scale: utils::F32,
input_scale: utils::F32,
output_scale: utils::F32,
axes: Vec<usize>,
},
RangeCheck(Tolerance),
@@ -70,7 +71,7 @@ pub enum HybridOp {
},
}
impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for HybridOp {
///
fn requires_homogenous_input_scales(&self) -> Vec<usize> {
match self {
@@ -130,9 +131,16 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
kernel_shape,
normalized,
} => tensor::ops::sumpool(&x, *padding, *stride, *kernel_shape, *normalized)?,
HybridOp::Softmax { scale, axes } => {
tensor::ops::nonlinearities::softmax_axes(&x, scale.into(), axes)
}
HybridOp::Softmax {
input_scale,
output_scale,
axes,
} => tensor::ops::nonlinearities::softmax_axes(
&x,
input_scale.into(),
output_scale.into(),
axes,
),
HybridOp::RangeCheck(tol) => {
let y = inputs[1].clone().map(|x| felt_to_i128(x));
tensor::ops::nonlinearities::range_check_percent(&[x, y], 128, 128, tol.val)
@@ -203,8 +211,15 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
),
HybridOp::ReduceMin { axes } => format!("REDUCEMIN (axes={:?})", axes),
HybridOp::ReduceArgMin { dim } => format!("REDUCEARGMIN (dim={})", dim),
HybridOp::Softmax { scale, axes } => {
format!("SOFTMAX (scale={}, axes={:?})", scale, axes)
HybridOp::Softmax {
input_scale,
output_scale,
axes,
} => {
format!(
"SOFTMAX (input_scale={}, output_scale={}, axes={:?})",
input_scale, output_scale, axes
)
}
HybridOp::RangeCheck(p) => format!("RANGECHECK (tol={:?})", p),
HybridOp::Greater => "GREATER".into(),
@@ -324,9 +339,18 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
HybridOp::ReduceArgMin { dim } => {
layouts::argmin_axes(config, region, values[..].try_into()?, *dim)?
}
HybridOp::Softmax { scale, axes } => {
layouts::softmax_axes(config, region, values[..].try_into()?, *scale, axes)?
}
HybridOp::Softmax {
input_scale,
output_scale,
axes,
} => layouts::softmax_axes(
config,
region,
values[..].try_into()?,
*input_scale,
*output_scale,
axes,
)?,
HybridOp::RangeCheck(tol) => layouts::range_check_percent(
config,
region,
@@ -359,8 +383,9 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for HybridOp {
| HybridOp::ReduceArgMax { .. }
| HybridOp::OneHot { .. }
| HybridOp::ReduceArgMin { .. } => 0,
HybridOp::Softmax { .. } => 2 * in_scales[0],
HybridOp::Recip { output_scale, .. } => multiplier_to_scale(output_scale.0 as f64),
HybridOp::Softmax { output_scale, .. } | HybridOp::Recip { output_scale, .. } => {
multiplier_to_scale(output_scale.0 as f64)
}
_ => in_scales[0],
};
Ok(scale)
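Splitting Softmax's single `scale` into `input_scale` and `output_scale` decouples the precision used when exponentiating from the fixed-point scale of the result, and the output scale now feeds `multiplier_to_scale` exactly as `Recip`'s does, rather than being pinned to the old `2 * in_scales[0]`. A numeric sketch of two-scale quantized softmax, as an illustration rather than ezkl's exact rounding:

/// Inputs are fixed-point integers with denominator `input_scale`; the result is
/// returned as fixed-point with denominator `output_scale`.
fn softmax_quantized(x: &[i64], input_scale: f64, output_scale: f64) -> Vec<i64> {
    let exps: Vec<f64> = x.iter().map(|&v| (v as f64 / input_scale).exp()).collect();
    let denom: f64 = exps.iter().sum();
    exps.iter()
        .map(|e| (e / denom * output_scale).round() as i64)
        .collect()
}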


@@ -9,6 +9,7 @@ use halo2curves::ff::PrimeField;
use itertools::Itertools;
use log::{error, trace};
use maybe_rayon::{
iter::IntoParallelRefIterator,
prelude::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator},
slice::ParallelSliceMut,
};
@@ -33,7 +34,7 @@ use super::*;
use crate::circuit::ops::lookup::LookupOp;
/// Same as div but splits the division into N parts
pub(crate) fn loop_div<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn loop_div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
value: &[ValTensor<F>; 1],
@@ -68,7 +69,7 @@ pub(crate) fn loop_div<F: PrimeField + TensorType + PartialOrd>(
}
/// Div accumulated layout
pub(crate) fn div<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
value: &[ValTensor<F>; 1],
@@ -93,9 +94,9 @@ pub(crate) fn div<F: PrimeField + TensorType + PartialOrd>(
let mut claimed_output: ValTensor<F> = if is_assigned {
let input_evals = input.get_int_evals()?;
tensor::ops::nonlinearities::const_div(&input_evals.clone(), felt_to_i128(div) as f64)
.iter()
.map(|x| Ok(Value::known(i128_to_felt(*x))))
.collect::<Result<Tensor<Value<F>>, Box<dyn Error>>>()?
.par_iter()
.map(|x| Value::known(i128_to_felt(*x)))
.collect::<Tensor<Value<F>>>()
.into()
} else {
Tensor::new(
@@ -133,7 +134,7 @@ pub(crate) fn div<F: PrimeField + TensorType + PartialOrd>(
}
/// recip accumulated layout
pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
value: &[ValTensor<F>; 1],
@@ -166,9 +167,9 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd>(
felt_to_i128(input_scale) as f64,
felt_to_i128(output_scale) as f64,
)
.iter()
.map(|x| Ok(Value::known(i128_to_felt(*x))))
.collect::<Result<Tensor<Value<F>>, Box<dyn Error>>>()?
.par_iter()
.map(|x| Value::known(i128_to_felt(*x)))
.collect::<Tensor<Value<F>>>()
.into()
} else {
Tensor::new(
@@ -226,7 +227,7 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd>(
}
/// Dot product accumulated layout
pub(crate) fn dot<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn dot<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -337,7 +338,7 @@ pub(crate) fn dot<F: PrimeField + TensorType + PartialOrd>(
}
/// Einsum
pub(crate) fn einsum<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn einsum<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
inputs: &[ValTensor<F>],
@@ -524,14 +525,7 @@ pub(crate) fn einsum<F: PrimeField + TensorType + PartialOrd>(
// Compute the product of all input tensors
for pair in input_pairs {
let product_across_pair = prod(
config,
region,
&[pair.try_into().map_err(|e| {
error!("{}", e);
halo2_proofs::plonk::Error::Synthesis
})?],
)?;
let product_across_pair = prod(config, region, &[pair.into()])?;
if let Some(product) = prod_res {
prod_res = Some(
@@ -563,7 +557,7 @@ pub(crate) fn einsum<F: PrimeField + TensorType + PartialOrd>(
Ok(output)
}
fn _sort_ascending<F: PrimeField + TensorType + PartialOrd>(
fn _sort_ascending<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -574,12 +568,12 @@ fn _sort_ascending<F: PrimeField + TensorType + PartialOrd>(
let is_assigned = !input.any_unknowns()?;
let sorted = if is_assigned {
input
.get_int_evals()?
.iter()
.sorted_by(|a, b| a.cmp(b))
.map(|x| Ok(Value::known(i128_to_felt(*x))))
.collect::<Result<Tensor<Value<F>>, Box<dyn Error>>>()?
let mut int_evals = input.get_int_evals()?;
int_evals.par_sort_unstable_by(|a, b| a.cmp(b));
int_evals
.par_iter()
.map(|x| Value::known(i128_to_felt(*x)))
.collect::<Tensor<Value<F>>>()
} else {
Tensor::new(
Some(&vec![Value::<F>::unknown(); input.len()]),
@@ -607,7 +601,7 @@ fn _sort_ascending<F: PrimeField + TensorType + PartialOrd>(
}
///
fn _select_topk<F: PrimeField + TensorType + PartialOrd>(
fn _select_topk<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -622,7 +616,7 @@ fn _select_topk<F: PrimeField + TensorType + PartialOrd>(
}
/// Select top k elements
pub(crate) fn topk_axes<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn topk_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -642,11 +636,12 @@ pub(crate) fn topk_axes<F: PrimeField + TensorType + PartialOrd>(
Ok(output)
}
fn select<F: PrimeField + TensorType + PartialOrd>(
fn select<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
) -> Result<ValTensor<F>, Box<dyn Error>> {
let start = instant::Instant::now();
let (mut input, index) = (values[0].clone(), values[1].clone());
input.flatten();
@@ -656,12 +651,13 @@ fn select<F: PrimeField + TensorType + PartialOrd>(
let is_assigned = !input.any_unknowns()? && !index.any_unknowns()?;
let output: ValTensor<F> = if is_assigned {
let output: ValTensor<F> = if is_assigned && region.witness_gen() {
let felt_evals = input.get_felt_evals()?;
index
.get_int_evals()?
.iter()
.map(|x| Ok(Value::known(input.get_felt_evals()?.get(&[*x as usize]))))
.collect::<Result<Tensor<Value<F>>, Box<dyn Error>>>()?
.par_iter()
.map(|x| Value::known(felt_evals.get(&[*x as usize])))
.collect::<Tensor<Value<F>>>()
} else {
Tensor::new(
Some(&vec![Value::<F>::unknown(); index.len()]),
@@ -673,10 +669,13 @@ fn select<F: PrimeField + TensorType + PartialOrd>(
let (_, assigned_output) =
dynamic_lookup(config, region, &[index, output], &[dim_indices, input])?;
let end = start.elapsed();
trace!("select took: {:?}", end);
Ok(assigned_output)
}
fn one_hot<F: PrimeField + TensorType + PartialOrd>(
fn one_hot<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -692,7 +691,7 @@ fn one_hot<F: PrimeField + TensorType + PartialOrd>(
let output: ValTensor<F> = if is_assigned {
let int_evals = input.get_int_evals()?;
let res = tensor::ops::one_hot(&int_evals, num_classes, 1)?;
res.iter()
res.par_iter()
.map(|x| Value::known(i128_to_felt(*x)))
.collect::<Tensor<_>>()
} else {
@@ -728,12 +727,13 @@ fn one_hot<F: PrimeField + TensorType + PartialOrd>(
}
/// Dynamic lookup
pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
lookups: &[ValTensor<F>; 2],
tables: &[ValTensor<F>; 2],
) -> Result<(ValTensor<F>, ValTensor<F>), Box<dyn Error>> {
let start = instant::Instant::now();
// if not all lookups same length err
if lookups[0].len() != lookups[1].len() {
return Err("lookups must be same length".into());
@@ -753,20 +753,28 @@ pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd>(
let _table_1 = region.assign_dynamic_lookup(&config.dynamic_lookups.tables[1], &table_1)?;
let table_len = table_0.len();
trace!("assigning tables took: {:?}", start.elapsed());
// now create a vartensor of constants for the dynamic lookup index
let table_index = create_constant_tensor(F::from(dynamic_lookup_index as u64), table_len);
let _table_index =
region.assign_dynamic_lookup(&config.dynamic_lookups.tables[2], &table_index)?;
trace!("assigning table index took: {:?}", start.elapsed());
let lookup_0 = region.assign(&config.dynamic_lookups.inputs[0], &lookup_0)?;
let lookup_1 = region.assign(&config.dynamic_lookups.inputs[1], &lookup_1)?;
let lookup_len = lookup_0.len();
trace!("assigning lookups took: {:?}", start.elapsed());
// now set the lookup index
let lookup_index = create_constant_tensor(F::from(dynamic_lookup_index as u64), lookup_len);
let _lookup_index = region.assign(&config.dynamic_lookups.inputs[2], &lookup_index)?;
trace!("assigning lookup index took: {:?}", start.elapsed());
if !region.is_dummy() {
(0..table_len)
.map(|i| {
@@ -802,11 +810,14 @@ pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd>(
region.increment_dynamic_lookup_index(1);
region.increment(lookup_len);
let end = start.elapsed();
trace!("dynamic lookup took: {:?}", end);
Ok((lookup_0, lookup_1))
}
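// A conceptual sketch of the index tagging above (assumed semantics):
// lookup rows and table rows both carry the same constant
// `dynamic_lookup_index`, so several dynamic tables can share columns
// without colliding; a lookup only matches rows bearing its own tag.
fn tag_rows(rows: &[(u64, u64)], index: u64) -> Vec<(u64, u64, u64)> {
    rows.iter().map(|&(a, b)| (a, b, index)).collect()
}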
/// Shuffle arg
pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
input: &[ValTensor<F>; 1],
@@ -869,7 +880,7 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd>(
}
/// One hot accumulated layout
pub(crate) fn one_hot_axis<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn one_hot_axis<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -922,7 +933,7 @@ pub(crate) fn one_hot_axis<F: PrimeField + TensorType + PartialOrd>(
}
/// Gather accumulated layout
pub(crate) fn gather<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn gather<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -950,7 +961,7 @@ pub(crate) fn gather<F: PrimeField + TensorType + PartialOrd>(
}
/// Gather accumulated layout
pub(crate) fn gather_elements<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn gather_elements<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -973,7 +984,7 @@ pub(crate) fn gather_elements<F: PrimeField + TensorType + PartialOrd>(
}
/// Gather accumulated layout
pub(crate) fn gather_nd<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn gather_nd<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -1024,7 +1035,7 @@ pub(crate) fn gather_nd<F: PrimeField + TensorType + PartialOrd>(
/// Takes a tensor representing a multi-dimensional index and returns a tensor representing the linearized index.
/// The linearized index is the index of the element in the flattened tensor.
/// For instance, if the dims are [3,5,2], the linearized index of [2] at dim 1 is 2*5 + 3 = 13
pub(crate) fn linearize_element_index<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn linearize_element_index<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1032,6 +1043,7 @@ pub(crate) fn linearize_element_index<F: PrimeField + TensorType + PartialOrd>(
dim: usize,
is_flat_index: bool,
) -> Result<ValTensor<F>, Box<dyn Error>> {
let start_time = instant::Instant::now();
let index = values[0].clone();
if !is_flat_index {
assert_eq!(index.dims().len(), dims.len());
@@ -1105,6 +1117,9 @@ pub(crate) fn linearize_element_index<F: PrimeField + TensorType + PartialOrd>(
region.apply_in_loop(&mut output, inner_loop_function)?;
let elapsed = start_time.elapsed();
trace!("linearize_element_index took: {:?}", elapsed);
Ok(output.into())
}
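// A sketch of plain row-major linearization (assumed convention; the
// layout above additionally handles a per-dimension index and batching):
// strides shrink right-to-left, flat = sum(coord[i] * stride[i]).
fn linearize(coord: &[usize], dims: &[usize]) -> usize {
    let mut stride = 1;
    let mut flat = 0;
    for (c, d) in coord.iter().zip(dims).rev() {
        flat += c * stride;
        stride *= d;
    }
    flat
}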
@@ -1125,7 +1140,7 @@ pub(crate) fn linearize_element_index<F: PrimeField + TensorType + PartialOrd>(
/// If indices_shape[-1] == r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors containing 1-D tensors of dimension r-b, where N is an integer equal to the product of 1 and all the elements in the batch dimensions of indices_shape.
/// Let us think of each such rank-(r-b) tensor as indices_slice. Each scalar value corresponding to data[0:b-1, indices_slice] is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Example 1 below)
/// If indices_shape[-1] < r-b, since the rank of indices is q, indices can be thought of as N (q-b-1)-dimensional tensors containing 1-D tensors of dimension < r-b. Let us think of each such tensor as indices_slice. Each tensor slice corresponding to data[0:b-1, indices_slice, :] is filled into the corresponding location of the (q-b-1)-dimensional tensor to form the output tensor (Examples 2, 3, 4 and 5 below)
pub(crate) fn linearize_nd_index<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn linearize_nd_index<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1232,7 +1247,6 @@ pub(crate) fn linearize_nd_index<F: PrimeField + TensorType + PartialOrd>(
const_offset += F::from(coord[i] as u64) * dim_multiplier[i];
}
let const_offset = create_constant_tensor(const_offset, 1);
let mut results = vec![];
@@ -1250,16 +1264,18 @@ pub(crate) fn linearize_nd_index<F: PrimeField + TensorType + PartialOrd>(
let res = sum(config, region, &[res])?;
results.push(res.get_inner_tensor()?.clone());
// assert that res is less than the product of the dims
assert!(
if region.witness_gen() {
assert!(
res.get_int_evals()?
.iter()
.all(|x| *x < dims.iter().product::<usize>() as i128),
"res is greater than the product of the dims {} (coord={}, index_dim_multiplier={}, res={})",
dims.iter().product::<usize>(),
index_val.show(),
index_dim_multiplier.show(),
res.show()
);
}
}
let result_tensor = Tensor::from(results.into_iter());
@@ -1273,7 +1289,9 @@ pub(crate) fn linearize_nd_index<F: PrimeField + TensorType + PartialOrd>(
Ok(output.into())
}
pub(crate) fn get_missing_set_elements<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn get_missing_set_elements<
F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -1304,7 +1322,7 @@ pub(crate) fn get_missing_set_elements<F: PrimeField + TensorType + PartialOrd>(
}
fullset_evals
.iter()
.par_iter()
.map(|x| Value::known(i128_to_felt(*x)))
.collect::<Tensor<Value<F>>>()
.into()
@@ -1337,7 +1355,7 @@ pub(crate) fn get_missing_set_elements<F: PrimeField + TensorType + PartialOrd>(
}
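// A plain-Rust sketch of the witness computed here (assumed semantics):
// remove one occurrence of each used element from the full set; the
// remainder are the "missing" elements used to pad the claimed set.
fn missing_elements(fullset: &[i128], used: &[i128]) -> Vec<i128> {
    let mut rest = fullset.to_vec();
    for u in used {
        if let Some(pos) = rest.iter().position(|x| x == u) {
            rest.remove(pos);
        }
    }
    rest
}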
/// Gather accumulated layout
pub(crate) fn scatter_elements<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn scatter_elements<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 3],
@@ -1354,14 +1372,14 @@ pub(crate) fn scatter_elements<F: PrimeField + TensorType + PartialOrd>(
let is_assigned = !input.any_unknowns()? && !index.any_unknowns()? && !src.any_unknowns()?;
let claimed_output: ValTensor<F> = if is_assigned {
let claimed_output: ValTensor<F> = if is_assigned && region.witness_gen() {
let input_inner = input.get_int_evals()?;
let index_inner = index.get_int_evals()?.map(|x| x as usize);
let src_inner = src.get_int_evals()?;
let res = tensor::ops::scatter(&input_inner, &index_inner, &src_inner, dim)?;
res.iter()
res.par_iter()
.map(|x| Value::known(i128_to_felt(*x)))
.collect::<Tensor<Value<F>>>()
.into()
@@ -1419,7 +1437,7 @@ pub(crate) fn scatter_elements<F: PrimeField + TensorType + PartialOrd>(
}
/// Scatter Nd
pub(crate) fn scatter_nd<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn scatter_nd<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 3],
@@ -1433,14 +1451,14 @@ pub(crate) fn scatter_nd<F: PrimeField + TensorType + PartialOrd>(
let is_assigned = !input.any_unknowns()? && !index.any_unknowns()? && !src.any_unknowns()?;
let claimed_output: ValTensor<F> = if is_assigned {
let claimed_output: ValTensor<F> = if is_assigned && region.witness_gen() {
let input_inner = input.get_int_evals()?;
let index_inner = index.get_int_evals()?.map(|x| x as usize);
let src_inner = src.get_int_evals()?;
let res = tensor::ops::scatter_nd(&input_inner, &index_inner, &src_inner)?;
res.iter()
res.par_iter()
.map(|x| Value::known(i128_to_felt(*x)))
.collect::<Tensor<Value<F>>>()
.into()
@@ -1457,7 +1475,6 @@ pub(crate) fn scatter_nd<F: PrimeField + TensorType + PartialOrd>(
region.increment(claimed_output.len());
claimed_output.reshape(input.dims())?;
// scatter elements is the inverse of gather elements
let (gather_src, linear_index) =
gather_nd(config, region, &[claimed_output.clone(), index.clone()], 0)?;
@@ -1498,7 +1515,7 @@ pub(crate) fn scatter_nd<F: PrimeField + TensorType + PartialOrd>(
}
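// An out-of-circuit sketch of the claimed scatter witness (1-D case for
// illustration): start from the input, then overwrite at each index. The
// circuit checks the claim by gathering from the output at `index` and
// constraining the result to equal `src`.
fn scatter_1d(input: &[i128], index: &[usize], src: &[i128]) -> Vec<i128> {
    let mut out = input.to_vec();
    for (i, &idx) in index.iter().enumerate() {
        out[idx] = src[i];
    }
    out
}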
/// sum accumulated layout
pub(crate) fn sum<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn sum<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1581,7 +1598,7 @@ pub(crate) fn sum<F: PrimeField + TensorType + PartialOrd>(
}
/// product accumulated layout
pub(crate) fn prod<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn prod<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1661,7 +1678,7 @@ pub(crate) fn prod<F: PrimeField + TensorType + PartialOrd>(
}
/// Axes wise op wrapper
fn axes_wise_op<F: PrimeField + TensorType + PartialOrd>(
fn axes_wise_op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1722,7 +1739,7 @@ fn axes_wise_op<F: PrimeField + TensorType + PartialOrd>(
}
/// Sum accumulated layout
pub(crate) fn prod_axes<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn prod_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1733,7 +1750,7 @@ pub(crate) fn prod_axes<F: PrimeField + TensorType + PartialOrd>(
}
/// Sum accumulated layout
pub(crate) fn sum_axes<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn sum_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1744,7 +1761,7 @@ pub(crate) fn sum_axes<F: PrimeField + TensorType + PartialOrd>(
}
/// argmax layout
pub(crate) fn argmax_axes<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn argmax_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1762,7 +1779,7 @@ pub(crate) fn argmax_axes<F: PrimeField + TensorType + PartialOrd>(
}
/// Max accumulated layout
pub(crate) fn max_axes<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn max_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1774,7 +1791,7 @@ pub(crate) fn max_axes<F: PrimeField + TensorType + PartialOrd>(
}
/// Argmin layout
pub(crate) fn argmin_axes<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn argmin_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1792,7 +1809,7 @@ pub(crate) fn argmin_axes<F: PrimeField + TensorType + PartialOrd>(
}
/// Min accumulated layout
pub(crate) fn min_axes<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn min_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1804,7 +1821,7 @@ pub(crate) fn min_axes<F: PrimeField + TensorType + PartialOrd>(
}
/// Pairwise (elementwise) op layout
pub(crate) fn pairwise<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn pairwise<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -1959,7 +1976,7 @@ pub(crate) fn pairwise<F: PrimeField + TensorType + PartialOrd>(
}
/// expand the tensor to the given shape
pub(crate) fn expand<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn expand<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -1972,7 +1989,7 @@ pub(crate) fn expand<F: PrimeField + TensorType + PartialOrd>(
}
///
pub(crate) fn greater<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn greater<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -1995,7 +2012,7 @@ pub(crate) fn greater<F: PrimeField + TensorType + PartialOrd>(
}
///
pub(crate) fn greater_equal<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn greater_equal<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -2018,7 +2035,7 @@ pub(crate) fn greater_equal<F: PrimeField + TensorType + PartialOrd>(
}
///
pub(crate) fn less<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn less<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -2028,7 +2045,7 @@ pub(crate) fn less<F: PrimeField + TensorType + PartialOrd>(
}
///
pub(crate) fn less_equal<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn less_equal<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -2038,7 +2055,7 @@ pub(crate) fn less_equal<F: PrimeField + TensorType + PartialOrd>(
}
/// And boolean operation
pub(crate) fn and<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn and<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -2049,7 +2066,7 @@ pub(crate) fn and<F: PrimeField + TensorType + PartialOrd>(
}
/// Or boolean operation
pub(crate) fn or<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn or<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -2065,7 +2082,7 @@ pub(crate) fn or<F: PrimeField + TensorType + PartialOrd>(
}
/// Equality boolean operation
pub(crate) fn equals<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn equals<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -2075,7 +2092,7 @@ pub(crate) fn equals<F: PrimeField + TensorType + PartialOrd>(
}
/// Equality boolean operation
pub(crate) fn equals_zero<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn equals_zero<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2109,7 +2126,7 @@ pub(crate) fn equals_zero<F: PrimeField + TensorType + PartialOrd>(
}
/// Xor boolean operation
pub(crate) fn xor<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn xor<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -2135,7 +2152,7 @@ pub(crate) fn xor<F: PrimeField + TensorType + PartialOrd>(
}
/// Not boolean operation
pub(crate) fn not<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn not<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2151,7 +2168,7 @@ pub(crate) fn not<F: PrimeField + TensorType + PartialOrd>(
}
/// Iff
pub(crate) fn iff<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn iff<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 3],
@@ -2175,7 +2192,7 @@ pub(crate) fn iff<F: PrimeField + TensorType + PartialOrd>(
}
/// Negation operation accumulated layout
pub(crate) fn neg<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn neg<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2185,7 +2202,7 @@ pub(crate) fn neg<F: PrimeField + TensorType + PartialOrd>(
}
/// Sumpool accumulated layout
pub(crate) fn sumpool<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn sumpool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>],
@@ -2239,7 +2256,7 @@ pub(crate) fn sumpool<F: PrimeField + TensorType + PartialOrd>(
}
/// Convolution accumulated layout
pub(crate) fn max_pool2d<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn max_pool2d<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2304,7 +2321,7 @@ pub(crate) fn max_pool2d<F: PrimeField + TensorType + PartialOrd>(
/// DeConvolution accumulated layout
pub(crate) fn deconv<
F: PrimeField + TensorType + PartialOrd + std::marker::Send + std::marker::Sync,
F: PrimeField + TensorType + PartialOrd + std::hash::Hash + std::marker::Send + std::marker::Sync,
>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
@@ -2397,7 +2414,7 @@ pub(crate) fn deconv<
/// Convolution accumulated layout
pub(crate) fn conv<
F: PrimeField + TensorType + PartialOrd + std::marker::Send + std::marker::Sync,
F: PrimeField + TensorType + PartialOrd + std::hash::Hash + std::marker::Send + std::marker::Sync,
>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
@@ -2578,7 +2595,7 @@ pub(crate) fn conv<
}
/// Power accumulated layout
pub(crate) fn pow<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn pow<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2594,7 +2611,7 @@ pub(crate) fn pow<F: PrimeField + TensorType + PartialOrd>(
}
/// Rescaled op accumulated layout
pub(crate) fn rescale<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn rescale<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>],
@@ -2616,7 +2633,7 @@ pub(crate) fn rescale<F: PrimeField + TensorType + PartialOrd>(
}
/// Dummy (no constraints) reshape layout
pub(crate) fn reshape<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn reshape<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
values: &[ValTensor<F>; 1],
new_dims: &[usize],
) -> Result<ValTensor<F>, Box<dyn Error>> {
@@ -2626,7 +2643,7 @@ pub(crate) fn reshape<F: PrimeField + TensorType + PartialOrd>(
}
/// Dummy (no constraints) move_axis layout
pub(crate) fn move_axis<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn move_axis<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
values: &[ValTensor<F>; 1],
source: usize,
destination: usize,
@@ -2637,7 +2654,7 @@ pub(crate) fn move_axis<F: PrimeField + TensorType + PartialOrd>(
}
/// resize layout
pub(crate) fn resize<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn resize<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2651,7 +2668,7 @@ pub(crate) fn resize<F: PrimeField + TensorType + PartialOrd>(
}
/// Slice layout
pub(crate) fn slice<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn slice<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2660,15 +2677,45 @@ pub(crate) fn slice<F: PrimeField + TensorType + PartialOrd>(
end: &usize,
) -> Result<ValTensor<F>, Box<dyn Error>> {
// assign the input to the advice column if it has not already been assigned.
let mut output = region.assign(&config.custom_gates.output, &values[0])?;
region.increment(output.len());
let mut output = values[0].clone();
let is_assigned = output.all_prev_assigned();
if !is_assigned {
output = region.assign(&config.custom_gates.output, &values[0])?;
region.increment(output.len());
}
output.slice(axis, start, end)?;
Ok(output)
}
/// Trilu layout
pub(crate) fn trilu<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
k: &i32,
upper: &bool,
) -> Result<ValTensor<F>, Box<dyn Error>> {
// assign the input to the advice column if it has not already been assigned.
let mut output = values[0].clone();
let is_assigned = output.all_prev_assigned();
if !is_assigned {
output = region.assign(&config.custom_gates.inputs[0], &values[0])?;
}
let res = tensor::ops::trilu(output.get_inner_tensor()?, *k, *upper)?;
let output = region.assign(&config.custom_gates.output, &res.into())?;
region.increment(output.len());
Ok(output)
}
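// A sketch of the trilu semantics constrained above, mirroring the
// standard definition assumed for tensor::ops::trilu: keep the upper
// (or lower) triangle offset by k, zero everything else.
fn trilu_2d(m: &[Vec<i64>], k: i32, upper: bool) -> Vec<Vec<i64>> {
    m.iter()
        .enumerate()
        .map(|(i, row)| {
            row.iter()
                .enumerate()
                .map(|(j, &v)| {
                    let d = j as i32 - i as i32;
                    let keep = if upper { d >= k } else { d <= k };
                    if keep { v } else { 0 }
                })
                .collect()
        })
        .collect()
}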
/// Concat layout
pub(crate) fn concat<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn concat<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
values: &[ValTensor<F>],
axis: &usize,
) -> Result<ValTensor<F>, Box<dyn Error>> {
@@ -2680,7 +2727,7 @@ pub(crate) fn concat<F: PrimeField + TensorType + PartialOrd>(
}
/// Identity constraint. Usually used to constrain an instance column to an advice so the returned cells / values can be operated upon.
pub(crate) fn identity<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn identity<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2695,7 +2742,7 @@ pub(crate) fn identity<F: PrimeField + TensorType + PartialOrd>(
}
/// Boolean identity constraint. Usually used to constrain an instance column to an advice so the returned cells / values can be operated upon.
pub(crate) fn boolean_identity<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn boolean_identity<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2731,7 +2778,7 @@ pub(crate) fn boolean_identity<F: PrimeField + TensorType + PartialOrd>(
}
/// Downsample layout
pub(crate) fn downsample<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn downsample<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2748,7 +2795,7 @@ pub(crate) fn downsample<F: PrimeField + TensorType + PartialOrd>(
}
/// layout for enforcing two sets of cells to be equal
pub(crate) fn enforce_equality<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn enforce_equality<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
@@ -2774,7 +2821,7 @@ pub(crate) fn enforce_equality<F: PrimeField + TensorType + PartialOrd>(
}
/// layout for range check.
pub(crate) fn range_check<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn range_check<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2830,12 +2877,13 @@ pub(crate) fn range_check<F: PrimeField + TensorType + PartialOrd>(
.collect::<Result<Vec<_>, Box<dyn Error>>>()?;
}
if region.throw_range_check_error() {
let is_assigned = !w.any_unknowns()?;
if is_assigned && region.witness_gen() {
// assert the witnessed values are within range
let int_values = w.get_int_evals()?;
for v in int_values {
if v < range.0 || v > range.1 {
log::debug!("Value ({:?}) out of range: {:?}", v, range);
for v in int_values.iter() {
if v < &range.0 || v > &range.1 {
log::error!("Value ({:?}) out of range: {:?}", v, range);
return Err(Box::new(TensorError::TableLookupError));
}
}
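// A minimal sketch of the witness-time assertion above: the integer
// comparison only runs when witness values are known (witness_gen), so
// dummy passes no longer trip spurious range errors.
fn check_range(vals: &[i128], range: (i128, i128)) -> Result<(), String> {
    for v in vals {
        if *v < range.0 || *v > range.1 {
            return Err(format!("value {} out of range {:?}", v, range));
        }
    }
    Ok(())
}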
@@ -2855,7 +2903,7 @@ pub(crate) fn range_check<F: PrimeField + TensorType + PartialOrd>(
}
/// layout for nonlinearity check.
pub(crate) fn nonlinearity<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn nonlinearity<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2957,7 +3005,7 @@ pub(crate) fn nonlinearity<F: PrimeField + TensorType + PartialOrd>(
}
/// Argmax
pub(crate) fn argmax<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn argmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -2993,7 +3041,7 @@ pub(crate) fn argmax<F: PrimeField + TensorType + PartialOrd>(
}
/// Argmin
pub(crate) fn argmin<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn argmin<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -3029,7 +3077,7 @@ pub(crate) fn argmin<F: PrimeField + TensorType + PartialOrd>(
}
/// max layout
pub(crate) fn max<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn max<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -3039,7 +3087,7 @@ pub(crate) fn max<F: PrimeField + TensorType + PartialOrd>(
}
/// min layout
pub(crate) fn min<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn min<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -3047,7 +3095,7 @@ pub(crate) fn min<F: PrimeField + TensorType + PartialOrd>(
_sort_ascending(config, region, values)?.get_slice(&[0..1])
}
fn multi_dim_axes_op<F: PrimeField + TensorType + PartialOrd>(
fn multi_dim_axes_op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
@@ -3150,18 +3198,19 @@ fn multi_dim_axes_op<F: PrimeField + TensorType + PartialOrd>(
}
/// softmax layout
pub(crate) fn softmax_axes<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn softmax_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
scale: utils::F32,
input_scale: utils::F32,
output_scale: utils::F32,
axes: &[usize],
) -> Result<ValTensor<F>, Box<dyn Error>> {
let soft_max_at_scale = move |config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1]|
-> Result<ValTensor<F>, Box<dyn Error>> {
softmax(config, region, values, scale)
softmax(config, region, values, input_scale, output_scale)
};
let output = multi_dim_axes_op(config, region, values, axes, soft_max_at_scale)?;
@@ -3169,33 +3218,66 @@ pub(crate) fn softmax_axes<F: PrimeField + TensorType + PartialOrd>(
Ok(output)
}
/// softmax func
pub(crate) fn softmax<F: PrimeField + TensorType + PartialOrd>(
/// percent func: divides each element by the sum of all elements
pub(crate) fn percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
scale: utils::F32,
input_scale: utils::F32,
output_scale: utils::F32,
) -> Result<ValTensor<F>, Box<dyn Error>> {
// elementwise exponential
let ex = nonlinearity(config, region, values, &LookupOp::Exp { scale })?;
let is_assigned = values[0].all_prev_assigned();
let mut input = values[0].clone();
if !is_assigned {
input = region.assign(&config.custom_gates.inputs[0], &values[0])?;
region.increment(input.len());
};
// sum of exps
let denom = sum(config, region, &[ex.clone()])?;
// get the inverse
let felt_scale = F::from(scale.0 as u64);
let inv_denom = recip(config, region, &[denom], felt_scale, felt_scale)?;
let denom = sum(config, region, &[input.clone()])?;
let input_felt_scale = F::from(input_scale.0 as u64);
let output_felt_scale = F::from(output_scale.0 as u64);
let inv_denom = recip(
config,
region,
&[denom],
input_felt_scale,
output_felt_scale,
)?;
// product of num * (1 / denom) = 2*output_scale
let softmax = pairwise(config, region, &[ex, inv_denom], BaseOp::Mult)?;
let percent = pairwise(config, region, &[input, inv_denom], BaseOp::Mult)?;
Ok(softmax)
// rebase the percent to 2x the scale
loop_div(config, region, &[percent], input_felt_scale)
}
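// A float-domain sketch of `percent` (scales and field arithmetic
// omitted): each element divided by the sum of all elements.
fn percent_f64(xs: &[f64]) -> Vec<f64> {
    let denom: f64 = xs.iter().sum();
    xs.iter().map(|x| x / denom).collect()
}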
/// softmax func
pub(crate) fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
input_scale: utils::F32,
output_scale: utils::F32,
) -> Result<ValTensor<F>, Box<dyn Error>> {
// get the max then subtract it
let max_val = max(config, region, values)?;
// rebase the input to 0
let sub = pairwise(config, region, &[values[0].clone(), max_val], BaseOp::Sub)?;
// elementwise exponential
let ex = nonlinearity(
config,
region,
&[sub],
&LookupOp::Exp { scale: input_scale },
)?;
percent(config, region, &[ex.clone()], input_scale, output_scale)
}
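// A float-domain sketch of the rewritten softmax pipeline (scales
// omitted): rebase by the max so the exponent lookup stays in range,
// exponentiate, then normalize via `percent`.
fn softmax_f64(xs: &[f64]) -> Vec<f64> {
    let max = xs.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    let exps: Vec<f64> = xs.iter().map(|x| (x - max).exp()).collect();
    let denom: f64 = exps.iter().sum();
    exps.iter().map(|e| e / denom).collect()
}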
/// Checks that the percent error between the expected public output and the actual output value
/// is within the percent error expressed by the `tol` input, where `tol == 1.0` means the percent
/// error tolerance is 1 percent.
pub(crate) fn range_check_percent<F: PrimeField + TensorType + PartialOrd>(
pub(crate) fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],

View File

@@ -123,6 +123,9 @@ pub enum LookupOp {
scale: utils::F32,
a: utils::F32,
},
HardSwish {
scale: utils::F32,
},
}
impl LookupOp {
@@ -134,7 +137,7 @@ impl LookupOp {
}
}
impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for LookupOp {
/// Returns a reference to the Any trait.
fn as_any(&self) -> &dyn Any {
self
@@ -223,6 +226,9 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
LookupOp::ATan { scale } => Ok(tensor::ops::nonlinearities::atan(&x, scale.into())),
LookupOp::ATanh { scale } => Ok(tensor::ops::nonlinearities::atanh(&x, scale.into())),
LookupOp::Tanh { scale } => Ok(tensor::ops::nonlinearities::tanh(&x, scale.into())),
LookupOp::HardSwish { scale } => {
Ok(tensor::ops::nonlinearities::hardswish(&x, scale.into()))
}
}?;
let output = res.map(|x| i128_to_felt(x));
@@ -276,6 +282,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for LookupOp {
LookupOp::ASin { scale } => format!("ASIN(scale={})", scale),
LookupOp::Sinh { scale } => format!("SINH(scale={})", scale),
LookupOp::ASinh { scale } => format!("ASINH(scale={})", scale),
LookupOp::HardSwish { scale } => format!("HARDSWISH(scale={})", scale),
}
}

View File

@@ -27,12 +27,14 @@ pub mod region;
/// A struct representing the result of a forward pass.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd> {
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
pub(crate) output: Tensor<F>,
}
/// A trait representing operations that can be represented as constraints in a circuit.
pub trait Op<F: PrimeField + TensorType + PartialOrd>: std::fmt::Debug + Send + Sync + Any {
pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>:
std::fmt::Debug + Send + Sync + Any
{
/// Matches a [Op] to an operation in the `tensor::ops` module.
fn f(&self, x: &[Tensor<F>]) -> Result<ForwardResult<F>, TensorError>;
/// Returns a string representation of the operation.
@@ -98,7 +100,7 @@ pub trait Op<F: PrimeField + TensorType + PartialOrd>: std::fmt::Debug + Send +
}
}
impl<F: PrimeField + TensorType + PartialOrd> Clone for Box<dyn Op<F>> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Clone for Box<dyn Op<F>> {
fn clone(&self) -> Self {
self.clone_dyn()
}
@@ -165,7 +167,7 @@ pub struct Input {
pub datum_type: InputType,
}
impl<F: PrimeField + TensorType + PartialOrd> Op<F> for Input {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input {
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
Ok(self.scale)
}
@@ -226,7 +228,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for Input {
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Unknown;
impl<F: PrimeField + TensorType + PartialOrd> Op<F> for Unknown {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Unknown {
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
Ok(0)
}
@@ -256,7 +258,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Op<F> for Unknown {
///
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Constant<F: PrimeField + TensorType + PartialOrd> {
pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
///
pub quantized_values: Tensor<F>,
///
@@ -266,7 +268,7 @@ pub struct Constant<F: PrimeField + TensorType + PartialOrd> {
pub pre_assigned_val: Option<ValTensor<F>>,
}
impl<F: PrimeField + TensorType + PartialOrd> Constant<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
///
pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>) -> Self {
Self {
@@ -293,8 +295,14 @@ impl<F: PrimeField + TensorType + PartialOrd> Constant<F> {
}
}
impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<'de>> Op<F>
for Constant<F>
impl<
F: PrimeField
+ TensorType
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>,
> Op<F> for Constant<F>
{
fn as_any(&self) -> &dyn Any {
self

View File

@@ -83,10 +83,20 @@ pub enum PolyOp {
And,
Or,
Xor,
Trilu {
upper: bool,
k: i32,
},
}
impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<'de>> Op<F>
for PolyOp
impl<
F: PrimeField
+ TensorType
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>,
> Op<F> for PolyOp
{
/// Returns a reference to the Any trait.
fn as_any(&self) -> &dyn Any {
@@ -114,7 +124,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
PolyOp::Add => "ADD".into(),
PolyOp::Mult => "MULT".into(),
PolyOp::Sub => "SUB".into(),
PolyOp::Sum { .. } => "SUM".into(),
PolyOp::Sum { axes } => format!("SUM (axes={:?})", axes),
PolyOp::Prod { .. } => "PROD".into(),
PolyOp::Pow(_) => "POW".into(),
PolyOp::Conv { .. } => "CONV".into(),
@@ -128,6 +138,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
PolyOp::And => "AND".into(),
PolyOp::Or => "OR".into(),
PolyOp::Xor => "XOR".into(),
PolyOp::Trilu { upper, k } => format!("TRILU (upper={}, k={})", upper, k),
}
}
@@ -265,6 +276,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
};
tensor::ops::scatter_nd(&x, &idx, &src)
}
PolyOp::Trilu { upper, k } => tensor::ops::trilu(&inputs[0], *k, *upper),
}?;
Ok(ForwardResult { output: res })
@@ -384,6 +396,9 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
PolyOp::Slice { axis, start, end } => {
layouts::slice(config, region, values[..].try_into()?, axis, start, end)?
}
PolyOp::Trilu { upper, k } => {
layouts::trilu(config, region, values[..].try_into()?, k, upper)?
}
}))
}

View File

@@ -2,24 +2,28 @@ use crate::{
circuit::table::Range,
tensor::{Tensor, TensorError, TensorType, ValTensor, ValType, VarTensor},
};
#[cfg(not(target_arch = "wasm32"))]
use colored::Colorize;
use halo2_proofs::{
circuit::Region,
plonk::{Error, Selector},
};
use halo2curves::ff::PrimeField;
use portable_atomic::AtomicI128 as AtomicInt;
use std::{
cell::RefCell,
collections::HashSet,
collections::{HashMap, HashSet},
sync::{
atomic::{AtomicUsize, Ordering},
Arc, Mutex,
},
};
use portable_atomic::AtomicI128 as AtomicInt;
use super::lookup::LookupOp;
/// Constants map
pub type ConstantsMap<F> = HashMap<F, ValType<F>>;
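// A sketch of the deduplication this map enables (assumed usage): the
// first assignment of a field element as a constant caches its cell, and
// later assignments of the same value reuse that cell rather than
// allocating a new one.
fn assign_constant_cached<F, C, A>(
    map: &mut std::collections::HashMap<F, C>,
    value: F,
    mut alloc: A,
) -> C
where
    F: std::hash::Hash + Eq + Copy,
    C: Copy,
    A: FnMut(F) -> C,
{
    *map.entry(value).or_insert_with(|| alloc(value))
}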
/// Dynamic lookup index
#[derive(Clone, Debug, Default)]
pub struct DynamicLookupIndex {
@@ -120,12 +124,11 @@ impl From<Box<dyn std::error::Error>> for RegionError {
#[derive(Debug)]
/// A context for a region
pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd> {
pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
region: Option<RefCell<Region<'a, F>>>,
row: usize,
linear_coord: usize,
num_inner_cols: usize,
total_constants: usize,
dynamic_lookup_index: DynamicLookupIndex,
shuffle_index: ShuffleIndex,
used_lookups: HashSet<LookupOp>,
@@ -133,13 +136,34 @@ pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd> {
max_lookup_inputs: i128,
min_lookup_inputs: i128,
max_range_size: i128,
throw_range_check_error: bool,
witness_gen: bool,
assigned_constants: ConstantsMap<F>,
}
impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a, F> {
#[cfg(not(target_arch = "wasm32"))]
///
pub fn increment_total_constants(&mut self, n: usize) {
self.total_constants += n;
pub fn debug_report(&self) {
log::debug!(
"(rows={}, coord={}, constants={}, max_lookup_inputs={}, min_lookup_inputs={}, max_range_size={}, dynamic_lookup_col_coord={}, shuffle_col_coord={})",
self.row().to_string().blue(),
self.linear_coord().to_string().yellow(),
self.total_constants().to_string().red(),
self.max_lookup_inputs().to_string().green(),
self.min_lookup_inputs().to_string().green(),
self.max_range_size().to_string().green(),
self.dynamic_lookup_col_coord().to_string().green(),
self.shuffle_col_coord().to_string().green());
}
///
pub fn assigned_constants(&self) -> &ConstantsMap<F> {
&self.assigned_constants
}
///
pub fn update_constants(&mut self, constants: ConstantsMap<F>) {
self.assigned_constants.extend(constants);
}
///
@@ -163,8 +187,8 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
}
///
pub fn throw_range_check_error(&self) -> bool {
self.throw_range_check_error
pub fn witness_gen(&self) -> bool {
self.witness_gen
}
/// Create a new region context
@@ -177,7 +201,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
num_inner_cols,
row,
linear_coord,
total_constants: 0,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
@@ -185,9 +208,22 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
throw_range_check_error: false,
witness_gen: true,
assigned_constants: HashMap::new(),
}
}
/// Create a new region context
pub fn new_with_constants(
region: Region<'a, F>,
row: usize,
num_inner_cols: usize,
constants: ConstantsMap<F>,
) -> RegionCtx<'a, F> {
let mut new_self = Self::new(region, row, num_inner_cols);
new_self.assigned_constants = constants;
new_self
}
/// Create a new region context from a wrapped region
pub fn from_wrapped_region(
region: Option<RefCell<Region<'a, F>>>,
@@ -202,7 +238,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
num_inner_cols,
linear_coord,
row,
total_constants: 0,
dynamic_lookup_index,
shuffle_index,
used_lookups: HashSet::new(),
@@ -210,16 +245,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
throw_range_check_error: false,
witness_gen: false,
assigned_constants: HashMap::new(),
}
}
/// Create a new region context
pub fn new_dummy(
row: usize,
num_inner_cols: usize,
throw_range_check_error: bool,
) -> RegionCtx<'a, F> {
pub fn new_dummy(row: usize, num_inner_cols: usize, witness_gen: bool) -> RegionCtx<'a, F> {
let region = None;
let linear_coord = row * num_inner_cols;
@@ -228,7 +260,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
num_inner_cols,
linear_coord,
row,
total_constants: 0,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
@@ -236,17 +267,17 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
throw_range_check_error,
witness_gen,
assigned_constants: HashMap::new(),
}
}
/// Create a new region context
pub fn new_dummy_with_constants(
pub fn new_dummy_with_linear_coord(
row: usize,
linear_coord: usize,
total_constants: usize,
num_inner_cols: usize,
throw_range_check_error: bool,
witness_gen: bool,
) -> RegionCtx<'a, F> {
let region = None;
RegionCtx {
@@ -254,7 +285,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
num_inner_cols,
linear_coord,
row,
total_constants,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
used_lookups: HashSet::new(),
@@ -262,7 +292,8 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
throw_range_check_error,
witness_gen,
assigned_constants: HashMap::new(),
}
}
@@ -312,29 +343,27 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
) -> Result<(), RegionError> {
let row = AtomicUsize::new(self.row());
let linear_coord = AtomicUsize::new(self.linear_coord());
let constants = AtomicUsize::new(self.total_constants());
let max_lookup_inputs = AtomicInt::new(self.max_lookup_inputs());
let min_lookup_inputs = AtomicInt::new(self.min_lookup_inputs());
let lookups = Arc::new(Mutex::new(self.used_lookups.clone()));
let range_checks = Arc::new(Mutex::new(self.used_range_checks.clone()));
let dynamic_lookup_index = Arc::new(Mutex::new(self.dynamic_lookup_index.clone()));
let shuffle_index = Arc::new(Mutex::new(self.shuffle_index.clone()));
let constants = Arc::new(Mutex::new(self.assigned_constants.clone()));
*output = output
.par_enum_map(|idx, _| {
// we kick off the loop with the current offset
let starting_offset = row.load(Ordering::SeqCst);
let starting_linear_coord = linear_coord.load(Ordering::SeqCst);
let starting_constants = constants.load(Ordering::SeqCst);
// get inner value of the locked lookups
// we need to make sure that the region is not shared between threads
let mut local_reg = Self::new_dummy_with_constants(
let mut local_reg = Self::new_dummy_with_linear_coord(
starting_offset,
starting_linear_coord,
starting_constants,
self.num_inner_cols,
self.throw_range_check_error,
self.witness_gen,
);
let res = inner_loop_function(idx, &mut local_reg);
// we update the offset and constants
@@ -343,10 +372,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
local_reg.linear_coord() - starting_linear_coord,
Ordering::SeqCst,
);
constants.fetch_add(
local_reg.total_constants() - starting_constants,
Ordering::SeqCst,
);
max_lookup_inputs.fetch_max(local_reg.max_lookup_inputs(), Ordering::SeqCst);
min_lookup_inputs.fetch_min(local_reg.min_lookup_inputs(), Ordering::SeqCst);
@@ -362,11 +387,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
// update the shuffle index
let mut shuffle_index = shuffle_index.lock().unwrap();
shuffle_index.update(&local_reg.shuffle_index);
// update the constants
let mut constants = constants.lock().unwrap();
constants.extend(local_reg.assigned_constants);
res
})
.map_err(|e| RegionError::from(format!("dummy_loop: {:?}", e)))?;
self.total_constants = constants.into_inner();
self.linear_coord = linear_coord.into_inner();
#[allow(trivial_numeric_casts)]
{
@@ -410,6 +437,14 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
.map_err(|e| {
RegionError::from(format!("dummy_loop: failed to get shuffle index: {:?}", e))
})?;
self.assigned_constants = Arc::try_unwrap(constants)
.map_err(|e| {
RegionError::from(format!("dummy_loop: failed to get constants: {:?}", e))
})?
.into_inner()
.map_err(|e| {
RegionError::from(format!("dummy_loop: failed to get constants: {:?}", e))
})?;
Ok(())
}
@@ -435,7 +470,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
range: Range,
) -> Result<(), Box<dyn std::error::Error>> {
if range.0 > range.1 {
return Err("update_max_min_lookup_range: invalid range".into());
return Err(format!("update_max_min_lookup_range: invalid range {:?}", range).into());
}
let range_size = (range.1 - range.0).abs();
@@ -477,7 +512,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
/// Get the total number of constants
pub fn total_constants(&self) -> usize {
self.total_constants
self.assigned_constants.len()
}
/// Get the dynamic lookup index
@@ -525,26 +560,24 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
self.max_range_size
}
/// Assign a constant value
pub fn assign_constant(&mut self, var: &VarTensor, value: F) -> Result<ValType<F>, Error> {
self.total_constants += 1;
if let Some(region) = &self.region {
let cell = var.assign_constant(&mut region.borrow_mut(), self.linear_coord, value)?;
Ok(cell.into())
} else {
Ok(value.into())
}
}
/// Assign a valtensor to a vartensor
pub fn assign(
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, Error> {
self.total_constants += values.num_constants();
if let Some(region) = &self.region {
var.assign(&mut region.borrow_mut(), self.linear_coord, values)
var.assign(
&mut region.borrow_mut(),
self.linear_coord,
values,
&mut self.assigned_constants,
)
} else {
if !values.is_instance() {
let values_map = values.create_constants_map_iterator();
self.assigned_constants.extend(values_map);
}
Ok(values.clone())
}
}
@@ -560,14 +593,18 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, Error> {
self.total_constants += values.num_constants();
if let Some(region) = &self.region {
var.assign(
&mut region.borrow_mut(),
self.combined_dynamic_shuffle_coord(),
values,
&mut self.assigned_constants,
)
} else {
if !values.is_instance() {
let values_map = values.create_constants_map_iterator();
self.assigned_constants.extend(values_map);
}
Ok(values.clone())
}
}
@@ -594,13 +631,20 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
self.linear_coord,
values,
ommissions,
&mut self.assigned_constants,
)
} else {
self.total_constants += values.num_constants();
let inner_tensor = values.get_inner_tensor().unwrap();
let mut values_map = values.create_constants_map();
for o in ommissions {
self.total_constants -= inner_tensor.get_flat_index(**o).is_constant() as usize;
if let ValType::Constant(value) = inner_tensor.get_flat_index(**o) {
values_map.remove(&value);
}
}
self.assigned_constants.extend(values_map);
Ok(values.clone())
}
}
@@ -615,24 +659,24 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
) -> Result<(ValTensor<F>, usize), Error> {
if let Some(region) = &self.region {
// duplicates every nth element to adjust for column overflow
let (res, len, total_assigned_constants) = var.assign_with_duplication(
let (res, len) = var.assign_with_duplication(
&mut region.borrow_mut(),
self.row,
self.linear_coord,
values,
check_mode,
single_inner_col,
&mut self.assigned_constants,
)?;
self.total_constants += total_assigned_constants;
Ok((res, len))
} else {
let (_, len, total_assigned_constants) = var.dummy_assign_with_duplication(
let (_, len) = var.dummy_assign_with_duplication(
self.row,
self.linear_coord,
values,
single_inner_col,
&mut self.assigned_constants,
)?;
self.total_constants += total_assigned_constants;
Ok((values.clone(), len))
}
}
@@ -699,9 +743,4 @@ impl<'a, F: PrimeField + TensorType + PartialOrd> RegionCtx<'a, F> {
}
Ok(())
}
/// increment constants
pub fn increment_constants(&mut self, n: usize) {
self.total_constants += n
}
}

View File

@@ -6,7 +6,7 @@ use halo2_proofs::{
circuit::{Layouter, Value},
plonk::{ConstraintSystem, Expression, TableColumn},
};
use log::warn;
use log::{debug, warn};
use maybe_rayon::prelude::{IntoParallelIterator, ParallelIterator};
use crate::{
@@ -98,7 +98,7 @@ pub struct Table<F: PrimeField> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
/// get column index given input
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
@@ -138,7 +138,7 @@ pub fn num_cols_required(range_len: i128, col_size: usize) -> usize {
(range_len / (col_size as i128)) as usize + 1
}
impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
/// Configures the table.
pub fn configure(
cs: &mut ConstraintSystem<F>,
@@ -152,7 +152,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
// number of cols needed to store the range
let num_cols = num_cols_required((range.1 - range.0).abs(), col_size);
log::debug!("table range: {:?}", range);
debug!("table range: {:?}", range);
let table_inputs = preexisting_inputs.unwrap_or_else(|| {
let mut cols = vec![];
@@ -275,7 +275,7 @@ pub struct RangeCheck<F: PrimeField> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
/// get first_element of column
pub fn get_first_element(&self, chunk: usize) -> F {
let chunk = chunk as i128;
@@ -303,7 +303,7 @@ impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
}
}
impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
/// Configures the table.
pub fn configure(cs: &mut ConstraintSystem<F>, range: Range, logrows: usize) -> RangeCheck<F> {
log::debug!("range check range: {:?}", range);

View File

@@ -1911,6 +1911,8 @@ mod add_with_overflow {
#[cfg(test)]
mod add_with_overflow_and_poseidon {
use std::collections::HashMap;
use halo2curves::bn256::Fr;
use crate::circuit::modules::{
@@ -1969,8 +1971,10 @@ mod add_with_overflow_and_poseidon {
let poseidon_chip: PoseidonChip<PoseidonSpec, WIDTH, RATE, WIDTH> =
PoseidonChip::new(config.poseidon.clone());
let assigned_inputs_a = poseidon_chip.layout(&mut layouter, &self.inputs[0..1], 0)?;
let assigned_inputs_b = poseidon_chip.layout(&mut layouter, &self.inputs[1..2], 1)?;
let assigned_inputs_a =
poseidon_chip.layout(&mut layouter, &self.inputs[0..1], 0, &mut HashMap::new())?;
let assigned_inputs_b =
poseidon_chip.layout(&mut layouter, &self.inputs[1..2], 1, &mut HashMap::new())?;
layouter.assign_region(|| "_new_module", |_| Ok(()))?;

View File

@@ -24,6 +24,8 @@ use crate::pfsys::{
use crate::pfsys::{save_vk, srs::*};
use crate::tensor::TensorError;
use crate::{Commitments, RunArgs};
#[cfg(not(target_arch = "wasm32"))]
use colored::Colorize;
#[cfg(unix)]
use gag::Gag;
use halo2_proofs::dev::VerifyFailure;
@@ -538,7 +540,7 @@ fn check_srs_hash(
let path = get_srs_path(logrows, srs_path, commitment);
let hash = get_file_hash(&path)?;
let predefined_hash = match { crate::srs_sha::PUBLIC_SRS_SHA256_HASHES.get(&logrows) } {
let predefined_hash = match crate::srs_sha::PUBLIC_SRS_SHA256_HASHES.get(&logrows) {
Some(h) => h,
None => return Err(format!("SRS (k={}) hash not found in public set", logrows).into()),
};
@@ -684,7 +686,7 @@ pub(crate) async fn gen_witness(
&mut input,
vk.as_ref(),
Some(&srs),
false,
true,
)?
}
Commitments::IPA => {
@@ -698,16 +700,16 @@ pub(crate) async fn gen_witness(
&mut input,
vk.as_ref(),
Some(&srs),
false,
true,
)?
}
}
} else {
warn!("SRS for poly commit does not exist (will be ignored)");
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, false)?
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, true)?
}
} else {
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, false)?
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, true)?
};
// print each variable tuple (symbol, value) as symbol=value
@@ -819,7 +821,15 @@ impl AccuracyResults {
let error = (original.clone() - calibrated.clone())?;
let abs_error = error.map(|x| x.abs());
let squared_error = error.map(|x| x.powi(2));
let percentage_error = error.enum_map(|i, x| Ok::<_, TensorError>(x / original[i]))?;
let percentage_error = error.enum_map(|i, x| {
// guard against 0/0: if both the original value and the error are 0, report 0
let res = if original[i] == 0.0 && x == 0.0 {
0.0
} else {
x / original[i]
};
Ok::<f32, TensorError>(res)
})?;
let abs_percentage_error = percentage_error.map(|x| x.abs());
errors.extend(error);
@@ -888,6 +898,7 @@ pub(crate) fn calibrate(
only_range_check_rebase: bool,
max_logrows: Option<u32>,
) -> Result<GraphSettings, Box<dyn Error>> {
use log::error;
use std::collections::HashMap;
use tabled::Table;
@@ -900,9 +911,9 @@ pub(crate) fn calibrate(
let model = Model::from_run_args(&settings.run_args, &model_path)?;
let chunks = data.split_into_batches(model.graph.input_shapes()?)?;
info!("num of calibration batches: {}", chunks.len());
info!("num calibration batches: {}", chunks.len());
info!("running onnx predictions...");
debug!("running onnx predictions...");
let original_predictions = Model::run_onnx_predictions(
&settings.run_args,
&model_path,
@@ -970,10 +981,18 @@ pub(crate) fn calibrate(
let pb = init_bar(range_grid.len() as u64);
pb.set_message("calibrating...");
let mut num_failed = 0;
let mut num_passed = 0;
for (((input_scale, param_scale), scale_rebase_multiplier), div_rebasing) in range_grid {
pb.set_message(format!(
"input scale: {}, param scale: {}, scale rebase multiplier: {}, div rebasing: {}",
input_scale, param_scale, scale_rebase_multiplier, div_rebasing
"i-scale: {}, p-scale: {}, rebase-(x): {}, div-rebase: {}, fail: {}, pass: {}",
input_scale.to_string().blue(),
param_scale.to_string().blue(),
scale_rebase_multiplier.to_string().blue(),
div_rebasing.to_string().yellow(),
num_failed.to_string().red(),
num_passed.to_string().green()
));
let key = (
@@ -1007,7 +1026,9 @@ pub(crate) fn calibrate(
let mut circuit = match GraphCircuit::from_run_args(&local_run_args, &model_path) {
Ok(c) => c,
Err(e) => {
debug!("circuit creation from run args failed: {:?}", e);
error!("circuit creation from run args failed: {:?}", e);
pb.inc(1);
num_failed += 1;
continue;
}
};
@@ -1039,7 +1060,9 @@ pub(crate) fn calibrate(
Ok(_) => (),
// typically errors will be due to the circuit overflowing the i128 limit
Err(e) => {
debug!("forward pass failed: {:?}", e);
error!("forward pass failed: {:?}", e);
pb.inc(1);
num_failed += 1;
continue;
}
}
@@ -1104,8 +1127,10 @@ pub(crate) fn calibrate(
"found settings: \n {}",
found_settings.as_json()?.to_colored_json_auto()?
);
num_passed += 1;
} else {
debug!("calibration failed {}", res.err().unwrap());
error!("calibration failed {}", res.err().unwrap());
num_failed += 1;
}
pb.inc(1);
@@ -1879,7 +1904,9 @@ pub(crate) fn mock_aggregate(
}
Err(_) => {
return Err(
format!("invalid sample commitment type for aggregation, must be KZG").into(),
"invalid sample commitment type for aggregation, must be KZG"
.to_string()
.into(),
);
}
}
@@ -1922,7 +1949,9 @@ pub(crate) fn setup_aggregate(
}
Err(_) => {
return Err(
format!("invalid sample commitment type for aggregation, must be KZG",).into(),
"invalid sample commitment type for aggregation, must be KZG"
.to_string()
.into(),
);
}
}
@@ -1983,7 +2012,9 @@ pub(crate) fn aggregate(
}
Err(_) => {
return Err(
format!("invalid sample commitment type for aggregation, must be KZG").into(),
"invalid sample commitment type for aggregation, must be KZG"
.to_string()
.into(),
);
}
}

View File

@@ -26,6 +26,7 @@ use self::input::{FileSource, GraphData};
use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSizes};
use crate::circuit::lookup::LookupOp;
use crate::circuit::modules::ModulePlanner;
use crate::circuit::region::ConstantsMap;
use crate::circuit::table::{num_cols_required, Range, Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::{CheckMode, InputType};
use crate::fieldutils::felt_to_f64;
@@ -155,7 +156,7 @@ use std::cell::RefCell;
thread_local!(
/// This is a global variable that holds the settings for the graph
/// This is used to pass settings to the layouter and other parts of the circuit without needing to heavily modify the Halo2 API in a new fork
pub static GLOBAL_SETTINGS: RefCell<Option<GraphSettings>> = RefCell::new(None)
pub static GLOBAL_SETTINGS: RefCell<Option<GraphSettings>> = const { RefCell::new(None) }
);
/// Result from a forward pass
@@ -503,7 +504,9 @@ impl GraphSettings {
}
fn constants_logrows(&self) -> u32 {
(self.total_const_size as f64).log2().ceil() as u32
(self.total_const_size as f64 / self.run_args.num_inner_cols as f64)
.log2()
.ceil() as u32
}
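// A worked example of the corrected sizing (hypothetical numbers): with
// total_const_size = 10_000 and num_inner_cols = 4, each inner column
// holds 2_500 constants, so ceil(log2(2500)) = 12 rows suffice; the old
// formula ignored the inner columns and over-allocated logrows.
fn constants_logrows(total_const_size: usize, num_inner_cols: usize) -> u32 {
    (total_const_size as f64 / num_inner_cols as f64).log2().ceil() as u32
}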
/// calculate the total number of instances
@@ -1049,16 +1052,10 @@ impl GraphCircuit {
}
fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: i128) -> Range {
let mut margin = (
(
lookup_safety_margin * min_max_lookup.0,
lookup_safety_margin * min_max_lookup.1,
);
if lookup_safety_margin == 1 {
margin.0 += 4;
margin.1 += 4;
}
margin
)
}
fn calc_num_cols(range_len: i128, max_logrows: u32) -> usize {
@@ -1171,17 +1168,6 @@ impl GraphCircuit {
.settings()
.clone();
// recalculate the logrows if there has been overflow on the constants
settings_mut.run_args.logrows = std::cmp::max(
settings_mut.run_args.logrows,
settings_mut.constants_logrows(),
);
// recalculate the logrows if there has been overflow for the model constraints
settings_mut.run_args.logrows = std::cmp::max(
settings_mut.run_args.logrows,
settings_mut.model_constraint_logrows(),
);
debug!(
"setting lookup_range to: {:?}, setting logrows to: {}",
self.settings().run_args.lookup_range,
@@ -1253,7 +1239,7 @@ impl GraphCircuit {
inputs: &mut [Tensor<Fp>],
vk: Option<&VerifyingKey<G1Affine>>,
srs: Option<&Scheme::ParamsProver>,
throw_range_check_error: bool,
witness_gen: bool,
) -> Result<GraphWitness, Box<dyn std::error::Error>> {
let original_inputs = inputs.to_vec();
@@ -1302,7 +1288,7 @@ impl GraphCircuit {
let mut model_results =
self.model()
.forward(inputs, &self.settings().run_args, throw_range_check_error)?;
.forward(inputs, &self.settings().run_args, witness_gen)?;
if visibility.output.requires_processing() {
let module_outlets = visibility.output.overwrites_inputs();
@@ -1617,6 +1603,8 @@ impl Circuit<Fp> for GraphCircuit {
let output_vis = &self.settings().run_args.output_visibility;
let mut graph_modules = GraphModules::new();
let mut constants = ConstantsMap::new();
let mut config = config.clone();
let mut inputs = self
@@ -1662,6 +1650,7 @@ impl Circuit<Fp> for GraphCircuit {
&mut input_outlets,
input_visibility,
&mut instance_offset,
&mut constants,
)?;
// replace inputs with the outlets
for (i, outlet) in outlets.iter().enumerate() {
@@ -1674,6 +1663,7 @@ impl Circuit<Fp> for GraphCircuit {
&mut inputs,
input_visibility,
&mut instance_offset,
&mut constants,
)?;
}
@@ -1710,6 +1700,7 @@ impl Circuit<Fp> for GraphCircuit {
&mut flattened_params,
param_visibility,
&mut instance_offset,
&mut constants,
)?;
let shapes = self.model().const_shapes();
@@ -1738,6 +1729,7 @@ impl Circuit<Fp> for GraphCircuit {
&inputs,
&mut vars,
&outputs,
&mut constants,
)
.map_err(|e| {
log::error!("{}", e);
@@ -1762,6 +1754,7 @@ impl Circuit<Fp> for GraphCircuit {
&mut output_outlets,
&self.settings().run_args.output_visibility,
&mut instance_offset,
&mut constants,
)?;
// replace outputs with the outlets
@@ -1775,6 +1768,7 @@ impl Circuit<Fp> for GraphCircuit {
&mut outputs,
&self.settings().run_args.output_visibility,
&mut instance_offset,
&mut constants,
)?;
}

View File

@@ -5,6 +5,7 @@ use super::vars::*;
use super::GraphError;
use super::GraphSettings;
use crate::circuit::hybrid::HybridOp;
use crate::circuit::region::ConstantsMap;
use crate::circuit::region::RegionCtx;
use crate::circuit::table::Range;
use crate::circuit::Input;
@@ -404,7 +405,7 @@ impl ParsedNodes {
.get(input)
.ok_or(GraphError::MissingNode(*input))?;
let input_dims = node.out_dims();
let input_dim = input_dims.get(0).ok_or(GraphError::MissingNode(*input))?;
let input_dim = input_dims.first().ok_or(GraphError::MissingNode(*input))?;
inputs.push(input_dim.clone());
}
@@ -514,21 +515,24 @@ impl Model {
instance_shapes.len().to_string().blue(),
"instances".blue()
);
// this is the total number of variables we will need to allocate
// for the circuit
let default_value = if !self.visibility.input.is_fixed() {
ValType::Value(Value::<Fp>::unknown())
} else {
ValType::Constant(Fp::ONE)
};
let inputs: Vec<ValTensor<Fp>> = self
.graph
.input_shapes()?
.iter()
.map(|shape| {
let mut t: ValTensor<Fp> =
vec![default_value.clone(); shape.iter().product()].into();
let len = shape.iter().product();
let mut t: ValTensor<Fp> = (0..len)
.map(|_| {
if !self.visibility.input.is_fixed() {
ValType::Value(Value::<Fp>::unknown())
} else {
ValType::Constant(Fp::random(&mut rand::thread_rng()))
}
})
.collect::<Vec<_>>()
.into();
t.reshape(shape)?;
Ok(t)
})
@@ -577,13 +581,13 @@ impl Model {
&self,
model_inputs: &[Tensor<Fp>],
run_args: &RunArgs,
throw_range_check_error: bool,
witness_gen: bool,
) -> Result<ForwardResult, Box<dyn Error>> {
let valtensor_inputs: Vec<ValTensor<Fp>> = model_inputs
.iter()
.map(|x| x.map(|elem| ValType::Value(Value::known(elem))).into())
.collect();
let res = self.dummy_layout(run_args, &valtensor_inputs, throw_range_check_error)?;
let res = self.dummy_layout(run_args, &valtensor_inputs, witness_gen)?;
Ok(res.into())
}
@@ -1071,6 +1075,8 @@ impl Model {
/// * `layouter` - Halo2 Layouter.
/// * `inputs` - The values to feed into the circuit.
/// * `vars` - The variables for the circuit.
/// * `witnessed_outputs` - The values to compare against.
/// * `constants` - The constants for the circuit.
pub fn layout(
&self,
mut config: ModelConfig,
@@ -1079,6 +1085,7 @@ impl Model {
inputs: &[ValTensor<Fp>],
vars: &mut ModelVars<Fp>,
witnessed_outputs: &[ValTensor<Fp>],
constants: &mut ConstantsMap<Fp>,
) -> Result<Vec<ValTensor<Fp>>, Box<dyn Error>> {
info!("model layout...");
@@ -1104,14 +1111,12 @@ impl Model {
config.base.layout_tables(layouter)?;
config.base.layout_range_checks(layouter)?;
let mut num_rows = 0;
let mut linear_coord = 0;
let mut total_const_size = 0;
let original_constants = constants.clone();
let outputs = layouter.assign_region(
|| "model",
|region| {
let mut thread_safe_region = RegionCtx::new(region, 0, run_args.num_inner_cols);
let mut thread_safe_region = RegionCtx::new_with_constants(region, 0, run_args.num_inner_cols, original_constants.clone());
// we need to do this as this loop is called multiple times
vars.set_instance_idx(instance_idx);
@@ -1157,29 +1162,17 @@ impl Model {
error!("{}", e);
halo2_proofs::plonk::Error::Synthesis
})?;
} else if !run_args.output_visibility.is_private() {
for output in &outputs {
thread_safe_region.increment_total_constants(output.num_constants());
}
}
num_rows = thread_safe_region.row();
linear_coord = thread_safe_region.linear_coord();
total_const_size = thread_safe_region.total_constants();
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
thread_safe_region.debug_report();
*constants = thread_safe_region.assigned_constants().clone();
Ok(outputs)
},
)?;
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
debug!(
"{} {} {} (coord={}, constants={})",
"model uses".blue(),
num_rows.to_string().blue(),
"rows".blue(),
linear_coord.to_string().yellow(),
total_const_size.to_string().red()
);
)?;
let duration = start_time.elapsed();
trace!("model layout took: {:?}", duration);
@@ -1213,16 +1206,10 @@ impl Model {
vec![results.get(idx).ok_or(GraphError::MissingResults)?[0].clone()]
};
debug!(
"laying out {}: {}, row:{}, coord:{}, total_constants: {}, max_lookup_inputs: {}, min_lookup_inputs: {}",
idx,
node.as_str(),
region.row(),
region.linear_coord(),
region.total_constants(),
region.max_lookup_inputs(),
region.min_lookup_inputs()
);
debug!("laying out {}: {}", idx, node.as_str(),);
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
region.debug_report();
debug!("dims: {:?}", node.out_dims());
debug!(
"input_dims {:?}",
@@ -1380,7 +1367,7 @@ impl Model {
&self,
run_args: &RunArgs,
inputs: &[ValTensor<Fp>],
throw_range_check_error: bool,
witness_gen: bool,
) -> Result<DummyPassRes, Box<dyn Error>> {
debug!("calculating num of constraints using dummy model layout...");
@@ -1399,29 +1386,31 @@ impl Model {
vars: ModelVars::new_dummy(),
};
let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols, throw_range_check_error);
let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols, witness_gen);
let outputs = self.layout_nodes(&mut model_config, &mut region, &mut results)?;
if self.visibility.output.is_public() || self.visibility.output.is_fixed() {
let default_value = if !self.visibility.output.is_fixed() {
ValType::Value(Value::<Fp>::unknown())
} else {
ValType::Constant(Fp::ONE)
};
let output_scales = self.graph.get_output_scales()?;
let res = outputs
.iter()
.enumerate()
.map(|(i, output)| {
let mut comparator: ValTensor<Fp> = (0..output.len())
.map(|_| {
if !self.visibility.output.is_fixed() {
ValType::Value(Value::<Fp>::unknown())
} else {
ValType::Constant(Fp::random(&mut rand::thread_rng()))
}
})
.collect::<Vec<_>>()
.into();
comparator.reshape(output.dims())?;
let mut tolerance = run_args.tolerance;
tolerance.scale = scale_to_multiplier(output_scales[i]).into();
let mut comparator: ValTensor<Fp> =
vec![default_value.clone(); output.dims().iter().product::<usize>()].into();
comparator.reshape(output.dims())?;
dummy_config.layout(
&mut region,
&[output.clone(), comparator],
@@ -1432,7 +1421,7 @@ impl Model {
res?;
} else if !self.visibility.output.is_private() {
for output in &outputs {
region.increment_total_constants(output.num_constants());
region.update_constants(output.create_constants_map());
}
}
@@ -1441,14 +1430,7 @@ impl Model {
// Then number of columns in the circuits
#[cfg(not(target_arch = "wasm32"))]
debug!(
"{} {} {} (coord={}, constants={})",
"model uses".blue(),
region.row().to_string().blue(),
"rows".blue(),
region.linear_coord().to_string().yellow(),
region.total_constants().to_string().red()
);
region.debug_report();
let outputs = outputs
.iter()

View File

@@ -2,6 +2,7 @@ use crate::circuit::modules::polycommit::{PolyCommitChip, PolyCommitConfig};
use crate::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use crate::circuit::modules::poseidon::{PoseidonChip, PoseidonConfig};
use crate::circuit::modules::Module;
use crate::circuit::region::ConstantsMap;
use crate::tensor::{Tensor, ValTensor};
use halo2_proofs::circuit::Layouter;
use halo2_proofs::plonk::{Column, ConstraintSystem, Error, Instance, VerifyingKey};
@@ -211,12 +212,13 @@ impl GraphModules {
layouter: &mut impl Layouter<Fp>,
x: &mut Vec<ValTensor<Fp>>,
instance_offset: &mut usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<(), Error> {
// reserve module 0 for ... modules
// hash the input and replace the constrained cells in the input
let cloned_x = (*x).clone();
x[0] = module
.layout(layouter, &cloned_x, instance_offset.to_owned())
.layout(layouter, &cloned_x, instance_offset.to_owned(), constants)
.unwrap();
for inc in module.instance_increment_input().iter() {
// increment the instance offset to make way for future module layouts
@@ -234,6 +236,7 @@ impl GraphModules {
values: &mut [ValTensor<Fp>],
element_visibility: &Visibility,
instance_offset: &mut usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<(), Error> {
if element_visibility.is_polycommit() && !values.is_empty() {
// concat values and sk to get the inputs
@@ -248,7 +251,7 @@ impl GraphModules {
layouter
.assign_region(|| format!("_enter_module_{}", module_offset), |_| Ok(()))
.unwrap();
Self::layout_module(&chip, layouter, x, instance_offset).unwrap();
Self::layout_module(&chip, layouter, x, instance_offset, constants).unwrap();
// increment the current index
self.polycommit_idx += 1;
});
@@ -270,7 +273,7 @@ impl GraphModules {
let mut inputs = values.iter_mut().map(|x| vec![x.clone()]).collect_vec();
// layout the module
inputs.iter_mut().for_each(|x| {
Self::layout_module(&chip, layouter, x, instance_offset).unwrap();
Self::layout_module(&chip, layouter, x, instance_offset, constants).unwrap();
});
// replace the inputs with the outputs
values.iter_mut().enumerate().for_each(|(i, x)| {

View File

@@ -248,6 +248,8 @@ pub fn new_op_from_onnx(
symbol_values: &SymbolValues,
rebase_frac_zero_constants: bool,
) -> Result<(SupportedOp, Vec<usize>), Box<dyn std::error::Error>> {
use tract_onnx::tract_core::ops::array::Trilu;
use crate::circuit::InputType;
let input_scales = inputs
@@ -363,6 +365,26 @@ pub fn new_op_from_onnx(
SupportedOp::Constant(c)
}
"Trilu" => {
let op = load_op::<Trilu>(node.op(), idx, node.op().name().to_string())?;
let upper = op.upper;
// assert second input is a constant
let diagonal = if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
let raw_values = &c.raw_values;
if raw_values.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "trilu".to_string())));
}
raw_values[0] as i32
} else {
return Err("we only support constant inputs for trilu diagonal".into());
};
SupportedOp::Linear(PolyOp::Trilu { upper, k: diagonal })
}
"Gather" => {
if inputs.len() != 2 {
return Err(Box::new(GraphError::InvalidDims(idx, "gather".to_string())));
@@ -839,6 +861,9 @@ pub fn new_op_from_onnx(
}
"Abs" => SupportedOp::Nonlinear(LookupOp::Abs),
"Neg" => SupportedOp::Linear(PolyOp::Neg),
"HardSwish" => SupportedOp::Nonlinear(LookupOp::HardSwish {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Sigmoid" => SupportedOp::Nonlinear(LookupOp::Sigmoid {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
@@ -1047,8 +1072,12 @@ pub fn new_op_from_onnx(
}
};
let in_scale = inputs[0].out_scales()[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
SupportedOp::Hybrid(HybridOp::Softmax {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
input_scale: scale_to_multiplier(in_scale).into(),
output_scale: scale_to_multiplier(max_scale).into(),
axes: softmax_op.axes.to_vec(),
})
}
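The rebase keys the softmax output scale to the larger of the input scale and the global maximum scale. A quick check of the multiplier arithmetic, assuming the default base-2 scaling where `scale_to_multiplier(s) = 2^s`:

// assumption: base-2 fixed-point scales, as in the crate's default config
fn scale_to_multiplier(scale: i32) -> f64 {
    2f64.powi(scale)
}

fn main() {
    let in_scale = 7;
    let global_max = 13;
    let max_scale = std::cmp::max(global_max, in_scale);
    assert_eq!(scale_to_multiplier(in_scale), 128.0); // input multiplier
    assert_eq!(scale_to_multiplier(max_scale), 8192.0); // output multiplier
}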

View File

@@ -346,7 +346,7 @@ pub struct ModelVars<F: PrimeField + TensorType + PartialOrd> {
pub instance: Option<ValTensor<F>>,
}
impl<F: PrimeField + TensorType + PartialOrd> ModelVars<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
/// Get instance col
pub fn get_instance_col(&self) -> Option<&Column<Instance>> {
if let Some(instance) = &self.instance {

View File

@@ -23,7 +23,6 @@
)]
// we allow this for our dynamic range based indexing scheme
#![allow(clippy::single_range_in_vec_init)]
#![feature(round_ties_even)]
//! A library for turning computational graphs, such as neural networks, into ZK-circuits.
//!

View File

@@ -378,7 +378,7 @@ impl<F: PrimeField + Clone + TensorType + PartialOrd> From<Tensor<AssignedCell<A
{
fn from(value: Tensor<AssignedCell<Assigned<F>, F>>) -> Tensor<Value<F>> {
let mut output = Vec::new();
for (_, x) in value.iter().enumerate() {
for x in value.iter() {
output.push(x.value_field().evaluate());
}
Tensor::new(Some(&output), value.dims()).unwrap()
@@ -434,6 +434,18 @@ impl<F: PrimeField + TensorType + Clone> From<Tensor<i128>> for Tensor<Value<F>>
}
}
impl<T: Clone + TensorType + std::marker::Send + std::marker::Sync>
maybe_rayon::iter::FromParallelIterator<T> for Tensor<T>
{
fn from_par_iter<I>(par_iter: I) -> Self
where
I: maybe_rayon::iter::IntoParallelIterator<Item = T>,
{
let inner: Vec<T> = par_iter.into_par_iter().collect();
Tensor::new(Some(&inner), &[inner.len()]).unwrap()
}
}
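The new impl lets a parallel iterator collect directly into a flat `Tensor`. A usage sketch, assuming `maybe_rayon` resolves to rayon and is available as a dependency:

use ezkl::tensor::Tensor;
use maybe_rayon::iter::{IntoParallelIterator, ParallelIterator};

fn main() {
    // collect the squares of 0..8 in parallel into a tensor of shape [8]
    let t: Tensor<i128> = (0..8i64).into_par_iter().map(|x| (x * x) as i128).collect();
    assert_eq!(t.dims(), &[8]);
}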
impl<T: Clone + TensorType + std::marker::Send + std::marker::Sync>
maybe_rayon::iter::IntoParallelIterator for Tensor<T>
{

View File

@@ -8,6 +8,142 @@ use maybe_rayon::{
use std::collections::{HashMap, HashSet};
pub use std::ops::{Add, Div, Mul, Neg, Sub};
/// Trilu operation.
/// # Arguments
/// * `a` - Tensor
/// * `k` - i32
/// * `upper` - Boolean
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::tensor::ops::trilu;
/// let a = Tensor::<i128>::new(
/// Some(&[1, 2, 3, 4, 5, 6]),
/// &[1, 3, 2],
/// ).unwrap();
/// let result = trilu(&a, 1, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[0, 2, 0, 0, 0, 0]), &[1, 3, 2]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 1, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 3, 4, 5, 6]), &[1, 3, 2]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 0, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 0, 4, 0, 0]), &[1, 3, 2]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 0, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 0, 3, 4, 5, 6]), &[1, 3, 2]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, -1, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 3, 4, 0, 6]), &[1, 3, 2]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, -1, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[0, 0, 3, 0, 5, 6]), &[1, 3, 2]).unwrap();
/// assert_eq!(result, expected);
///
/// let a = Tensor::<i128>::new(
/// Some(&[1, 2, 3, 4, 5, 6]),
/// &[1, 2, 3],
/// ).unwrap();
/// let result = trilu(&a, 1, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[0, 2, 3, 0, 0, 6]), &[1, 2, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 1, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 0, 4, 5, 6]), &[1, 2, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 0, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 3, 0, 5, 6]), &[1, 2, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 0, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 0, 0, 4, 5, 0]), &[1, 2, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, -1, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 3, 4, 5, 6]), &[1, 2, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, -1, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[0, 0, 0, 4, 0, 0]), &[1, 2, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let a = Tensor::<i128>::new(
/// Some(&[1, 2, 3, 4, 5, 6, 7, 8, 9]),
/// &[1, 3, 3],
/// ).unwrap();
/// let result = trilu(&a, 1, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[0, 2, 3, 0, 0, 6, 0, 0, 0]), &[1, 3, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 1, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 0, 4, 5, 6, 7, 8, 9]), &[1, 3, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 0, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 3, 0, 5, 6, 0, 0, 9]), &[1, 3, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, 0, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 0, 0, 4, 5, 0, 7, 8, 9]), &[1, 3, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, -1, true).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[1, 2, 3, 4, 5, 6, 0, 8, 9]), &[1, 3, 3]).unwrap();
/// assert_eq!(result, expected);
///
/// let result = trilu(&a, -1, false).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[0, 0, 0, 4, 0, 0, 7, 8, 0]), &[1, 3, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn trilu<T: TensorType + std::marker::Send + std::marker::Sync>(
a: &Tensor<T>,
k: i32,
upper: bool,
) -> Result<Tensor<T>, TensorError> {
let mut output = a.clone();
// Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s).
// The attribute “upper” determines whether the upper or lower part is retained.
// If set to true, the upper triangular matrix is retained. Lower triangular matrix is retained otherwise.
// Default value for the “upper” attribute is true. Trilu takes one input tensor of shape [*, N, M], where * is zero or more batch dimensions.
// The upper triangular part consists of the elements on and above the given diagonal (k).
// The lower triangular part consists of elements on and below the diagonal. All other elements in the matrix are set to zero.
let batch_dims = &a.dims()[0..a.dims().len() - 2];
let batch_cartesian = batch_dims.iter().map(|d| 0..*d).multi_cartesian_product();
for b in batch_cartesian {
for i in 0..a.dims()[1] {
for j in 0..a.dims()[2] {
let mut coord = b.clone();
coord.push(i);
coord.push(j);
// If k = 0, the triangular part on and above/below the main diagonal is retained.
if upper {
// If upper is set to true, a positive k retains the upper triangular matrix excluding the main diagonal and (k-1) diagonals above it.
if (j as i32) < (i as i32) + k {
output.set(&coord, T::zero().ok_or(TensorError::Unsupported)?);
}
} else {
// If upper is set to false, a positive k retains the lower triangular matrix including the main diagonal and k diagonals above it.
if (j as i32) > (i as i32) + k {
output.set(&coord, T::zero().ok_or(TensorError::Unsupported)?);
}
}
}
}
}
Ok(output)
}
/// IFF operation.
/// # Arguments
/// * `mask` - Tensor of 0s and 1s
@@ -1739,11 +1875,7 @@ pub fn topk<T: TensorType + PartialOrd>(
let mut indexed_a = a.clone();
indexed_a.flatten();
let mut indexed_a = a
.iter()
.enumerate()
.map(|(i, x)| (i, x))
.collect::<Vec<_>>();
let mut indexed_a = a.iter().enumerate().collect::<Vec<_>>();
if largest {
indexed_a.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap());
@@ -3256,6 +3388,47 @@ pub mod nonlinearities {
.unwrap()
}
/// Elementwise applies hardswish to a tensor of integers.
/// Hardswish is defined as:
/// Hardswish(x) = 0                 if x ≤ -3,
///                x                 if x ≥ +3,
///                x · (x + 3) / 6   otherwise
/// # Arguments
///
/// * `a` - Tensor
/// * `scale_input` - Single value
/// * `scale_output` - Single value
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::tensor::ops::nonlinearities::hardswish;
/// let x = Tensor::<i128>::new(
/// Some(&[-12, -3, 2, 1, 1, 15]),
/// &[2, 3],
/// ).unwrap();
/// let result = hardswish(&x, 1.0);
/// let expected = Tensor::<i128>::new(Some(&[0, 0, 2, 1, 1, 15]), &[2, 3]).unwrap();
///
/// assert_eq!(result, expected);
///
/// ```
pub fn hardswish(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let res = if kix <= -3.0 {
0.0
} else if kix >= 3.0 {
kix
} else {
kix * (kix + 3.0) / 6.0
};
let rounded = (res * scale_input).round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
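For a non-unit scale the fixed-point rounding becomes visible. A hand-computed check in the same style as the doctest above:

use ezkl::tensor::Tensor;
use ezkl::tensor::ops::nonlinearities::hardswish;

// at scale 128 the input 128 represents 1.0;
// hardswish(1.0) = 1.0 * (1.0 + 3.0) / 6.0 ≈ 0.6667, rescaled: 85.33 -> 85
let x = Tensor::<i128>::new(Some(&[128]), &[1]).unwrap();
let result = hardswish(&x, 128.0);
let expected = Tensor::<i128>::new(Some(&[85]), &[1]).unwrap();
assert_eq!(result, expected);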
/// Elementwise applies exponential to a tensor of integers.
/// # Arguments
///
@@ -3355,12 +3528,17 @@ pub mod nonlinearities {
}
/// softmax layout
pub fn softmax_axes(a: &Tensor<i128>, scale: f64, axes: &[usize]) -> Tensor<i128> {
pub fn softmax_axes(
a: &Tensor<i128>,
input_scale: f64,
output_scale: f64,
axes: &[usize],
) -> Tensor<i128> {
// forward both scales so each slice matches the in-circuit softmax steps
let dims = a.dims();
if dims.len() == 1 {
return softmax(a, scale);
return softmax(a, input_scale, output_scale);
}
let cartesian_coord = dims[..dims.len() - 1]
@@ -3383,7 +3561,7 @@ pub mod nonlinearities {
let softmax_input = a.get_slice(&sum_dims).unwrap();
let res = softmax(&softmax_input, scale);
let res = softmax(&softmax_input, input_scale, output_scale);
outputs.push(res);
}
@@ -3410,20 +3588,25 @@ pub mod nonlinearities {
/// Some(&[2, 2, 3, 2, 2, 0]),
/// &[2, 3],
/// ).unwrap();
/// let result = softmax(&x, 128.0);
/// let result = softmax(&x, 128.0, 128.0 * 128.0);
/// // doubles the scale of the input
/// let expected = Tensor::<i128>::new(Some(&[2730, 2730, 2751, 2730, 2730, 2688]), &[2, 3]).unwrap();
/// let expected = Tensor::<i128>::new(Some(&[2734, 2734, 2755, 2734, 2734, 2692]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn softmax(a: &Tensor<i128>, scale: f64) -> Tensor<i128> {
pub fn softmax(a: &Tensor<i128>, input_scale: f64, output_scale: f64) -> Tensor<i128> {
// the more accurate calculation is commented out; we implement it as below so that it matches the steps in the in-circuit layout
let exp = exp(a, scale);
let exp = exp(a, input_scale);
let sum = sum(&exp).unwrap();
let inv_denom = recip(&sum, scale, scale);
(exp * inv_denom).unwrap()
let inv_denom = recip(&sum, input_scale, output_scale);
let mut res = (exp * inv_denom).unwrap();
res = res
.iter()
.map(|x| ((*x as f64) / input_scale).round() as i128)
.collect();
res.reshape(a.dims()).unwrap();
res
}
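Tracing the fixed-point scales through the rewritten function (a reading of the code above, with s_in = input_scale and s_out = output_scale):

// exp(a, s_in)             -> e^(a_i / s_in), fixed-point at scale s_in
// sum(&exp)                -> still at scale s_in
// recip(&sum, s_in, s_out) -> 1 / (sum / s_in), fixed-point at scale s_out
// exp * inv_denom          -> at scale s_in * s_out
// / s_in (final map)       -> result back at scale s_out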
/// Applies range_check_percent

View File

@@ -1,8 +1,10 @@
use crate::circuit::region::ConstantsMap;
use super::{
ops::{intercalate_values, pad, resize},
*,
};
use halo2_proofs::{arithmetic::Field, plonk::Instance};
use halo2_proofs::{arithmetic::Field, circuit::Cell, plonk::Instance};
pub(crate) fn create_constant_tensor<
F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
@@ -51,6 +53,24 @@ pub enum ValType<F: PrimeField + TensorType + std::marker::Send + std::marker::S
}
impl<F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd> ValType<F> {
/// Returns the inner cell of the [ValType].
pub fn cell(&self) -> Option<Cell> {
match self {
ValType::PrevAssigned(cell) => Some(cell.cell()),
ValType::AssignedConstant(cell, _) => Some(cell.cell()),
_ => None,
}
}
/// Returns the assigned cell of the [ValType].
pub fn assigned_cell(&self) -> Option<AssignedCell<F, F>> {
match self {
ValType::PrevAssigned(cell) => Some(cell.clone()),
ValType::AssignedConstant(cell, _) => Some(cell.clone()),
_ => None,
}
}
/// Returns true if the value is previously assigned.
pub fn is_prev_assigned(&self) -> bool {
matches!(
@@ -293,7 +313,7 @@ impl<F: PrimeField + TensorType + PartialOrd> From<Tensor<AssignedCell<F, F>>> f
}
}
impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
/// Allocate a new [ValTensor::Instance] from the ConstraintSystem with the given tensor `dims`, optionally enabling `equality`.
pub fn new_instance(
cs: &mut ConstraintSystem<F>,
@@ -428,10 +448,40 @@ impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
}
/// Returns an iterator over the constants in the [ValTensor].
pub fn num_constants(&self) -> usize {
pub fn create_constants_map_iterator(
&self,
) -> core::iter::FilterMap<
core::slice::Iter<'_, ValType<F>>,
fn(&ValType<F>) -> Option<(F, ValType<F>)>,
> {
match self {
ValTensor::Value { inner, .. } => inner.iter().filter(|x| x.is_constant()).count(),
ValTensor::Instance { .. } => 0,
ValTensor::Value { inner, .. } => inner.iter().filter_map(|x| {
if let ValType::Constant(v) = x {
Some((*v, x.clone()))
} else {
None
}
}),
ValTensor::Instance { .. } => {
unreachable!("Instance tensors do not have constants")
}
}
}
/// Returns a map of the constants in the [ValTensor].
pub fn create_constants_map(&self) -> ConstantsMap<F> {
match self {
ValTensor::Value { inner, .. } => inner
.par_iter()
.filter_map(|x| {
if let ValType::Constant(v) = x {
Some((*v, x.clone()))
} else {
None
}
})
.collect(),
ValTensor::Instance { .. } => ConstantsMap::new(),
}
}

View File

@@ -1,8 +1,8 @@
use std::collections::HashSet;
use log::{error, warn};
use log::{debug, error, warn};
use crate::circuit::CheckMode;
use crate::circuit::{region::ConstantsMap, CheckMode};
use super::*;
/// A wrapper around Halo2's `Column<Fixed>` or `Column<Advice>`.
@@ -104,7 +104,7 @@ impl VarTensor {
let mut advices = vec![];
if modulo > 1 {
warn!("using column duplication for {} advice blocks", modulo - 1);
debug!("using column duplication for {} advice blocks", modulo - 1);
}
for _ in 0..modulo {
@@ -150,7 +150,7 @@ impl VarTensor {
modulo = (num_constants + modulo) / max_rows + 1;
if modulo > 1 {
warn!("using column duplication for {} fixed columns", modulo - 1);
debug!("using column duplication for {} fixed columns", modulo - 1);
}
for _ in 0..modulo {
@@ -289,9 +289,10 @@ impl VarTensor {
&self,
region: &mut Region<F>,
offset: usize,
coord: usize,
constant: F,
) -> Result<AssignedCell<F, F>, halo2_proofs::plonk::Error> {
let (x, y, z) = self.cartesian_coord(offset);
let (x, y, z) = self.cartesian_coord(offset + coord);
match &self {
VarTensor::Advice { inner: advices, .. } => {
region.assign_advice_from_constant(|| "constant", advices[x][y], z, constant)
@@ -304,33 +305,28 @@ impl VarTensor {
}
/// Assigns [ValTensor] to the columns of the inner tensor.
pub fn assign_with_omissions<F: PrimeField + TensorType + PartialOrd>(
pub fn assign_with_omissions<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&self,
region: &mut Region<F>,
offset: usize,
values: &ValTensor<F>,
omissions: &HashSet<&usize>,
constants: &mut ConstantsMap<F>,
) -> Result<ValTensor<F>, halo2_proofs::plonk::Error> {
let mut assigned_coord = 0;
let mut res: ValTensor<F> = match values {
ValTensor::Instance { .. } => {
unimplemented!("cannot assign instance to advice columns with omissions")
}
ValTensor::Value { inner: v, .. } => Ok::<_, halo2_proofs::plonk::Error>(
ValTensor::Value { inner: v, .. } => Ok::<ValTensor<F>, halo2_proofs::plonk::Error>(
v.enum_map(|coord, k| {
if omissions.contains(&coord) {
return Ok(k);
return Ok::<_, halo2_proofs::plonk::Error>(k);
}
let cell = self.assign_value(region, offset, k.clone(), assigned_coord)?;
let cell =
self.assign_value(region, offset, k.clone(), assigned_coord, constants)?;
assigned_coord += 1;
match k {
ValType::Constant(f) => Ok::<ValType<F>, halo2_proofs::plonk::Error>(
ValType::AssignedConstant(cell, f),
),
ValType::AssignedConstant(_, f) => Ok(ValType::AssignedConstant(cell, f)),
_ => Ok(ValType::PrevAssigned(cell)),
}
Ok::<_, halo2_proofs::plonk::Error>(cell)
})?
.into(),
),
@@ -340,11 +336,12 @@ impl VarTensor {
}
/// Assigns [ValTensor] to the columns of the inner tensor.
pub fn assign<F: PrimeField + TensorType + PartialOrd>(
pub fn assign<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&self,
region: &mut Region<F>,
offset: usize,
values: &ValTensor<F>,
constants: &mut ConstantsMap<F>,
) -> Result<ValTensor<F>, halo2_proofs::plonk::Error> {
let mut res: ValTensor<F> = match values {
ValTensor::Instance {
@@ -382,14 +379,7 @@ impl VarTensor {
},
ValTensor::Value { inner: v, .. } => Ok(v
.enum_map(|coord, k| {
let cell = self.assign_value(region, offset, k.clone(), coord)?;
match k {
ValType::Constant(f) => Ok::<ValType<F>, halo2_proofs::plonk::Error>(
ValType::AssignedConstant(cell, f),
),
ValType::AssignedConstant(_, f) => Ok(ValType::AssignedConstant(cell, f)),
_ => Ok(ValType::PrevAssigned(cell)),
}
self.assign_value(region, offset, k.clone(), coord, constants)
})?
.into()),
}?;
@@ -399,13 +389,16 @@ impl VarTensor {
/// Assigns specific values (`ValTensor`) to the columns of the inner tensor but allows for column wrapping for accumulated operations.
/// Duplication occurs by copying the last cell of the column to the first cell next column and creating a copy constraint between the two.
pub fn dummy_assign_with_duplication<F: PrimeField + TensorType + PartialOrd>(
pub fn dummy_assign_with_duplication<
F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
>(
&self,
row: usize,
offset: usize,
values: &ValTensor<F>,
single_inner_col: bool,
) -> Result<(ValTensor<F>, usize, usize), halo2_proofs::plonk::Error> {
constants: &mut ConstantsMap<F>,
) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
match values {
ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
ValTensor::Value { inner: v, dims , ..} => {
@@ -430,21 +423,24 @@ impl VarTensor {
// duplicates every nth element to adjust for column overflow
let mut res: ValTensor<F> = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap().into();
let constants_map = res.create_constants_map();
constants.extend(constants_map);
let total_used_len = res.len();
let total_constants = res.num_constants();
res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
res.reshape(dims).unwrap();
res.set_scale(values.scale());
Ok((res, total_used_len, total_constants))
Ok((res, total_used_len))
}
}
}
/// Assigns specific values (`ValTensor`) to the columns of the inner tensor but allows for column wrapping for accumulated operations.
/// Duplication occurs by copying the last cell of the column to the first cell next column and creating a copy constraint between the two.
pub fn assign_with_duplication<F: PrimeField + TensorType + PartialOrd>(
pub fn assign_with_duplication<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&self,
region: &mut Region<F>,
row: usize,
@@ -452,7 +448,8 @@ impl VarTensor {
values: &ValTensor<F>,
check_mode: &CheckMode,
single_inner_col: bool,
) -> Result<(ValTensor<F>, usize, usize), halo2_proofs::plonk::Error> {
constants: &mut ConstantsMap<F>,
) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
let mut prev_cell = None;
match values {
@@ -494,7 +491,7 @@ impl VarTensor {
assert_eq!(Into::<i32>::into(k.clone()), Into::<i32>::into(v[coord - 1].clone()));
};
let cell = self.assign_value(region, offset, k.clone(), coord * step)?;
let cell = self.assign_value(region, offset, k.clone(), coord * step, constants)?;
if single_inner_col {
if z == 0 {
@@ -502,28 +499,23 @@ impl VarTensor {
prev_cell = Some(cell.clone());
} else if coord > 0 && z == 0 && single_inner_col {
if let Some(prev_cell) = prev_cell.as_ref() {
region.constrain_equal(prev_cell.cell(),cell.cell())?;
let cell = cell.cell().ok_or({
error!("Error getting cell: {:?}", (x,y));
halo2_proofs::plonk::Error::Synthesis})?;
let prev_cell = prev_cell.cell().ok_or({
error!("Error getting cell: {:?}", (x,y));
halo2_proofs::plonk::Error::Synthesis})?;
region.constrain_equal(prev_cell,cell)?;
} else {
error!("Error copy-constraining previous value: {:?}", (x,y));
return Err(halo2_proofs::plonk::Error::Synthesis);
}
}}
match k {
ValType::Constant(f) => {
Ok(ValType::AssignedConstant(cell, f))
},
ValType::AssignedConstant(_, f) => {
Ok(ValType::AssignedConstant(cell, f))
},
_ => {
Ok(ValType::PrevAssigned(cell))
}
}
Ok(cell)
})?.into()};
let total_used_len = res.len();
let total_constants = res.num_constants();
res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
res.reshape(dims).unwrap();
@@ -542,42 +534,61 @@ impl VarTensor {
)};
}
Ok((res, total_used_len, total_constants))
Ok((res, total_used_len))
}
}
}
fn assign_value<F: PrimeField + TensorType + PartialOrd>(
fn assign_value<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&self,
region: &mut Region<F>,
offset: usize,
k: ValType<F>,
coord: usize,
) -> Result<AssignedCell<F, F>, halo2_proofs::plonk::Error> {
constants: &mut ConstantsMap<F>,
) -> Result<ValType<F>, halo2_proofs::plonk::Error> {
let (x, y, z) = self.cartesian_coord(offset + coord);
match k {
let res = match k {
ValType::Value(v) => match &self {
VarTensor::Advice { inner: advices, .. } => {
region.assign_advice(|| "k", advices[x][y], z, || v)
ValType::PrevAssigned(region.assign_advice(|| "k", advices[x][y], z, || v)?)
}
_ => unimplemented!(),
},
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => match &self {
ValType::PrevAssigned(v) => match &self {
VarTensor::Advice { inner: advices, .. } => {
v.copy_advice(|| "k", region, advices[x][y], z)
ValType::PrevAssigned(v.copy_advice(|| "k", region, advices[x][y], z)?)
}
_ => {
error!("PrevAssigned is only supported for advice columns");
Err(halo2_proofs::plonk::Error::Synthesis)
_ => unimplemented!(),
},
ValType::AssignedConstant(v, val) => match &self {
VarTensor::Advice { inner: advices, .. } => {
ValType::AssignedConstant(v.copy_advice(|| "k", region, advices[x][y], z)?, val)
}
_ => unimplemented!(),
},
ValType::AssignedValue(v) => match &self {
VarTensor::Advice { inner: advices, .. } => region
.assign_advice(|| "k", advices[x][y], z, || v)
.map(|a| a.evaluate()),
VarTensor::Advice { inner: advices, .. } => ValType::PrevAssigned(
region
.assign_advice(|| "k", advices[x][y], z, || v)?
.evaluate(),
),
_ => unimplemented!(),
},
ValType::Constant(v) => self.assign_constant(region, offset + coord, v),
}
ValType::Constant(v) => {
if let std::collections::hash_map::Entry::Vacant(e) = constants.entry(v) {
let value = ValType::AssignedConstant(
self.assign_constant(region, offset, coord, v)?,
v,
);
e.insert(value.clone());
value
} else {
let cell = constants.get(&v).unwrap();
self.assign_value(region, offset, cell.clone(), coord, constants)?
}
}
};
Ok(res)
}
}
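The `Entry::Vacant` branch is a memoization pattern: each distinct constant is laid out once, and every later occurrence reuses the cached cell (copy-constraining to it) instead of assigning a fresh fixed cell. A minimal standalone sketch of the idea, with the Halo2 types stripped out:

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum Val {
    AssignedConstant(usize, u64), // (cell id, constant value)
}

fn assign_constant(v: u64, constants: &mut HashMap<u64, Val>, next_cell: &mut usize) -> Val {
    if let Some(prev) = constants.get(&v) {
        // already assigned once: reuse the cached cell rather than
        // burning a new fixed cell for the same value
        prev.clone()
    } else {
        let assigned = Val::AssignedConstant(*next_cell, v);
        *next_cell += 1;
        constants.insert(v, assigned.clone());
        assigned
    }
}

fn main() {
    let (mut constants, mut next_cell) = (HashMap::new(), 0);
    let a = assign_constant(7, &mut constants, &mut next_cell);
    let b = assign_constant(7, &mut constants, &mut next_cell);
    assert_eq!(a, b); // the second occurrence reuses cell 0
    assert_eq!(next_cell, 1);
}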

View File

@@ -200,7 +200,7 @@ mod native_tests {
"1l_tiny_div",
];
const TESTS: [&str; 79] = [
const TESTS: [&str; 91] = [
"1l_mlp", //0
"1l_slice",
"1l_concat",
@@ -284,6 +284,18 @@ mod native_tests {
"bitshift",
"gather_nd",
"scatter_nd",
"celu",
"gru", // 80
"hard_swish", // 81
"hard_max",
"tril", // 83
"triu", // 84
"logsumexp", // 85
"clip",
"mish",
"reducel1",
"reducel2", // 89
"1l_lppool",
];
const WASM_TESTS: [&str; 46] = [
@@ -522,7 +534,7 @@ mod native_tests {
}
});
seq!(N in 0..=78 {
seq!(N in 0..=90 {
#(#[test_case(TESTS[N])])*
#[ignore]
@@ -778,7 +790,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -787,7 +799,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 3, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 3, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -796,7 +808,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 4, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 4, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -805,7 +817,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 8, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 8, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -814,7 +826,16 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn kzg_prove_and_verify_tight_lookup_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, false, "single", Commitments::KZG, 1);
test_dir.close().unwrap();
}
@@ -823,7 +844,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, false, "single", Commitments::IPA);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, false, "single", Commitments::IPA, 2);
test_dir.close().unwrap();
}
@@ -832,7 +853,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "public", "private", "public", 1, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "public", "private", "public", 1, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -841,7 +862,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "fixed", "public", 1, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "fixed", "public", 1, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -850,7 +871,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "hashed", 1, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "hashed", 1, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -859,7 +880,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "polycommit", 1, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "polycommit", 1, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -868,7 +889,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "polycommit", 1, None, false, "single", Commitments::IPA);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "polycommit", 1, None, false, "single", Commitments::IPA, 2);
test_dir.close().unwrap();
}
@@ -883,7 +904,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
env_logger::init();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, true, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, true, "single", Commitments::KZG, 2);
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testWasm", false);
// test_dir.close().unwrap();
@@ -896,7 +917,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
env_logger::init();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "safe", "private", "fixed", "public", 1, None, true, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "safe", "private", "fixed", "public", 1, None, true, "single", Commitments::KZG, 2);
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testWasm", false);
test_dir.close().unwrap();
@@ -912,7 +933,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
prove_and_verify(path, test.to_string(), "unsafe", "private", "fixed", "public", 1, None, false, "single", Commitments::KZG);
prove_and_verify(path, test.to_string(), "unsafe", "private", "fixed", "public", 1, None, false, "single", Commitments::KZG, 2);
test_dir.close().unwrap();
}
@@ -1326,6 +1347,7 @@ mod native_tests {
false,
&mut tolerance,
Commitments::KZG,
2,
);
if tolerance > 0.0 {
@@ -1466,6 +1488,7 @@ mod native_tests {
div_rebasing: bool,
tolerance: &mut f32,
commitment: Commitments,
lookup_safety_margin: usize,
) {
let mut args = vec![
"gen-settings".to_string(),
@@ -1506,6 +1529,7 @@ mod native_tests {
test_dir, example_name
),
format!("--target={}", cal_target),
format!("--lookup-safety-margin={}", lookup_safety_margin),
];
if let Some(scales) = scales_to_use {
@@ -1601,6 +1625,7 @@ mod native_tests {
div_rebasing,
&mut 0.0,
Commitments::KZG,
2,
);
println!(
@@ -1654,6 +1679,7 @@ mod native_tests {
false,
"for-aggr",
Commitments::KZG,
2,
);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
@@ -1688,6 +1714,7 @@ mod native_tests {
false,
"for-aggr",
Commitments::KZG,
2,
);
download_srs(23, commitment);
@@ -1849,6 +1876,7 @@ mod native_tests {
overflow: bool,
proof_type: &str,
commitment: Commitments,
lookup_safety_margin: usize,
) {
let target_str = if overflow {
"resources/col-overflow"
@@ -1869,6 +1897,7 @@ mod native_tests {
false,
&mut 0.0,
commitment,
lookup_safety_margin,
);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
@@ -1981,6 +2010,7 @@ mod native_tests {
false,
"single",
Commitments::KZG,
2,
);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
@@ -2075,6 +2105,7 @@ mod native_tests {
false,
"single",
Commitments::KZG,
2,
);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
@@ -2223,6 +2254,7 @@ mod native_tests {
false,
&mut 0.0,
Commitments::KZG,
2,
);
let model_path = format!("{}/{}/network.compiled", test_dir, example_name);