Compare commits


1 Commit

Author               SHA1        Message                             Date
github-actions[bot]  94abf91400  ci: update version string in docs   2024-06-14 16:32:09 +00:00
83 changed files with 3317 additions and 5226 deletions

View File

@@ -22,18 +22,15 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- name: Install binaryen
run: |
set -e

View File

@@ -11,7 +11,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: nanoGPT Mock

View File

@@ -40,7 +40,7 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
@@ -86,7 +86,7 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy

View File

@@ -45,7 +45,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Checkout repo
@@ -106,27 +106,27 @@ jobs:
include:
- build: windows-msvc
os: windows-latest
rust: nightly-2024-07-18
rust: nightly-2024-02-06
target: x86_64-pc-windows-msvc
- build: macos
os: macos-13
rust: nightly-2024-07-18
rust: nightly-2024-02-06
target: x86_64-apple-darwin
- build: macos-aarch64
os: macos-13
rust: nightly-2024-07-18
rust: nightly-2024-02-06
target: aarch64-apple-darwin
- build: linux-musl
os: ubuntu-22.04
rust: nightly-2024-07-18
rust: nightly-2024-02-06
target: x86_64-unknown-linux-musl
- build: linux-gnu
os: ubuntu-22.04
rust: nightly-2024-07-18
rust: nightly-2024-02-06
target: x86_64-unknown-linux-gnu
- build: linux-aarch64
os: ubuntu-22.04
rust: nightly-2024-07-18
rust: nightly-2024-02-06
target: aarch64-unknown-linux-gnu
steps:
@@ -181,14 +181,9 @@ jobs:
echo "target flag is: ${{ env.TARGET_FLAGS }}"
echo "target dir is: ${{ env.TARGET_DIR }}"
- name: Build release binary (no asm)
if: matrix.build != 'linux-gnu'
- name: Build release binary
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry
- name: Build release binary (asm)
if: matrix.build == 'linux-gnu'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features asm
- name: Strip release binary
if: matrix.build != 'windows-msvc' && matrix.build != 'linux-aarch64'
run: strip "target/${{ matrix.target }}/release/ezkl"

View File

@@ -26,7 +26,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Build
@@ -38,7 +38,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Docs
@@ -50,7 +50,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -65,40 +65,40 @@ jobs:
- name: Library tests (original lookup)
run: cargo nextest run --lib --verbose --no-default-features --features ezkl
# ultra-overflow-tests-gpu:
# runs-on: GPU
# env:
# ENABLE_ICICLE_GPU: true
# steps:
# - uses: actions/checkout@v4
# - uses: actions-rs/toolchain@v1
# with:
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - uses: baptiste0928/cargo-install@v1
# with:
# crate: cargo-nextest
# locked: true
# - uses: mwilliamson/setup-wasmtime-action@v2
# with:
# wasmtime-version: "3.0.1"
# - name: Install wasm32-wasi
# run: rustup target add wasm32-wasi
# - name: Install cargo-wasi
# run: cargo install cargo-wasi
# # - name: Matmul overflow (wasi)
# # run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# # - name: Conv overflow (wasi)
# # run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
# - name: lookup overflow
# run: cargo nextest run lookup_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Matmul overflow
# run: RUST_LOG=debug cargo nextest run matmul_col_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Conv overflow
# run: RUST_LOG=debug cargo nextest run conv_col_ultra_overflow --no-capture --features icicle -- --include-ignored
# - name: Conv + relu overflow
# run: cargo nextest run conv_relu_col_ultra_overflow --no-capture --features icicle -- --include-ignored
ultra-overflow-tests-gpu:
runs-on: GPU
env:
ENABLE_ICICLE_GPU: true
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- uses: mwilliamson/setup-wasmtime-action@v2
with:
wasmtime-version: "3.0.1"
- name: Install wasm32-wasi
run: rustup target add wasm32-wasi
- name: Install cargo-wasi
run: cargo install cargo-wasi
# - name: Matmul overflow (wasi)
# run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# - name: Conv overflow (wasi)
# run: cargo wasi test conv_col_ultra_overflow -- --include-ignored --nocapture
- name: lookup overflow
run: cargo nextest run --release lookup_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Matmul overflow
run: RUST_LOG=debug cargo nextest run matmul_col_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Conv overflow
run: RUST_LOG=debug cargo nextest run conv_col_ultra_overflow --no-capture --features icicle -- --include-ignored
- name: Conv + relu overflow
run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture --features icicle -- --include-ignored
ultra-overflow-tests_og-lookup:
runs-on: non-gpu
@@ -106,7 +106,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -139,7 +139,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -172,7 +172,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -184,24 +184,22 @@ jobs:
wasm32-tests:
runs-on: ubuntu-latest
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- uses: nanasess/setup-chromedriver@v2
# with:
# chromedriver-version: "115.0.5790.102"
- name: Install wasm32-unknown-unknown
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- name: Run wasm verifier tests
# on mac:
# AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
@@ -214,7 +212,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -231,19 +229,17 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
# - name: The Worm Mock
# run: cargo nextest run --release --verbose tests::large_mock_::large_tests_5_expects -- --include-ignored
- name: public outputs and tolerance > 0
run: cargo nextest run --release --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
- name: public outputs + batch size == 10
run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 16
run: cargo nextest run --release --verbose tests::mock_large_batch_public_outputs_ --test-threads 32
- name: kzg inputs
run: cargo nextest run --release --verbose tests::mock_kzg_input_::t --test-threads 32
- name: kzg params
@@ -262,8 +258,6 @@ jobs:
run: cargo nextest run --release --verbose tests::mock_hashed_input_::t --test-threads 32
- name: hashed params
run: cargo nextest run --release --verbose tests::mock_hashed_params_::t --test-threads 32
- name: hashed params public inputs
run: cargo nextest run --release --verbose tests::mock_hashed_params_public_inputs_::t --test-threads 32
- name: hashed outputs
run: cargo nextest run --release --verbose tests::mock_hashed_output_::t --test-threads 32
- name: hashed inputs + params + outputs
@@ -292,7 +286,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -310,7 +304,7 @@ jobs:
node-version: "18.12.1"
cache: "pnpm"
- name: "Add rust-src"
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- name: Install dependencies for js tests and in-browser-evm-verifier package
run: |
pnpm install --frozen-lockfile
@@ -329,12 +323,12 @@ jobs:
cd in-browser-evm-verifier
pnpm build:commonjs
cd ..
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: KZG prove and verify tests (EVM + reusable verifier + col-overflow)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_reusable_verifier --test-threads 1
- name: KZG prove and verify tests (EVM + VK rendered seperately)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_prove_and_verify_render_seperately_ --test-threads 1
- name: KZG prove and verify tests (EVM + kzg all)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_kzg_all_prove_and_verify --test-threads 1
- name: KZG prove and verify tests (EVM + kzg inputs)
@@ -371,18 +365,15 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@v0.4.0
with:
# Pin to version 0.12.1
version: 'v0.12.1'
- name: Add wasm32-unknown-unknown target
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- name: Use pnpm 8
uses: pnpm/action-setup@v2
@@ -440,40 +431,40 @@ jobs:
- name: KZG prove and verify tests (hashed outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed
# prove-and-verify-tests-gpu:
# runs-on: GPU
# env:
# ENABLE_ICICLE_GPU: true
# steps:
# - uses: actions/checkout@v4
# - uses: actions-rs/toolchain@v1
# with:
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - name: Add rust-src
# run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
# - uses: actions/checkout@v3
# - uses: baptiste0928/cargo-install@v1
# with:
# crate: cargo-nextest
# locked: true
# - name: KZG prove and verify tests (kzg outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + fixed params + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# - name: KZG prove and verify tests (public outputs + column overflow)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
# - name: KZG prove and verify tests (public inputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
# - name: KZG prove and verify tests (fixed params)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
# - name: KZG prove and verify tests (hashed outputs)
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
prove-and-verify-tests-gpu:
runs-on: GPU
env:
ENABLE_ICICLE_GPU: true
steps:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2024-02-06-x86_64-unknown-linux-gnu
- uses: actions/checkout@v3
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
- name: KZG prove and verify tests (kzg outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + fixed params + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
- name: KZG prove and verify tests (public outputs + column overflow)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
- name: KZG prove and verify tests (public inputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
- name: KZG prove and verify tests (fixed params)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
- name: KZG prove and verify tests (hashed outputs)
run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
prove-and-verify-mock-aggr-tests:
runs-on: self-hosted
@@ -482,7 +473,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -500,7 +491,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -508,7 +499,7 @@ jobs:
crate: cargo-nextest
locked: true
- name: KZG tests
run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
prove-and-verify-aggr-tests:
runs-on: large-self-hosted
@@ -517,7 +508,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -534,17 +525,17 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: KZG prove and verify aggr tests
run: cargo nextest run --release --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored
@@ -555,7 +546,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -575,17 +566,17 @@ jobs:
python-version: "3.12"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- name: Install cmake
run: sudo apt-get install -y cmake
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Setup Virtual Env and Install python dependencies
run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt;
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: Build python ezkl
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
- name: Run pytest
@@ -601,7 +592,7 @@ jobs:
python-version: "3.12"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -650,17 +641,17 @@ jobs:
python-version: "3.11"
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly-2024-07-18
toolchain: nightly-2024-02-06
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
with:
crate: cargo-nextest
locked: true
# - name: Install solc
# run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install solc
run: (hash svm 2>/dev/null || cargo install svm-rs) && svm install 0.8.20 && solc --version
- name: Install Anvil
run: cargo install --git https://github.com/foundry-rs/foundry --rev 62cdea8ff9e6efef011f77e295823b5f2dbeb3a1 --locked anvil --force
run: cargo install --git https://github.com/foundry-rs/foundry --rev c2233ec9fe61e0920c61c6d779bc707252852037 --profile local --locked anvil --force
- name: Install pip
run: python -m ensurepip --upgrade
- name: Setup Virtual Env and Install python dependencies
@@ -686,5 +677,3 @@ jobs:
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
# - name: Reusable verifier tutorial
# run: source .env/bin/activate; cargo nextest run py_tests::tests::reusable_

Cargo.lock (generated, 764 lines changed)

File diff suppressed because it is too large.

View File

@@ -16,19 +16,19 @@ crate-type = ["cdylib", "rlib"]
[dependencies]
halo2_gadgets = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "b753a832e92d5c86c5c997327a9cf9de86a18851", features = [
halo2_proofs = { git = "https://github.com/zkonduit/halo2", branch = "ac/optional-selector-poly" }
halo2curves = { git = "https://github.com/privacy-scaling-explorations/halo2curves", rev = "9fff22c", features = [
"derive_serde",
] }
halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/cache-lookup-commitments#8b13a0d2a7a34d8daab010dadb2c47dfa47d37d0", package = "halo2_proofs", branch = "ac/cache-lookup-commitments" }
rand = { version = "0.8", default_features = false }
itertools = { version = "0.10.3", default_features = false }
clap = { version = "4.5.3", features = ["derive"] }
clap_complete = "4.5.2"
serde = { version = "1.0.126", features = ["derive"], optional = true }
serde_json = { version = "1.0.97", default_features = false, features = [
"float_roundtrip",
"raw_value",
], optional = true }
clap_complete = "4.5.2"
log = { version = "0.4.17", default_features = false, optional = true }
thiserror = { version = "1.0.38", default_features = false }
hex = { version = "0.4.3", default_features = false }
@@ -36,26 +36,21 @@ halo2_wrong_ecc = { git = "https://github.com/zkonduit/halo2wrong", branch = "ac
snark-verifier = { git = "https://github.com/zkonduit/snark-verifier", branch = "ac/chunked-mv-lookup", features = [
"derive_serde",
] }
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch = "ac/update-h2-curves" }
halo2_solidity_verifier = { git = "https://github.com/alexander-camuto/halo2-solidity-verifier", branch = "main" }
maybe-rayon = { version = "0.1.1", default_features = false }
bincode = { version = "1.3.3", default_features = false }
ark-std = { version = "^0.3.0", default-features = false }
unzip-n = "0.1.2"
num = "0.4.1"
portable-atomic = "1.6.0"
tosubcommand = { git = "https://github.com/zkonduit/enum_to_subcommand", package = "tosubcommand" }
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
semver = "1.0.22"
# evm related deps
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = [
"provider-http",
"signers",
"contract",
"rpc-types-eth",
"signer-wallet",
"node-bindings",
] }
foundry-compilers = { version = "0.4.1", features = ["svm-solc"] }
alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev="5fbf57bac99edef9d8475190109a7ea9fb7e5e83", features = ["provider-http", "signers", "contract", "rpc-types-eth", "signer-wallet", "node-bindings"] }
foundry-compilers = {version = "0.4.1", features = ["svm-solc"]}
ethabi = "18"
indicatif = { version = "0.17.5", features = ["rayon"] }
gag = { version = "1.0.0", default_features = false }
@@ -68,29 +63,31 @@ reqwest = { version = "0.12.4", default-features = false, features = [
openssl = { version = "0.10.55", features = ["vendored"] }
tokio-postgres = "0.7.10"
pg_bigdecimal = "0.1.5"
futures-util = "0.3.30"
lazy_static = "1.4.0"
colored_json = { version = "3.0.1", default_features = false, optional = true }
plotters = { version = "0.3.0", default_features = false, optional = true }
regex = { version = "1", default_features = false }
tokio = { version = "1.35.0", default_features = false, features = [
tokio = { version = "1.35", default_features = false, features = [
"macros",
"rt-multi-thread",
"rt-multi-thread"
] }
tokio-util = { version = "0.7.9", features = ["codec"] }
pyo3 = { version = "0.21.2", features = [
"extension-module",
"abi3-py37",
"macros",
], default_features = false, optional = true }
pyo3-asyncio = { git = "https://github.com/jopemachine/pyo3-asyncio/", branch = "migration-pyo3-0.21", features = [
pyo3-asyncio = { git = "https://github.com/jopemachine/pyo3-asyncio/", branch="migration-pyo3-0.21", features = [
"attributes",
"tokio-runtime",
], default_features = false, optional = true }
pyo3-log = { version = "0.10.0", default_features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "40c64319291184814d9fea5fdf4fa16f5a4f7116", default_features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "05ebf550aa9922b221af4635c21a67a8d2af12a9", default_features = false, optional = true }
tabled = { version = "0.12.0", optional = true }
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
objc = { version = "0.2.4", optional = true }
mimalloc = "0.1"
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies]
colored = { version = "2.0.0", default_features = false, optional = true }
@@ -98,7 +95,6 @@ env_logger = { version = "0.10.0", default_features = false, optional = true }
chrono = "0.4.31"
sha256 = "1.4.0"
[target.'cfg(target_arch = "wasm32")'.dependencies]
getrandom = { version = "0.2.8", features = ["js"] }
instant = { version = "0.1", features = ["wasm-bindgen", "inaccurate"] }
@@ -112,10 +108,8 @@ console_error_panic_hook = "0.1.7"
wasm-bindgen-console-logger = "0.1.1"
[target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dev-dependencies]
criterion = { version = "0.5.1", features = ["html_reports"] }
[dev-dependencies]
criterion = { version = "0.3", features = ["html_reports"] }
tempfile = "3.3.0"
lazy_static = "1.4.0"
mnist = "0.5"
@@ -169,10 +163,6 @@ harness = false
name = "relu"
harness = false
[[bench]]
name = "relu_lookupless"
harness = false
[[bench]]
name = "accum_matmul_relu"
harness = false
@@ -190,13 +180,7 @@ required-features = ["ezkl"]
[features]
web = ["wasm-bindgen-rayon"]
default = [
"ezkl",
"mv-lookup",
"precompute-coset",
"no-banner",
"parallel-poly-read",
]
default = ["ezkl", "mv-lookup", "no-banner"]
onnx = ["dep:tract-onnx"]
python-bindings = ["pyo3", "pyo3-log", "pyo3-asyncio"]
ezkl = [
@@ -210,30 +194,25 @@ ezkl = [
"colored_json",
"halo2_proofs/circuit-params",
]
parallel-poly-read = ["halo2_proofs/parallel-poly-read"]
mv-lookup = [
"halo2_proofs/mv-lookup",
"snark-verifier/mv-lookup",
"halo2_solidity_verifier/mv-lookup",
]
asm = ["halo2curves/asm", "halo2_proofs/asm"]
precompute-coset = ["halo2_proofs/precompute-coset"]
det-prove = []
icicle = ["halo2_proofs/icicle_gpu"]
empty-cmd = []
no-banner = []
no-update = []
metal = ["dep:metal", "dep:objc"]
# icicle patch to 0.1.0 if feature icicle is enabled
[patch.'https://github.com/ingonyama-zk/icicle']
icicle = { git = "https://github.com/ingonyama-zk/icicle?rev=45b00fb", package = "icicle", branch = "fix/vhnat/ezkl-build-fix" }
[patch.'https://github.com/zkonduit/halo2']
halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/cache-lookup-commitments#8b13a0d2a7a34d8daab010dadb2c47dfa47d37d0", package = "halo2_proofs", branch = "ac/cache-lookup-commitments" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2?branch=ac/optional-selector-poly#54f54453cf186aa5d89579c4e7663f9a27cfb89a", package = "halo2_proofs", branch = "ac/optional-selector-poly" }
[profile.release]
rustflags = ["-C", "relocation-model=pic"]
lto = "fat"
codegen-units = 1
# panic = "abort"

View File

@@ -64,7 +64,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = region::RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -72,7 +72,6 @@ impl Circuit<Fr> for MyCircuit {
Box::new(PolyOp::Conv {
padding: vec![(0, 0)],
stride: vec![1; 2],
group: 1,
}),
)
.unwrap();

View File

@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = region::RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,

View File

@@ -57,7 +57,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = region::RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,

View File

@@ -57,15 +57,7 @@ impl Circuit<Fr> for MyCircuit {
// sets up a new relu table
base_config
.configure_lookup(
cs,
&b,
&output,
&a,
BITS,
K,
&LookupOp::LeakyReLU { slope: 0.0.into() },
)
.configure_lookup(cs, &b, &output, &a, BITS, K, &LookupOp::ReLU)
.unwrap();
MyConfig { base_config }
@@ -83,18 +75,14 @@ impl Circuit<Fr> for MyCircuit {
let op = PolyOp::Einsum {
equation: "ij,jk->ik".to_string(),
};
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = region::RegionCtx::new(region, 0, 1);
let output = config
.base_config
.layout(&mut region, &self.inputs, Box::new(op))
.unwrap();
let _output = config
.base_config
.layout(
&mut region,
&[output.unwrap()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
.unwrap();
Ok(())
},

View File

@@ -58,15 +58,7 @@ impl Circuit<Fr> for MyCircuit {
// sets up a new relu table
base_config
.configure_lookup(
cs,
&b,
&output,
&a,
BITS,
k,
&LookupOp::LeakyReLU { slope: 0.0.into() },
)
.configure_lookup(cs, &b, &output, &a, BITS, k, &LookupOp::ReLU)
.unwrap();
MyConfig { base_config }
@@ -84,18 +76,14 @@ impl Circuit<Fr> for MyCircuit {
let op = PolyOp::Einsum {
equation: "ij,jk->ik".to_string(),
};
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = region::RegionCtx::new(region, 0, 1);
let output = config
.base_config
.layout(&mut region, &self.inputs, Box::new(op))
.unwrap();
let _output = config
.base_config
.layout(
&mut region,
&[output.unwrap()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
.unwrap();
Ok(())
},

View File

@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = region::RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,

View File

@@ -59,7 +59,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = region::RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,

View File

@@ -55,7 +55,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = region::RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = region::RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &self.inputs, Box::new(PolyOp::Add))
.unwrap();

View File

@@ -56,7 +56,7 @@ impl Circuit<Fr> for MyCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &self.inputs, Box::new(PolyOp::Pow(4)))
.unwrap();

View File

@@ -2,7 +2,6 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Through
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::table::Range;
use ezkl::circuit::{ops::lookup::LookupOp, BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
@@ -42,7 +41,7 @@ impl Circuit<Fr> for NLCircuit {
.map(|_| VarTensor::new_advice(cs, K, 1, LEN))
.collect::<Vec<_>>();
let nl = LookupOp::LeakyReLU { slope: 0.0.into() };
let nl = LookupOp::ReLU;
let mut config = Config::default();
@@ -63,13 +62,9 @@ impl Circuit<Fr> for NLCircuit {
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
&[self.input.clone()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.layout(&mut region, &[self.input.clone()], Box::new(LookupOp::ReLU))
.unwrap();
Ok(())
},
@@ -89,7 +84,7 @@ fn runrelu(c: &mut Criterion) {
};
let input: Tensor<Value<Fr>> =
Tensor::<IntegerRep>::from((0..len).map(|_| rng.gen_range(0..10))).into();
Tensor::<i32>::from((0..len).map(|_| rng.gen_range(0..10))).into();
let circuit = NLCircuit {
input: ValTensor::from(input.clone()),

View File

@@ -1,143 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use ezkl::circuit::poly::PolyOp;
use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{BaseConfig as Config, CheckMode};
use ezkl::fieldutils::IntegerRep;
use ezkl::pfsys::create_proof_circuit;
use ezkl::pfsys::TranscriptType;
use ezkl::pfsys::{create_keys, srs::gen_srs};
use ezkl::tensor::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::multiopen::{ProverSHPLONK, VerifierSHPLONK};
use halo2_proofs::poly::kzg::strategy::SingleStrategy;
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
plonk::{Circuit, ConstraintSystem, Error},
};
use halo2curves::bn256::{Bn256, Fr};
use rand::Rng;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
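// LEN is a mutable global so `configure`, which receives no `&self`, can read the
// per-iteration input length; every access is wrapped in `unsafe` below.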
static mut LEN: usize = 4;
const K: usize = 16;
#[derive(Clone)]
struct NLCircuit {
pub input: ValTensor<Fr>,
}
impl Circuit<Fr> for NLCircuit {
type Config = Config<Fr>;
type FloorPlanner = SimpleFloorPlanner;
type Params = ();
fn without_witnesses(&self) -> Self {
self.clone()
}
fn configure(cs: &mut ConstraintSystem<Fr>) -> Self::Config {
unsafe {
let advices = (0..3)
.map(|_| VarTensor::new_advice(cs, K, 1, LEN))
.collect::<Vec<_>>();
let mut config = Config::default();
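// Two range checks stand in for the lookup table in this lookupless ReLU bench:
// a sign check over (-1, 1) and a magnitude check over (0, 1023).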
config
.configure_range_check(cs, &advices[0], &advices[1], (-1, 1), K)
.unwrap();
config
.configure_range_check(cs, &advices[0], &advices[1], (0, 1023), K)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K, LEN, false);
config
}
}
fn synthesize(
&self,
mut config: Self::Config,
mut layouter: impl Layouter<Fr>, // layouter is our 'write buffer' for the circuit
) -> Result<(), Error> {
config.layout_range_checks(&mut layouter).unwrap();
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
config
.layout(&mut region, &[self.input.clone()], Box::new(PolyOp::ReLU))
.unwrap();
Ok(())
},
)?;
Ok(())
}
}
fn runrelu(c: &mut Criterion) {
let mut group = c.benchmark_group("relu");
let mut rng = rand::thread_rng();
let params = gen_srs::<KZGCommitmentScheme<_>>(17);
for &len in [4, 8].iter() {
unsafe {
LEN = len;
};
let input: Tensor<Value<Fr>> =
Tensor::<IntegerRep>::from((0..len).map(|_| rng.gen_range(0..10))).into();
let circuit = NLCircuit {
input: ValTensor::from(input.clone()),
};
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| {
b.iter(|| {
create_keys::<KZGCommitmentScheme<Bn256>, NLCircuit>(&circuit, &params, true)
.unwrap();
});
});
let pk =
create_keys::<KZGCommitmentScheme<Bn256>, NLCircuit>(&circuit, &params, true).unwrap();
group.throughput(Throughput::Elements(len as u64));
group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| {
b.iter(|| {
let prover = create_proof_circuit::<
KZGCommitmentScheme<_>,
NLCircuit,
ProverSHPLONK<_>,
VerifierSHPLONK<_>,
SingleStrategy<_>,
_,
EvmTranscript<_, _, _, _>,
EvmTranscript<_, _, _, _>,
>(
circuit.clone(),
vec![],
&params,
&pk,
CheckMode::UNSAFE,
ezkl::Commitments::KZG,
TranscriptType::EVM,
None,
None,
);
prover.unwrap();
});
});
}
group.finish();
}
criterion_group! {
name = benches;
config = Criterion::default().with_plots();
targets = runrelu
}
criterion_main!(benches);

View File

@@ -1,4 +1,4 @@
ezkl==14.1.0
ezkl==11.4.2
sphinx
sphinx-rtd-theme
sphinxcontrib-napoleon

View File

@@ -1,7 +1,7 @@
import ezkl
project = 'ezkl'
release = '14.1.0'
release = '11.4.2'
version = release

View File

@@ -2,7 +2,8 @@ use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{
ops::lookup::LookupOp, ops::poly::PolyOp, BaseConfig as PolyConfig, CheckMode,
};
use ezkl::fieldutils::{self, integer_rep_to_felt, IntegerRep};
use ezkl::fieldutils;
use ezkl::fieldutils::i32_to_felt;
use ezkl::tensor::*;
use halo2_proofs::dev::MockProver;
use halo2_proofs::poly::commitment::Params;
@@ -41,8 +42,8 @@ const NUM_INNER_COLS: usize = 1;
struct Config<
const LEN: usize, //LEN = CHOUT x OH x OW flattened //not supported yet in rust stable
const CLASSES: usize,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -65,8 +66,8 @@ struct Config<
struct MyCircuit<
const LEN: usize, //LEN = CHOUT x OH x OW flattened
const CLASSES: usize,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -89,8 +90,8 @@ struct MyCircuit<
impl<
const LEN: usize,
const CLASSES: usize,
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
// Convolution
const KERNEL_HEIGHT: usize,
const KERNEL_WIDTH: usize,
@@ -163,7 +164,7 @@ where
&params,
(LOOKUP_MIN, LOOKUP_MAX),
K,
&LookupOp::LeakyReLU { slope: 0.0.into() },
&LookupOp::ReLU,
)
.unwrap();
@@ -199,12 +200,11 @@ where
.assign_region(
|| "mlp_4d",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 1024, 2);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
let op = PolyOp::Conv {
padding: vec![(PADDING, PADDING); 2],
stride: vec![STRIDE; 2],
group: 1,
};
let x = config
.layer_config
@@ -221,11 +221,7 @@ where
let x = config
.layer_config
.layout(
&mut region,
&[x.unwrap()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.layout(&mut region, &[x.unwrap()], Box::new(LookupOp::ReLU))
.unwrap();
let mut x = config
@@ -319,11 +315,7 @@ pub fn runconv() {
.test_set_length(10_000)
.finalize();
let mut train_data = Tensor::from(
trn_img
.iter()
.map(|x| integer_rep_to_felt::<F>(*x as IntegerRep / 16)),
);
let mut train_data = Tensor::from(trn_img.iter().map(|x| i32_to_felt::<F>(*x as i32 / 16)));
train_data.reshape(&[50_000, 28, 28]).unwrap();
let mut train_labels = Tensor::from(trn_lbl.iter().map(|x| *x as f32));
@@ -351,8 +343,8 @@ pub fn runconv() {
.map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
}),
);
@@ -363,8 +355,7 @@ pub fn runconv() {
let l0_kernels = l0_kernels.try_into().unwrap();
let mut l0_bias =
Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::integer_rep_to_felt(0)));
let mut l0_bias = Tensor::<F>::from((0..OUT_CHANNELS).map(|_| fieldutils::i32_to_felt(0)));
l0_bias.set_visibility(&ezkl::graph::Visibility::Private);
let l0_bias = l0_bias.try_into().unwrap();
@@ -372,8 +363,8 @@ pub fn runconv() {
let mut l2_biases = Tensor::<F>::from(myparams.biases.into_iter().map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
}));
l2_biases.set_visibility(&ezkl::graph::Visibility::Private);
l2_biases.reshape(&[l2_biases.len(), 1]).unwrap();
@@ -383,8 +374,8 @@ pub fn runconv() {
let mut l2_weights = Tensor::<F>::from(myparams.weights.into_iter().flatten().map(|fl| {
let dx = fl * 32_f32;
let rounded = dx.round();
let integral: IntegerRep = unsafe { rounded.to_int_unchecked() };
fieldutils::integer_rep_to_felt(integral)
let integral: i32 = unsafe { rounded.to_int_unchecked() };
fieldutils::i32_to_felt(integral)
}));
l2_weights.set_visibility(&ezkl::graph::Visibility::Private);
l2_weights.reshape(&[CLASSES, LEN]).unwrap();
@@ -410,13 +401,13 @@ pub fn runconv() {
l2_params: [l2_weights, l2_biases],
};
let public_input: Tensor<IntegerRep> = vec![
-25124, -19304, -16668, -4399, -6209, -4548, -2317, -8349, -6117, -23461,
let public_input: Tensor<i32> = vec![
-25124i32, -19304, -16668, -4399, -6209, -4548, -2317, -8349, -6117, -23461,
]
.into_iter()
.into();
let pi_inner: Tensor<F> = public_input.map(integer_rep_to_felt::<F>);
let pi_inner: Tensor<F> = public_input.map(i32_to_felt::<F>);
println!("MOCK PROVING");
let now = Instant::now();

View File

@@ -2,7 +2,7 @@ use ezkl::circuit::region::RegionCtx;
use ezkl::circuit::{
ops::lookup::LookupOp, ops::poly::PolyOp, BaseConfig as PolyConfig, CheckMode,
};
use ezkl::fieldutils::{integer_rep_to_felt, IntegerRep};
use ezkl::fieldutils::i32_to_felt;
use ezkl::tensor::*;
use halo2_proofs::dev::MockProver;
use halo2_proofs::{
@@ -23,8 +23,8 @@ struct MyConfig {
#[derive(Clone)]
struct MyCircuit<
const LEN: usize, //LEN = CHOUT x OH x OW flattened
const LOOKUP_MIN: IntegerRep,
const LOOKUP_MAX: IntegerRep,
const LOOKUP_MIN: i64,
const LOOKUP_MAX: i64,
> {
// Given the stateless MyConfig type information, a DNN trace is determined by its input and the parameters of its layers.
// Computing the trace still requires a forward pass. The intermediate activations are stored only by the layouter.
@@ -34,7 +34,7 @@ struct MyCircuit<
_marker: PhantomData<F>,
}
impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRep> Circuit<F>
impl<const LEN: usize, const LOOKUP_MIN: i64, const LOOKUP_MAX: i64> Circuit<F>
for MyCircuit<LEN, LOOKUP_MIN, LOOKUP_MAX>
{
type Config = MyConfig;
@@ -69,7 +69,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
&params,
(LOOKUP_MIN, LOOKUP_MAX),
K,
&LookupOp::LeakyReLU { slope: 0.0.into() },
&LookupOp::ReLU,
)
.unwrap();
@@ -108,7 +108,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
.assign_region(
|| "mlp_4d",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = RegionCtx::new(region, 0, 1);
let x = config
.layer_config
.layout(
@@ -141,11 +141,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
println!("x shape: {:?}", x.dims());
let mut x = config
.layer_config
.layout(
&mut region,
&[x],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.layout(&mut region, &[x], Box::new(LookupOp::ReLU))
.unwrap()
.unwrap();
println!("3");
@@ -181,11 +177,7 @@ impl<const LEN: usize, const LOOKUP_MIN: IntegerRep, const LOOKUP_MAX: IntegerRe
println!("x shape: {:?}", x.dims());
let x = config
.layer_config
.layout(
&mut region,
&[x],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.layout(&mut region, &[x], Box::new(LookupOp::ReLU))
.unwrap();
println!("6");
println!("offset: {}", region.row());
@@ -223,33 +215,33 @@ pub fn runmlp() {
#[cfg(not(target_arch = "wasm32"))]
env_logger::init();
// parameters
let mut l0_kernel: Tensor<F> = Tensor::<IntegerRep>::new(
let mut l0_kernel: Tensor<F> = Tensor::<i32>::new(
Some(&[10, 0, 0, -1, 0, 10, 1, 0, 0, 1, 10, 0, 1, 0, 0, 10]),
&[4, 4],
)
.unwrap()
.map(integer_rep_to_felt);
.map(i32_to_felt);
l0_kernel.set_visibility(&ezkl::graph::Visibility::Private);
let mut l0_bias: Tensor<F> = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1]), &[4, 1])
let mut l0_bias: Tensor<F> = Tensor::<i32>::new(Some(&[0, 0, 0, 1]), &[4, 1])
.unwrap()
.map(integer_rep_to_felt);
.map(i32_to_felt);
l0_bias.set_visibility(&ezkl::graph::Visibility::Private);
let mut l2_kernel: Tensor<F> = Tensor::<IntegerRep>::new(
let mut l2_kernel: Tensor<F> = Tensor::<i32>::new(
Some(&[0, 3, 10, -1, 0, 10, 1, 0, 0, 1, 0, 12, 1, -2, 32, 0]),
&[4, 4],
)
.unwrap()
.map(integer_rep_to_felt);
.map(i32_to_felt);
l2_kernel.set_visibility(&ezkl::graph::Visibility::Private);
// input data, with 1 padding to allow for bias
let input: Tensor<Value<F>> = Tensor::<IntegerRep>::new(Some(&[-30, -21, 11, 40]), &[4, 1])
let input: Tensor<Value<F>> = Tensor::<i32>::new(Some(&[-30, -21, 11, 40]), &[4, 1])
.unwrap()
.into();
let mut l2_bias: Tensor<F> = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1]), &[4, 1])
let mut l2_bias: Tensor<F> = Tensor::<i32>::new(Some(&[0, 0, 0, 1]), &[4, 1])
.unwrap()
.map(integer_rep_to_felt);
.map(i32_to_felt);
l2_bias.set_visibility(&ezkl::graph::Visibility::Private);
let circuit = MyCircuit::<4, -8192, 8192> {
@@ -259,12 +251,12 @@ pub fn runmlp() {
_marker: PhantomData,
};
let public_input: Vec<IntegerRep> = unsafe {
let public_input: Vec<i32> = unsafe {
vec![
(531f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(103f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(4469f32 / 128f32).round().to_int_unchecked::<IntegerRep>(),
(2849f32 / 128f32).to_int_unchecked::<IntegerRep>(),
(531f32 / 128f32).round().to_int_unchecked::<i32>(),
(103f32 / 128f32).round().to_int_unchecked::<i32>(),
(4469f32 / 128f32).round().to_int_unchecked::<i32>(),
(2849f32 / 128f32).to_int_unchecked::<i32>(),
]
};
@@ -273,10 +265,7 @@ pub fn runmlp() {
let prover = MockProver::run(
K as u32,
&circuit,
vec![public_input
.iter()
.map(|x| integer_rep_to_felt::<F>(*x))
.collect()],
vec![public_input.iter().map(|x| i32_to_felt::<F>(*x)).collect()],
)
.unwrap();
prover.assert_satisfied();

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -232,7 +232,7 @@
"run_args.param_visibility = \"fixed\"\n",
"run_args.output_visibility = \"public\"\n",
"run_args.input_scale = 2\n",
"run_args.logrows = 15\n",
"run_args.logrows = 8\n",
"\n",
"ezkl.get_srs(logrows=run_args.logrows, commitment=ezkl.PyCommitments.KZG)"
]
@@ -404,7 +404,7 @@
"run_args.output_visibility = \"polycommit\"\n",
"run_args.variables = [(\"batch_size\", 1)]\n",
"run_args.input_scale = 2\n",
"run_args.logrows = 15\n"
"run_args.logrows = 8\n"
]
},
{
@@ -466,7 +466,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.5"
"version": "3.12.2"
},
"orig_nbformat": 4
},

View File

@@ -1,339 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Reusable Verifiers \n",
"\n",
"This notebook demonstrates how to create and reuse the same set of separated verifiers for different models. Specifically, we will use the same verifier for the following four models:\n",
"\n",
"- `1l_mlp sigmoid`\n",
"- `1l_mlp relu`\n",
"- `1l_conv sigmoid`\n",
"- `1l_conv relu`\n",
"\n",
"When deploying EZKL verifiers on the blockchain, each associated model typically requires its own unique verifier, leading to increased on-chain state usage. \n",
"However, with the reusable verifier, we can deploy a single verifier that can be used to verify proofs for any valid H2 circuit. This notebook shows how to do so. \n",
"\n",
"By reusing the same verifier across multiple models, we significantly reduce the amount of state bloat on the blockchain. Instead of deploying a unique verifier for each model, we deploy a unique and much smaller verifying key artifact (VKA) contract for each model while sharing a common separated verifier. The VKA contains the VK for the model as well circuit specific metadata that was otherwise hardcoded into the stack of the original non-reusable verifier."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.onnx\n",
"\n",
"# Define the models\n",
"class MLP_Sigmoid(nn.Module):\n",
" def __init__(self):\n",
" super(MLP_Sigmoid, self).__init__()\n",
" self.fc = nn.Linear(3, 3)\n",
" self.sigmoid = nn.Sigmoid()\n",
"\n",
" def forward(self, x):\n",
" x = self.fc(x)\n",
" x = self.sigmoid(x)\n",
" return x\n",
"\n",
"class MLP_Relu(nn.Module):\n",
" def __init__(self):\n",
" super(MLP_Relu, self).__init__()\n",
" self.fc = nn.Linear(3, 3)\n",
" self.relu = nn.ReLU()\n",
"\n",
" def forward(self, x):\n",
" x = self.fc(x)\n",
" x = self.relu(x)\n",
" return x\n",
"\n",
"class Conv_Sigmoid(nn.Module):\n",
" def __init__(self):\n",
" super(Conv_Sigmoid, self).__init__()\n",
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
" self.sigmoid = nn.Sigmoid()\n",
"\n",
" def forward(self, x):\n",
" x = self.conv(x)\n",
" x = self.sigmoid(x)\n",
" return x\n",
"\n",
"class Conv_Relu(nn.Module):\n",
" def __init__(self):\n",
" super(Conv_Relu, self).__init__()\n",
" self.conv = nn.Conv1d(1, 1, kernel_size=3, stride=1)\n",
" self.relu = nn.ReLU()\n",
"\n",
" def forward(self, x):\n",
" x = self.conv(x)\n",
" x = self.relu(x)\n",
" return x\n",
"\n",
"# Instantiate the models\n",
"mlp_sigmoid = MLP_Sigmoid()\n",
"mlp_relu = MLP_Relu()\n",
"conv_sigmoid = Conv_Sigmoid()\n",
"conv_relu = Conv_Relu()\n",
"\n",
"# Dummy input tensor for mlp\n",
"dummy_input_mlp = torch.tensor([[-1.5737053155899048, -1.708398461341858, 0.19544155895709991]])\n",
"input_mlp_path = 'mlp_input.json'\n",
"\n",
"# Dummy input tensor for conv\n",
"dummy_input_conv = torch.tensor([[[1.4124163389205933, 0.6938204169273376, 1.0664031505584717]]])\n",
"input_conv_path = 'conv_input.json'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"names = ['mlp_sigmoid', 'mlp_relu', 'conv_sigmoid', 'conv_relu']\n",
"models = [mlp_sigmoid, mlp_relu, conv_sigmoid, conv_relu]\n",
"inputs = [dummy_input_mlp, dummy_input_mlp, dummy_input_conv, dummy_input_conv]\n",
"input_paths = [input_mlp_path, input_mlp_path, input_conv_path, input_conv_path]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"import torch\n",
"import ezkl\n",
"\n",
"for name, model, x, input_path in zip(names, models, inputs, input_paths):\n",
" # Create a new directory for the model if it doesn't exist\n",
" if not os.path.exists(name):\n",
" os.mkdir(name)\n",
" # Store the paths in each of their respective directories\n",
" model_path = os.path.join(name, \"network.onnx\")\n",
" compiled_model_path = os.path.join(name, \"network.compiled\")\n",
" pk_path = os.path.join(name, \"test.pk\")\n",
" vk_path = os.path.join(name, \"test.vk\")\n",
" settings_path = os.path.join(name, \"settings.json\")\n",
"\n",
" witness_path = os.path.join(name, \"witness.json\")\n",
" sol_code_path = os.path.join(name, 'test.sol')\n",
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
" abi_path = os.path.join(name, 'test.abi')\n",
" proof_path = os.path.join(name, \"proof.json\")\n",
"\n",
" # Flips the neural net into inference mode\n",
" model.eval()\n",
"\n",
" # Export the model\n",
" torch.onnx.export(model, x, model_path, export_params=True, opset_version=10,\n",
" do_constant_folding=True, input_names=['input'],\n",
" output_names=['output'], dynamic_axes={'input': {0: 'batch_size'},\n",
" 'output': {0: 'batch_size'}})\n",
"\n",
" data_array = ((x).detach().numpy()).reshape([-1]).tolist()\n",
" data = dict(input_data=[data_array])\n",
" json.dump(data, open(input_path, 'w'))\n",
"\n",
" py_run_args = ezkl.PyRunArgs()\n",
" py_run_args.input_visibility = \"private\"\n",
" py_run_args.output_visibility = \"public\"\n",
" py_run_args.param_visibility = \"fixed\" # private by default\n",
"\n",
" res = ezkl.gen_settings(model_path, settings_path, py_run_args=py_run_args)\n",
" assert res == True\n",
"\n",
" await ezkl.calibrate_settings(input_path, model_path, settings_path, \"resources\")\n",
"\n",
" res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)\n",
" assert res == True\n",
"\n",
" res = await ezkl.get_srs(settings_path)\n",
" assert res == True\n",
"\n",
" # now generate the witness file\n",
" res = await ezkl.gen_witness(input_path, compiled_model_path, witness_path)\n",
" assert os.path.isfile(witness_path) == True\n",
"\n",
" # SETUP \n",
" # We recommend disabling selector compression for the setup as it decreases the size of the VK artifact\n",
" res = ezkl.setup(compiled_model_path, vk_path, pk_path, disable_selector_compression=True)\n",
" assert res == True\n",
" assert os.path.isfile(vk_path)\n",
" assert os.path.isfile(pk_path)\n",
" assert os.path.isfile(settings_path)\n",
"\n",
" # GENERATE A PROOF\n",
" res = ezkl.prove(witness_path, compiled_model_path, pk_path, proof_path, \"single\")\n",
" assert os.path.isfile(proof_path)\n",
"\n",
" res = await ezkl.create_evm_verifier(vk_path, settings_path, sol_code_path, abi_path, reusable=True)\n",
" assert res == True\n",
"\n",
" res = await ezkl.create_evm_vka(vk_path, settings_path, sol_key_code_path, abi_path)\n",
" assert res == True\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import subprocess\n",
"import time\n",
"\n",
"# make sure anvil is running locally\n",
"# $ anvil -p 3030\n",
"\n",
"RPC_URL = \"http://localhost:3030\"\n",
"\n",
"# Save process globally\n",
"anvil_process = None\n",
"\n",
"def start_anvil():\n",
" global anvil_process\n",
" if anvil_process is None:\n",
" anvil_process = subprocess.Popen([\"anvil\", \"-p\", \"3030\", \"--code-size-limit=41943040\"])\n",
" if anvil_process.returncode is not None:\n",
" raise Exception(\"failed to start anvil process\")\n",
" time.sleep(3)\n",
"\n",
"def stop_anvil():\n",
" global anvil_process\n",
" if anvil_process is not None:\n",
" anvil_process.terminate()\n",
" anvil_process = None\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Check that the generated verifiers are identical for all models."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"start_anvil()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import filecmp\n",
"\n",
"def compare_files(file1, file2):\n",
" return filecmp.cmp(file1, file2, shallow=False)\n",
"\n",
"sol_code_path_0 = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
"sol_code_path_1 = os.path.join(\"mlp_relu\", 'test.sol')\n",
"\n",
"sol_code_path_2 = os.path.join(\"conv_sigmoid\", 'test.sol')\n",
"sol_code_path_3 = os.path.join(\"conv_relu\", 'test.sol')\n",
"\n",
"\n",
"assert compare_files(sol_code_path_0, sol_code_path_1) == True\n",
"assert compare_files(sol_code_path_2, sol_code_path_3) == True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here we deploy separate verifier that will be shared by the four models. We picked the `1l_mlp sigmoid` model as an example but you could have used any of the generated verifiers since they are all identical. "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os \n",
"addr_path_verifier = \"addr_verifier.txt\"\n",
"sol_code_path = os.path.join(\"mlp_sigmoid\", 'test.sol')\n",
"\n",
"res = await ezkl.deploy_evm(\n",
" addr_path_verifier,\n",
" sol_code_path,\n",
" 'http://127.0.0.1:3030',\n",
" \"verifier/reusable\"\n",
")\n",
"\n",
"assert res == True\n",
"\n",
"with open(addr_path_verifier, 'r') as file:\n",
" addr = file.read().rstrip()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally we deploy each of the unique VK-artifacts and verify them using the shared verifier deployed in the previous step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for name in names:\n",
" addr_path_vk = \"addr_vk.txt\"\n",
" sol_key_code_path = os.path.join(name, 'test_key.sol')\n",
" res = await ezkl.deploy_evm(addr_path_vk, sol_key_code_path, 'http://127.0.0.1:3030', \"vka\")\n",
" assert res == True\n",
"\n",
" with open(addr_path_vk, 'r') as file:\n",
" addr_vk = file.read().rstrip()\n",
" \n",
" proof_path = os.path.join(name, \"proof.json\")\n",
" sol_code_path = os.path.join(name, 'vk.sol')\n",
" res = await ezkl.verify_evm(\n",
" addr,\n",
" proof_path,\n",
" \"http://127.0.0.1:3030\",\n",
" addr_vk = addr_vk\n",
" )\n",
" assert res == True"
]
},
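{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, shut down the local anvil node we started earlier by calling the `stop_anvil` helper defined above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# terminate the anvil subprocess started by start_anvil()\n",
"stop_anvil()"
]
}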
],
"metadata": {
"kernelspec": {
"display_name": ".env",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -39,7 +39,7 @@
"import json\n",
"import numpy as np\n",
"from sklearn.svm import SVC\n",
"from hummingbird.ml import convert\n",
"import sk2torch\n",
"import torch\n",
"import ezkl\n",
"import os\n",
@@ -59,11 +59,11 @@
"# Train an SVM on the data and wrap it in PyTorch.\n",
"sk_model = SVC(probability=True)\n",
"sk_model.fit(xs, ys)\n",
"model = convert(sk_model, \"torch\").model\n",
"model = sk2torch.wrap(sk_model)\n",
"\n",
"\n",
"\n",
"\n",
"model\n",
"\n"
]
},
@@ -84,6 +84,33 @@
"data_path = os.path.join('input.json')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7f0ca328",
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"# Create a coordinate grid to compute a vector field on.\n",
"spaced = np.linspace(-2, 2, num=25)\n",
"grid_xs = torch.tensor([[x, y] for x in spaced for y in spaced], requires_grad=True)\n",
"\n",
"\n",
"# Compute the gradients of the SVM output.\n",
"outputs = model.predict_proba(grid_xs)[:, 1]\n",
"(input_grads,) = torch.autograd.grad(outputs.sum(), (grid_xs,))\n",
"\n",
"\n",
"# Create a quiver plot of the vector field.\n",
"plt.quiver(\n",
" grid_xs[:, 0].detach().numpy(),\n",
" grid_xs[:, 1].detach().numpy(),\n",
" input_grads[:, 0].detach().numpy(),\n",
" input_grads[:, 1].detach().numpy(),\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
@@ -92,14 +119,14 @@
"outputs": [],
"source": [
"\n",
"spaced = np.linspace(-2, 2, num=25)\n",
"grid_xs = torch.tensor([[x, y] for x in spaced for y in spaced], requires_grad=True)\n",
"\n",
"# export to onnx format\n",
"# !!!!!!!!!!!!!!!!! This will flash a warning but it is fine !!!!!!!!!!!!!!!!!!!!!\n",
"\n",
"# Input to the model\n",
"shape = xs.shape[1:]\n",
"x = grid_xs[0:1]\n",
"torch_out = model.predict(x)\n",
"# Export the model\n",
"torch.onnx.export(model, # model being run\n",
" # model input (or a tuple for multiple inputs)\n",
@@ -116,7 +143,9 @@
"\n",
"d = ((x).detach().numpy()).reshape([-1]).tolist()\n",
"\n",
"data = dict(input_data=[d])\n",
"data = dict(input_shapes=[shape],\n",
" input_data=[d],\n",
" output_data=[o.reshape([-1]).tolist() for o in torch_out])\n",
"\n",
"# Serialize data into file:\n",
"json.dump(data, open(\"input.json\", 'w'))\n"
@@ -138,7 +167,6 @@
{
"cell_type": "code",
"execution_count": null,
"id": "0bee4d7f",
"metadata": {},
"outputs": [],
"source": [
@@ -192,7 +220,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 10,
"id": "b1c561a8",
"metadata": {},
"outputs": [],
@@ -413,9 +441,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
"version": "3.9.15"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -1 +0,0 @@
{"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":7,"param_scale":7,"scale_rebase_multiplier":10,"lookup_range":[0,0],"logrows":13,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private"},"num_constraints":5619,"total_const_size":513,"model_instance_shapes":[[1,3,10,10]],"model_output_scales":[14],"model_input_scales":[7],"module_sizes":{"kzg":[],"poseidon":[0,[0]],"elgamal":[0,[0]]},"required_lookups":[],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null}

View File

@@ -9,9 +9,7 @@ class MyModel(nn.Module):
super(MyModel, self).__init__()
def forward(self, w, x, y, z):
a = (x & y)
b = (y & (z ^ w))
return [a & b]
return [((x & y)) == (x & (y | (z ^ w)))]
circuit = MyModel()

View File

@@ -1 +1 @@
{"input_data": [[false, true, true], [false, true, true], [true, false, false], [false, true, true]]}
{"input_data": [[false, true, false], [true, false, false], [true, false, false], [false, false, false]]}

View File

@@ -1,17 +1,21 @@
[Unrenderable ONNX protobuf diff: the producer version string changes from pytorch2.2.2 to pytorch1.12.1, and the boolean graph is restructured around And/Xor/Or/Equal nodes; the remaining bytes are binary and carry no readable content.]

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -1 +0,0 @@
network.onnx filter=lfs diff=lfs merge=lfs -text

View File

@@ -1,47 +0,0 @@
## The worm
This is an ONNX file for a [WormVAE](https://github.com/TuragaLab/wormvae?tab=readme-ov-file) model, a VAE / latent-space representation of the C. elegans connectome.
The model "is a large-scale latent variable model with a very high-dimensional latent space
consisting of voltage dynamics of 300 neurons over 5 minutes of time at the simulation frequency
of 160 Hz. The generative model for these latent variables is described by stochastic differential
equations modeling the nonlinear dynamics of the network activity." (see [here](https://openreview.net/pdf?id=CJzi3dRlJE-)).
In effect, this is a generative model of a worm's voltage dynamics: given the previous connectome state, it can generate new worm-like activity.
Using ezkl you can create a zk circuit equivalent to the WormVAE model, allowing you to "prove" execution of the worm model. If you're feeling particularly adventurous, you can also use the circuit to generate new worm state that can be verified on chain.
To do so you'll first want to fetch the files using git-lfs (as the onnx file is too large to be stored in git).
```bash
git lfs fetch --all
```
You'll then want to use the usual ezkl loop to generate the zk circuit. We recommend using fixed visibility for the model parameters, as the model is quite large and this will prune the circuit significantly.
```bash
ezkl gen-settings --param-visibility=fixed
cp input.json calibration.json
ezkl calibrate-settings
ezkl compile-circuit
ezkl gen-witness
ezkl prove
```
You might also need to aggregate the proof to get it to fit on chain.
```bash
ezkl aggregate
```
You can then create a smart contract that verifies this aggregate proof
```bash
ezkl create-evm-verifier-aggr
```
This can then be deployed on the chain of your choice.
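For reference, here is a minimal deployment sketch using the ezkl Python bindings rather than the CLI (this assumes the async `deploy_evm` helper exposed by the `ezkl` Python package; the file names and local RPC URL below are placeholders, and `await` assumes an async context such as a Jupyter cell):
```python
import ezkl

# hypothetical artifact paths produced by the commands above
addr_path = "addr_verifier_aggr.txt"   # where the deployed address is written
sol_code_path = "verifier_aggr.sol"    # the aggregate verifier contract

# deploy to a local anvil node; swap the RPC URL for your target chain
res = await ezkl.deploy_evm(addr_path, sol_code_path, "http://127.0.0.1:3030")
assert res == True
```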
> Note: the model is large, so we recommend a machine with at least 512GB of RAM to run the above commands. If you're ever compute-constrained you can always use the lilith service to generate the zk circuit. Message us on discord or telegram for more details :)

File diff suppressed because one or more lines are too long

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f88c5901d3768ec21e3cf2f2840d255e84fa13c364df86b24d960cca3333769
size 82095882

View File

@@ -1 +0,0 @@
{"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":0,"param_scale":6,"scale_rebase_multiplier":1,"lookup_range":[-32768,32768],"logrows":17,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Fixed"},"num_constraints":367422820,"total_const_size":365577160,"model_instance_shapes":[[1,300,1200]],"model_output_scales":[6],"model_input_scales":[0,0,0],"module_sizes":{"kzg":[],"poseidon":[0,[0]],"elgamal":[0,[0]]},"required_lookups":[{"Div":{"denom":64.0}},"ReLU",{"Ln":{"scale":64.0}},{"Exp":{"scale":64.0}}],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null}

View File

@@ -9,6 +9,7 @@ import { EVM } from '@ethereumjs/evm'
import { buildTransaction, encodeDeployment } from './utils/tx-builder'
import { getAccountNonce, insertAccount } from './utils/account-utils'
import { encodeVerifierCalldata } from '../nodejs/ezkl';
import { error } from 'console'
async function deployContract(
vm: VM,
@@ -65,7 +66,7 @@ async function verify(
vkAddress = new Uint8Array(uint8Array.buffer);
// convert uint8array of length
console.error('vkAddress', vkAddress)
error('vkAddress', vkAddress)
}
const data = encodeVerifierCalldata(proof, vkAddress)

View File

@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2024-07-18"
channel = "nightly-2024-02-06"
components = ["rustfmt", "clippy"]

View File

@@ -1,7 +1,4 @@
// ignore file if compiling for wasm
#[global_allocator]
#[cfg(not(target_arch = "wasm32"))]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
#[cfg(not(target_arch = "wasm32"))]
use clap::{CommandFactory, Parser};

View File

@@ -22,7 +22,7 @@ use crate::{
table::{Range, RangeCheck, Table},
utils,
},
tensor::{Tensor, TensorType, ValTensor, VarTensor},
tensor::{IntoI64, Tensor, TensorType, ValTensor, VarTensor},
};
use std::{collections::BTreeMap, marker::PhantomData};
@@ -327,7 +327,7 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> BaseConfig<F> {
/// Returns a new [BaseConfig] with no inputs, no selectors, and no tables.
pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self {
Self {

View File

@@ -1,6 +1,6 @@
use std::convert::Infallible;
use crate::{fieldutils::IntegerRep, tensor::TensorError};
use crate::tensor::TensorError;
use halo2_proofs::plonk::Error as PlonkError;
use thiserror::Error;
@@ -57,7 +57,7 @@ pub enum CircuitError {
InvalidConversion(#[from] Infallible),
/// Invalid min/max lookup range
#[error("invalid min/max lookup range: min: {0}, max: {1}")]
InvalidMinMaxRange(IntegerRep, IntegerRep),
InvalidMinMaxRange(i64, i64),
/// Missing product in einsum
#[error("missing product in einsum")]
MissingEinsumProduct,
@@ -81,7 +81,7 @@ pub enum CircuitError {
MissingSelectors(String),
/// Table lookup error
#[error("value ({0}) out of range: ({1}, {2})")]
TableOOR(IntegerRep, IntegerRep, IntegerRep),
TableOOR(i64, i64, i64),
/// Lookup not configured
#[error("lookup not configured: {0}")]
LookupNotConfigured(String),
@@ -91,7 +91,4 @@ pub enum CircuitError {
/// Missing layout
#[error("missing layout for op: {0}")]
MissingLayout(String),
#[error("[io] {0}")]
/// IO error
IoError(#[from] std::io::Error),
}

View File

@@ -1,7 +1,7 @@
use super::*;
use crate::{
circuit::{layouts, utils, Tolerance},
fieldutils::integer_rep_to_felt,
fieldutils::i64_to_felt,
graph::multiplier_to_scale,
tensor::{self, Tensor, TensorType, ValTensor},
};
@@ -71,17 +71,12 @@ pub enum HybridOp {
},
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for HybridOp {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for HybridOp {
///
fn requires_homogenous_input_scales(&self) -> Vec<usize> {
match self {
HybridOp::Greater { .. }
| HybridOp::Less { .. }
| HybridOp::Equals { .. }
| HybridOp::GreaterEqual { .. }
| HybridOp::LessEqual { .. } => {
vec![0, 1]
}
HybridOp::Greater | HybridOp::Less | HybridOp::Equals => vec![0, 1],
HybridOp::GreaterEqual | HybridOp::LessEqual => vec![0, 1],
_ => vec![],
}
}
@@ -140,10 +135,10 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
)
}
HybridOp::RangeCheck(p) => format!("RANGECHECK (tol={:?})", p),
HybridOp::Greater => "GREATER".to_string(),
HybridOp::GreaterEqual => "GREATEREQUAL".to_string(),
HybridOp::Less => "LESS".to_string(),
HybridOp::LessEqual => "LESSEQUAL".to_string(),
HybridOp::Greater => "GREATER".into(),
HybridOp::GreaterEqual => "GREATEREQUAL".into(),
HybridOp::Less => "LESS".into(),
HybridOp::LessEqual => "LESSEQUAL".into(),
HybridOp::Equals => "EQUALS".into(),
HybridOp::Gather { dim, .. } => format!("GATHER (dim={})", dim),
HybridOp::TopK { k, dim, largest } => {
@@ -189,8 +184,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
config,
region,
values[..].try_into()?,
integer_rep_to_felt(input_scale.0 as i128),
integer_rep_to_felt(output_scale.0 as i128),
i64_to_felt(input_scale.0 as i64),
i64_to_felt(output_scale.0 as i64),
)?
} else {
layouts::nonlinearity(
@@ -214,7 +209,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
config,
region,
values[..].try_into()?,
integer_rep_to_felt(denom.0 as i128),
i64_to_felt(denom.0 as i64),
)?
} else {
layouts::nonlinearity(

File diff suppressed because it is too large

View File

@@ -3,9 +3,9 @@ use serde::{Deserialize, Serialize};
use crate::{
circuit::{layouts, table::Range, utils},
fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep},
fieldutils::{felt_to_i64, i64_to_felt},
graph::multiplier_to_scale,
tensor::{self, Tensor, TensorError, TensorType},
tensor::{self, IntoI64, Tensor, TensorError, TensorType},
};
use super::Op;
@@ -15,12 +15,14 @@ use halo2curves::ff::PrimeField;
/// An enum representing the operations that can be used to express more complex operations via accumulation
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize)]
pub enum LookupOp {
Abs,
Div {
denom: utils::F32,
},
Cast {
scale: utils::F32,
},
ReLU,
Max {
scale: utils::F32,
a: utils::F32,
@@ -102,6 +104,19 @@ pub enum LookupOp {
Erf {
scale: utils::F32,
},
GreaterThan {
a: utils::F32,
},
LessThan {
a: utils::F32,
},
GreaterThanEqual {
a: utils::F32,
},
LessThanEqual {
a: utils::F32,
},
Sign,
KroneckerDelta,
Pow {
scale: utils::F32,
@@ -116,165 +131,109 @@ impl LookupOp {
/// Returns the range of values that can be represented by the table
pub fn bit_range(max_len: usize) -> Range {
let range = (max_len - 1) as f64 / 2_f64;
let range = range as IntegerRep;
let range = range as i64;
(-range, range)
}
/// as path
pub fn as_path(&self) -> String {
match self {
LookupOp::Ceil { scale } => format!("ceil_{}", scale),
LookupOp::Floor { scale } => format!("floor_{}", scale),
LookupOp::Round { scale } => format!("round_{}", scale),
LookupOp::RoundHalfToEven { scale } => format!("round_half_to_even_{}", scale),
LookupOp::Pow { scale, a } => format!("pow_{}_{}", scale, a),
LookupOp::KroneckerDelta => "kronecker_delta".into(),
LookupOp::Max { scale, a } => format!("max_{}_{}", scale, a),
LookupOp::Min { scale, a } => format!("min_{}_{}", scale, a),
LookupOp::Div { denom } => format!("div_{}", denom),
LookupOp::Cast { scale } => format!("cast_{}", scale),
LookupOp::Recip {
input_scale,
output_scale,
} => format!("recip_{}_{}", input_scale, output_scale),
LookupOp::LeakyReLU { slope: a } => format!("leaky_relu_{}", a),
LookupOp::Sigmoid { scale } => format!("sigmoid_{}", scale),
LookupOp::Sqrt { scale } => format!("sqrt_{}", scale),
LookupOp::Rsqrt { scale } => format!("rsqrt_{}", scale),
LookupOp::Erf { scale } => format!("erf_{}", scale),
LookupOp::Exp { scale } => format!("exp_{}", scale),
LookupOp::Ln { scale } => format!("ln_{}", scale),
LookupOp::Cos { scale } => format!("cos_{}", scale),
LookupOp::ACos { scale } => format!("acos_{}", scale),
LookupOp::Cosh { scale } => format!("cosh_{}", scale),
LookupOp::ACosh { scale } => format!("acosh_{}", scale),
LookupOp::Sin { scale } => format!("sin_{}", scale),
LookupOp::ASin { scale } => format!("asin_{}", scale),
LookupOp::Sinh { scale } => format!("sinh_{}", scale),
LookupOp::ASinh { scale } => format!("asinh_{}", scale),
LookupOp::Tan { scale } => format!("tan_{}", scale),
LookupOp::ATan { scale } => format!("atan_{}", scale),
LookupOp::ATanh { scale } => format!("atanh_{}", scale),
LookupOp::Tanh { scale } => format!("tanh_{}", scale),
LookupOp::HardSwish { scale } => format!("hardswish_{}", scale),
}
}
/// Matches a [Op] to an operation in the `tensor::ops` module.
pub(crate) fn f<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
pub(crate) fn f<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64>(
&self,
x: &[Tensor<F>],
) -> Result<ForwardResult<F>, TensorError> {
let x = x[0].clone().map(|x| felt_to_integer_rep(x));
let res =
match &self {
LookupOp::Ceil { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::ceil(&x, scale.into()))
}
LookupOp::Floor { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::floor(&x, scale.into()))
}
LookupOp::Round { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::round(&x, scale.into()))
}
LookupOp::RoundHalfToEven { scale } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::round_half_to_even(&x, scale.into()),
),
LookupOp::Pow { scale, a } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::pow(&x, scale.0.into(), a.0.into()),
),
LookupOp::KroneckerDelta => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::kronecker_delta(&x))
}
LookupOp::Max { scale, a } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::max(&x, scale.0.into(), a.0.into()),
),
LookupOp::Min { scale, a } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::min(&x, scale.0.into(), a.0.into()),
),
LookupOp::Div { denom } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::const_div(&x, f32::from(*denom).into()),
),
LookupOp::Cast { scale } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::const_div(&x, f32::from(*scale).into()),
),
LookupOp::Recip {
input_scale,
output_scale,
} => Ok::<_, TensorError>(tensor::ops::nonlinearities::recip(
&x,
input_scale.into(),
output_scale.into(),
)),
LookupOp::LeakyReLU { slope: a } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::leakyrelu(&x, a.0.into()))
}
LookupOp::Sigmoid { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sigmoid(&x, scale.into()))
}
LookupOp::Sqrt { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sqrt(&x, scale.into()))
}
LookupOp::Rsqrt { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::rsqrt(&x, scale.into()))
}
LookupOp::Erf { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::erffunc(&x, scale.into()))
}
LookupOp::Exp { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::exp(&x, scale.into()))
}
LookupOp::Ln { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::ln(&x, scale.into()))
}
LookupOp::Cos { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::cos(&x, scale.into()))
}
LookupOp::ACos { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::acos(&x, scale.into()))
}
LookupOp::Cosh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::cosh(&x, scale.into()))
}
LookupOp::ACosh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::acosh(&x, scale.into()))
}
LookupOp::Sin { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sin(&x, scale.into()))
}
LookupOp::ASin { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::asin(&x, scale.into()))
}
LookupOp::Sinh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sinh(&x, scale.into()))
}
LookupOp::ASinh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::asinh(&x, scale.into()))
}
LookupOp::Tan { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::tan(&x, scale.into()))
}
LookupOp::ATan { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::atan(&x, scale.into()))
}
LookupOp::ATanh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::atanh(&x, scale.into()))
}
LookupOp::Tanh { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::tanh(&x, scale.into()))
}
LookupOp::HardSwish { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::hardswish(&x, scale.into()))
}
}?;
let x = x[0].clone().map(|x| felt_to_i64(x));
let res = match &self {
LookupOp::Abs => Ok(tensor::ops::abs(&x)?),
LookupOp::Ceil { scale } => Ok(tensor::ops::nonlinearities::ceil(&x, scale.into())),
LookupOp::Floor { scale } => Ok(tensor::ops::nonlinearities::floor(&x, scale.into())),
LookupOp::Round { scale } => Ok(tensor::ops::nonlinearities::round(&x, scale.into())),
LookupOp::RoundHalfToEven { scale } => Ok(
tensor::ops::nonlinearities::round_half_to_even(&x, scale.into()),
),
LookupOp::Pow { scale, a } => Ok(tensor::ops::nonlinearities::pow(
&x,
scale.0.into(),
a.0.into(),
)),
LookupOp::KroneckerDelta => Ok(tensor::ops::nonlinearities::kronecker_delta(&x)),
LookupOp::Max { scale, a } => Ok(tensor::ops::nonlinearities::max(
&x,
scale.0.into(),
a.0.into(),
)),
LookupOp::Min { scale, a } => Ok(tensor::ops::nonlinearities::min(
&x,
scale.0.into(),
a.0.into(),
)),
LookupOp::Sign => Ok(tensor::ops::nonlinearities::sign(&x)),
LookupOp::LessThan { a } => Ok(tensor::ops::nonlinearities::less_than(
&x,
f32::from(*a).into(),
)),
LookupOp::LessThanEqual { a } => Ok(tensor::ops::nonlinearities::less_than_equal(
&x,
f32::from(*a).into(),
)),
LookupOp::GreaterThan { a } => Ok(tensor::ops::nonlinearities::greater_than(
&x,
f32::from(*a).into(),
)),
LookupOp::GreaterThanEqual { a } => Ok(
tensor::ops::nonlinearities::greater_than_equal(&x, f32::from(*a).into()),
),
LookupOp::Div { denom } => Ok(tensor::ops::nonlinearities::const_div(
&x,
f32::from(*denom).into(),
)),
LookupOp::Cast { scale } => Ok(tensor::ops::nonlinearities::const_div(
&x,
f32::from(*scale).into(),
)),
LookupOp::Recip {
input_scale,
output_scale,
} => Ok(tensor::ops::nonlinearities::recip(
&x,
input_scale.into(),
output_scale.into(),
)),
LookupOp::ReLU => Ok(tensor::ops::nonlinearities::leakyrelu(&x, 0_f64)),
let output = res.map(|x| integer_rep_to_felt(x));
LookupOp::LeakyReLU { slope: a } => {
Ok(tensor::ops::nonlinearities::leakyrelu(&x, a.0.into()))
}
LookupOp::Sigmoid { scale } => {
Ok(tensor::ops::nonlinearities::sigmoid(&x, scale.into()))
}
LookupOp::Sqrt { scale } => Ok(tensor::ops::nonlinearities::sqrt(&x, scale.into())),
LookupOp::Rsqrt { scale } => Ok(tensor::ops::nonlinearities::rsqrt(&x, scale.into())),
LookupOp::Erf { scale } => Ok(tensor::ops::nonlinearities::erffunc(&x, scale.into())),
LookupOp::Exp { scale } => Ok(tensor::ops::nonlinearities::exp(&x, scale.into())),
LookupOp::Ln { scale } => Ok(tensor::ops::nonlinearities::ln(&x, scale.into())),
LookupOp::Cos { scale } => Ok(tensor::ops::nonlinearities::cos(&x, scale.into())),
LookupOp::ACos { scale } => Ok(tensor::ops::nonlinearities::acos(&x, scale.into())),
LookupOp::Cosh { scale } => Ok(tensor::ops::nonlinearities::cosh(&x, scale.into())),
LookupOp::ACosh { scale } => Ok(tensor::ops::nonlinearities::acosh(&x, scale.into())),
LookupOp::Sin { scale } => Ok(tensor::ops::nonlinearities::sin(&x, scale.into())),
LookupOp::ASin { scale } => Ok(tensor::ops::nonlinearities::asin(&x, scale.into())),
LookupOp::Sinh { scale } => Ok(tensor::ops::nonlinearities::sinh(&x, scale.into())),
LookupOp::ASinh { scale } => Ok(tensor::ops::nonlinearities::asinh(&x, scale.into())),
LookupOp::Tan { scale } => Ok(tensor::ops::nonlinearities::tan(&x, scale.into())),
LookupOp::ATan { scale } => Ok(tensor::ops::nonlinearities::atan(&x, scale.into())),
LookupOp::ATanh { scale } => Ok(tensor::ops::nonlinearities::atanh(&x, scale.into())),
LookupOp::Tanh { scale } => Ok(tensor::ops::nonlinearities::tanh(&x, scale.into())),
LookupOp::HardSwish { scale } => {
Ok(tensor::ops::nonlinearities::hardswish(&x, scale.into()))
}
}?;
let output = res.map(|x| i64_to_felt(x));
Ok(ForwardResult { output })
}
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for LookupOp {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for LookupOp {
/// Returns a reference to the Any trait.
fn as_any(&self) -> &dyn Any {
self
@@ -283,6 +242,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
/// Returns the name of the operation
fn as_string(&self) -> String {
match self {
LookupOp::Abs => "ABS".into(),
LookupOp::Ceil { scale } => format!("CEIL(scale={})", scale),
LookupOp::Floor { scale } => format!("FLOOR(scale={})", scale),
LookupOp::Round { scale } => format!("ROUND(scale={})", scale),
@@ -291,6 +251,11 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
LookupOp::KroneckerDelta => "K_DELTA".into(),
LookupOp::Max { scale, a } => format!("MAX(scale={}, a={})", scale, a),
LookupOp::Min { scale, a } => format!("MIN(scale={}, a={})", scale, a),
LookupOp::Sign => "SIGN".into(),
LookupOp::GreaterThan { a } => format!("GREATER_THAN(a={})", a),
LookupOp::GreaterThanEqual { a } => format!("GREATER_THAN_EQUAL(a={})", a),
LookupOp::LessThan { a } => format!("LESS_THAN(a={})", a),
LookupOp::LessThanEqual { a } => format!("LESS_THAN_EQUAL(a={})", a),
LookupOp::Recip {
input_scale,
output_scale,
@@ -301,6 +266,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
LookupOp::Cast { scale } => format!("CAST(scale={})", scale),
LookupOp::Ln { scale } => format!("LN(scale={})", scale),
LookupOp::ReLU => "RELU".to_string(),
LookupOp::LeakyReLU { slope: a } => format!("L_RELU(slope={})", a),
LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
LookupOp::Sqrt { scale } => format!("SQRT(scale={})", scale),
@@ -345,7 +311,12 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
in_scale + multiplier_to_scale(1. / scale.0 as f64)
}
LookupOp::Recip { output_scale, .. } => multiplier_to_scale(output_scale.into()),
LookupOp::KroneckerDelta => 0,
LookupOp::Sign
| LookupOp::GreaterThan { .. }
| LookupOp::LessThan { .. }
| LookupOp::GreaterThanEqual { .. }
| LookupOp::LessThanEqual { .. }
| LookupOp::KroneckerDelta => 0,
_ => inputs_scale[0],
};
Ok(scale)

View File

@@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize};
use crate::{
graph::quantize_tensor,
tensor::{self, Tensor, TensorType, ValTensor},
tensor::{self, IntoI64, Tensor, TensorType, ValTensor},
};
use halo2curves::ff::PrimeField;
@@ -31,12 +31,12 @@ pub use errors::CircuitError;
/// A struct representing the result of a forward pass.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> {
pub(crate) output: Tensor<F>,
}
/// A trait representing operations that can be represented as constraints in a circuit.
pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>:
pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64>:
std::fmt::Debug + Send + Sync + Any
{
/// Returns a string representation of the operation.
@@ -75,7 +75,7 @@ pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>:
fn as_any(&self) -> &dyn Any;
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Clone for Box<dyn Op<F>> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Clone for Box<dyn Op<F>> {
fn clone(&self) -> Self {
self.clone_dyn()
}
@@ -142,7 +142,7 @@ pub struct Input {
pub datum_type: InputType,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for Input {
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
Ok(self.scale)
}
@@ -197,7 +197,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
pub struct Unknown;
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Unknown {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Op<F> for Unknown {
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
Ok(0)
}
@@ -224,7 +224,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Unknow
///
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> {
///
pub quantized_values: Tensor<F>,
///
@@ -234,7 +234,7 @@ pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
pub pre_assigned_val: Option<ValTensor<F>>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Constant<F> {
///
pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>) -> Self {
Self {
@@ -267,7 +267,8 @@ impl<
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>,
+ for<'de> Deserialize<'de>
+ IntoI64,
> Op<F> for Constant<F>
{
fn as_any(&self) -> &dyn Any {

View File

@@ -9,9 +9,6 @@ use super::{base::BaseOp, *};
/// An enum representing the operations that can be expressed as arithmetic (non lookup) operations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum PolyOp {
ReLU,
Abs,
Sign,
GatherElements {
dim: usize,
constant_idx: Option<Tensor<usize>>,
@@ -36,7 +33,6 @@ pub enum PolyOp {
Conv {
padding: Vec<(usize, usize)>,
stride: Vec<usize>,
group: usize,
},
Downsample {
axis: usize,
@@ -47,7 +43,6 @@ pub enum PolyOp {
padding: Vec<(usize, usize)>,
output_padding: Vec<usize>,
stride: Vec<usize>,
group: usize,
},
Add,
Sub,
@@ -102,7 +97,8 @@ impl<
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>,
+ for<'de> Deserialize<'de>
+ IntoI64,
> Op<F> for PolyOp
{
/// Returns a reference to the Any trait.
@@ -112,9 +108,6 @@ impl<
fn as_string(&self) -> String {
match &self {
PolyOp::Abs => "ABS".to_string(),
PolyOp::Sign => "SIGN".to_string(),
PolyOp::ReLU => "RELU".to_string(),
PolyOp::GatherElements { dim, constant_idx } => format!(
"GATHERELEMENTS (dim={}, constant_idx{})",
dim,
@@ -155,25 +148,17 @@ impl<
PolyOp::Sum { axes } => format!("SUM (axes={:?})", axes),
PolyOp::Prod { .. } => "PROD".into(),
PolyOp::Pow(_) => "POW".into(),
PolyOp::Conv {
stride,
padding,
group,
} => {
format!(
"CONV (stride={:?}, padding={:?}, group={})",
stride, padding, group
)
PolyOp::Conv { stride, padding } => {
format!("CONV (stride={:?}, padding={:?})", stride, padding)
}
PolyOp::DeConv {
stride,
padding,
output_padding,
group,
} => {
format!(
"DECONV (stride={:?}, padding={:?}, output_padding={:?}, group={})",
stride, padding, output_padding, group
"DECONV (stride={:?}, padding={:?}, output_padding={:?})",
stride, padding, output_padding
)
}
PolyOp::Concat { axis } => format!("CONCAT (axis={})", axis),
@@ -196,9 +181,6 @@ impl<
values: &[ValTensor<F>],
) -> Result<Option<ValTensor<F>>, CircuitError> {
Ok(Some(match self {
PolyOp::Abs => layouts::abs(config, region, values[..].try_into()?)?,
PolyOp::Sign => layouts::sign(config, region, values[..].try_into()?)?,
PolyOp::ReLU => layouts::relu(config, region, values[..].try_into()?)?,
PolyOp::MultiBroadcastTo { shape } => {
layouts::expand(config, region, values[..].try_into()?, shape)?
}
@@ -230,18 +212,9 @@ impl<
PolyOp::Prod { axes, .. } => {
layouts::prod_axes(config, region, values[..].try_into()?, axes)?
}
PolyOp::Conv {
padding,
stride,
group,
} => layouts::conv(
config,
region,
values[..].try_into()?,
padding,
stride,
*group,
)?,
PolyOp::Conv { padding, stride } => {
layouts::conv(config, region, values[..].try_into()?, padding, stride)?
}
PolyOp::GatherElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into()
@@ -288,7 +261,6 @@ impl<
padding,
output_padding,
stride,
group,
} => layouts::deconv(
config,
region,
@@ -296,7 +268,6 @@ impl<
padding,
output_padding,
stride,
*group,
)?,
PolyOp::Add => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Add)?,
PolyOp::Sub => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Sub)?,
@@ -376,7 +347,6 @@ impl<
PolyOp::Reshape(_) | PolyOp::Flatten(_) => in_scales[0],
PolyOp::Pow(pow) => in_scales[0] * (*pow as crate::Scale),
PolyOp::Identity { out_scale } => out_scale.unwrap_or(in_scales[0]),
PolyOp::Sign { .. } => 0,
_ => in_scales[0],
};
Ok(scale)

View File

@@ -1,6 +1,5 @@
use crate::{
circuit::table::Range,
fieldutils::IntegerRep,
tensor::{Tensor, TensorType, ValTensor, ValType, VarTensor},
};
#[cfg(not(target_arch = "wasm32"))]
@@ -10,8 +9,7 @@ use halo2_proofs::{
plonk::{Error, Selector},
};
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use maybe_rayon::iter::ParallelExtend;
use portable_atomic::AtomicI64 as AtomicInt;
use std::{
cell::RefCell,
collections::{HashMap, HashSet},
@@ -86,88 +84,6 @@ impl ShuffleIndex {
}
}
#[derive(Debug, Clone)]
/// Some settings for a region to differentiate it across the different phases of proof generation
pub struct RegionSettings {
/// whether we are in witness generation mode
pub witness_gen: bool,
/// whether we should check range checks for validity
pub check_range: bool,
/// base for decompositions
pub base: usize,
/// number of legs for decompositions
pub legs: usize,
}
#[allow(unsafe_code)]
unsafe impl Sync for RegionSettings {}
#[allow(unsafe_code)]
unsafe impl Send for RegionSettings {}
impl RegionSettings {
/// Create a new region settings
pub fn new(witness_gen: bool, check_range: bool, base: usize, legs: usize) -> RegionSettings {
RegionSettings {
witness_gen,
check_range,
base,
legs,
}
}
/// Create a new region settings with all true
pub fn all_true(base: usize, legs: usize) -> RegionSettings {
RegionSettings {
witness_gen: true,
check_range: true,
base,
legs,
}
}
/// Create a new region settings with all false
pub fn all_false(base: usize, legs: usize) -> RegionSettings {
RegionSettings {
witness_gen: false,
check_range: false,
base,
legs,
}
}
}
#[derive(Debug, Default, Clone)]
/// Region statistics
pub struct RegionStatistics {
/// the current maximum value of the lookup inputs
pub max_lookup_inputs: IntegerRep,
/// the current minimum value of the lookup inputs
pub min_lookup_inputs: IntegerRep,
/// the current maximum value of the range size
pub max_range_size: IntegerRep,
/// the current set of used lookups
pub used_lookups: HashSet<LookupOp>,
/// the current set of used range checks
pub used_range_checks: HashSet<Range>,
}
impl RegionStatistics {
/// update the statistics with another set of statistics
pub fn update(&mut self, other: &RegionStatistics) {
self.max_lookup_inputs = self.max_lookup_inputs.max(other.max_lookup_inputs);
self.min_lookup_inputs = self.min_lookup_inputs.min(other.min_lookup_inputs);
self.max_range_size = self.max_range_size.max(other.max_range_size);
self.used_lookups.extend(other.used_lookups.clone());
self.used_range_checks
.extend(other.used_range_checks.clone());
}
}
#[allow(unsafe_code)]
unsafe impl Sync for RegionStatistics {}
#[allow(unsafe_code)]
unsafe impl Send for RegionStatistics {}
#[derive(Debug)]
/// A context for a region
pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> {
@@ -177,22 +93,17 @@ pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
num_inner_cols: usize,
dynamic_lookup_index: DynamicLookupIndex,
shuffle_index: ShuffleIndex,
statistics: RegionStatistics,
settings: RegionSettings,
used_lookups: HashSet<LookupOp>,
used_range_checks: HashSet<Range>,
max_lookup_inputs: i64,
min_lookup_inputs: i64,
max_range_size: i64,
witness_gen: bool,
check_lookup_range: bool,
assigned_constants: ConstantsMap<F>,
}
impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a, F> {
/// get the region's decomposition base
pub fn base(&self) -> usize {
self.settings.base
}
/// get the region's decomposition legs
pub fn legs(&self) -> usize {
self.settings.legs
}
#[cfg(not(target_arch = "wasm32"))]
///
pub fn debug_report(&self) {
@@ -240,27 +151,16 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
///
pub fn witness_gen(&self) -> bool {
self.settings.witness_gen
self.witness_gen
}
///
pub fn check_range(&self) -> bool {
self.settings.check_range
}
///
pub fn statistics(&self) -> &RegionStatistics {
&self.statistics
pub fn check_lookup_range(&self) -> bool {
self.check_lookup_range
}
/// Create a new region context
pub fn new(
region: Region<'a, F>,
row: usize,
num_inner_cols: usize,
decomp_base: usize,
decomp_legs: usize,
) -> RegionCtx<'a, F> {
pub fn new(region: Region<'a, F>, row: usize, num_inner_cols: usize) -> RegionCtx<'a, F> {
let region = Some(RefCell::new(region));
let linear_coord = row * num_inner_cols;
@@ -271,8 +171,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
linear_coord,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
statistics: RegionStatistics::default(),
settings: RegionSettings::all_true(decomp_base, decomp_legs),
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen: true,
check_lookup_range: true,
assigned_constants: HashMap::new(),
}
}
@@ -282,20 +187,45 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
region: Region<'a, F>,
row: usize,
num_inner_cols: usize,
decomp_base: usize,
decomp_legs: usize,
constants: ConstantsMap<F>,
) -> RegionCtx<'a, F> {
let mut new_self = Self::new(region, row, num_inner_cols, decomp_base, decomp_legs);
let mut new_self = Self::new(region, row, num_inner_cols);
new_self.assigned_constants = constants;
new_self
}
/// Create a new region context from a wrapped region
pub fn from_wrapped_region(
region: Option<RefCell<Region<'a, F>>>,
row: usize,
num_inner_cols: usize,
dynamic_lookup_index: DynamicLookupIndex,
shuffle_index: ShuffleIndex,
) -> RegionCtx<'a, F> {
let linear_coord = row * num_inner_cols;
RegionCtx {
region,
num_inner_cols,
linear_coord,
row,
dynamic_lookup_index,
shuffle_index,
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen: false,
check_lookup_range: false,
assigned_constants: HashMap::new(),
}
}
/// Create a new region context
pub fn new_dummy(
row: usize,
num_inner_cols: usize,
settings: RegionSettings,
witness_gen: bool,
check_lookup_range: bool,
) -> RegionCtx<'a, F> {
let region = None;
let linear_coord = row * num_inner_cols;
@@ -307,8 +237,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
row,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
statistics: RegionStatistics::default(),
settings,
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen,
check_lookup_range,
assigned_constants: HashMap::new(),
}
}
@@ -318,7 +253,8 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
row: usize,
linear_coord: usize,
num_inner_cols: usize,
settings: RegionSettings,
witness_gen: bool,
check_lookup_range: bool,
) -> RegionCtx<'a, F> {
let region = None;
RegionCtx {
@@ -328,8 +264,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
row,
dynamic_lookup_index: DynamicLookupIndex::default(),
shuffle_index: ShuffleIndex::default(),
statistics: RegionStatistics::default(),
settings,
used_lookups: HashSet::new(),
used_range_checks: HashSet::new(),
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
witness_gen,
check_lookup_range,
assigned_constants: HashMap::new(),
}
}
@@ -380,9 +321,12 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
) -> Result<(), CircuitError> {
let row = AtomicUsize::new(self.row());
let linear_coord = AtomicUsize::new(self.linear_coord());
let statistics = Arc::new(Mutex::new(self.statistics.clone()));
let shuffle_index = Arc::new(Mutex::new(self.shuffle_index.clone()));
let max_lookup_inputs = AtomicInt::new(self.max_lookup_inputs());
let min_lookup_inputs = AtomicInt::new(self.min_lookup_inputs());
let lookups = Arc::new(Mutex::new(self.used_lookups.clone()));
let range_checks = Arc::new(Mutex::new(self.used_range_checks.clone()));
let dynamic_lookup_index = Arc::new(Mutex::new(self.dynamic_lookup_index.clone()));
let shuffle_index = Arc::new(Mutex::new(self.shuffle_index.clone()));
let constants = Arc::new(Mutex::new(self.assigned_constants.clone()));
*output = output.par_enum_map(|idx, _| {
@@ -396,7 +340,8 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
starting_offset,
starting_linear_coord,
self.num_inner_cols,
self.settings.clone(),
self.witness_gen,
self.check_lookup_range,
);
let res = inner_loop_function(idx, &mut local_reg);
// we update the offset and constants
@@ -406,9 +351,14 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
Ordering::SeqCst,
);
max_lookup_inputs.fetch_max(local_reg.max_lookup_inputs(), Ordering::SeqCst);
min_lookup_inputs.fetch_min(local_reg.min_lookup_inputs(), Ordering::SeqCst);
// update the lookups
let mut statistics = statistics.lock().unwrap();
statistics.update(local_reg.statistics());
let mut lookups = lookups.lock().unwrap();
lookups.extend(local_reg.used_lookups());
// update the range checks
let mut range_checks = range_checks.lock().unwrap();
range_checks.extend(local_reg.used_range_checks());
// update the dynamic lookup index
let mut dynamic_lookup_index = dynamic_lookup_index.lock().unwrap();
dynamic_lookup_index.update(&local_reg.dynamic_lookup_index);
@@ -422,11 +372,20 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
res
})?;
self.linear_coord = linear_coord.into_inner();
#[allow(trivial_numeric_casts)]
{
self.max_lookup_inputs = max_lookup_inputs.into_inner();
self.min_lookup_inputs = min_lookup_inputs.into_inner();
}
self.row = row.into_inner();
self.statistics = Arc::try_unwrap(statistics)
self.used_lookups = Arc::try_unwrap(lookups)
.map_err(|e| CircuitError::GetLookupsError(format!("{:?}", e)))?
.into_inner()
.map_err(|e| CircuitError::GetLookupsError(format!("{:?}", e)))?;
self.used_range_checks = Arc::try_unwrap(range_checks)
.map_err(|e| CircuitError::GetRangeChecksError(format!("{:?}", e)))?
.into_inner()
.map_err(|e| CircuitError::GetRangeChecksError(format!("{:?}", e)))?;
self.dynamic_lookup_index = Arc::try_unwrap(dynamic_lookup_index)
.map_err(|e| CircuitError::GetDynamicLookupError(format!("{:?}", e)))?
.into_inner()
@@ -450,11 +409,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
) -> Result<(), CircuitError> {
let (mut min, mut max) = (0, 0);
for i in inputs {
max = max.max(i.int_evals()?.into_iter().max().unwrap_or_default());
min = min.min(i.int_evals()?.into_iter().min().unwrap_or_default());
max = max.max(i.get_int_evals()?.into_iter().max().unwrap_or_default());
min = min.min(i.get_int_evals()?.into_iter().min().unwrap_or_default());
}
self.statistics.max_lookup_inputs = self.statistics.max_lookup_inputs.max(max);
self.statistics.min_lookup_inputs = self.statistics.min_lookup_inputs.min(min);
self.max_lookup_inputs = self.max_lookup_inputs.max(max);
self.min_lookup_inputs = self.min_lookup_inputs.min(min);
Ok(())
}
@@ -466,7 +425,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
let range_size = (range.1 - range.0).abs();
self.statistics.max_range_size = self.statistics.max_range_size.max(range_size);
self.max_range_size = self.max_range_size.max(range_size);
Ok(())
}
@@ -481,13 +440,13 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
lookup: LookupOp,
inputs: &[ValTensor<F>],
) -> Result<(), CircuitError> {
self.statistics.used_lookups.insert(lookup);
self.used_lookups.insert(lookup);
self.update_max_min_lookup_inputs(inputs)
}
/// add used range check
pub fn add_used_range_check(&mut self, range: Range) -> Result<(), CircuitError> {
self.statistics.used_range_checks.insert(range);
self.used_range_checks.insert(range);
self.update_max_min_lookup_range(range)
}
@@ -528,27 +487,27 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
/// get used lookups
pub fn used_lookups(&self) -> HashSet<LookupOp> {
self.statistics.used_lookups.clone()
self.used_lookups.clone()
}
/// get used range checks
pub fn used_range_checks(&self) -> HashSet<Range> {
self.statistics.used_range_checks.clone()
self.used_range_checks.clone()
}
/// max lookup inputs
pub fn max_lookup_inputs(&self) -> IntegerRep {
self.statistics.max_lookup_inputs
pub fn max_lookup_inputs(&self) -> i64 {
self.max_lookup_inputs
}
/// min lookup inputs
pub fn min_lookup_inputs(&self) -> IntegerRep {
self.statistics.min_lookup_inputs
pub fn min_lookup_inputs(&self) -> i64 {
self.min_lookup_inputs
}
/// max range check
pub fn max_range_size(&self) -> IntegerRep {
self.statistics.max_range_size
pub fn max_range_size(&self) -> i64 {
self.max_range_size
}
/// Assign a valtensor to a vartensor
@@ -556,18 +515,18 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, CircuitError> {
) -> Result<ValTensor<F>, Error> {
if let Some(region) = &self.region {
Ok(var.assign(
var.assign(
&mut region.borrow_mut(),
self.linear_coord,
values,
&mut self.assigned_constants,
)?)
)
} else {
if !values.is_instance() {
let values_map = values.create_constants_map_iterator();
self.assigned_constants.par_extend(values_map);
self.assigned_constants.extend(values_map);
}
Ok(values.clone())
}
@@ -583,18 +542,18 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, CircuitError> {
) -> Result<ValTensor<F>, Error> {
if let Some(region) = &self.region {
Ok(var.assign(
var.assign(
&mut region.borrow_mut(),
self.combined_dynamic_shuffle_coord(),
values,
&mut self.assigned_constants,
)?)
)
} else {
if !values.is_instance() {
let values_map = values.create_constants_map_iterator();
self.assigned_constants.par_extend(values_map);
self.assigned_constants.extend(values_map);
}
Ok(values.clone())
}
@@ -605,7 +564,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
) -> Result<ValTensor<F>, CircuitError> {
) -> Result<ValTensor<F>, Error> {
self.assign_dynamic_lookup(var, values)
}
@@ -614,24 +573,27 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
&mut self,
var: &VarTensor,
values: &ValTensor<F>,
ommissions: &HashSet<usize>,
) -> Result<ValTensor<F>, CircuitError> {
ommissions: &HashSet<&usize>,
) -> Result<ValTensor<F>, Error> {
if let Some(region) = &self.region {
Ok(var.assign_with_omissions(
var.assign_with_omissions(
&mut region.borrow_mut(),
self.linear_coord,
values,
ommissions,
&mut self.assigned_constants,
)?)
)
} else {
let mut values_clone = values.clone();
let mut indices = ommissions.clone().into_iter().collect_vec();
values_clone.remove_indices(&mut indices, false)?;
let inner_tensor = values.get_inner_tensor().unwrap();
let mut values_map = values.create_constants_map();
let values_map = values.create_constants_map();
for o in ommissions {
if let ValType::Constant(value) = inner_tensor.get_flat_index(**o) {
values_map.remove(&value);
}
}
self.assigned_constants.par_extend(values_map);
self.assigned_constants.extend(values_map);
Ok(values.clone())
}

View File

@@ -11,33 +11,20 @@ use maybe_rayon::prelude::{IntoParallelIterator, ParallelIterator};
use crate::{
circuit::CircuitError,
fieldutils::{integer_rep_to_felt, IntegerRep},
tensor::{Tensor, TensorType},
fieldutils::i64_to_felt,
tensor::{IntoI64, Tensor, TensorType},
};
#[cfg(not(target_arch = "wasm32"))]
use crate::execute::EZKL_REPO_PATH;
use crate::circuit::lookup::LookupOp;
/// The range of the lookup table.
pub type Range = (IntegerRep, IntegerRep);
pub type Range = (i64, i64);
/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: IntegerRep = 2;
pub const RANGE_MULTIPLIER: i64 = 2;
/// The safety factor offset for the number of rows in the lookup table.
pub const RESERVED_BLINDING_ROWS_PAD: usize = 3;
#[cfg(not(target_arch = "wasm32"))]
lazy_static::lazy_static! {
/// an optional directory to read and write the lookup table cache
pub static ref LOOKUP_CACHE: String = format!("{}/cache", *EZKL_REPO_PATH);
}
/// The lookup table cache is disabled on wasm32 target.
#[cfg(target_arch = "wasm32")]
pub const LOOKUP_CACHE: &str = "";
#[derive(Debug, Clone)]
///
pub struct SelectorConstructor<F: PrimeField> {
@@ -109,22 +96,21 @@ pub struct Table<F: PrimeField> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<F> {
/// get column index given input
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
let chunk = (crate::fieldutils::felt_to_integer_rep(input) - self.range.0).abs()
/ (self.col_size as IntegerRep);
let chunk =
(crate::fieldutils::felt_to_i64(input) - self.range.0).abs() / (self.col_size as i64);
integer_rep_to_felt(chunk)
i64_to_felt(chunk)
}
/// get first_element of column
pub fn get_first_element(&self, chunk: usize) -> (F, F) {
let chunk = chunk as IntegerRep;
let chunk = chunk as i64;
// we index from 1 to prevent soundness issues
let first_element =
integer_rep_to_felt(chunk * (self.col_size as IntegerRep) + self.range.0);
let first_element = i64_to_felt(chunk * (self.col_size as i64) + self.range.0);
let op_f = self
.nonlinearity
.f(&[Tensor::from(vec![first_element].into_iter())])
@@ -144,20 +130,12 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
}
///
pub fn num_cols_required(range_len: IntegerRep, col_size: usize) -> usize {
pub fn num_cols_required(range_len: i64, col_size: usize) -> usize {
// number of cols needed to store the range
(range_len / (col_size as IntegerRep)) as usize + 1
(range_len / (col_size as i64)) as usize + 1
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
fn name(&self) -> String {
format!(
"{}_{}_{}",
self.nonlinearity.as_path(),
self.range.0,
self.range.1
)
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> Table<F> {
/// Configures the table.
pub fn configure(
cs: &mut ConstraintSystem<F>,
@@ -224,51 +202,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
let smallest = self.range.0;
let largest = self.range.1;
let gen_table = || -> Result<(Tensor<F>, Tensor<F>), crate::tensor::TensorError> {
let inputs = Tensor::from(smallest..=largest)
.par_enum_map(|_, x| Ok::<_, crate::tensor::TensorError>(integer_rep_to_felt(x)))?;
let evals = self.nonlinearity.f(&[inputs.clone()])?;
Ok((inputs, evals.output))
};
let (inputs, evals) = if !LOOKUP_CACHE.is_empty() {
let cache = std::path::Path::new(&*LOOKUP_CACHE);
let cache_path = cache.join(self.name());
let input_path = cache_path.join("inputs");
let output_path = cache_path.join("outputs");
if cache_path.exists() {
log::info!("Loading lookup table from cache: {:?}", cache_path);
let (input_cache, output_cache) =
(Tensor::load(&input_path)?, Tensor::load(&output_path)?);
(input_cache, output_cache)
} else {
log::info!(
"Generating lookup table and saving to cache: {:?}",
cache_path
);
// mkdir -p cache_path
std::fs::create_dir_all(&cache_path).map_err(|e| {
CircuitError::TensorError(crate::tensor::TensorError::FileSaveError(
e.to_string(),
))
})?;
let (inputs, evals) = gen_table()?;
inputs.save(&input_path)?;
evals.save(&output_path)?;
(inputs, evals)
}
} else {
log::info!(
"Generating lookup table {} without cache",
self.nonlinearity.as_path()
);
gen_table()?
};
let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i64_to_felt(x));
let evals = self.nonlinearity.f(&[inputs.clone()])?;
let chunked_inputs = inputs.chunks(self.col_size);
self.is_assigned = true;
@@ -300,7 +235,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
)?;
}
let output = evals[row_offset];
let output = evals.output[row_offset];
table.assign_cell(
|| format!("nl_o_col row {}", row_offset),
@@ -337,17 +272,12 @@ pub struct RangeCheck<F: PrimeField> {
_marker: PhantomData<F>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
/// as path
pub fn as_path(&self) -> String {
format!("rangecheck_{}_{}", self.range.0, self.range.1)
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> RangeCheck<F> {
/// get first_element of column
pub fn get_first_element(&self, chunk: usize) -> F {
let chunk = chunk as IntegerRep;
let chunk = chunk as i64;
// we index from 1 to prevent soundness issues
integer_rep_to_felt(chunk * (self.col_size as IntegerRep) + self.range.0)
i64_to_felt(chunk * (self.col_size as i64) + self.range.0)
}
///
@@ -363,14 +293,14 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
/// get column index given input
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
let chunk = (crate::fieldutils::felt_to_integer_rep(input) - self.range.0).abs()
/ (self.col_size as IntegerRep);
let chunk =
(crate::fieldutils::felt_to_i64(input) - self.range.0).abs() / (self.col_size as i64);
integer_rep_to_felt(chunk)
i64_to_felt(chunk)
}
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI64> RangeCheck<F> {
/// Configures the table.
pub fn configure(cs: &mut ConstraintSystem<F>, range: Range, logrows: usize) -> RangeCheck<F> {
log::debug!("range check range: {:?}", range);
@@ -420,32 +350,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
let smallest = self.range.0;
let largest = self.range.1;
let inputs: Tensor<F> = if !LOOKUP_CACHE.is_empty() {
let cache = std::path::Path::new(&*LOOKUP_CACHE);
let cache_path = cache.join(self.as_path());
let input_path = cache_path.join("inputs");
if cache_path.exists() {
log::info!("Loading range check table from cache: {:?}", cache_path);
Tensor::load(&input_path)?
} else {
log::info!(
"Generating range check table and saving to cache: {:?}",
cache_path
);
// mkdir -p cache_path
std::fs::create_dir_all(&cache_path)?;
let inputs = Tensor::from(smallest..=largest).map(|x| integer_rep_to_felt(x));
inputs.save(&input_path)?;
inputs
}
} else {
log::info!("Generating range check {} without cache", self.as_path());
Tensor::from(smallest..=largest).map(|x| integer_rep_to_felt(x))
};
let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i64_to_felt(x));
let chunked_inputs = inputs.chunks(self.col_size);
self.is_assigned = true;

View File

@@ -8,7 +8,6 @@ use halo2_proofs::{
};
use halo2curves::bn256::Fr as F;
use halo2curves::ff::{Field, PrimeField};
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
use ops::lookup::LookupOp;
use ops::region::RegionCtx;
use rand::rngs::OsRng;
@@ -56,7 +55,7 @@ mod matmul {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -133,7 +132,7 @@ mod matmul_col_overflow_double_col {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 128, 2);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
config
.layout(
&mut region,
@@ -207,7 +206,7 @@ mod matmul_col_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -257,7 +256,7 @@ mod matmul_col_ultra_overflow_double_col {
use super::*;
const K: usize = 4;
const LEN: usize = 10;
const LEN: usize = 20;
const NUM_INNER_COLS: usize = 2;
#[derive(Clone)]
@@ -291,7 +290,7 @@ mod matmul_col_ultra_overflow_double_col {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 128, 2);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
config
.layout(
&mut region,
@@ -375,7 +374,7 @@ mod matmul_col_ultra_overflow {
use super::*;
const K: usize = 4;
const LEN: usize = 10;
const LEN: usize = 20;
#[derive(Clone)]
struct MatmulCircuit<F: PrimeField + TensorType + PartialOrd> {
@@ -408,7 +407,7 @@ mod matmul_col_ultra_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -519,7 +518,7 @@ mod dot {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -596,7 +595,7 @@ mod dot_col_overflow_triple_col {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 3, 128, 2);
let mut region = RegionCtx::new(region, 0, 3);
config
.layout(
&mut region,
@@ -669,7 +668,7 @@ mod dot_col_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -742,7 +741,7 @@ mod sum {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -812,7 +811,7 @@ mod sum_col_overflow_double_col {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS, 128, 2);
let mut region = RegionCtx::new(region, 0, NUM_INNER_COLS);
config
.layout(
&mut region,
@@ -881,7 +880,7 @@ mod sum_col_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -952,7 +951,7 @@ mod composition {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
let _ = config
.layout(
&mut region,
@@ -1043,7 +1042,7 @@ mod conv {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -1051,7 +1050,6 @@ mod conv {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
group: 1,
}),
)
.map_err(|_| Error::Synthesis)
@@ -1160,7 +1158,7 @@ mod conv_col_ultra_overflow {
use super::*;
const K: usize = 4;
const LEN: usize = 10;
const LEN: usize = 28;
#[derive(Clone)]
struct ConvCircuit<F: PrimeField + TensorType + PartialOrd> {
@@ -1194,7 +1192,7 @@ mod conv_col_ultra_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
@@ -1202,7 +1200,6 @@ mod conv_col_ultra_overflow {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
group: 1,
}),
)
.map_err(|_| Error::Synthesis)
@@ -1298,8 +1295,8 @@ mod conv_relu_col_ultra_overflow {
use super::*;
const K: usize = 8;
const LEN: usize = 15;
const K: usize = 4;
const LEN: usize = 28;
#[derive(Clone)]
struct ConvCircuit<F: PrimeField + TensorType + PartialOrd> {
@@ -1318,23 +1315,15 @@ mod conv_relu_col_ultra_overflow {
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let a = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * 4);
let b = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * 4);
let output = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN * 4);
let a = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
let b = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
let output = VarTensor::new_advice(cs, K, 1, LEN * LEN * LEN);
let mut base_config =
Self::Config::configure(cs, &[a.clone(), b.clone()], &output, CheckMode::SAFE);
// sets up a new relu table
base_config
.configure_range_check(cs, &a, &b, (-1, 1), K)
.configure_lookup(cs, &b, &output, &a, (-3, 3), K, &LookupOp::ReLU)
.unwrap();
base_config
.configure_range_check(cs, &a, &b, (0, 1), K)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K, 8, false);
base_config.clone()
}
@@ -1343,12 +1332,12 @@ mod conv_relu_col_ultra_overflow {
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
config.layout_range_checks(&mut layouter).unwrap();
config.layout_tables(&mut layouter).unwrap();
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 2, 2);
let mut region = RegionCtx::new(region, 0, 1);
let output = config
.layout(
&mut region,
@@ -1356,7 +1345,6 @@ mod conv_relu_col_ultra_overflow {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
group: 1,
}),
)
.map_err(|_| Error::Synthesis);
@@ -1364,7 +1352,7 @@ mod conv_relu_col_ultra_overflow {
.layout(
&mut region,
&[output.unwrap().unwrap()],
Box::new(PolyOp::ReLU),
Box::new(LookupOp::ReLU),
)
.unwrap();
Ok(())
@@ -1485,7 +1473,7 @@ mod add_w_shape_casting {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Add))
.map_err(|_| Error::Synthesis)
@@ -1552,7 +1540,7 @@ mod add {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Add))
.map_err(|_| Error::Synthesis)
@@ -1636,7 +1624,7 @@ mod dynamic_lookup {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
for i in 0..NUM_LOOP {
layouts::dynamic_lookup(
&config,
@@ -1778,7 +1766,7 @@ mod shuffle {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
for i in 0..NUM_LOOP {
layouts::shuffles(
&config,
@@ -1893,7 +1881,7 @@ mod add_with_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Add))
.map_err(|_| Error::Synthesis)
@@ -1995,7 +1983,7 @@ mod add_with_overflow_and_poseidon {
layouter.assign_region(
|| "model",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.base
.layout(&mut region, &inputs, Box::new(PolyOp::Add))
@@ -2101,7 +2089,7 @@ mod sub {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Sub))
.map_err(|_| Error::Synthesis)
@@ -2168,7 +2156,7 @@ mod mult {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Mult))
.map_err(|_| Error::Synthesis)
@@ -2235,7 +2223,7 @@ mod pow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &self.inputs.clone(), Box::new(PolyOp::Pow(5)))
.map_err(|_| Error::Synthesis)
@@ -2267,6 +2255,7 @@ mod matmul_relu {
const K: usize = 18;
const LEN: usize = 32;
use crate::circuit::lookup::LookupOp;
#[derive(Clone)]
struct MyCircuit<F: PrimeField + TensorType + PartialOrd> {
@@ -2296,17 +2285,11 @@ mod matmul_relu {
let mut base_config =
BaseConfig::configure(cs, &[a.clone(), b.clone()], &output, CheckMode::SAFE);
// sets up a new relu table
base_config
.configure_range_check(cs, &a, &b, (-1, 1), K)
.configure_lookup(cs, &b, &output, &a, (-32768, 32768), K, &LookupOp::ReLU)
.unwrap();
base_config
.configure_range_check(cs, &a, &b, (0, 1023), K)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K, 8, false);
MyConfig { base_config }
}
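A quick sizing check on the lookup configured above: the ReLU table spans -32768..=32768, and K = 18 provides 2^18 rows per column, so the whole table fits in one column with headroom for blinding rows. As arithmetic:

let table_len = (32768i64 - (-32768i64) + 1) as usize; // 2^16 + 1 = 65537 entries
assert!(table_len <= (1usize << 18)); // 262144 rows available at K = 18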
@@ -2315,14 +2298,11 @@ mod matmul_relu {
mut config: Self::Config,
mut layouter: impl Layouter<F>,
) -> Result<(), Error> {
config
.base_config
.layout_range_checks(&mut layouter)
.unwrap();
config.base_config.layout_tables(&mut layouter).unwrap();
layouter.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 1024, 2);
let mut region = RegionCtx::new(region, 0, 1);
let op = PolyOp::Einsum {
equation: "ij,jk->ik".to_string(),
};
@@ -2332,7 +2312,7 @@ mod matmul_relu {
.unwrap();
let _output = config
.base_config
.layout(&mut region, &[output.unwrap()], Box::new(PolyOp::ReLU))
.layout(&mut region, &[output.unwrap()], Box::new(LookupOp::ReLU))
.unwrap();
Ok(())
},
@@ -2371,8 +2351,6 @@ mod relu {
plonk::{Circuit, ConstraintSystem, Error},
};
const K: u32 = 8;
#[derive(Clone)]
struct ReLUCircuit<F: PrimeField + TensorType + PartialOrd> {
pub input: ValTensor<F>,
@@ -2389,26 +2367,16 @@ mod relu {
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let advices = (0..3)
.map(|_| VarTensor::new_advice(cs, 8, 1, 3))
.map(|_| VarTensor::new_advice(cs, 4, 1, 3))
.collect::<Vec<_>>();
let mut config = BaseConfig::configure(
cs,
&[advices[0].clone(), advices[1].clone()],
&advices[2],
CheckMode::SAFE,
);
let nl = LookupOp::ReLU;
let mut config = BaseConfig::default();
config
.configure_range_check(cs, &advices[0], &advices[1], (-1, 1), K as usize)
.configure_lookup(cs, &advices[0], &advices[1], &advices[2], (-6, 6), 4, &nl)
.unwrap();
config
.configure_range_check(cs, &advices[0], &advices[1], (0, 1), K as usize)
.unwrap();
let _constant = VarTensor::constant_cols(cs, K as usize, 8, false);
config
}
@@ -2417,15 +2385,15 @@ mod relu {
mut config: Self::Config,
mut layouter: impl Layouter<F>, // layouter is our 'write buffer' for the circuit
) -> Result<(), Error> {
config.layout_range_checks(&mut layouter).unwrap();
config.layout_tables(&mut layouter).unwrap();
layouter
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 2, 2);
Ok(config
.layout(&mut region, &[self.input.clone()], Box::new(PolyOp::ReLU))
.unwrap())
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(&mut region, &[self.input.clone()], Box::new(LookupOp::ReLU))
.map_err(|_| Error::Synthesis)
},
)
.unwrap();
@@ -2443,7 +2411,7 @@ mod relu {
input: ValTensor::from(input),
};
let prover = MockProver::run(K, &circuit, vec![]).unwrap();
let prover = MockProver::run(4_u32, &circuit, vec![]).unwrap();
prover.assert_satisfied();
}
}
@@ -2482,7 +2450,7 @@ mod lookup_ultra_overflow {
.map(|_| VarTensor::new_advice(cs, 4, 1, 3))
.collect::<Vec<_>>();
let nl = LookupOp::LeakyReLU { slope: 0.0.into() };
let nl = LookupOp::ReLU;
let mut config = BaseConfig::default();
@@ -2510,13 +2478,9 @@ mod lookup_ultra_overflow {
.assign_region(
|| "",
|region| {
let mut region = RegionCtx::new(region, 0, 1, 128, 2);
let mut region = RegionCtx::new(region, 0, 1);
config
.layout(
&mut region,
&[self.input.clone()],
Box::new(LookupOp::LeakyReLU { slope: 0.0.into() }),
)
.layout(&mut region, &[self.input.clone()], Box::new(LookupOp::ReLU))
.map_err(|_| Error::Synthesis)
},
)

View File

@@ -141,23 +141,23 @@ mod tests {
#[test]
fn f32_eq() {
assert!(F32(f32::NAN) == F32(f32::NAN));
assert!(F32(f32::NAN) != F32(5.0));
assert!(F32(5.0) != F32(f32::NAN));
assert!(F32(std::f32::NAN) == F32(std::f32::NAN));
assert!(F32(std::f32::NAN) != F32(5.0));
assert!(F32(5.0) != F32(std::f32::NAN));
assert!(F32(0.0) == F32(-0.0));
}
#[test]
fn f32_cmp() {
assert!(F32(f32::NAN) == F32(f32::NAN));
assert!(F32(f32::NAN) < F32(5.0));
assert!(F32(5.0) > F32(f32::NAN));
assert!(F32(std::f32::NAN) == F32(std::f32::NAN));
assert!(F32(std::f32::NAN) < F32(5.0));
assert!(F32(5.0) > F32(std::f32::NAN));
assert!(F32(0.0) == F32(-0.0));
}
#[test]
fn f32_hash() {
assert!(calculate_hash(&F32(0.0)) == calculate_hash(&F32(-0.0)));
assert!(calculate_hash(&F32(f32::NAN)) == calculate_hash(&F32(-f32::NAN)));
assert!(calculate_hash(&F32(std::f32::NAN)) == calculate_hash(&F32(-std::f32::NAN)));
}
}
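The tests above pin down total equality for the F32 wrapper: every NaN equals every other NaN and hashes identically, and +0.0 collapses into -0.0. A self-contained sketch of such a wrapper (an illustration, not the crate's definition):

#[derive(Copy, Clone, Debug)]
pub struct F32(pub f32);

impl PartialEq for F32 {
    fn eq(&self, other: &Self) -> bool {
        // every NaN equals every other NaN; 0.0 == -0.0 comes for free
        // from the IEEE comparison in the second arm
        (self.0.is_nan() && other.0.is_nan()) || self.0 == other.0
    }
}
impl Eq for F32 {}

impl std::hash::Hash for F32 {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        // canonicalize so that values that compare equal hash equally
        let bits = if self.0.is_nan() {
            f32::NAN.to_bits() // one pattern for every NaN, including -NaN
        } else if self.0 == 0.0 {
            0u32 // +0.0 and -0.0 share a hash
        } else {
            self.0.to_bits()
        };
        state.write_u32(bits);
    }
}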

View File

@@ -81,10 +81,8 @@ pub const DEFAULT_CALIBRATION_FILE: &str = "calibration.json";
pub const DEFAULT_LOOKUP_SAFETY_MARGIN: &str = "2";
/// Default Compress selectors
pub const DEFAULT_DISABLE_SELECTOR_COMPRESSION: &str = "false";
/// Default render reusable verifier
pub const DEFAULT_RENDER_REUSABLE: &str = "false";
/// Default contract deployment type
pub const DEFAULT_CONTRACT_DEPLOYMENT_TYPE: &str = "verifier";
/// Default render vk separately
pub const DEFAULT_RENDER_VK_SEPERATELY: &str = "false";
/// Default VK sol path
pub const DEFAULT_VK_SOL: &str = "vk.sol";
/// Default VK abi path
@@ -183,67 +181,6 @@ impl From<&str> for CalibrationTarget {
}
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
/// Determines what type of contract (verifier, verifier/reusable, vka) should be deployed
pub enum ContractType {
/// Deploys a verifier contract tailored to the circuit and not reusable
Verifier {
/// Whether to deploy a reusable verifier. This can reduce state bloat on-chain, since you need only deploy a verifying key artifact (vka) for a given circuit, which is significantly smaller than the verifier contract (up to 4 times smaller for large circuits)
/// Can also be used as an alternative to aggregation for verifiers that are otherwise too large to fit on-chain.
reusable: bool,
},
/// Deploys a verifying key artifact that the reusable verifier loads into memory at runtime. Encodes the circuit-specific data that was otherwise hardcoded onto the stack.
VerifyingKeyArtifact,
}
impl Default for ContractType {
fn default() -> Self {
ContractType::Verifier {
reusable: false,
}
}
}
impl std::fmt::Display for ContractType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
ContractType::Verifier { reusable: true } => {
"verifier/reusable".to_string()
},
ContractType::Verifier {
reusable: false,
} => "verifier".to_string(),
ContractType::VerifyingKeyArtifact => "vka".to_string(),
}
)
}
}
impl ToFlags for ContractType {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl From<&str> for ContractType {
fn from(s: &str) -> Self {
match s {
"verifier" => ContractType::Verifier { reusable: false },
"verifier/reusable" => ContractType::Verifier { reusable: true },
"vka" => ContractType::VerifyingKeyArtifact,
_ => {
log::error!("Invalid value for ContractType");
log::warn!("Defaulting to verifier");
ContractType::default()
}
}
}
}
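Given the Display and From<&str> impls above, the string forms round-trip; unknown strings fall back to the plain verifier with a logged warning:

let t = ContractType::from("verifier/reusable");
assert_eq!(t.to_string(), "verifier/reusable");
assert_eq!(ContractType::from("vka"), ContractType::VerifyingKeyArtifact);
// unrecognized values log an error and default to the non-reusable verifier
assert_eq!(ContractType::from("bogus"), ContractType::Verifier { reusable: false });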
#[cfg(not(target_arch = "wasm32"))]
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, PartialOrd)]
/// wrapper for H160 to make it easy to parse into flag vals
@@ -306,39 +243,6 @@ impl<'source> FromPyObject<'source> for CalibrationTarget {
}
}
}
#[cfg(feature = "python-bindings")]
/// Converts ContractType into a PyObject (Required for ContractType to be compatible with Python)
impl IntoPy<PyObject> for ContractType {
fn into_py(self, py: Python) -> PyObject {
match self {
ContractType::Verifier { reusable: true } => {
"verifier/reusable".to_object(py)
}
ContractType::Verifier {
reusable: false,
} => "verifier".to_object(py),
ContractType::VerifyingKeyArtifact => "vka".to_object(py),
}
}
}
#[cfg(feature = "python-bindings")]
/// Obtains ContractType from PyObject (Required for ContractType to be compatible with Python)
impl<'source> FromPyObject<'source> for ContractType {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
let trystr = <PyString as PyTryFrom>::try_from(ob)?;
let strval = trystr.to_string();
match strval.to_lowercase().as_str() {
"verifier" => Ok(ContractType::Verifier {
reusable: false,
}),
"verifier/reusable" => Ok(ContractType::Verifier { reusable: true }),
"vka" => Ok(ContractType::VerifyingKeyArtifact),
_ => Err(PyValueError::new_err("Invalid value for ContractType")),
}
}
}
// not wasm
use lazy_static::lazy_static;
@@ -475,9 +379,9 @@ pub enum Commands {
#[arg(long = "target", default_value = DEFAULT_CALIBRATION_TARGET, value_hint = clap::ValueHint::Other)]
/// Target for calibration. Set to "resources" to optimize for computational resources. Otherwise, set to "accuracy" to optimize for accuracy.
target: CalibrationTarget,
/// the lookup safety margin to use for calibration. if the max lookup input is 2^k, the padded max becomes ceil(2^k * lookup_safety_margin). larger = safer but slower
/// the lookup safety margin to use for calibration. if the max lookup input is 2^k, the padded max becomes 2^k * lookup_safety_margin. larger = safer but slower
#[arg(long, default_value = DEFAULT_LOOKUP_SAFETY_MARGIN, value_hint = clap::ValueHint::Other)]
lookup_safety_margin: f64,
lookup_safety_margin: i64,
/// Optional scales to specifically try for calibration. Example, --scales 0,4
#[arg(long, value_delimiter = ',', allow_hyphen_values = true, value_hint = clap::ValueHint::Other)]
scales: Option<Vec<crate::Scale>>,
@@ -762,14 +666,16 @@ pub enum Commands {
/// The path to output the Solidity verifier ABI
#[arg(long, default_value = DEFAULT_VERIFIER_ABI, value_hint = clap::ValueHint::FilePath)]
abi_path: Option<PathBuf>,
/// Whether to render the verifier as reusable. If true, you will need to deploy a VK artifact and pass it as part of the calldata to the verifier.
#[arg(long, default_value = DEFAULT_RENDER_REUSABLE, action = clap::ArgAction::SetTrue)]
reusable: Option<bool>,
/// Whether the verifier key should be rendered as a separate contract.
/// We recommend disabling selector compression if this is enabled.
/// To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command.
#[arg(long, default_value = DEFAULT_RENDER_VK_SEPERATELY, action = clap::ArgAction::SetTrue)]
render_vk_seperately: Option<bool>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Creates an Evm verifier artifact for a single proof to be used by the reusable verifier
#[command(name = "create-evm-vka")]
CreateEvmVKArtifact {
/// Creates an Evm verifier for a single proof
#[command(name = "create-evm-vk")]
CreateEvmVK {
/// The path to SRS, if None will use $EZKL_REPO_PATH/srs/kzg{logrows}.srs
#[arg(long, value_hint = clap::ValueHint::FilePath)]
srs_path: Option<PathBuf>,
@@ -833,9 +739,11 @@ pub enum Commands {
// logrows used for aggregation circuit
#[arg(long, default_value = DEFAULT_AGGREGATED_LOGROWS, value_hint = clap::ValueHint::Other)]
logrows: Option<u32>,
/// Whether to render the verifier as reusable. If true, you will need to deploy a VK artifact and pass it as part of the calldata to the verifier.
#[arg(long, default_value = DEFAULT_RENDER_REUSABLE, action = clap::ArgAction::SetTrue)]
reusable: Option<bool>,
/// Whether the verifier key should be rendered as a separate contract.
/// We recommend disabling selector compression if this is enabled.
/// To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command.
#[arg(long, default_value = DEFAULT_RENDER_VK_SEPERATELY, action = clap::ArgAction::SetTrue)]
render_vk_seperately: Option<bool>,
},
/// Verifies a proof, returning accept or reject
Verify {
@@ -877,8 +785,8 @@ pub enum Commands {
commitment: Option<Commitments>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys an evm contract (verifier, reusable verifier, or vk artifact) that is generated by ezkl
DeployEvm {
/// Deploys an evm verifier that is generated by ezkl
DeployEvmVerifier {
/// The path to the Solidity code (generated using the create-evm-verifier command)
#[arg(long, default_value = DEFAULT_SOL_CODE, value_hint = clap::ValueHint::FilePath)]
sol_code_path: Option<PathBuf>,
@@ -894,9 +802,25 @@ pub enum Commands {
/// Private secp256k1 key in hex format, 64 chars, no 0x prefix, of the account signing transactions. If None the private key will be generated by Anvil
#[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
private_key: Option<String>,
/// Contract type to be deployed
#[arg(long = "contract-type", short = 'C', default_value = DEFAULT_CONTRACT_DEPLOYMENT_TYPE, value_hint = clap::ValueHint::Other)]
contract: ContractType,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys an evm verifier that is generated by ezkl
DeployEvmVK {
/// The path to the Solidity code (generated using the create-evm-verifier command)
#[arg(long, default_value = DEFAULT_VK_SOL, value_hint = clap::ValueHint::FilePath)]
sol_code_path: Option<PathBuf>,
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
rpc_url: Option<String>,
#[arg(long, default_value = DEFAULT_CONTRACT_ADDRESS_VK, value_hint = clap::ValueHint::Other)]
/// The path to output the contract address
addr_path: Option<PathBuf>,
/// The number of optimizer runs to set on the verifier. Lower values optimize for deployment cost, while higher values optimize for runtime gas cost.
#[arg(long, default_value = DEFAULT_OPTIMIZER_RUNS, value_hint = clap::ValueHint::Other)]
optimizer_runs: usize,
/// Private secp256k1 key in hex format, 64 chars, no 0x prefix, of the account signing transactions. If None the private key will be generated by Anvil
#[arg(short = 'P', long, value_hint = clap::ValueHint::Other)]
private_key: Option<String>,
},
#[cfg(not(target_arch = "wasm32"))]
/// Deploys an evm verifier that allows for data attestation

View File

@@ -731,7 +731,7 @@ pub async fn verify_proof_with_data_attestation(
for val in flattened_instances.clone() {
let bytes = val.to_repr();
let u = U256::from_le_slice(bytes.inner().as_slice());
let u = U256::from_le_slice(bytes.as_slice());
public_inputs.push(u);
}

View File

@@ -1,4 +1,3 @@
use crate::circuit::region::RegionSettings;
use crate::circuit::CheckMode;
#[cfg(not(target_arch = "wasm32"))]
use crate::commands::CalibrationTarget;
@@ -195,7 +194,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
settings_path,
sol_code_path,
abi_path,
reusable,
render_vk_seperately,
} => {
create_evm_verifier(
vk_path.unwrap_or(DEFAULT_VK.into()),
@@ -203,7 +202,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
settings_path.unwrap_or(DEFAULT_SETTINGS.into()),
sol_code_path.unwrap_or(DEFAULT_SOL_CODE.into()),
abi_path.unwrap_or(DEFAULT_VERIFIER_ABI.into()),
reusable.unwrap_or(DEFAULT_RENDER_REUSABLE.parse().unwrap()),
render_vk_seperately.unwrap_or(DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap()),
)
.await
}
@@ -219,14 +218,14 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
)
.map(|e| serde_json::to_string(&e).unwrap()),
Commands::CreateEvmVKArtifact {
Commands::CreateEvmVK {
vk_path,
srs_path,
settings_path,
sol_code_path,
abi_path,
} => {
create_evm_vka(
create_evm_vk(
vk_path.unwrap_or(DEFAULT_VK.into()),
srs_path,
settings_path.unwrap_or(DEFAULT_SETTINGS.into()),
@@ -260,7 +259,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
abi_path,
aggregation_settings,
logrows,
reusable,
render_vk_seperately,
} => {
create_evm_aggregate_verifier(
vk_path.unwrap_or(DEFAULT_VK.into()),
@@ -269,7 +268,7 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
abi_path.unwrap_or(DEFAULT_VERIFIER_AGGREGATED_ABI.into()),
aggregation_settings,
logrows.unwrap_or(DEFAULT_AGGREGATED_LOGROWS.parse().unwrap()),
reusable.unwrap_or(DEFAULT_RENDER_REUSABLE.parse().unwrap()),
render_vk_seperately.unwrap_or(DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap()),
)
.await
}
@@ -434,13 +433,12 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
)
.map(|e| serde_json::to_string(&e).unwrap()),
#[cfg(not(target_arch = "wasm32"))]
Commands::DeployEvm {
Commands::DeployEvmVerifier {
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
contract,
} => {
deploy_evm(
sol_code_path.unwrap_or(DEFAULT_SOL_CODE.into()),
@@ -448,7 +446,25 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
addr_path.unwrap_or(DEFAULT_CONTRACT_ADDRESS.into()),
optimizer_runs,
private_key,
contract,
"Halo2Verifier",
)
.await
}
#[cfg(not(target_arch = "wasm32"))]
Commands::DeployEvmVK {
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
} => {
deploy_evm(
sol_code_path.unwrap_or(DEFAULT_VK_SOL.into()),
rpc_url,
addr_path.unwrap_or(DEFAULT_CONTRACT_ADDRESS_VK.into()),
optimizer_runs,
private_key,
"Halo2VerifyingKey",
)
.await
}
@@ -769,9 +785,6 @@ pub(crate) async fn gen_witness(
let commitment: Commitments = settings.run_args.commitment.into();
let region_settings =
RegionSettings::all_true(settings.run_args.decomp_base, settings.run_args.decomp_legs);
let start_time = Instant::now();
let witness = if settings.module_requires_polycommit() {
if get_srs_path(settings.run_args.logrows, srs_path.clone(), commitment).exists() {
@@ -786,7 +799,8 @@ pub(crate) async fn gen_witness(
&mut input,
vk.as_ref(),
Some(&srs),
region_settings,
true,
true,
)?
}
Commitments::IPA => {
@@ -800,7 +814,8 @@ pub(crate) async fn gen_witness(
&mut input,
vk.as_ref(),
Some(&srs),
region_settings,
true,
true,
)?
}
}
@@ -810,16 +825,12 @@ pub(crate) async fn gen_witness(
&mut input,
vk.as_ref(),
None,
region_settings,
true,
true,
)?
}
} else {
circuit.forward::<KZGCommitmentScheme<Bn256>>(
&mut input,
vk.as_ref(),
None,
region_settings,
)?
circuit.forward::<KZGCommitmentScheme<Bn256>>(&mut input, vk.as_ref(), None, true, true)?
};
// print each variable tuple (symbol, value) as symbol=value
@@ -1002,7 +1013,7 @@ pub(crate) async fn calibrate(
data: PathBuf,
settings_path: PathBuf,
target: CalibrationTarget,
lookup_safety_margin: f64,
lookup_safety_margin: i64,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
only_range_check_rebase: bool,
@@ -1012,8 +1023,6 @@ pub(crate) async fn calibrate(
use std::collections::HashMap;
use tabled::Table;
use crate::fieldutils::IntegerRep;
let data = GraphData::from_path(data)?;
// load the pre-generated settings
let settings = GraphSettings::load(&settings_path)?;
@@ -1122,7 +1131,7 @@ pub(crate) async fn calibrate(
param_scale,
scale_rebase_multiplier,
div_rebasing,
lookup_range: (IntegerRep::MIN, IntegerRep::MAX),
lookup_range: (i64::MIN, i64::MAX),
..settings.run_args.clone()
};
@@ -1162,10 +1171,8 @@ pub(crate) async fn calibrate(
&mut data.clone(),
None,
None,
RegionSettings::all_true(
settings.run_args.decomp_base,
settings.run_args.decomp_legs,
),
true,
false,
)
.map_err(|e| format!("failed to forward: {}", e))?;
@@ -1359,10 +1366,8 @@ pub(crate) async fn calibrate(
let module_log_row = best_params.module_constraint_logrows_with_blinding();
let instance_logrows = best_params.log2_total_instances_with_blinding();
let dynamic_lookup_logrows = best_params.dynamic_lookup_and_shuffle_logrows_with_blinding();
let range_check_logrows = best_params.range_check_log_rows_with_blinding();
let mut reduction = std::cmp::max(lookup_log_rows, module_log_row);
reduction = std::cmp::max(reduction, range_check_logrows);
reduction = std::cmp::max(reduction, instance_logrows);
reduction = std::cmp::max(reduction, dynamic_lookup_logrows);
reduction = std::cmp::max(reduction, crate::graph::MIN_LOGROWS);
@@ -1415,7 +1420,7 @@ pub(crate) async fn create_evm_verifier(
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
reusable: bool,
render_vk_seperately: bool,
) -> Result<String, EZKLError> {
let settings = GraphSettings::load(&settings_path)?;
let commitment: Commitments = settings.run_args.commitment.into();
@@ -1437,16 +1442,16 @@ pub(crate) async fn create_evm_verifier(
halo2_solidity_verifier::BatchOpenScheme::Bdfg21,
num_instance,
);
let (verifier_solidity, name) = if reusable {
(generator.render_separately()?.0, "Halo2VerifierReusable") // ignore the rendered vk artifact for now and generate it in create_evm_vka
let verifier_solidity = if render_vk_seperately {
generator.render_separately()?.0 // ignore the rendered vk for now and generate it in create_evm_vk
} else {
(generator.render()?, "Halo2Verifier")
generator.render()?
};
File::create(sol_code_path.clone())?.write_all(verifier_solidity.as_bytes())?;
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, name, 0).await?;
let (abi, _, _) = get_contract_artifacts(sol_code_path, "Halo2Verifier", 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
@@ -1454,7 +1459,7 @@ pub(crate) async fn create_evm_verifier(
}
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn create_evm_vka(
pub(crate) async fn create_evm_vk(
vk_path: PathBuf,
srs_path: Option<PathBuf>,
settings_path: PathBuf,
@@ -1487,7 +1492,7 @@ pub(crate) async fn create_evm_vka(
File::create(sol_code_path.clone())?.write_all(vk_solidity.as_bytes())?;
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, "Halo2VerifyingArtifact", 0).await?;
let (abi, _, _) = get_contract_artifacts(sol_code_path, "Halo2VerifyingKey", 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
@@ -1497,10 +1502,10 @@ pub(crate) async fn create_evm_vka(
#[cfg(not(target_arch = "wasm32"))]
pub(crate) async fn create_evm_data_attestation(
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
input: PathBuf,
witness: Option<PathBuf>,
_sol_code_path: PathBuf,
_abi_path: PathBuf,
_input: PathBuf,
_witness: Option<PathBuf>,
) -> Result<String, EZKLError> {
#[allow(unused_imports)]
use crate::graph::{DataSource, VarVisibility};
@@ -1512,7 +1517,7 @@ pub(crate) async fn create_evm_data_attestation(
trace!("params computed");
// if input is not provided, we just instantiate dummy input data
let data = GraphData::from_path(input).unwrap_or(GraphData::new(DataSource::File(vec![])));
let data = GraphData::from_path(_input).unwrap_or(GraphData::new(DataSource::File(vec![])));
let output_data = if let Some(DataSource::OnChain(source)) = data.output_data {
if visibility.output.is_private() {
@@ -1547,7 +1552,7 @@ pub(crate) async fn create_evm_data_attestation(
|| settings.run_args.output_visibility == Visibility::KZGCommit
|| settings.run_args.param_visibility == Visibility::KZGCommit
{
let witness = GraphWitness::from_path(witness.unwrap_or(DEFAULT_WITNESS.into()))?;
let witness = GraphWitness::from_path(_witness.unwrap_or(DEFAULT_WITNESS.into()))?;
let commitments = witness.get_polycommitments();
let proof_first_bytes = get_proof_commitments::<
KZGCommitmentScheme<Bn256>,
@@ -1561,12 +1566,12 @@ pub(crate) async fn create_evm_data_attestation(
};
let output = fix_da_sol(input_data, output_data, commitment_bytes)?;
let mut f = File::create(sol_code_path.clone())?;
let mut f = File::create(_sol_code_path.clone())?;
let _ = f.write(output.as_bytes());
// fetch abi of the contract
let (abi, _, _) = get_contract_artifacts(sol_code_path, "DataAttestation", 0).await?;
let (abi, _, _) = get_contract_artifacts(_sol_code_path, "DataAttestation", 0).await?;
// save abi to file
serde_json::to_writer(std::fs::File::create(abi_path)?, &abi)?;
serde_json::to_writer(std::fs::File::create(_abi_path)?, &abi)?;
Ok(String::new())
}
@@ -1605,13 +1610,8 @@ pub(crate) async fn deploy_evm(
addr_path: PathBuf,
runs: usize,
private_key: Option<String>,
contract: ContractType,
contract_name: &str,
) -> Result<String, EZKLError> {
let contract_name = match contract {
ContractType::Verifier { reusable: false } => "Halo2Verifier",
ContractType::Verifier { reusable: true } => "Halo2VerifierReusable",
ContractType::VerifyingKeyArtifact => "Halo2VerifyingArtifact",
};
let contract_address = deploy_contract_via_solidity(
sol_code_path,
rpc_url.as_deref(),
@@ -1702,7 +1702,7 @@ pub(crate) async fn create_evm_aggregate_verifier(
abi_path: PathBuf,
circuit_settings: Vec<PathBuf>,
logrows: u32,
reusable: bool,
render_vk_seperately: bool,
) -> Result<String, EZKLError> {
let srs_path = get_srs_path(logrows, srs_path, Commitments::KZG);
let params: ParamsKZG<Bn256> = load_srs_verifier::<KZGCommitmentScheme<Bn256>>(srs_path)?;
@@ -1740,8 +1740,8 @@ pub(crate) async fn create_evm_aggregate_verifier(
generator = generator.set_acc_encoding(Some(acc_encoding));
let verifier_solidity = if reusable {
generator.render_separately()?.0 // ignore the rendered vk artifact for now and generate it in create_evm_vka
let verifier_solidity = if render_vk_seperately {
generator.render_separately()?.0 // ignore the rendered vk for now and generate it in create_evm_vk
} else {
generator.render()?
};

View File

@@ -2,21 +2,42 @@ use halo2_proofs::arithmetic::Field;
/// Utilities for converting from Halo2 PrimeField types to integers (and vice-versa).
use halo2curves::ff::PrimeField;
/// Integer representation of a PrimeField element.
pub type IntegerRep = i128;
/// Converts an i32 to a PrimeField element.
pub fn i32_to_felt<F: PrimeField>(x: i32) -> F {
if x >= 0 {
F::from(x as u64)
} else {
-F::from(x.unsigned_abs() as u64)
}
}
/// Converts an i64 to a PrimeField element.
pub fn integer_rep_to_felt<F: PrimeField>(x: IntegerRep) -> F {
pub fn i64_to_felt<F: PrimeField>(x: i64) -> F {
if x >= 0 {
F::from_u128(x as u128)
} else {
-F::from_u128(x.saturating_neg() as u128)
-F::from_u128((-x) as u128)
}
}
/// Converts a PrimeField element to an i32.
pub fn felt_to_i32<F: PrimeField + PartialOrd + Field>(x: F) -> i32 {
if x > F::from(i32::MAX as u64) {
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_32 = u32::from_le_bytes(negtmp[..4].try_into().unwrap());
-(lower_32 as i32)
} else {
let rep = (x).to_repr();
let tmp: &[u8] = rep.as_ref();
let lower_32 = u32::from_le_bytes(tmp[..4].try_into().unwrap());
lower_32 as i32
}
}
/// Converts a PrimeField element to an f64.
pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
if x > F::from_u128(IntegerRep::MAX as u128) {
if x > F::from_u128(i64::MAX as u128) {
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(negtmp[..16].try_into().unwrap());
@@ -30,17 +51,17 @@ pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
}
/// Converts a PrimeField element to an i64.
pub fn felt_to_integer_rep<F: PrimeField + PartialOrd + Field>(x: F) -> IntegerRep {
if x > F::from_u128(IntegerRep::MAX as u128) {
pub fn felt_to_i64<F: PrimeField + PartialOrd + Field>(x: F) -> i64 {
if x > F::from_u128(i64::MAX as u128) {
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(negtmp[..16].try_into().unwrap());
-(lower_128 as IntegerRep)
-(lower_128 as i64)
} else {
let rep = (x).to_repr();
let tmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(tmp[..16].try_into().unwrap());
lower_128 as IntegerRep
lower_128 as i64
}
}
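The tests below exercise exactly this round trip. One edge case worth keeping in mind is x == i64::MIN, where (-x) overflows (panicking in debug builds) while x.saturating_neg() clamps to i64::MAX, so the two negation variants above agree only on i64::MIN + 1 ..= i64::MAX. A round-trip sketch:

use halo2curves::bn256::Fr as F;

let felt: F = i64_to_felt(-42);
assert_eq!(felt, -F::from(42)); // negatives land as field negations
assert_eq!(felt_to_i64(felt), -42); // and convert back losslessly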
@@ -52,24 +73,33 @@ mod test {
#[test]
fn test_conv() {
let res: F = integer_rep_to_felt(-15);
let res: F = i32_to_felt(-15i32);
assert_eq!(res, -F::from(15));
let res: F = integer_rep_to_felt(2_i128.pow(17));
let res: F = i32_to_felt(2_i32.pow(17));
assert_eq!(res, F::from(131072));
let res: F = integer_rep_to_felt(-15);
let res: F = i64_to_felt(-15i64);
assert_eq!(res, -F::from(15));
let res: F = integer_rep_to_felt(2_i128.pow(17));
let res: F = i64_to_felt(2_i64.pow(17));
assert_eq!(res, F::from(131072));
}
#[test]
fn felttointegerrep() {
for x in -(2_i128.pow(16))..(2_i128.pow(16)) {
let fieldx: F = integer_rep_to_felt::<F>(x);
let xf: i128 = felt_to_integer_rep::<F>(fieldx);
fn felttoi32() {
for x in -(2i32.pow(16))..(2i32.pow(16)) {
let fieldx: F = i32_to_felt::<F>(x);
let xf: i32 = felt_to_i32::<F>(fieldx);
assert_eq!(x, xf);
}
}
#[test]
fn felttoi64() {
for x in -(2i64.pow(20))..(2i64.pow(20)) {
let fieldx: F = i64_to_felt::<F>(x);
let xf: i64 = felt_to_i64::<F>(fieldx);
assert_eq!(x, xf);
}
}

View File

@@ -50,7 +50,7 @@ pub enum GraphError {
/// Tract error
#[cfg(not(all(target_arch = "wasm32", target_os = "unknown")))]
#[error("[tract] {0}")]
TractError(#[from] tract_onnx::prelude::TractError),
TractError(#[from] tract_onnx::tract_core::anyhow::Error),
/// Packing exponent is too large
#[error("largest packing exponent exceeds max. try reducing the scale")]
PackingExponent,

View File

@@ -1,7 +1,7 @@
use super::errors::GraphError;
use super::quantize_float;
use crate::circuit::InputType;
use crate::fieldutils::integer_rep_to_felt;
use crate::fieldutils::i64_to_felt;
#[cfg(not(target_arch = "wasm32"))]
use crate::graph::postgres::Client;
#[cfg(not(target_arch = "wasm32"))]
@@ -128,7 +128,7 @@ impl FileSourceInner {
/// Convert to a field element
pub fn to_field(&self, scale: crate::Scale) -> Fp {
match self {
FileSourceInner::Float(f) => integer_rep_to_felt(quantize_float(f, 0.0, scale).unwrap()),
FileSourceInner::Float(f) => i64_to_felt(quantize_float(f, 0.0, scale).unwrap()),
FileSourceInner::Bool(f) => {
if *f {
Fp::one()
@@ -150,7 +150,7 @@ impl FileSourceInner {
0.0
}
}
FileSourceInner::Field(f) => crate::fieldutils::felt_to_integer_rep(*f) as f64,
FileSourceInner::Field(f) => crate::fieldutils::felt_to_i64(*f) as f64,
}
}
}

View File

@@ -34,10 +34,10 @@ use self::input::{FileSource, GraphData};
use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSizes};
use crate::circuit::lookup::LookupOp;
use crate::circuit::modules::ModulePlanner;
use crate::circuit::region::{ConstantsMap, RegionSettings};
use crate::circuit::region::ConstantsMap;
use crate::circuit::table::{num_cols_required, Range, Table, RESERVED_BLINDING_ROWS_PAD};
use crate::circuit::{CheckMode, InputType};
use crate::fieldutils::{felt_to_f64, IntegerRep};
use crate::fieldutils::felt_to_f64;
use crate::pfsys::PrettyElements;
use crate::tensor::{Tensor, ValTensor};
use crate::{RunArgs, EZKL_BUF_CAPACITY};
@@ -69,14 +69,13 @@ pub use vars::*;
use crate::pfsys::field_to_string;
/// The safety factor for the range of the lookup table.
pub const RANGE_MULTIPLIER: IntegerRep = 2;
pub const RANGE_MULTIPLIER: i64 = 2;
/// The maximum number of columns in a lookup table.
pub const MAX_NUM_LOOKUP_COLS: usize = 12;
/// Max representation of a lookup table input
pub const MAX_LOOKUP_ABS: IntegerRep =
(MAX_NUM_LOOKUP_COLS as IntegerRep) * 2_i128.pow(MAX_PUBLIC_SRS);
pub const MAX_LOOKUP_ABS: i64 = (MAX_NUM_LOOKUP_COLS as i64) * 2_i64.pow(MAX_PUBLIC_SRS);
#[cfg(not(target_arch = "wasm32"))]
lazy_static! {
@@ -127,11 +126,11 @@ pub struct GraphWitness {
/// Any hashes of outputs generated during the forward pass
pub processed_outputs: Option<ModuleForwardResult>,
/// max lookup input
pub max_lookup_inputs: IntegerRep,
pub max_lookup_inputs: i64,
/// min lookup input
pub min_lookup_inputs: IntegerRep,
pub min_lookup_inputs: i64,
/// max range check size
pub max_range_size: IntegerRep,
pub max_range_size: i64,
}
impl GraphWitness {
@@ -451,18 +450,6 @@ impl GraphSettings {
.ceil() as u32
}
/// Calc the number of rows required for the range checks
pub fn range_check_log_rows_with_blinding(&self) -> u32 {
let max_range = self
.required_range_checks
.iter()
.map(|x| x.1 - x.0)
.max()
.unwrap_or(0);
(max_range as f32).log2().ceil() as u32
}
fn model_constraint_logrows_with_blinding(&self) -> u32 {
(self.num_rows as f64 + RESERVED_BLINDING_ROWS as f64)
.log2()
@@ -817,26 +804,18 @@ impl GraphCircuit {
// the ordering here is important, we want the inputs to come before the outputs
// as they are configured in that order as Column<Instances>
let mut public_inputs: Vec<Fp> = vec![];
// we first process the inputs
if let Some(processed_inputs) = &data.processed_inputs {
if self.settings().run_args.input_visibility.is_public() {
public_inputs.extend(self.graph_witness.inputs.clone().into_iter().flatten())
} else if let Some(processed_inputs) = &data.processed_inputs {
public_inputs.extend(processed_inputs.get_instances().into_iter().flatten());
}
// we then process the params
if let Some(processed_params) = &data.processed_params {
public_inputs.extend(processed_params.get_instances().into_iter().flatten());
}
// if the inputs are public, we add them to the public inputs AFTER the processed params as they are configured in that order as Column<Instances>
if self.settings().run_args.input_visibility.is_public() {
public_inputs.extend(self.graph_witness.inputs.clone().into_iter().flatten())
}
// if the outputs are public, we add them to the public inputs
if self.settings().run_args.output_visibility.is_public() {
public_inputs.extend(self.graph_witness.outputs.clone().into_iter().flatten());
// if the outputs are processed, we add the processed outputs to the public inputs
} else if let Some(processed_outputs) = &data.processed_outputs {
public_inputs.extend(processed_outputs.get_instances().into_iter().flatten());
}
@@ -1055,14 +1034,14 @@ impl GraphCircuit {
Ok(data)
}
fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: f64) -> Range {
fn calc_safe_lookup_range(min_max_lookup: Range, lookup_safety_margin: i64) -> Range {
(
(lookup_safety_margin * min_max_lookup.0 as f64).floor() as IntegerRep,
(lookup_safety_margin * min_max_lookup.1 as f64).ceil() as IntegerRep,
lookup_safety_margin * min_max_lookup.0,
lookup_safety_margin * min_max_lookup.1,
)
}
fn calc_num_cols(range_len: IntegerRep, max_logrows: u32) -> usize {
fn calc_num_cols(range_len: i64, max_logrows: u32) -> usize {
let max_col_size = Table::<Fp>::cal_col_size(max_logrows as usize, RESERVED_BLINDING_ROWS);
num_cols_required(range_len, max_col_size)
}
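Worked example of the margin rule: in the integer variant a margin of 2 simply doubles each bound, so an observed lookup range of (-100, 150) is padded to (-200, 300); the f64 variant floors the lower bound and ceils the upper one instead. The helper is private, so this is purely illustrative:

assert_eq!(
    GraphCircuit::calc_safe_lookup_range((-100, 150), 2),
    (-200, 300)
);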
@@ -1070,7 +1049,7 @@ impl GraphCircuit {
fn table_size_logrows(
&self,
safe_lookup_range: Range,
max_range_size: IntegerRep,
max_range_size: i64,
) -> Result<u32, GraphError> {
// pick whichever of safe_lookup_range and max_range_size has the larger absolute size
let safe_range = std::cmp::max(
@@ -1089,9 +1068,9 @@ impl GraphCircuit {
pub fn calc_min_logrows(
&mut self,
min_max_lookup: Range,
max_range_size: IntegerRep,
max_range_size: i64,
max_logrows: Option<u32>,
lookup_safety_margin: f64,
lookup_safety_margin: i64,
) -> Result<(), GraphError> {
// load the max logrows
let max_logrows = max_logrows.unwrap_or(MAX_PUBLIC_SRS);
@@ -1101,13 +1080,9 @@ impl GraphCircuit {
let safe_lookup_range = Self::calc_safe_lookup_range(min_max_lookup, lookup_safety_margin);
// check if subtraction overflows
let lookup_size =
(safe_lookup_range.1.saturating_sub(safe_lookup_range.0)).saturating_abs();
let lookup_size = (safe_lookup_range.1 - safe_lookup_range.0).abs();
// check if has overflowed max lookup input
if lookup_size > (MAX_LOOKUP_ABS as f64 / lookup_safety_margin).floor() as IntegerRep {
if lookup_size > MAX_LOOKUP_ABS / lookup_safety_margin {
return Err(GraphError::LookupRangeTooLarge(
lookup_size.unsigned_abs() as usize
));
@@ -1187,7 +1162,7 @@ impl GraphCircuit {
&self,
k: u32,
safe_lookup_range: Range,
max_range_size: IntegerRep,
max_range_size: i64,
) -> bool {
// if num cols is too large then the extended k is too large
if Self::calc_num_cols(safe_lookup_range.1 - safe_lookup_range.0, k) > MAX_NUM_LOOKUP_COLS
@@ -1245,7 +1220,8 @@ impl GraphCircuit {
inputs: &mut [Tensor<Fp>],
vk: Option<&VerifyingKey<G1Affine>>,
srs: Option<&Scheme::ParamsProver>,
region_settings: RegionSettings,
witness_gen: bool,
check_lookup: bool,
) -> Result<GraphWitness, GraphError> {
let original_inputs = inputs.to_vec();
@@ -1294,7 +1270,7 @@ impl GraphCircuit {
let mut model_results =
self.model()
.forward(inputs, &self.settings().run_args, region_settings)?;
.forward(inputs, &self.settings().run_args, witness_gen, check_lookup)?;
if visibility.output.requires_processing() {
let module_outlets = visibility.output.overwrites_inputs();

View File

@@ -7,12 +7,10 @@ use super::GraphSettings;
use crate::circuit::hybrid::HybridOp;
use crate::circuit::region::ConstantsMap;
use crate::circuit::region::RegionCtx;
use crate::circuit::region::RegionSettings;
use crate::circuit::table::Range;
use crate::circuit::Input;
use crate::circuit::InputType;
use crate::circuit::Unknown;
use crate::fieldutils::IntegerRep;
use crate::tensor::ValType;
use crate::{
circuit::{lookup::LookupOp, BaseConfig as PolyConfig, CheckMode, Op},
@@ -66,11 +64,11 @@ pub struct ForwardResult {
/// The outputs of the forward pass.
pub outputs: Vec<Tensor<Fp>>,
/// The maximum value of any input to a lookup operation.
pub max_lookup_inputs: IntegerRep,
pub max_lookup_inputs: i64,
/// The minimum value of any input to a lookup operation.
pub min_lookup_inputs: IntegerRep,
pub min_lookup_inputs: i64,
/// The max range check size
pub max_range_size: IntegerRep,
pub max_range_size: i64,
}
impl From<DummyPassRes> for ForwardResult {
@@ -118,11 +116,11 @@ pub struct DummyPassRes {
/// range checks
pub range_checks: HashSet<Range>,
/// max lookup inputs
pub max_lookup_inputs: IntegerRep,
pub max_lookup_inputs: i64,
/// min lookup inputs
pub min_lookup_inputs: IntegerRep,
pub min_lookup_inputs: i64,
/// max range check size
pub max_range_size: IntegerRep,
pub max_range_size: i64,
/// outputs
pub outputs: Vec<Tensor<Fp>>,
}
@@ -547,11 +545,7 @@ impl Model {
})
.collect::<Result<Vec<_>, GraphError>>()?;
let res = self.dummy_layout(
run_args,
&inputs,
RegionSettings::all_false(run_args.decomp_base, run_args.decomp_legs),
)?;
let res = self.dummy_layout(run_args, &inputs, false, false)?;
// if we're using percentage tolerance, we need to add the necessary range check ops for it.
@@ -594,13 +588,14 @@ impl Model {
&self,
model_inputs: &[Tensor<Fp>],
run_args: &RunArgs,
region_settings: RegionSettings,
witness_gen: bool,
check_lookup: bool,
) -> Result<ForwardResult, GraphError> {
let valtensor_inputs: Vec<ValTensor<Fp>> = model_inputs
.iter()
.map(|x| x.map(|elem| ValType::Value(Value::known(elem))).into())
.collect();
let res = self.dummy_layout(run_args, &valtensor_inputs, region_settings)?;
let res = self.dummy_layout(run_args, &valtensor_inputs, witness_gen, check_lookup)?;
Ok(res.into())
}
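The two booleans pass straight through to the dummy layout, and they toggle independently: proving wants both on, while calibration runs a cheaper probe that generates witness values without checking lookups. Shapes taken from the call sites in this diff:

// proving path: generate the witness and check lookup membership
let witness = circuit.forward::<KZGCommitmentScheme<Bn256>>(
    &mut input, vk.as_ref(), None, true, true,
)?;
// calibration probe: witness generation on, lookup checking off
let probe = circuit.forward::<KZGCommitmentScheme<Bn256>>(
    &mut data.clone(), None, None, true, false,
)?;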
@@ -614,7 +609,9 @@ impl Model {
reader: &mut dyn std::io::Read,
run_args: &RunArgs,
) -> Result<TractResult, GraphError> {
use tract_onnx::tract_hir::internal::GenericFactoid;
use tract_onnx::{
tract_core::internal::IntoArcTensor, tract_hir::internal::GenericFactoid,
};
let mut model = tract_onnx::onnx().model_for_read(reader)?;
@@ -651,11 +648,29 @@ impl Model {
}
// Note: do not optimize the model, as the layout will depend on underlying hardware
let typed_model = model
let mut typed_model = model
.into_typed()?
.concretize_dims(&symbol_values)?
.into_decluttered()?;
// concretize constants
for node in typed_model.eval_order()? {
let node = typed_model.node_mut(node);
if let Some(op) = node.op_as_mut::<tract_onnx::tract_core::ops::konst::Const>() {
if op.0.datum_type() == DatumType::TDim {
// get inner value to Arc<Tensor>
let mut constant = op.0.as_ref().clone();
// Generally a shape or hyperparam
constant
.as_slice_mut::<tract_onnx::prelude::TDim>()?
.iter_mut()
.for_each(|x| *x = x.eval(&symbol_values));
op.0 = constant.into_arc_tensor();
}
}
}
Ok((typed_model, symbol_values))
}
@@ -887,8 +902,16 @@ impl Model {
);
}
None => {
let mut n =
Node::new(n.clone(), &mut nodes, scales, i, symbol_values, run_args)?;
let mut n = Node::new(
n.clone(),
&mut nodes,
scales,
&run_args.param_visibility,
i,
symbol_values,
run_args.div_rebasing,
run_args.rebase_frac_zero_constants,
)?;
if let Some(ref scales) = override_input_scales {
if let Some(inp) = n.opkind.get_input() {
let scale = scales[input_idx];
@@ -1108,8 +1131,6 @@ impl Model {
region,
0,
run_args.num_inner_cols,
run_args.decomp_base,
run_args.decomp_legs,
original_constants.clone(),
);
// we need to do this as this loop is called multiple times
@@ -1389,7 +1410,8 @@ impl Model {
&self,
run_args: &RunArgs,
inputs: &[ValTensor<Fp>],
region_settings: RegionSettings,
witness_gen: bool,
check_lookup: bool,
) -> Result<DummyPassRes, GraphError> {
debug!("calculating num of constraints using dummy model layout...");
@@ -1408,7 +1430,8 @@ impl Model {
vars: ModelVars::new_dummy(),
};
let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols, region_settings);
let mut region =
RegionCtx::new_dummy(0, run_args.num_inner_cols, witness_gen, check_lookup);
let outputs = self.layout_nodes(&mut model_config, &mut region, &mut results)?;

View File

@@ -125,7 +125,6 @@ impl RebaseScale {
if (op_out_scale > (global_scale * scale_rebase_multiplier as i32))
&& !inner.is_constant()
&& !inner.is_input()
&& !inner.is_identity()
{
let multiplier =
scale_to_multiplier(op_out_scale - global_scale * scale_rebase_multiplier as i32);
@@ -327,19 +326,6 @@ impl SupportedOp {
SupportedOp::RebaseScale(op) => op,
}
}
/// check if is the identity operation
/// # Returns
/// * `true` if the operation is the identity operation
/// * `false` otherwise
pub fn is_identity(&self) -> bool {
match self {
SupportedOp::Linear(op) => matches!(op, PolyOp::Identity { .. }),
SupportedOp::Rescaled(op) => op.inner.is_identity(),
SupportedOp::RebaseScale(op) => op.inner.is_identity(),
_ => false,
}
}
}
impl From<Box<dyn Op<Fp>>> for SupportedOp {
@@ -487,9 +473,11 @@ impl Node {
node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
other_nodes: &mut BTreeMap<usize, super::NodeType>,
scales: &VarScales,
param_visibility: &Visibility,
idx: usize,
symbol_values: &SymbolValues,
run_args: &crate::RunArgs,
div_rebasing: bool,
rebase_frac_zero_constants: bool,
) -> Result<Self, GraphError> {
trace!("Create {:?}", node);
trace!("Create op {:?}", node.op);
@@ -529,10 +517,11 @@ impl Node {
let (mut opkind, deleted_indices) = new_op_from_onnx(
idx,
scales,
param_visibility,
node.clone(),
&mut inputs,
symbol_values,
run_args,
rebase_frac_zero_constants,
)?; // parses the op name
// we can only take the inputs as mutable once -- so we need to collect them first
@@ -580,7 +569,7 @@ impl Node {
rescale_const_with_single_use(
constant,
in_scales.clone(),
&run_args.param_visibility,
param_visibility,
input_node.num_uses(),
)?;
input_node.replace_opkind(constant.clone_dyn().into());
@@ -600,7 +589,7 @@ impl Node {
global_scale,
out_scale,
scales.rebase_multiplier,
run_args.div_rebasing,
div_rebasing,
);
out_scale = opkind.out_scale(in_scales)?;

View File

@@ -9,7 +9,6 @@ use crate::circuit::lookup::LookupOp;
#[cfg(not(target_arch = "wasm32"))]
use crate::circuit::poly::PolyOp;
use crate::circuit::Op;
use crate::fieldutils::IntegerRep;
use crate::tensor::{Tensor, TensorError, TensorType};
use halo2curves::bn256::Fr as Fp;
use halo2curves::ff::PrimeField;
@@ -41,7 +40,7 @@ use tract_onnx::tract_hir::{
ops::konst::Const,
ops::nn::DataFormat,
tract_core::ops::cast::Cast,
tract_core::ops::cnn::{conv::KernelFormat, MaxPool, SumPool},
tract_core::ops::cnn::{conv::KernelFormat, MaxPool, PaddingSpec, SumPool},
};
/// Quantizes an iterable of f32s to a [Tensor] of integers using a fixed-point representation.
@@ -51,20 +50,16 @@ use tract_onnx::tract_hir::{
/// * `dims` - the dimensionality of the resulting [Tensor].
/// * `shift` - offset used in the fixed point representation.
/// * `scale` - `2^scale` used in the fixed point representation.
pub fn quantize_float(
elem: &f64,
shift: f64,
scale: crate::Scale,
) -> Result<IntegerRep, TensorError> {
pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i64, TensorError> {
let mult = scale_to_multiplier(scale);
let max_value = ((IntegerRep::MAX as f64 - shift) / mult).round(); // the maximum value that can be represented w/o sig bit truncation
let max_value = ((i64::MAX as f64 - shift) / mult).round(); // the maximum value that can be represented w/o sig bit truncation
if *elem > max_value {
return Err(TensorError::SigBitTruncationError);
}
// we parallelize the quantization process as it seems to be quite slow at times
let scaled = (mult * *elem + shift).round() as IntegerRep;
let scaled = (mult * *elem + shift).round() as i64;
Ok(scaled)
}
@@ -75,7 +70,7 @@ pub fn quantize_float(
/// * `scale` - `2^scale` used in the fixed point representation.
/// * `shift` - offset used in the fixed point representation.
pub fn dequantize(felt: Fp, scale: crate::Scale, shift: f64) -> f64 {
let int_rep = crate::fieldutils::felt_to_integer_rep(felt);
let int_rep = crate::fieldutils::felt_to_i64(felt);
let multiplier = scale_to_multiplier(scale);
int_rep as f64 / multiplier - shift
}
@@ -90,35 +85,6 @@ pub fn multiplier_to_scale(mult: f64) -> crate::Scale {
mult.log2().round() as crate::Scale
}
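With scale = 7 the multiplier is 2^7 = 128, so values on the fixed-point grid survive the round trip exactly:

let q = quantize_float(&0.5, 0.0, 7).unwrap(); // (128 * 0.5).round() = 64
assert_eq!(q, 64);
let back = dequantize(i64_to_felt(q), 7, 0.0); // 64 / 128 - 0.0 = 0.5
assert_eq!(back, 0.5);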
#[cfg(not(target_arch = "wasm32"))]
/// Extracts the padding from an onnx node.
pub fn extract_padding(
pool_spec: &PoolSpec,
image_size: &[usize],
) -> Result<Vec<(usize, usize)>, GraphError> {
let num_relevant_dims = pool_spec.kernel_shape.len();
// get the last num_relevant_dims of the image size
let image_size = &image_size[image_size.len() - num_relevant_dims..];
let dims = pool_spec.computed_padding(image_size);
let mut padding = Vec::new();
for dim in dims {
padding.push((dim.pad_before, dim.pad_after));
}
Ok(padding)
}
#[cfg(not(target_arch = "wasm32"))]
/// Extracts the strides from an onnx node.
pub fn extract_strides(pool_spec: &PoolSpec) -> Result<Vec<usize>, GraphError> {
Ok(pool_spec
.strides
.clone()
.ok_or(GraphError::MissingParams("stride".to_string()))?
.to_vec())
}
/// Gets the shape of an onnx node's outlets.
#[cfg(not(target_arch = "wasm32"))]
pub fn node_output_shapes(
@@ -274,10 +240,11 @@ fn load_op<C: tract_onnx::prelude::Op + Clone>(
pub fn new_op_from_onnx(
idx: usize,
scales: &VarScales,
param_visibility: &Visibility,
node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
inputs: &mut [super::NodeType],
symbol_values: &SymbolValues,
run_args: &crate::RunArgs,
rebase_frac_zero_constants: bool,
) -> Result<(SupportedOp, Vec<usize>), GraphError> {
use tract_onnx::tract_core::ops::array::Trilu;
@@ -288,8 +255,6 @@ pub fn new_op_from_onnx(
.flat_map(|x| x.out_scales())
.collect::<Vec<_>>();
let input_dims = inputs.iter().flat_map(|x| x.out_dims()).collect::<Vec<_>>();
let mut replace_const = |scale: crate::Scale,
index: usize,
default_op: SupportedOp|
@@ -344,9 +309,12 @@ pub fn new_op_from_onnx(
}
}
"MultiBroadcastTo" => {
let _op = load_op::<MultiBroadcastTo>(node.op(), idx, node.op().name().to_string())?;
let shapes = node_output_shapes(&node, symbol_values)?;
let shape = shapes[0].clone();
let op = load_op::<MultiBroadcastTo>(node.op(), idx, node.op().name().to_string())?;
let shape = op.shape.clone();
let shape = shape
.iter()
.map(|x| x.to_usize())
.collect::<Result<Vec<_>, _>>()?;
SupportedOp::Linear(PolyOp::MultiBroadcastTo { shape })
}
@@ -664,16 +632,13 @@ pub fn new_op_from_onnx(
// if all raw_values are round then set scale to 0
let all_round = raw_value.iter().all(|x| (x).fract() == 0.0);
if all_round && run_args.rebase_frac_zero_constants {
if all_round && rebase_frac_zero_constants {
constant_scale = 0;
}
// Quantize the raw value
let quantized_value = quantize_tensor(
raw_value.clone(),
constant_scale,
&run_args.param_visibility,
)?;
let quantized_value =
quantize_tensor(raw_value.clone(), constant_scale, param_visibility)?;
let c = crate::circuit::ops::Constant::new(quantized_value, raw_value);
// Create a constant op
SupportedOp::Constant(c)
@@ -785,7 +750,7 @@ pub fn new_op_from_onnx(
deleted_indices.push(const_idx);
}
if unit == 0. {
SupportedOp::Linear(PolyOp::ReLU)
SupportedOp::Nonlinear(LookupOp::ReLU)
} else {
// get the non-constant index
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
@@ -874,7 +839,7 @@ pub fn new_op_from_onnx(
"QuantizeLinearU8" | "DequantizeLinearF32" => {
SupportedOp::Linear(PolyOp::Identity { out_scale: None })
}
"Abs" => SupportedOp::Linear(PolyOp::Abs),
"Abs" => SupportedOp::Nonlinear(LookupOp::Abs),
"Neg" => SupportedOp::Linear(PolyOp::Neg),
"HardSwish" => SupportedOp::Nonlinear(LookupOp::HardSwish {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
@@ -1017,13 +982,8 @@ pub fn new_op_from_onnx(
if raw_values.log2().fract() == 0.0 {
inputs[const_idx].decrement_use();
deleted_indices.push(const_idx);
// get the non constant index
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
op = SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(
input_scales[non_const_idx] + raw_values.log2() as i32,
),
out_scale: Some(input_scales[0] + raw_values.log2() as i32),
});
}
}
@@ -1113,8 +1073,18 @@ pub fn new_op_from_onnx(
));
}
let stride = extract_strides(pool_spec)?;
let padding = extract_padding(pool_spec, &input_dims[0])?;
let stride = pool_spec
.strides
.clone()
.ok_or(GraphError::MissingParams("stride".to_string()))?;
let padding = match &pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
_ => {
return Err(GraphError::MissingParams("padding".to_string()));
}
};
let kernel_shape = &pool_spec.kernel_shape;
SupportedOp::Hybrid(HybridOp::MaxPool {
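The same explicit-padding match now appears verbatim for MaxPool, Conv, Deconv, and SumPool below; factored out it would look like this sketch (the removed extract_padding helper used pool_spec.computed_padding(image_size) instead, which also resolves non-explicit specs):

// sketch: only explicit ONNX padding carries concrete before/after
// amounts; anything else is rejected as a missing parameter
fn explicit_padding(spec: &PaddingSpec) -> Result<Vec<(usize, usize)>, GraphError> {
    match spec {
        PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
            Ok(b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect())
        }
        _ => Err(GraphError::MissingParams("padding".to_string())),
    }
}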
@@ -1135,7 +1105,7 @@ pub fn new_op_from_onnx(
"RoundHalfToEven" => SupportedOp::Nonlinear(LookupOp::RoundHalfToEven {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Sign" => SupportedOp::Linear(PolyOp::Sign),
"Sign" => SupportedOp::Nonlinear(LookupOp::Sign),
"Pow" => {
// Extract the slope layer hyperparams from a const
@@ -1181,10 +1151,21 @@ pub fn new_op_from_onnx(
));
}
let pool_spec = &conv_node.pool_spec;
let stride = match conv_node.pool_spec.strides.clone() {
Some(s) => s.to_vec(),
None => {
return Err(GraphError::MissingParams("strides".to_string()));
}
};
let stride = extract_strides(pool_spec)?;
let padding = extract_padding(pool_spec, &input_dims[0])?;
let padding = match &conv_node.pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
_ => {
return Err(GraphError::MissingParams("padding".to_string()));
}
};
// if bias exists then rescale it to the input + kernel scale
if input_scales.len() == 3 {
@@ -1202,13 +1183,7 @@ pub fn new_op_from_onnx(
}
}
let group = conv_node.group;
SupportedOp::Linear(PolyOp::Conv {
padding,
stride,
group,
})
SupportedOp::Linear(PolyOp::Conv { padding, stride })
}
"Not" => SupportedOp::Linear(PolyOp::Not),
"And" => SupportedOp::Linear(PolyOp::And),
@@ -1239,10 +1214,21 @@ pub fn new_op_from_onnx(
));
}
let pool_spec = &deconv_node.pool_spec;
let stride = match deconv_node.pool_spec.strides.clone() {
Some(s) => s.to_vec(),
None => {
return Err(GraphError::MissingParams("strides".to_string()));
}
};
let padding = match &deconv_node.pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
_ => {
return Err(GraphError::MissingParams("padding".to_string()));
}
};
let stride = extract_strides(pool_spec)?;
let padding = extract_padding(pool_spec, &input_dims[0])?;
// if bias exists then rescale it to the input + kernel scale
if input_scales.len() == 3 {
let bias_scale = input_scales[2];
@@ -1263,7 +1249,6 @@ pub fn new_op_from_onnx(
padding,
output_padding: deconv_node.adjustments.to_vec(),
stride,
group: deconv_node.group,
})
}
"Downsample" => {
@@ -1354,8 +1339,18 @@ pub fn new_op_from_onnx(
));
}
let stride = extract_strides(pool_spec)?;
let padding = extract_padding(pool_spec, &input_dims[0])?;
let stride = pool_spec
.strides
.clone()
.ok_or(GraphError::MissingParams("stride".to_string()))?;
let padding = match &pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
_ => {
return Err(GraphError::MissingParams("padding".to_string()));
}
};
SupportedOp::Hybrid(HybridOp::SumPool {
padding,
@@ -1364,6 +1359,11 @@ pub fn new_op_from_onnx(
normalized: sumpool_node.normalize,
})
}
// "GlobalAvgPool" => SupportedOp::Linear(PolyOp::SumPool {
// padding: [(0, 0); 2],
// stride: (1, 1),
// kernel_shape: (inputs[0].out_dims()[0][1], inputs[0].out_dims()[0][2]),
// }),
"Pad" => {
let pad_node: &Pad = match node.op().downcast_ref::<Pad>() {
Some(b) => b,
@@ -1432,7 +1432,7 @@ pub fn quantize_tensor<F: PrimeField + TensorType + PartialOrd>(
visibility: &Visibility,
) -> Result<Tensor<F>, TensorError> {
let mut value: Tensor<F> = const_value.par_enum_map(|_, x| {
Ok::<_, TensorError>(crate::fieldutils::integer_rep_to_felt::<F>(quantize_float(
Ok::<_, TensorError>(crate::fieldutils::i64_to_felt::<F>(quantize_float(
&(x).into(),
0.0,
scale,


@@ -23,7 +23,6 @@
)]
// we allow this for our dynamic range based indexing scheme
#![allow(clippy::single_range_in_vec_init)]
#![feature(buf_read_has_data_left)]
#![feature(stmt_expr_attributes)]
//! A library for turning computational graphs, such as neural networks, into ZK-circuits.
@@ -86,7 +85,6 @@ use std::str::FromStr;
use circuit::{table::Range, CheckMode, Tolerance};
use clap::Args;
use fieldutils::IntegerRep;
use graph::Visibility;
use halo2_proofs::poly::{
ipa::commitment::IPACommitmentScheme, kzg::commitment::KZGCommitmentScheme,
@@ -150,7 +148,6 @@ lazy_static! {
/// The serialization format for the keys
pub static ref EZKL_KEY_FORMAT: String = std::env::var("EZKL_KEY_FORMAT")
.unwrap_or("raw-bytes".to_string());
}
#[cfg(target_arch = "wasm32")]
@@ -246,7 +243,7 @@ pub struct RunArgs {
#[arg(long, default_value = "1", value_hint = clap::ValueHint::Other)]
pub scale_rebase_multiplier: u32,
/// The min and max elements in the lookup table input column
#[arg(short = 'B', long, value_parser = parse_key_val::<IntegerRep, IntegerRep>, default_value = "-32768->32768")]
#[arg(short = 'B', long, value_parser = parse_key_val::<i64, i64>, default_value = "-32768->32768")]
pub lookup_range: Range,
/// The log_2 number of rows
#[arg(short = 'K', long, default_value = "17", value_hint = clap::ValueHint::Other)]
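The lookup_range default above uses an "a->b" syntax; a sketch of the parsing it implies (parse_key_val itself is a generic clap helper, so this stand-in is illustrative):

def parse_range(s: str) -> tuple:
    lo, hi = s.split("->")
    return int(lo), int(hi)

assert parse_range("-32768->32768") == (-32768, 32768)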
@@ -278,12 +275,6 @@ pub struct RunArgs {
/// commitment scheme
#[arg(long, default_value = "kzg", value_hint = clap::ValueHint::Other)]
pub commitment: Option<Commitments>,
/// the base used for decompositions
#[arg(long, default_value = "16384", value_hint = clap::ValueHint::Other)]
pub decomp_base: usize,
#[arg(long, default_value = "2", value_hint = clap::ValueHint::Other)]
/// the number of legs used for decompositions
pub decomp_legs: usize,
}
impl Default for RunArgs {
@@ -304,8 +295,6 @@ impl Default for RunArgs {
rebase_frac_zero_constants: false,
check_mode: CheckMode::UNSAFE,
commitment: None,
decomp_base: 16384,
decomp_legs: 2,
}
}
}


@@ -76,7 +76,7 @@ pub fn init_logger() {
prefix_token(&record.level()),
// pretty print UTC time
chrono::Utc::now()
.format("%Y-%m-%d %H:%M:%S:%3f")
.format("%Y-%m-%d %H:%M:%S")
.to_string()
.bright_magenta(),
record.metadata().target(),


@@ -558,8 +558,7 @@ where
+ PrimeField
+ FromUniformBytes<64>
+ WithSmallOrderMulGroup<3>,
Scheme::Curve: Serialize + DeserializeOwned + SerdeObject,
Scheme::ParamsProver: Send + Sync,
Scheme::Curve: Serialize + DeserializeOwned,
{
let strategy = Strategy::new(params.verifier_params());
let mut transcript = TranscriptWriterBuffer::<_, Scheme::Curve, _>::init(vec![]);


@@ -6,7 +6,7 @@ use crate::circuit::modules::poseidon::{
use crate::circuit::modules::Module;
use crate::circuit::{CheckMode, Tolerance};
use crate::commands::*;
use crate::fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep};
use crate::fieldutils::{felt_to_i64, i64_to_felt};
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::TestDataSource;
use crate::graph::{
@@ -191,12 +191,6 @@ struct PyRunArgs {
#[pyo3(get, set)]
/// str: commitment type, accepts `kzg`, `ipa`
pub commitment: PyCommitments,
/// int: The base used for decomposition
#[pyo3(get, set)]
pub decomp_base: usize,
/// int: The number of legs used for decomposition
#[pyo3(get, set)]
pub decomp_legs: usize,
}
/// default instantiation of PyRunArgs
@@ -227,8 +221,6 @@ impl From<PyRunArgs> for RunArgs {
rebase_frac_zero_constants: py_run_args.rebase_frac_zero_constants,
check_mode: py_run_args.check_mode,
commitment: Some(py_run_args.commitment.into()),
decomp_base: py_run_args.decomp_base,
decomp_legs: py_run_args.decomp_legs,
}
}
}
@@ -251,8 +243,6 @@ impl Into<PyRunArgs> for RunArgs {
rebase_frac_zero_constants: self.rebase_frac_zero_constants,
check_mode: self.check_mode,
commitment: self.commitment.into(),
decomp_base: self.decomp_base,
decomp_legs: self.decomp_legs,
}
}
}
@@ -341,9 +331,9 @@ fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
#[pyfunction(signature = (
felt,
))]
fn felt_to_int(felt: PyFelt) -> PyResult<IntegerRep> {
fn felt_to_int(felt: PyFelt) -> PyResult<i64> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let int_rep = felt_to_integer_rep(felt);
let int_rep = felt_to_i64(felt);
Ok(int_rep)
}
@@ -367,7 +357,7 @@ fn felt_to_int(felt: PyFelt) -> PyResult<IntegerRep> {
))]
fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let int_rep = felt_to_integer_rep(felt);
let int_rep = felt_to_i64(felt);
let multiplier = scale_to_multiplier(scale);
let float_rep = int_rep as f64 / multiplier;
Ok(float_rep)
@@ -395,7 +385,7 @@ fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
let int_rep = quantize_float(&input, 0.0, scale)
.map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
let felt = integer_rep_to_felt(int_rep);
let felt = i64_to_felt(int_rep);
Ok(crate::pfsys::field_to_string::<Fr>(&felt))
}
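A hypothetical round trip through the three conversions above, at scale 7 (multiplier 2**7 = 128):

import ezkl

felt = ezkl.float_to_felt(0.5, 7)          # quantizes to round(0.5 * 128) = 64
assert ezkl.felt_to_int(felt) == 64
assert ezkl.felt_to_float(felt, 7) == 0.5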
@@ -897,7 +887,7 @@ fn calibrate_settings(
model: PathBuf,
settings: PathBuf,
target: CalibrationTarget,
lookup_safety_margin: f64,
lookup_safety_margin: i64,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
max_logrows: Option<u32>,
@@ -1500,8 +1490,8 @@ fn encode_evm_calldata<'a>(
/// srs_path: str
/// The path to the SRS file
///
/// reusable: bool
/// Whether the verifier should be rendered as a reusable contract. If so, you will need to separately deploy the VK artifact, which you can generate using the create_evm_vka command
/// render_vk_separately: bool
/// Whether the verifier key should be rendered as a separate contract. We recommend disabling selector compression if this is enabled. To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command
///
/// Returns
/// -------
@@ -1513,7 +1503,7 @@ fn encode_evm_calldata<'a>(
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
srs_path=None,
reusable = DEFAULT_RENDER_REUSABLE.parse().unwrap(),
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
))]
fn create_evm_verifier(
py: Python,
@@ -1522,7 +1512,7 @@ fn create_evm_verifier(
sol_code_path: PathBuf,
abi_path: PathBuf,
srs_path: Option<PathBuf>,
reusable: bool,
render_vk_seperately: bool,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_verifier(
@@ -1531,7 +1521,7 @@ fn create_evm_verifier(
settings_path,
sol_code_path,
abi_path,
reusable,
render_vk_seperately,
)
.await
.map_err(|e| {
@@ -1543,57 +1533,6 @@ fn create_evm_verifier(
})
}
/// Creates an EVM VK artifact. This command generates a VK with circuit-specific metadata encoded in memory for use by the reusable H2 verifier.
/// This is useful for deploying verifiers that would otherwise be too big to fit on-chain and would require aggregation.
///
/// Arguments
/// ---------
/// vk_path: str
/// The path to the verification key file
///
/// settings_path: str
/// The path to the settings file
///
/// sol_code_path: str
/// The path to the create the solidity verifying key.
///
/// abi_path: str
/// The path to create the ABI for the solidity verifier
///
/// srs_path: str
/// The path to the SRS file
///
/// Returns
/// -------
/// bool
///
#[pyfunction(signature = (
vk_path=PathBuf::from(DEFAULT_VK),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
sol_code_path=PathBuf::from(DEFAULT_VK_SOL),
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
srs_path=None
))]
fn create_evm_vka(
py: Python,
vk_path: PathBuf,
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
srs_path: Option<PathBuf>,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_vka(vk_path, srs_path, settings_path, sol_code_path, abi_path)
.await
.map_err(|e| {
let err_str = format!("Failed to run create_evm_verifier: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
})
}
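Together with create_evm_verifier above, the reusable flow these bindings describe looks roughly like this (paths are placeholders; the argument order mirrors the signatures exercised in the Python tests further down):

import ezkl

# Render the reusable verifier, then the circuit-specific VK artifact.
await ezkl.create_evm_verifier(
    "test.vk", "settings.json", "verifier.sol", "verifier.abi",
    reusable=True,
)
await ezkl.create_evm_vka(
    "test.vk", "settings.json", "vka.sol", "vka.abi",
)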
/// Creates an EVM-compatible data attestation verifier; you will need solc installed in your environment to run this
///
/// Arguments
@@ -1714,7 +1653,6 @@ fn setup_test_evm_witness(
addr_path,
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
rpc_url=None,
contract_type=ContractType::default(),
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
@@ -1723,7 +1661,6 @@ fn deploy_evm(
addr_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,
contract_type: ContractType,
optimizer_runs: usize,
private_key: Option<String>,
) -> PyResult<Bound<'_, PyAny>> {
@@ -1734,7 +1671,42 @@ fn deploy_evm(
addr_path,
optimizer_runs,
private_key,
contract_type,
"Halo2Verifier",
)
.await
.map_err(|e| {
let err_str = format!("Failed to run deploy_evm: {}", e);
PyRuntimeError::new_err(err_str)
})?;
Ok(true)
})
}
/// Deploys the Solidity verifying key (VK) contract
#[pyfunction(signature = (
addr_path,
sol_code_path=PathBuf::from(DEFAULT_VK_SOL),
rpc_url=None,
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
fn deploy_vk_evm(
py: Python,
addr_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,
optimizer_runs: usize,
private_key: Option<String>,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::deploy_evm(
sol_code_path,
rpc_url,
addr_path,
optimizer_runs,
private_key,
"Halo2VerifyingKey",
)
.await
.map_err(|e| {
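One plausible pairing of deploy_evm with deploy_vk_evm as declared above, deploying the verifier and its VK contract separately (URL and paths are placeholders):

import ezkl

await ezkl.deploy_evm("addr.json", "verifier.sol", rpc_url="http://localhost:3030")
await ezkl.deploy_vk_evm("addr_vk.json", "vk.sol", rpc_url="http://localhost:3030")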
@@ -1790,7 +1762,7 @@ fn deploy_da_evm(
/// Arguments
/// ---------
/// addr_verifier: str
/// The verifier contract's address as a hex string
/// The path to the verifier contract's address
///
/// proof_path: str
/// The path to the proof file (generated using the prove command)
@@ -1802,7 +1774,7 @@ fn deploy_da_evm(
/// does the verifier use data attestation ?
///
/// addr_vk: str
/// The address of the separate VK contract (if the verifier key is rendered as a separate contract)
///
/// Returns
/// -------
/// bool
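A sketch of a verify_evm call matching this docstring, including the optional separate-VK address (addresses are placeholders):

import ezkl

res = await ezkl.verify_evm(
    "0x...",                          # verifier contract address
    "proof.pf",
    rpc_url="http://localhost:3030",
    addr_vk="0x...",                  # only when the VK is a separate contract
)
assert res == True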
@@ -1870,8 +1842,8 @@ fn verify_evm<'a>(
/// srs_path: str
/// The path to the SRS file
///
/// reusable: bool
/// Whether the verifier should be rendered as a reusable contract. If so, you will need to separately deploy the VK artifact, which you can generate using the create_evm_vka command
/// render_vk_separately: bool
/// Whether the verifier key should be rendered as a separate contract. We recommend disabling selector compression if this is enabled. To save the verifier key as a separate contract, set this to true and then call the create-evm-vk command
///
/// Returns
/// -------
@@ -1884,7 +1856,7 @@ fn verify_evm<'a>(
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
srs_path=None,
reusable = DEFAULT_RENDER_REUSABLE.parse().unwrap(),
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
))]
fn create_evm_verifier_aggr(
py: Python,
@@ -1894,7 +1866,7 @@ fn create_evm_verifier_aggr(
abi_path: PathBuf,
logrows: u32,
srs_path: Option<PathBuf>,
reusable: bool,
render_vk_seperately: bool,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::create_evm_aggregate_verifier(
@@ -1904,7 +1876,7 @@ fn create_evm_verifier_aggr(
abi_path,
aggregation_settings,
logrows,
reusable,
render_vk_seperately,
)
.await
.map_err(|e| {
@@ -1953,8 +1925,8 @@ fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(compile_circuit, m)?)?;
m.add_function(wrap_pyfunction!(verify_aggr, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_verifier, m)?)?;
m.add_function(wrap_pyfunction!(create_evm_vka, m)?)?;
m.add_function(wrap_pyfunction!(deploy_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_vk_evm, m)?)?;
m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
m.add_function(wrap_pyfunction!(setup_test_evm_witness, m)?)?;


@@ -1,7 +1,5 @@
use thiserror::Error;
use super::ops::DecompositionError;
/// A wrapper for tensor related errors.
#[derive(Debug, Error)]
pub enum TensorError {
@@ -29,13 +27,4 @@ pub enum TensorError {
/// Unset visibility
#[error("unset visibility")]
UnsetVisibility,
/// File save error
#[error("save error: {0}")]
FileSaveError(String),
/// File load error
#[error("load error: {0}")]
FileLoadError(String),
/// Decomposition error
#[error("decomposition error: {0}")]
DecompositionError(#[from] DecompositionError),
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,12 +1,12 @@
use crate::{circuit::region::ConstantsMap, fieldutils::felt_to_integer_rep};
use maybe_rayon::slice::Iter;
use core::{iter::FilterMap, slice::Iter};
use crate::circuit::region::ConstantsMap;
use super::{
ops::{intercalate_values, pad, resize},
*,
};
use halo2_proofs::{arithmetic::Field, circuit::Cell, plonk::Instance};
use maybe_rayon::iter::{FilterMap, IntoParallelIterator, ParallelIterator};
pub(crate) fn create_constant_tensor<
F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
@@ -54,44 +54,6 @@ pub enum ValType<F: PrimeField + TensorType + std::marker::Send + std::marker::S
AssignedConstant(AssignedCell<F, F>, F),
}
impl<F: PrimeField + TensorType + PartialOrd> From<ValType<F>> for IntegerRep {
fn from(val: ValType<F>) -> Self {
match val {
ValType::Value(v) => {
let mut output = 0;
let mut i = 0;
v.map(|y| {
let e = felt_to_integer_rep(y);
output = e;
i += 1;
});
output
}
ValType::AssignedValue(v) => {
let mut output = 0;
let mut i = 0;
v.evaluate().map(|y| {
let e = felt_to_integer_rep(y);
output = e;
i += 1;
});
output
}
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
let mut output = 0;
let mut i = 0;
v.value().map(|y| {
let e = felt_to_integer_rep(*y);
output = e;
i += 1;
});
output
}
ValType::Constant(v) => felt_to_integer_rep(v),
}
}
}
impl<F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd> ValType<F> {
/// Returns the inner cell of the [ValType].
pub fn cell(&self) -> Option<Cell> {
@@ -159,6 +121,44 @@ impl<F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + Partia
}
}
impl<F: PrimeField + TensorType + PartialOrd> From<ValType<F>> for i32 {
fn from(val: ValType<F>) -> Self {
match val {
ValType::Value(v) => {
let mut output = 0_i32;
let mut i = 0;
v.map(|y| {
let e = felt_to_i32(y);
output = e;
i += 1;
});
output
}
ValType::AssignedValue(v) => {
let mut output = 0_i32;
let mut i = 0;
v.evaluate().map(|y| {
let e = felt_to_i32(y);
output = e;
i += 1;
});
output
}
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
let mut output = 0_i32;
let mut i = 0;
v.value().map(|y| {
let e = felt_to_i32(*y);
output = e;
i += 1;
});
output
}
ValType::Constant(v) => felt_to_i32(v),
}
}
}
impl<F: PrimeField + TensorType + PartialOrd> From<F> for ValType<F> {
fn from(t: F) -> ValType<F> {
ValType::Constant(t)
@@ -317,8 +317,8 @@ impl<F: PrimeField + TensorType + PartialOrd> From<Tensor<AssignedCell<F, F>>> f
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
/// Allocate a new [ValTensor::Value] from the given [Tensor] of [i64].
pub fn from_integer_rep_tensor(t: Tensor<IntegerRep>) -> ValTensor<F> {
let inner = t.map(|x| ValType::Value(Value::known(integer_rep_to_felt(x))));
pub fn from_i64_tensor(t: Tensor<i64>) -> ValTensor<F> {
let inner = t.map(|x| ValType::Value(Value::known(i64_to_felt(x))));
inner.into()
}
@@ -460,7 +460,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
&self,
) -> FilterMap<Iter<'_, ValType<F>>, fn(&ValType<F>) -> Option<(F, ValType<F>)>> {
match self {
ValTensor::Value { inner, .. } => inner.par_iter().filter_map(|x| {
ValTensor::Value { inner, .. } => inner.iter().filter_map(|x| {
if let ValType::Constant(v) = x {
Some((*v, x.clone()))
} else {
@@ -520,58 +520,10 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
}
}
/// Get the sign of the inner values
pub fn sign(&self) -> Result<Self, TensorError> {
let evals = self.int_evals()?;
Ok(evals
.par_enum_map(|_, val| {
Ok::<_, TensorError>(ValType::Value(Value::known(integer_rep_to_felt(
val.signum(),
))))
})?
.into())
}
/// Decompose the inner values into base `base` and `n` legs.
pub fn decompose(&self, base: usize, n: usize) -> Result<Self, TensorError> {
let res = self
.get_inner()?
.par_iter()
.map(|x| {
let mut is_empty = true;
x.map(|_| is_empty = false);
if is_empty {
return Ok::<_, TensorError>(vec![Value::<F>::unknown(); n + 1]);
} else {
let mut res = vec![Value::unknown(); n + 1];
let mut int_rep = 0;
x.map(|f| {
int_rep = crate::fieldutils::felt_to_integer_rep(f);
});
let decompe = crate::tensor::ops::get_rep(&int_rep, base, n)?;
for (i, x) in decompe.iter().enumerate() {
res[i] = Value::known(crate::fieldutils::integer_rep_to_felt(*x));
}
Ok(res)
}
})
.collect::<Result<Vec<_>, _>>();
let mut tensor = Tensor::from(res?.into_iter().flatten().collect::<Vec<_>>().into_iter());
let mut dims = self.dims().to_vec();
dims.push(n + 1);
tensor.reshape(&dims)?;
Ok(tensor.into())
}
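One plausible layout for the sign-plus-legs representation that decompose assembles (n + 1 values per element): a sign followed by n digits in the given base. The digit order inside get_rep is an assumption here, not taken from the source:

def get_rep(x: int, base: int, n: int) -> list:
    # Hypothetical layout: [sign, d_0, ..., d_{n-1}] with |x| = sum(d_i * base**i).
    sign = 0 if x == 0 else (1 if x > 0 else -1)
    mag, digits = abs(x), []
    for _ in range(n):
        digits.append(mag % base)
        mag //= base
    assert mag == 0, "x needs more than n legs at this base"
    return [sign] + digits

# With the run-arg defaults seen elsewhere in this diff (decomp_base=16384, decomp_legs=2):
assert get_rep(16385, 16384, 2) == [1, 1, 1]  # 16385 = 1 + 1 * 16384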
/// Calls `int_evals` on the inner tensor.
pub fn int_evals(&self) -> Result<Tensor<IntegerRep>, TensorError> {
pub fn get_int_evals(&self) -> Result<Tensor<i64>, TensorError> {
// finally convert to vector of integers
let mut integer_evals: Vec<IntegerRep> = vec![];
let mut integer_evals: Vec<i64> = vec![];
match self {
ValTensor::Value {
inner: v, dims: _, ..
@@ -579,26 +531,25 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
// we have to push to an externally created vector or else vaf.map() returns an evaluation wrapped in Value<> (which we don't want)
let _ = v.map(|vaf| match vaf {
ValType::Value(v) => v.map(|f| {
integer_evals.push(crate::fieldutils::felt_to_integer_rep(f));
integer_evals.push(crate::fieldutils::felt_to_i64(f));
}),
ValType::AssignedValue(v) => v.map(|f| {
integer_evals.push(crate::fieldutils::felt_to_integer_rep(f.evaluate()));
integer_evals.push(crate::fieldutils::felt_to_i64(f.evaluate()));
}),
ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
v.value_field().map(|f| {
integer_evals
.push(crate::fieldutils::felt_to_integer_rep(f.evaluate()));
integer_evals.push(crate::fieldutils::felt_to_i64(f.evaluate()));
})
}
ValType::Constant(v) => {
integer_evals.push(crate::fieldutils::felt_to_integer_rep(v));
integer_evals.push(crate::fieldutils::felt_to_i64(v));
Value::unknown()
}
});
}
_ => return Err(TensorError::WrongMethod),
};
let mut tensor: Tensor<IntegerRep> = integer_evals.into_iter().into();
let mut tensor: Tensor<i64> = integer_evals.into_iter().into();
match tensor.reshape(self.dims()) {
_ => {}
};
@@ -622,48 +573,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
Ok(())
}
/// Calls `last` on the inner tensor.
pub fn last(&self) -> Result<ValTensor<F>, TensorError> {
let slice = match self {
ValTensor::Value {
inner: v,
dims: _,
scale,
} => {
let inner = v.last()?;
let dims = inner.dims().to_vec();
ValTensor::Value {
inner,
dims,
scale: *scale,
}
}
_ => return Err(TensorError::WrongMethod),
};
Ok(slice)
}
/// Calls `first`
pub fn first(&self) -> Result<ValTensor<F>, TensorError> {
let slice = match self {
ValTensor::Value {
inner: v,
dims: _,
scale,
} => {
let inner = v.first()?;
let dims = inner.dims().to_vec();
ValTensor::Value {
inner,
dims,
scale: *scale,
}
}
_ => return Err(TensorError::WrongMethod),
};
Ok(slice)
}
/// Calls `get_slice` on the inner tensor.
pub fn get_slice(&self, indices: &[Range<usize>]) -> Result<ValTensor<F>, TensorError> {
if indices.iter().map(|x| x.end - x.start).collect::<Vec<_>>() == self.dims() {
@@ -844,104 +753,43 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
Ok(())
}
/// Calls `get_every_n` on the inner [Tensor].
pub fn get_every_n(&mut self, n: usize) -> Result<(), TensorError> {
match self {
ValTensor::Value {
inner: v, dims: d, ..
} => {
*v = v.get_every_n(n)?;
*d = v.dims().to_vec();
}
ValTensor::Instance { .. } => {
return Err(TensorError::WrongMethod);
}
}
Ok(())
}
/// Calls `exclude_every_n` on the inner [Tensor].
pub fn exclude_every_n(&mut self, n: usize) -> Result<(), TensorError> {
match self {
ValTensor::Value {
inner: v, dims: d, ..
} => {
*v = v.exclude_every_n(n)?;
*d = v.dims().to_vec();
}
ValTensor::Instance { .. } => {
return Err(TensorError::WrongMethod);
}
}
Ok(())
}
/// remove constant zero values constants
pub fn remove_const_zero_values(&mut self) {
match self {
ValTensor::Value { inner: v, dims, .. } => {
*v = v
.clone()
.into_par_iter()
.filter_map(|e| {
if let ValType::Constant(r) = e {
if r == F::ZERO {
return None;
}
} else if let ValType::AssignedConstant(_, r) = e {
if r == F::ZERO {
return None;
}
}
Some(e)
})
.collect();
*dims = v.dims().to_vec();
}
ValTensor::Instance { .. } => {}
}
}
/// gets constants
pub fn get_const_zero_indices(&self) -> Vec<usize> {
pub fn get_const_zero_indices(&self) -> Result<Vec<usize>, TensorError> {
match self {
ValTensor::Value { inner: v, .. } => v
.par_iter()
.enumerate()
.filter_map(|(i, e)| {
ValTensor::Value { inner: v, .. } => {
let mut indices = vec![];
for (i, e) in v.iter().enumerate() {
if let ValType::Constant(r) = e {
if *r == F::ZERO {
return Some(i);
indices.push(i);
}
} else if let ValType::AssignedConstant(_, r) = e {
if *r == F::ZERO {
return Some(i);
indices.push(i);
}
}
None
})
.collect(),
ValTensor::Instance { .. } => vec![],
}
Ok(indices)
}
ValTensor::Instance { .. } => Ok(vec![]),
}
}
/// gets constants
pub fn get_const_indices(&self) -> Vec<usize> {
pub fn get_const_indices(&self) -> Result<Vec<usize>, TensorError> {
match self {
ValTensor::Value { inner: v, .. } => v
.par_iter()
.enumerate()
.filter_map(|(i, e)| {
ValTensor::Value { inner: v, .. } => {
let mut indices = vec![];
for (i, e) in v.iter().enumerate() {
if let ValType::Constant(_) = e {
Some(i)
indices.push(i);
} else if let ValType::AssignedConstant(_, _) = e {
Some(i)
} else {
None
indices.push(i);
}
})
.collect(),
ValTensor::Instance { .. } => vec![],
}
Ok(indices)
}
ValTensor::Instance { .. } => Ok(vec![]),
}
}
@@ -1104,22 +952,25 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
}
/// A [String] representation of the [ValTensor] for display, for example in showing intermediate values in a computational graph.
pub fn show(&self) -> String {
let r = match self.int_evals() {
Ok(v) => v,
Err(_) => return "ValTensor not PrevAssigned".into(),
};
if r.len() > 10 {
let start = r[..5].to_vec();
let end = r[r.len() - 5..].to_vec();
// print the two split by ... in the middle
format!(
"[{} ... {}]",
start.iter().map(|x| format!("{}", x)).join(", "),
end.iter().map(|x| format!("{}", x)).join(", ")
)
} else {
format!("{:?}", r)
match self.clone() {
ValTensor::Value {
inner: v, dims: _, ..
} => {
let r: Tensor<i32> = v.map(|x| x.into());
if r.len() > 10 {
let start = r[..5].to_vec();
let end = r[r.len() - 5..].to_vec();
// print the two split by ... in the middle
format!(
"[{} ... {}]",
start.iter().map(|x| format!("{}", x)).join(", "),
end.iter().map(|x| format!("{}", x)).join(", ")
)
} else {
format!("{:?}", r)
}
}
_ => "ValTensor not PrevAssigned".into(),
}
}
}


@@ -319,7 +319,7 @@ impl VarTensor {
region: &mut Region<F>,
offset: usize,
values: &ValTensor<F>,
omissions: &HashSet<usize>,
omissions: &HashSet<&usize>,
constants: &mut ConstantsMap<F>,
) -> Result<ValTensor<F>, halo2_proofs::plonk::Error> {
let mut assigned_coord = 0;
@@ -368,7 +368,7 @@ impl VarTensor {
.sum::<usize>();
let dims = &dims[*idx];
// this should never ever fail
let t: Tensor<IntegerRep> = Tensor::new(None, dims).unwrap();
let t: Tensor<i32> = Tensor::new(None, dims).unwrap();
Ok(t.enum_map(|coord, _| {
let (x, y, z) = self.cartesian_coord(offset + coord);
region.assign_advice_from_instance(
@@ -497,7 +497,7 @@ impl VarTensor {
let (x, y, z) = self.cartesian_coord(offset + coord * step);
if matches!(check_mode, CheckMode::SAFE) && coord > 0 && z == 0 && y == 0 {
// assert that duplication occurred correctly
assert_eq!(Into::<IntegerRep>::into(k.clone()), Into::<IntegerRep>::into(v[coord - 1].clone()));
assert_eq!(Into::<i32>::into(k.clone()), Into::<i32>::into(v[coord - 1].clone()));
};
let cell = self.assign_value(region, offset, k.clone(), coord * step, constants)?;
@@ -533,14 +533,13 @@ impl VarTensor {
if matches!(check_mode, CheckMode::SAFE) {
// during key generation this will be 0 so we use this as a flag to check
// TODO: this isn't very safe and would be better to get the phase directly
let res_evals = res.int_evals().unwrap();
let is_assigned = res_evals
let is_assigned = !Into::<Tensor<i32>>::into(res.clone().get_inner().unwrap())
.iter()
.all(|&x| x == 0);
if !is_assigned {
if is_assigned {
assert_eq!(
values.int_evals().unwrap(),
res_evals
Into::<Tensor<i32>>::into(values.get_inner().unwrap()),
Into::<Tensor<i32>>::into(res.get_inner().unwrap())
)};
}


@@ -1,52 +1,41 @@
use crate::{
circuit::{
modules::{
polycommit::PolyCommitChip,
poseidon::{
spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH},
PoseidonChip,
},
Module,
},
region::RegionSettings,
},
fieldutils::{felt_to_integer_rep, integer_rep_to_felt},
graph::{
modules::POSEIDON_LEN_GRAPH, quantize_float, scale_to_multiplier, GraphCircuit,
GraphSettings,
},
pfsys::{
create_proof_circuit,
evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript},
verify_proof_circuit, TranscriptType,
},
tensor::TensorType,
CheckMode, Commitments,
};
use crate::circuit::modules::polycommit::PolyCommitChip;
use crate::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use crate::circuit::modules::poseidon::PoseidonChip;
use crate::circuit::modules::Module;
use crate::fieldutils::felt_to_i64;
use crate::fieldutils::i64_to_felt;
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::quantize_float;
use crate::graph::scale_to_multiplier;
use crate::graph::{GraphCircuit, GraphSettings};
use crate::pfsys::create_proof_circuit;
use crate::pfsys::evm::aggregation_kzg::AggregationCircuit;
use crate::pfsys::evm::aggregation_kzg::PoseidonTranscript;
use crate::pfsys::verify_proof_circuit;
use crate::pfsys::TranscriptType;
use crate::tensor::TensorType;
use crate::CheckMode;
use crate::Commitments;
use console_error_panic_hook;
use halo2_proofs::{
plonk::*,
poly::{
commitment::{CommitmentScheme, ParamsProver},
ipa::{
commitment::{IPACommitmentScheme, ParamsIPA},
multiopen::{ProverIPA, VerifierIPA},
strategy::SingleStrategy as IPASingleStrategy,
},
kzg::{
commitment::{KZGCommitmentScheme, ParamsKZG},
multiopen::{ProverSHPLONK, VerifierSHPLONK},
strategy::SingleStrategy as KZGSingleStrategy,
},
VerificationStrategy,
},
use halo2_proofs::plonk::*;
use halo2_proofs::poly::commitment::{CommitmentScheme, ParamsProver};
use halo2_proofs::poly::ipa::multiopen::{ProverIPA, VerifierIPA};
use halo2_proofs::poly::ipa::{
commitment::{IPACommitmentScheme, ParamsIPA},
strategy::SingleStrategy as IPASingleStrategy,
};
use halo2_proofs::poly::kzg::multiopen::ProverSHPLONK;
use halo2_proofs::poly::kzg::multiopen::VerifierSHPLONK;
use halo2_proofs::poly::kzg::{
commitment::{KZGCommitmentScheme, ParamsKZG},
strategy::SingleStrategy as KZGSingleStrategy,
};
use halo2_proofs::poly::VerificationStrategy;
use halo2_solidity_verifier::encode_calldata;
use halo2curves::{
bn256::{Bn256, Fr, G1Affine},
ff::{FromUniformBytes, PrimeField},
};
use snark_verifier::{loader::native::NativeLoader, system::halo2::transcript::evm::EvmTranscript};
use halo2curves::bn256::{Bn256, Fr, G1Affine};
use halo2curves::ff::{FromUniformBytes, PrimeField};
use snark_verifier::loader::native::NativeLoader;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::str::FromStr;
use wasm_bindgen::prelude::*;
use wasm_bindgen_console_logger::DEFAULT_LOGGER;
@@ -124,7 +113,7 @@ pub fn feltToInt(
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
Ok(wasm_bindgen::Clamped(
serde_json::to_vec(&felt_to_integer_rep(felt))
serde_json::to_vec(&felt_to_i64(felt))
.map_err(|e| JsError::new(&format!("Failed to serialize integer: {}", e)))?,
))
}
@@ -138,7 +127,7 @@ pub fn feltToFloat(
) -> Result<f64, JsError> {
let felt: Fr = serde_json::from_slice(&array[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
let int_rep = felt_to_integer_rep(felt);
let int_rep = felt_to_i64(felt);
let multiplier = scale_to_multiplier(scale);
Ok(int_rep as f64 / multiplier)
}
@@ -152,7 +141,7 @@ pub fn floatToFelt(
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
let int_rep =
quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
let felt = integer_rep_to_felt(int_rep);
let felt = i64_to_felt(int_rep);
let vec = crate::pfsys::field_to_string::<halo2curves::bn256::Fr>(&felt);
Ok(wasm_bindgen::Clamped(serde_json::to_vec(&vec).map_err(
|e| JsError::new(&format!("Failed to serialize a float to felt{}", e)),
@@ -286,15 +275,7 @@ pub fn genWitness(
.map_err(|e| JsError::new(&format!("{}", e)))?;
let witness = circuit
.forward::<KZGCommitmentScheme<Bn256>>(
&mut input,
None,
None,
RegionSettings::all_true(
circuit.settings().run_args.decomp_base,
circuit.settings().run_args.decomp_legs,
),
)
.forward::<KZGCommitmentScheme<Bn256>>(&mut input, None, None, false, false)
.map_err(|e| JsError::new(&format!("{}", e)))?;
serde_json::to_vec(&witness)

View File

@@ -3,19 +3,15 @@
mod native_tests {
use ezkl::circuit::Tolerance;
use ezkl::fieldutils::{felt_to_integer_rep, integer_rep_to_felt, IntegerRep};
use ezkl::fieldutils::{felt_to_i64, i64_to_felt};
// use ezkl::circuit::table::RESERVED_BLINDING_ROWS_PAD;
use ezkl::graph::input::{FileSource, FileSourceInner, GraphData};
use ezkl::graph::{DataSource, GraphSettings, GraphWitness};
use ezkl::pfsys::Snark;
use ezkl::Commitments;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2curves::bn256::Bn256;
use lazy_static::lazy_static;
use rand::Rng;
use std::env::var;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::process::{Child, Command};
use std::sync::Once;
static COMPILE: Once = Once::new();
@@ -187,13 +183,12 @@ mod native_tests {
const PF_FAILURE_AGGR: &str = "examples/test_failure_aggr_proof.json";
const LARGE_TESTS: [&str; 6] = [
const LARGE_TESTS: [&str; 5] = [
"self_attention",
"nanoGPT",
"multihead_attention",
"mobilenet",
"mnist_gan",
"smallworm",
];
const ACCURACY_CAL_TESTS: [&str; 6] = [
@@ -205,7 +200,7 @@ mod native_tests {
"1l_tiny_div",
];
const TESTS: [&str; 94] = [
const TESTS: [&str; 93] = [
"1l_mlp", //0
"1l_slice",
"1l_concat",
@@ -245,8 +240,8 @@ mod native_tests {
"1l_conv_transpose",
"1l_upsample",
"1l_identity", //35
"idolmodel", // too big evm
"trig", // too big evm
"idolmodel",
"trig",
"prelu_gmm",
"lstm",
"rnn", //40
@@ -303,7 +298,6 @@ mod native_tests {
"1l_lppool",
"lstm_large", // 91
"lstm_medium", // 92
"lenet_5", // 93
];
const WASM_TESTS: [&str; 46] = [
@@ -542,7 +536,7 @@ mod native_tests {
}
});
seq!(N in 0..=93 {
seq!(N in 0..=92 {
#(#[test_case(TESTS[N])])*
#[ignore]
@@ -650,15 +644,6 @@ mod native_tests {
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn mock_hashed_params_public_inputs_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "hashed", "private", 1, "resources", None, 0.0);
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn mock_fixed_inputs_(test: &str) {
crate::native_tests::init_binary();
@@ -955,7 +940,7 @@ mod native_tests {
});
seq!(N in 0..=5 {
seq!(N in 0..=4 {
#(#[test_case(LARGE_TESTS[N])])*
#[ignore]
@@ -987,26 +972,16 @@ mod native_tests {
mod tests_evm {
use seq_macro::seq;
use crate::native_tests::TESTS_EVM;
use crate::native_tests::TESTS;
use crate::native_tests::TESTS_EVM_AGGR;
use test_case::test_case;
use crate::native_tests::kzg_evm_prove_and_verify;
use crate::native_tests::kzg_evm_prove_and_verify_reusable_verifier;
use crate::native_tests::kzg_evm_prove_and_verify_render_seperately;
use crate::native_tests::kzg_evm_on_chain_input_prove_and_verify;
use crate::native_tests::kzg_evm_aggr_prove_and_verify;
use tempdir::TempDir;
use crate::native_tests::Hardfork;
use crate::native_tests::run_js_tests;
use ezkl::logger::init_logger;
use crate::native_tests::lazy_static;
// Global variables to store verifier hashes and identical verifiers
lazy_static! {
// create a new Mutex<Option<String>> to hold the deployed reusable verifier address
static ref REUSABLE_VERIFIER_ADDR: std::sync::Mutex<Option<String>> = std::sync::Mutex::new(None);
}
/// Currently only on-chain inputs that return a non-negative value are supported.
const TESTS_ON_CHAIN_INPUT: [&str; 17] = [
@@ -1118,70 +1093,6 @@ mod native_tests {
});
seq!(N in 0..=93 {
#(#[test_case(TESTS[N])])*
fn kzg_evm_prove_and_verify_reusable_verifier_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
init_logger();
log::error!("Running kzg_evm_prove_and_verify_reusable_verifier_ for test: {}", test);
// default vis
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "private", "private", "public", &mut REUSABLE_VERIFIER_ADDR.lock().unwrap(), false);
// public/public vis
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "public", "private", "public", &mut Some(reusable_verifier_address), false);
// hashed input
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "hashed", "private", "public", &mut Some(reusable_verifier_address), false);
match REUSABLE_VERIFIER_ADDR.try_lock() {
Ok(mut addr) => {
*addr = Some(reusable_verifier_address.clone());
log::error!("Reusing the same verifeir deployed at address: {}", reusable_verifier_address);
}
Err(_) => {
log::error!("Failed to acquire lock on REUSABLE_VERIFIER_ADDR");
}
}
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn kzg_evm_prove_and_verify_reusable_verifier_with_overflow_(test: &str) {
// verifier too big to fit on chain with overflow calibration target
if test == "1l_eltwise_div" || test == "lenet_5" || test == "ltsf" || test == "lstm_large" {
return;
}
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
init_logger();
log::error!("Running kzg_evm_prove_and_verify_reusable_verifier_with_overflow_ for test: {}", test);
// default vis
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "private", "private", "public", &mut REUSABLE_VERIFIER_ADDR.lock().unwrap(), true);
// public/public vis
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "public", "private", "public", &mut Some(reusable_verifier_address), true);
// hashed input
let reusable_verifier_address: String = kzg_evm_prove_and_verify_reusable_verifier(2, path, test.to_string(), "hashed", "private", "public", &mut Some(reusable_verifier_address), true);
match REUSABLE_VERIFIER_ADDR.try_lock() {
Ok(mut addr) => {
*addr = Some(reusable_verifier_address.clone());
log::error!("Reusing the same verifeir deployed at address: {}", reusable_verifier_address);
}
Err(_) => {
log::error!("Failed to acquire lock on REUSABLE_VERIFIER_ADDR");
}
}
test_dir.close().unwrap();
}
});
seq!(N in 0..=22 {
@@ -1198,6 +1109,19 @@ mod native_tests {
}
#(#[test_case(TESTS_EVM[N])])*
fn kzg_evm_prove_and_verify_render_seperately_(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let _anvil_child = crate::native_tests::start_anvil(false, Hardfork::Latest);
kzg_evm_prove_and_verify_render_seperately(2, path, test.to_string(), "private", "private", "public");
#[cfg(not(feature = "icicle"))]
run_js_tests(path, test.to_string(), "testBrowserEvmVerify", true);
test_dir.close().unwrap();
}
#(#[test_case(TESTS_EVM[N])])*
fn kzg_evm_hashed_input_prove_and_verify_(test: &str) {
@@ -1487,10 +1411,10 @@ mod native_tests {
let perturbation = if v == &halo2curves::bn256::Fr::zero() {
halo2curves::bn256::Fr::zero()
} else {
integer_rep_to_felt(
(felt_to_integer_rep(*v) as f32
i64_to_felt(
(felt_to_i64(*v) as f32
* (rand::thread_rng().gen_range(-0.01..0.01) * tolerance))
as IntegerRep,
as i64,
)
};
@@ -1510,10 +1434,10 @@ mod native_tests {
let perturbation = if v == &halo2curves::bn256::Fr::zero() {
halo2curves::bn256::Fr::from(2)
} else {
integer_rep_to_felt(
(felt_to_integer_rep(*v) as f32
i64_to_felt(
(felt_to_i64(*v) as f32
* (rand::thread_rng().gen_range(0.02..0.1) * tolerance))
as IntegerRep,
as i64,
)
};
*v + perturbation
@@ -1948,7 +1872,7 @@ mod native_tests {
// deploy the verifier
let args = vec![
"deploy-evm",
"deploy-evm-verifier",
rpc_arg.as_str(),
addr_path_arg.as_str(),
"--sol-code-path",
@@ -2179,7 +2103,11 @@ mod native_tests {
assert!(status.success());
// deploy the verifier
let mut args = vec!["deploy-evm", rpc_arg.as_str(), addr_path_arg.as_str()];
let mut args = vec![
"deploy-evm-verifier",
rpc_arg.as_str(),
addr_path_arg.as_str(),
];
args.push("--sol-code-path");
args.push(sol_arg.as_str());
@@ -2221,16 +2149,14 @@ mod native_tests {
}
// prove-serialize-verify, the usual full path
fn kzg_evm_prove_and_verify_reusable_verifier(
fn kzg_evm_prove_and_verify_render_seperately(
num_inner_columns: usize,
test_dir: &str,
example_name: String,
input_visibility: &str,
param_visibility: &str,
output_visibility: &str,
reusable_verifier_address: &mut Option<String>,
overflow: bool,
) -> String {
) {
let anvil_url = ANVIL_URL.as_str();
prove_and_verify(
@@ -2242,7 +2168,7 @@ mod native_tests {
output_visibility,
num_inner_columns,
None,
overflow,
false,
"single",
Commitments::KZG,
2,
@@ -2257,58 +2183,27 @@ mod native_tests {
let settings_arg = format!("--settings-path={}", settings_path);
let sol_arg = format!("--sol-code-path={}/{}/kzg.sol", test_dir, example_name);
// if the reusable verifier address is not set, create the verifier
let deployed_addr_arg = match reusable_verifier_address {
Some(addr) => addr.clone(),
None => {
// create the reusable verifier
let args = vec![
"create-evm-verifier",
"--vk-path",
&vk_arg,
&settings_arg,
&sol_arg,
"--reusable",
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// deploy the verifier
let args = vec![
"deploy-evm",
rpc_arg.as_str(),
addr_path_arg.as_str(),
sol_arg.as_str(),
"-C=verifier/reusable",
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// read in the address
let addr =
std::fs::read_to_string(format!("{}/{}/addr.txt", test_dir, example_name))
.expect("failed to read address file");
let deployed_addr_arg = format!("--addr-verifier={}", addr);
// set the reusable verifier address
*reusable_verifier_address = Some(addr);
deployed_addr_arg
}
};
let addr_path_arg_vk = format!("--addr-path={}/{}/addr_vk.txt", test_dir, example_name);
let sol_arg_vk: String = format!("--sol-code-path={}/{}/vk.sol", test_dir, example_name);
// create the verifier
let args = vec![
"create-evm-vka",
"create-evm-verifier",
"--vk-path",
&vk_arg,
&settings_arg,
&sol_arg,
"--render-vk-seperately",
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
let addr_path_arg_vk = format!("--addr-path={}/{}/addr_vk.txt", test_dir, example_name);
let sol_arg_vk = format!("--sol-code-path={}/{}/vk.sol", test_dir, example_name);
// create the verifier
let args = vec![
"create-evm-vk",
"--vk-path",
&vk_arg,
&settings_arg,
@@ -2321,13 +2216,32 @@ mod native_tests {
.expect("failed to execute process");
assert!(status.success());
// deploy the vka
// deploy the verifier
let args = vec![
"deploy-evm",
"deploy-evm-verifier",
rpc_arg.as_str(),
addr_path_arg.as_str(),
sol_arg.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
// read in the address
let addr = std::fs::read_to_string(format!("{}/{}/addr.txt", test_dir, example_name))
.expect("failed to read address file");
let deployed_addr_arg = format!("--addr-verifier={}", addr);
// deploy the vk
let args = vec![
"deploy-evm-vk",
rpc_arg.as_str(),
addr_path_arg_vk.as_str(),
sol_arg_vk.as_str(),
"-C=vka",
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
@@ -2357,7 +2271,7 @@ mod native_tests {
// now verify the proof
let pf_arg = format!("{}/{}/proof.pf", test_dir, example_name);
let args = vec![
let mut args = vec![
"verify-evm",
"--proof-path",
pf_arg.as_str(),
@@ -2371,52 +2285,13 @@ mod native_tests {
.status()
.expect("failed to execute process");
assert!(status.success());
// Read the original proof file
let original_proof_data: ezkl::pfsys::Snark<
halo2curves::bn256::Fr,
halo2curves::bn256::G1Affine,
> = Snark::load::<KZGCommitmentScheme<Bn256>>(&PathBuf::from(format!(
"{}/{}/proof.pf",
test_dir, example_name
)))
.expect("Failed to read proof file");
for i in 0..1 {
// Create a copy of the original proof data
let mut modified_proof_data = original_proof_data.clone();
// Flip a random bit
let random_byte = rand::thread_rng().gen_range(0..modified_proof_data.proof.len());
let random_bit = rand::thread_rng().gen_range(0..8);
modified_proof_data.proof[random_byte] ^= 1 << random_bit;
// Write the modified proof to a new file
let modified_pf_arg = format!("{}/{}/modified_proof_{}.pf", test_dir, example_name, i);
modified_proof_data
.save(&PathBuf::from(modified_pf_arg.clone()))
.expect("Failed to save modified proof file");
// Verify the modified proof (should fail)
let mut args_mod = args.clone();
args_mod[2] = &modified_pf_arg;
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args_mod)
.status()
.expect("failed to execute process");
if status.success() {
log::error!("Verification unexpectedly succeeded for modified proof {}. Flipped bit {} in byte {}", i, random_bit, random_byte);
}
assert!(
!status.success(),
"Modified proof {} should have failed verification",
i
);
}
// Return deployed_addr_arg for the reusable verifier
deployed_addr_arg
// As a sanity check, add an example that should fail.
args[2] = PF_FAILURE;
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(args)
.status()
.expect("failed to execute process");
assert!(!status.success());
}
// run js browser evm verify tests for a given example
@@ -2618,7 +2493,7 @@ mod native_tests {
// deploy the verifier
let mut args = vec![
"deploy-evm",
"deploy-evm-verifier",
rpc_arg.as_str(),
addr_path_verifier_arg.as_str(),
];


@@ -11,7 +11,7 @@ mod py_tests {
static ENV_SETUP: Once = Once::new();
static DOWNLOAD_VOICE_DATA: Once = Once::new();
// Sure to run this once
//Sure to run this once
lazy_static! {
static ref CARGO_TARGET_DIR: String =
@@ -123,8 +123,7 @@ mod py_tests {
}
}
const TESTS: [&str; 34] = [
"ezkl_demo_batch.ipynb",
const TESTS: [&str; 33] = [
"proof_splitting.ipynb", // 0
"variance.ipynb",
"mnist_gan.ipynb",
@@ -202,18 +201,6 @@ mod py_tests {
anvil_child.kill().unwrap();
}
#[test]
fn reusable_verifier_notebook_() {
crate::py_tests::init_binary();
let mut anvil_child = crate::py_tests::start_anvil(false);
let test_dir: TempDir = TempDir::new("reusable_verifier").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, "reusable_verifier.ipynb");
run_notebook(path, "reusable_verifier.ipynb");
test_dir.close().unwrap();
anvil_child.kill().unwrap();
}
#[test]
fn postgres_notebook_() {
crate::py_tests::init_binary();


@@ -23,7 +23,7 @@ examples_path = os.path.abspath(
srs_path = os.path.join(folder_path, 'kzg_test.params')
params_k17_path = os.path.join(folder_path, 'kzg_test_k17.params')
params_k21_path = os.path.join(folder_path, 'kzg_test_k21.params')
params_k20_path = os.path.join(folder_path, 'kzg_test_k20.params')
anvil_url = "http://localhost:3030"
@@ -104,8 +104,8 @@ def test_gen_srs():
ezkl.gen_srs(params_k17_path, 17)
assert os.path.isfile(params_k17_path)
ezkl.gen_srs(params_k21_path, 21)
assert os.path.isfile(params_k21_path)
ezkl.gen_srs(params_k20_path, 20)
assert os.path.isfile(params_k20_path)
@@ -423,76 +423,6 @@ async def test_create_evm_verifier():
assert res == True
assert os.path.isfile(sol_code_path)
async def test_create_evm_verifier_separate_vk():
"""
Create an EVM verifier with Solidity code and a separate vk
In order to run this test you will need to install solc in your environment
"""
vk_path = os.path.join(folder_path, 'test_evm.vk')
settings_path = os.path.join(folder_path, 'settings.json')
sol_code_path = os.path.join(folder_path, 'test_separate.sol')
vk_code_path = os.path.join(folder_path, 'test_vk.sol')
abi_path = os.path.join(folder_path, 'test_separate.abi')
abi_vk_path = os.path.join(folder_path, 'test_vk_separate.abi')
proof_path = os.path.join(folder_path, 'test_evm.pf')
calldata_path = os.path.join(folder_path, 'calldata.bytes')
# # res is now a vector of bytes
# res = ezkl.encode_evm_calldata(proof_path, calldata_path)
# assert os.path.isfile(calldata_path)
# assert len(res) > 0
res = await ezkl.create_evm_verifier(
vk_path,
settings_path,
sol_code_path,
abi_path,
srs_path=srs_path,
reusable=True
)
res = await ezkl.create_evm_vka(
vk_path,
settings_path,
vk_code_path,
abi_vk_path,
srs_path=srs_path,
)
assert res == True
assert os.path.isfile(sol_code_path)
async def test_deploy_evm_reusable_and_vka():
"""
Test deployment of the reusable verifier smart contract + vka
In order to run this you will need to install solc in your environment
"""
addr_path_verifier = os.path.join(folder_path, 'address_separate.json')
addr_path_vk = os.path.join(folder_path, 'address_vk.json')
sol_code_path = os.path.join(folder_path, 'test_separate.sol')
vk_code_path = os.path.join(folder_path, 'test_vk.sol')
# TODO: without optimization there will be out of gas errors
# sol_code_path = os.path.join(folder_path, 'test.sol')
res = await ezkl.deploy_evm(
addr_path_verifier,
sol_code_path,
anvil_url,
"verifier/reusable",
)
res = await ezkl.deploy_evm(
addr_path_vk,
vk_code_path,
anvil_url,
"vka",
)
assert res == True
async def test_deploy_evm():
"""
@@ -508,7 +438,7 @@ async def test_deploy_evm():
res = await ezkl.deploy_evm(
addr_path,
sol_code_path,
anvil_url,
rpc_url=anvil_url,
)
assert res == True
@@ -573,47 +503,6 @@ async def test_verify_evm():
assert res == True
async def test_verify_evm_separate_vk():
"""
Verifies an evm proof
In order to run this you will need to install solc in your environment
"""
proof_path = os.path.join(folder_path, 'test_evm.pf')
addr_path_verifier = os.path.join(folder_path, 'address_separate.json')
addr_path_vk = os.path.join(folder_path, 'address_vk.json')
proof_path = os.path.join(folder_path, 'test_evm.pf')
calldata_path = os.path.join(folder_path, 'calldata_separate.bytes')
with open(addr_path_verifier, 'r') as file:
addr_verifier = file.read().rstrip()
print(addr_verifier)
with open(addr_path_vk, 'r') as file:
addr_vk = file.read().rstrip()
print(addr_vk)
# res is now a vector of bytes
res = ezkl.encode_evm_calldata(proof_path, calldata_path, addr_vk=addr_vk)
assert os.path.isfile(calldata_path)
assert len(res) > 0
# TODO: without optimization there will be out of gas errors
# sol_code_path = os.path.join(folder_path, 'test.sol')
res = await ezkl.verify_evm(
addr_verifier,
proof_path,
rpc_url=anvil_url,
addr_vk=addr_vk,
# sol_code_path
# optimizer_runs
)
assert res == True
async def test_aggregate_and_verify_aggr():
data_path = os.path.join(
@@ -679,7 +568,7 @@ async def test_aggregate_and_verify_aggr():
)
# mock aggregate
res = ezkl.mock_aggregate([proof_path], 21)
res = ezkl.mock_aggregate([proof_path], 20)
assert res == True
aggregate_proof_path = os.path.join(folder_path, 'aggr_1l_relu.pf')
@@ -690,8 +579,8 @@ async def test_aggregate_and_verify_aggr():
[proof_path],
aggregate_vk_path,
aggregate_pk_path,
21,
srs_path=params_k21_path,
20,
srs_path=params_k20_path,
)
res = ezkl.gen_vk_from_pk_aggr(aggregate_pk_path, aggregate_vk_path)
@@ -703,9 +592,9 @@ async def test_aggregate_and_verify_aggr():
aggregate_proof_path,
aggregate_pk_path,
"poseidon",
21,
20,
"unsafe",
srs_path=params_k21_path,
srs_path=params_k20_path,
)
assert res == True
@@ -715,8 +604,8 @@ async def test_aggregate_and_verify_aggr():
res = ezkl.verify_aggr(
aggregate_proof_path,
aggregate_vk_path,
21,
srs_path=params_k21_path,
20,
srs_path=params_k20_path,
)
assert res == True
@@ -795,8 +684,8 @@ async def test_evm_aggregate_and_verify_aggr():
[proof_path],
aggregate_vk_path,
aggregate_pk_path,
21,
srs_path=params_k21_path,
20,
srs_path=params_k20_path,
)
res = ezkl.aggregate(
@@ -804,9 +693,9 @@ async def test_evm_aggregate_and_verify_aggr():
aggregate_proof_path,
aggregate_pk_path,
"evm",
21,
20,
"unsafe",
srs_path=params_k21_path,
srs_path=params_k20_path,
)
assert res == True
@@ -821,8 +710,8 @@ async def test_evm_aggregate_and_verify_aggr():
aggregate_vk_path,
sol_code_path,
abi_path,
logrows=21,
srs_path=params_k21_path,
logrows=20,
srs_path=params_k20_path,
)
assert res == True
@@ -840,8 +729,8 @@ async def test_evm_aggregate_and_verify_aggr():
res = ezkl.verify_aggr(
aggregate_proof_path,
aggregate_vk_path,
21,
srs_path=params_k21_path,
20,
srs_path=params_k20_path,
)
assert res == True
@@ -872,7 +761,6 @@ def get_examples():
'accuracy',
'linear_regression',
"mnist_gan",
"smallworm",
]
examples = []
for subdir, _, _ in os.walk(os.path.join(examples_path, "onnx")):


@@ -16,12 +16,14 @@ mod wasm32 {
srsValidation, u8_array_to_u128_le, verify, verifyAggr, vkValidation, witnessValidation,
};
use halo2_proofs::plonk::VerifyingKey;
use halo2_proofs::poly::commitment::CommitmentScheme;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::commitment::ParamsKZG;
use halo2_solidity_verifier::encode_calldata;
use halo2curves::bn256::Bn256;
use halo2curves::bn256::{Fr, G1Affine};
use snark_verifier::util::arithmetic::PrimeField;
use wasm_bindgen::JsError;
#[cfg(feature = "web")]
pub use wasm_bindgen_rayon::init_thread_pool;
use wasm_bindgen_test::*;

Binary file not shown.

Binary file not shown.

File diff suppressed because one or more lines are too long


@@ -8,7 +8,7 @@
"param_scale": 0,
"scale_rebase_multiplier": 10,
"lookup_range": [
0,
-2,
0
],
"logrows": 6,
@@ -24,18 +24,15 @@
"param_visibility": "Private",
"div_rebasing": false,
"rebase_frac_zero_constants": false,
"check_mode": "UNSAFE",
"commitment": "KZG",
"decomp_base": 128,
"decomp_legs": 2
"check_mode": "UNSAFE"
},
"num_rows": 46,
"total_assignments": 92,
"total_const_size": 3,
"num_rows": 16,
"total_dynamic_col_size": 0,
"num_dynamic_lookups": 0,
"num_shuffles": 0,
"total_shuffle_col_size": 0,
"total_assignments": 32,
"total_const_size": 8,
"model_instance_shapes": [
[
1,
@@ -57,19 +54,12 @@
]
]
},
"required_lookups": [],
"required_range_checks": [
[
-1,
1
],
[
0,
127
]
"required_lookups": [
"ReLU"
],
"required_range_checks": [],
"check_mode": "UNSAFE",
"version": "0.0.0",
"num_blinding_factors": null,
"timestamp": 1726429587279
"timestamp": 1702474230544
}

Binary file not shown.


@@ -1 +1 @@
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":0,"max_range_size":127}
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":-1,"max_range_size":0}