Compare commits


5 Commits

Author  SHA1        Message                                             Date
dante   5290045f06  Update chip.rs                                      2025-01-22 19:02:45 -05:00
dante   a0078bef6a  fix: syn-sel should be range-checked when overflow  2025-01-22 10:08:26 -05:00
dante   d0ba505baa  fix: node parsing should not panic (#912)           2025-01-22 08:02:29 -05:00
dante   f35688917d  fix: rm macos metal bindings from python (#911)     2025-01-21 00:36:57 -05:00
Artem   7ae541ed35  feat: metal acceleration for MSM solving (#909)     2025-01-20 22:17:24 -05:00
                    (Co-authored-by: dante <45801863+alexander-camuto@users.noreply.github.com>)
18 changed files with 471 additions and 179 deletions

View File

@@ -8,6 +8,8 @@ on:
jobs:
bench_poseidon:
permissions:
contents: read
runs-on: self-hosted
steps:
- uses: actions/checkout@v4
@@ -22,6 +24,8 @@ jobs:
run: cargo bench --verbose --bench poseidon
bench_einsum_accum_matmul:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -37,6 +41,8 @@ jobs:
run: cargo bench --verbose --bench accum_einsum_matmul
bench_accum_matmul_relu:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -52,6 +58,8 @@ jobs:
run: cargo bench --verbose --bench accum_matmul_relu
bench_accum_matmul_relu_overflow:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -67,6 +75,8 @@ jobs:
run: cargo bench --verbose --bench accum_matmul_relu_overflow
bench_relu:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -82,6 +92,8 @@ jobs:
run: cargo bench --verbose --bench relu
bench_accum_dot:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -97,6 +109,8 @@ jobs:
run: cargo bench --verbose --bench accum_dot
bench_accum_conv:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -112,6 +126,8 @@ jobs:
run: cargo bench --verbose --bench accum_conv
bench_accum_sumpool:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -127,6 +143,8 @@ jobs:
run: cargo bench --verbose --bench accum_sumpool
bench_pairwise_add:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -142,6 +160,8 @@ jobs:
run: cargo bench --verbose --bench pairwise_add
bench_accum_sum:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:
@@ -157,6 +177,8 @@ jobs:
run: cargo bench --verbose --bench accum_sum
bench_pairwise_pow:
permissions:
contents: read
runs-on: self-hosted
needs: [bench_poseidon]
steps:

View File

@@ -15,6 +15,9 @@ defaults:
working-directory: .
jobs:
publish-wasm-bindings:
permissions:
contents: read
packages: write
name: publish-wasm-bindings
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
@@ -186,6 +189,9 @@ jobs:
in-browser-evm-ver-publish:
permissions:
contents: read
packages: write
name: publish-in-browser-evm-verifier-package
needs: [publish-wasm-bindings]
runs-on: ubuntu-latest

View File

@@ -6,6 +6,8 @@ on:
description: "Test scenario tags"
jobs:
large-tests:
permissions:
contents: read
runs-on: kaiju
steps:
- uses: actions/checkout@v4

View File

@@ -18,6 +18,9 @@ defaults:
jobs:
linux:
permissions:
contents: read
packages: write
runs-on: GPU
strategy:
matrix:

View File

@@ -16,6 +16,8 @@ defaults:
jobs:
macos:
permissions:
contents: read
runs-on: macos-latest
if: startsWith(github.ref, 'refs/tags/')
strategy:
@@ -47,6 +49,13 @@ jobs:
components: rustfmt, clippy
- name: Build wheels
if: matrix.target == 'universal2-apple-darwin'
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
args: --release --out dist --features python-bindings
- name: Build wheels
if: matrix.target == 'x86_64'
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.target }}
@@ -64,6 +73,8 @@ jobs:
path: dist
windows:
permissions:
contents: read
runs-on: windows-latest
if: startsWith(github.ref, 'refs/tags/')
strategy:
@@ -111,6 +122,8 @@ jobs:
path: dist
linux:
permissions:
contents: read
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
strategy:
@@ -226,6 +239,8 @@ jobs:
# path: dist
musllinux:
permissions:
contents: read
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
strategy:
@@ -291,6 +306,8 @@ jobs:
path: dist
musllinux-cross:
permissions:
contents: read
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
strategy:
@@ -378,6 +395,8 @@ jobs:
packages-dir: ./
doc-publish:
permissions:
contents: read
name: Trigger ReadTheDocs Build
runs-on: ubuntu-latest
needs: pypi-publish

View File

@@ -10,6 +10,9 @@ on:
- "*"
jobs:
create-release:
permissions:
contents: read
packages: write
name: create-release
runs-on: ubuntu-22.04
if: startsWith(github.ref, 'refs/tags/')
@@ -33,6 +36,9 @@ jobs:
tag_name: ${{ env.EZKL_VERSION }}
build-release-gpu:
permissions:
contents: read
packages: write
name: build-release-gpu
needs: ["create-release"]
runs-on: GPU
@@ -94,6 +100,10 @@ jobs:
asset_content_type: application/octet-stream
build-release:
permissions:
contents: read
packages: write
issues: write
name: build-release
needs: ["create-release"]
runs-on: ${{ matrix.os }}
@@ -186,14 +196,18 @@ jobs:
echo "target flag is: ${{ env.TARGET_FLAGS }}"
echo "target dir is: ${{ env.TARGET_DIR }}"
- name: Build release binary (no asm)
if: matrix.build != 'linux-gnu'
- name: Build release binary (no asm or metal)
if: matrix.build != 'linux-gnu' && matrix.build != 'macos-aarch64'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry
- name: Build release binary (asm)
if: matrix.build == 'linux-gnu'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features asm
- name: Build release binary (metal)
if: matrix.build == 'macos-aarch64'
run: ${{ env.CARGO }} build --release ${{ env.TARGET_FLAGS }} -Z sparse-registry --features macos-metal
- name: Strip release binary
if: matrix.build != 'windows-msvc' && matrix.build != 'linux-aarch64'
run: strip "target/${{ matrix.target }}/release/ezkl"

View File

@@ -21,6 +21,8 @@ env:
jobs:
fr-age-test:
permissions:
contents: read
runs-on: large-self-hosted
steps:
- uses: actions/checkout@v4
@@ -35,8 +37,9 @@ jobs:
run: cargo test --release --verbose tests::large_mock_::large_tests_6_expects -- --include-ignored
build:
permissions:
contents: read
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
@@ -50,6 +53,8 @@ jobs:
run: cargo build --verbose
docs:
permissions:
contents: read
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -64,6 +69,8 @@ jobs:
run: cargo doc --verbose
library-tests:
permissions:
contents: read
runs-on: ubuntu-latest-32-cores
steps:
- uses: actions/checkout@v4
@@ -124,6 +131,8 @@ jobs:
# run: cargo nextest run conv_relu_col_ultra_overflow --no-capture --features icicle -- --include-ignored
ultra-overflow-tests_og-lookup:
permissions:
contents: read
runs-on: non-gpu
steps:
- uses: actions/checkout@v4
@@ -159,6 +168,8 @@ jobs:
run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture --no-default-features --features ezkl -- --include-ignored
ultra-overflow-tests:
permissions:
contents: read
runs-on: non-gpu
steps:
- uses: actions/checkout@v4
@@ -194,6 +205,8 @@ jobs:
run: cargo nextest run --release conv_relu_col_ultra_overflow --no-capture -- --include-ignored
model-serialization:
permissions:
contents: read
runs-on: ubuntu-latest-16-cores
steps:
- uses: actions/checkout@v4
@@ -212,6 +225,8 @@ jobs:
run: cargo nextest run native_tests::tests::model_serialization_different_binaries_ --test-threads 1
wasm32-tests:
permissions:
contents: read
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -239,6 +254,8 @@ jobs:
run: wasm-pack test --chrome --headless -- -Z build-std="panic_abort,std" --features web
mock-proving-tests:
permissions:
contents: read
runs-on: non-gpu
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
@@ -304,6 +321,8 @@ jobs:
run: cargo nextest run --release --verbose tests::mock_fixed_params_ --test-threads 32
prove-and-verify-evm-tests:
permissions:
contents: read
runs-on: non-gpu
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
@@ -386,7 +405,44 @@ jobs:
- name: KZG prove and verify tests (EVM + hashed outputs)
run: cargo nextest run --release --verbose tests_evm::kzg_evm_hashed_output_prove_and_verify --test-threads 1
# prove-and-verify-tests-metal:
# permissions:
# contents: read
# runs-on: macos-13
# # needs: [build, library-tests, docs]
# steps:
# - uses: actions/checkout@v4
# with:
# persist-credentials: false
# - uses: actions-rs/toolchain@v1
# with:
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - uses: jetli/wasm-pack-action@v0.4.0
# with:
# # Pin to version 0.12.1
# version: 'v0.12.1'
# - name: Add rust-src
# run: rustup component add rust-src --toolchain nightly-2024-07-18
# - uses: actions/checkout@v3
# with:
# persist-credentials: false
# - name: Use pnpm 8
# uses: pnpm/action-setup@v2
# with:
# version: 8
# - uses: baptiste0928/cargo-install@v1
# with:
# crate: cargo-nextest
# locked: true
# - name: KZG prove and verify tests (public outputs)
# run: cargo nextest run --release --features macos-metal --verbose tests::kzg_prove_and_verify_::t --no-capture
prove-and-verify-tests:
permissions:
contents: read
runs-on: non-gpu
needs: [build, library-tests, docs]
steps:
@@ -504,6 +560,8 @@ jobs:
# run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1
prove-and-verify-mock-aggr-tests:
permissions:
contents: read
runs-on: self-hosted
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
@@ -543,6 +601,8 @@ jobs:
# run: cargo nextest run --verbose tests_aggr::kzg_aggr_prove_and_verify_ --features icicle --test-threads 1 -- --include-ignored
prove-and-verify-aggr-tests:
permissions:
contents: read
runs-on: large-self-hosted
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
@@ -562,6 +622,8 @@ jobs:
run: cargo nextest run --release --verbose tests_aggr::kzg_aggr_prove_and_verify_ --test-threads 4 -- --include-ignored
prove-and-verify-aggr-evm-tests:
permissions:
contents: read
runs-on: large-self-hosted
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
@@ -585,6 +647,8 @@ jobs:
run: cargo nextest run --release --verbose tests_evm::kzg_evm_aggr_prove_and_verify_::t --test-threads 4 -- --include-ignored
examples:
permissions:
contents: read
runs-on: ubuntu-latest-32-cores
needs: [build, library-tests, docs]
steps:
@@ -604,6 +668,8 @@ jobs:
run: cargo nextest run --release tests_examples
python-tests:
permissions:
contents: read
runs-on: non-gpu
needs: [build, library-tests, docs]
steps:
@@ -632,6 +698,8 @@ jobs:
run: source .env/bin/activate; pip install pytest-asyncio; pytest -vv
accuracy-measurement-tests:
permissions:
contents: read
runs-on: non-gpu
needs: [build, library-tests, docs, python-tests, python-integration-tests]
steps:
@@ -664,6 +732,8 @@ jobs:
run: source .env/bin/activate; cargo nextest run --release --verbose tests::resources_accuracy_measurement_public_outputs_
python-integration-tests:
permissions:
contents: read
runs-on: large-self-hosted
services:
# Label used to access the service container
@@ -709,6 +779,8 @@ jobs:
run: python -m venv .env --clear; source .env/bin/activate; pip install -r requirements.txt; python -m ensurepip --upgrade
- name: Build python ezkl
run: source .env/bin/activate; unset CONDA_PREFIX; maturin develop --features python-bindings --release
- name: Voice tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
- name: Neural bow
run: source .env/bin/activate; cargo nextest run py_tests::tests::neural_bag_of_words_ --no-capture
- name: Felt conversion
@@ -728,14 +800,14 @@ jobs:
# chmod 600 /home/ubuntu/.kaggle/kaggle.json
- name: All notebooks
run: source .env/bin/activate; cargo nextest run py_tests::tests::run_notebook_ --test-threads 1
- name: Voice tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::voice_
- name: NBEATS tutorial
run: source .env/bin/activate; cargo nextest run py_tests::tests::nbeats_
# - name: Reusable verifier tutorial
# run: source .env/bin/activate; cargo nextest run py_tests::tests::reusable_
ios-integration-tests:
permissions:
contents: read
runs-on: macos-latest
steps:
- uses: actions/checkout@v4
@@ -754,6 +826,8 @@ jobs:
run: CARGO_BUILD_TARGET=aarch64-apple-darwin RUSTUP_TOOLCHAIN=nightly-2024-07-18-aarch64-apple-darwin cargo test --test ios_integration_tests --features ios-bindings-test --no-default-features
swift-package-tests:
permissions:
contents: read
runs-on: macos-latest
needs: [ios-integration-tests]

View File

@@ -8,8 +8,9 @@ on:
jobs:
analyze:
permissions:
contents: read
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:

View File

@@ -9,6 +9,9 @@ on:
jobs:
build-and-update:
permissions:
contents: read
packages: write
runs-on: macos-latest
env:
EZKL_SWIFT_PACKAGE_REPO: github.com/zkonduit/ezkl-swift-package.git

Cargo.lock generated (108 changed lines)
View File

@@ -1835,6 +1835,16 @@ dependencies = [
"syn 2.0.90",
]
[[package]]
name = "env_filter"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "186e05a59d4c50738528153b83b0b0194d3a29507dfec16eccd4b342903397d0"
dependencies = [
"log",
"regex",
]
[[package]]
name = "env_logger"
version = "0.10.2"
@@ -1848,6 +1858,19 @@ dependencies = [
"termcolor",
]
[[package]]
name = "env_logger"
version = "0.11.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcaee3d8e3cfc3fd92428d477bc97fc29ec8716d180c0d74c643bb26166660e0"
dependencies = [
"anstream",
"anstyle",
"env_filter",
"humantime",
"log",
]
[[package]]
name = "equivalent"
version = "1.0.1"
@@ -1923,7 +1946,7 @@ dependencies = [
"console_error_panic_hook",
"criterion 0.5.1",
"ecc",
"env_logger",
"env_logger 0.10.2",
"ethabi",
"foundry-compilers",
"gag",
@@ -1931,7 +1954,7 @@ dependencies = [
"halo2_gadgets",
"halo2_proofs",
"halo2_solidity_verifier",
"halo2curves 0.7.0",
"halo2curves 0.7.0 (git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851)",
"hex",
"indicatif",
"instant",
@@ -1939,7 +1962,6 @@ dependencies = [
"lazy_static",
"log",
"maybe-rayon",
"metal",
"mimalloc",
"mnist",
"num",
@@ -2394,14 +2416,14 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.3.0"
source = "git+https://github.com/zkonduit/halo2#ee4e1a09ebdb1f79f797685b78951c6034c430a6#ee4e1a09ebdb1f79f797685b78951c6034c430a6"
source = "git+https://github.com/zkonduit/halo2#bf9d0057a82443be48c4779bbe14961c18fb5996#bf9d0057a82443be48c4779bbe14961c18fb5996"
dependencies = [
"bincode",
"blake2b_simd",
"env_logger",
"env_logger 0.10.2",
"ff",
"group",
"halo2curves 0.7.0",
"halo2curves 0.7.0 (git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851)",
"icicle-bn254",
"icicle-core",
"icicle-cuda-runtime",
@@ -2409,6 +2431,7 @@ dependencies = [
"lazy_static",
"log",
"maybe-rayon",
"mopro-msm",
"rand_chacha",
"rand_core 0.6.4",
"rustc-hash 2.0.0",
@@ -2497,13 +2520,14 @@ dependencies = [
[[package]]
name = "halo2curves"
version = "0.7.0"
source = "git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851#b753a832e92d5c86c5c997327a9cf9de86a18851"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d380afeef3f1d4d3245b76895172018cfb087d9976a7cabcd5597775b2933e07"
dependencies = [
"blake2",
"digest 0.10.7",
"ff",
"group",
"halo2derive",
"halo2derive 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"hex",
"lazy_static",
"num-bigint",
@@ -2523,6 +2547,49 @@ dependencies = [
"unroll",
]
[[package]]
name = "halo2curves"
version = "0.7.0"
source = "git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851#b753a832e92d5c86c5c997327a9cf9de86a18851"
dependencies = [
"blake2",
"digest 0.10.7",
"ff",
"group",
"halo2derive 0.1.0 (git+https://github.com/privacy-scaling-explorations/halo2curves?rev=b753a832e92d5c86c5c997327a9cf9de86a18851)",
"hex",
"lazy_static",
"num-bigint",
"num-integer",
"num-traits",
"pairing",
"pasta_curves",
"paste",
"rand 0.8.5",
"rand_core 0.6.4",
"rayon",
"serde",
"serde_arrays",
"sha2",
"static_assertions",
"subtle",
"unroll",
]
[[package]]
name = "halo2derive"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bdb99e7492b4f5ff469d238db464131b86c2eaac814a78715acba369f64d2c76"
dependencies = [
"num-bigint",
"num-integer",
"num-traits",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "halo2derive"
version = "0.1.0"
@@ -3283,7 +3350,8 @@ dependencies = [
[[package]]
name = "metal"
version = "0.29.0"
source = "git+https://github.com/gfx-rs/metal-rs#0e1918b34689c4b8cd13a43372f9898680547ee9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ecfd3296f8c56b7c1f6fbac3c71cefa9d78ce009850c45000015f206dc7fa21"
dependencies = [
"bitflags 2.5.0",
"block",
@@ -3354,6 +3422,28 @@ dependencies = [
"byteorder",
]
[[package]]
name = "mopro-msm"
version = "0.1.0"
source = "git+https://github.com/zkonduit/metal-msm-gpu-acceleration.git#be5f647b1a6c1a6ea9024390744a2b4d87f5d002"
dependencies = [
"bincode",
"env_logger 0.11.6",
"halo2curves 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"instant",
"itertools 0.13.0",
"lazy_static",
"log",
"metal",
"objc",
"once_cell",
"rand 0.8.5",
"rayon",
"serde",
"thiserror",
"walkdir",
]
[[package]]
name = "native-tls"
version = "0.2.11"

View File

@@ -91,7 +91,6 @@ pyo3-async-runtimes = { git = "https://github.com/PyO3/pyo3-async-runtimes", ver
pyo3-log = { version = "0.12.0", default-features = false, optional = true }
tract-onnx = { git = "https://github.com/sonos/tract/", rev = "37132e0397d0a73e5bd3a8615d932dabe44f6736", default-features = false, optional = true }
tabled = { version = "0.12.0", optional = true }
metal = { git = "https://github.com/gfx-rs/metal-rs", optional = true }
objc = { version = "0.2.4", optional = true }
mimalloc = { version = "0.1", optional = true }
pyo3-stub-gen = { version = "0.6.0", optional = true }
@@ -277,13 +276,14 @@ icicle = ["halo2_proofs/icicle_gpu"]
empty-cmd = []
no-banner = []
no-update = []
macos-metal = ["halo2_proofs/macos"]
ios-metal = ["halo2_proofs/ios"]
[patch.'https://github.com/zkonduit/halo2']
halo2_proofs = { git = "https://github.com/zkonduit/halo2#ee4e1a09ebdb1f79f797685b78951c6034c430a6", package = "halo2_proofs" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2#bf9d0057a82443be48c4779bbe14961c18fb5996", package = "halo2_proofs" }
[patch.'https://github.com/zkonduit/halo2#0654e92bdf725fd44d849bfef3643870a8c7d50b']
halo2_proofs = { git = "https://github.com/zkonduit/halo2#ee4e1a09ebdb1f79f797685b78951c6034c430a6", package = "halo2_proofs" }
halo2_proofs = { git = "https://github.com/zkonduit/halo2#bf9d0057a82443be48c4779bbe14961c18fb5996", package = "halo2_proofs" }
[patch.crates-io]
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }
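The new macos-metal and ios-metal features above simply forward to the corresponding halo2_proofs features, so the Metal path stays opt-in at build time (matching the release workflow's new --features macos-metal step). A minimal sketch of how such a feature flag typically gates code at compile time; msm_backend_name is a hypothetical function, not taken from ezkl.

// Compile-time selection on the `macos-metal` feature flag (illustrative only).
#[cfg(feature = "macos-metal")]
fn msm_backend_name() -> &'static str {
    "metal"
}

#[cfg(not(feature = "macos-metal"))]
fn msm_backend_name() -> &'static str {
    "cpu"
}

fn main() {
    // Built with `--features macos-metal` this prints "metal", otherwise "cpu".
    println!("MSM backend: {}", msm_backend_name());
}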

View File

@@ -592,9 +592,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
// this is 0 if the index is the same as the column index (starting from 1)
let col_expr = sel.clone()
* table
* (table
.selector_constructor
.get_expr_at_idx(col_idx, synthetic_sel);
.get_expr_at_idx(col_idx, synthetic_sel));
let multiplier =
table.selector_constructor.get_selector_val_at_idx(col_idx);
@@ -626,6 +626,40 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
res
});
}
// add a degree-k custom constraint of the following form to the range check and
// static lookup configuration.
// 𝑚𝑢𝑙𝑡𝑖𝑠𝑒𝑙 · ∏ (𝑠𝑒𝑙 − 𝑖) = 0 where 𝑠𝑒𝑙 is the synthetic_sel, and the product is over the set of overflowed columns
// and 𝑚𝑢𝑙𝑡𝑖𝑠𝑒𝑙 is the selector value at the column index
cs.create_gate("range_check_on_sel", |cs| {
let synthetic_sel = match len {
1 => Expression::Constant(F::from(1)),
_ => match index {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
},
};
let range_check_on_synthetic_sel = match len {
1 => Expression::Constant(F::from(0)),
_ => {
let mut initial_expr = Expression::Constant(F::from(1));
for i in 0..len {
initial_expr = initial_expr
* (synthetic_sel.clone()
- Expression::Constant(F::from(i as u64)))
}
initial_expr
}
};
let sel = cs.query_selector(multi_col_selector);
Constraints::with_selector(sel, vec![range_check_on_synthetic_sel])
});
self.static_lookups
.selectors
.insert((nl.clone(), x, y), multi_col_selector);
@@ -904,9 +938,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
let default_x = range_check.get_first_element(col_idx);
let col_expr = sel.clone()
* range_check
* (range_check
.selector_constructor
.get_expr_at_idx(col_idx, synthetic_sel);
.get_expr_at_idx(col_idx, synthetic_sel));
let multiplier = range_check
.selector_constructor
@@ -929,6 +963,40 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
res
});
}
// add a degree-k custom constraint of the following form to the range check and
// static lookup configuration.
// 𝑚𝑢𝑙𝑡𝑖𝑠𝑒𝑙 · ∏ (𝑠𝑒𝑙 − 𝑖) = 0 where 𝑠𝑒𝑙 is the synthetic_sel, and the product is over the set of overflowed columns
// and 𝑚𝑢𝑙𝑡𝑖𝑠𝑒𝑙 is the selector value at the column index
cs.create_gate("range_check_on_sel", |cs| {
let synthetic_sel = match len {
1 => Expression::Constant(F::from(1)),
_ => match index {
VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0))
}
_ => unreachable!(),
},
};
let range_check_on_synthetic_sel = match len {
1 => Expression::Constant(F::from(0)),
_ => {
let mut initial_expr = Expression::Constant(F::from(1));
for i in 0..len {
initial_expr = initial_expr
* (synthetic_sel.clone()
- Expression::Constant(F::from(i as u64)))
}
initial_expr
}
};
let sel = cs.query_selector(multi_col_selector);
Constraints::with_selector(sel, vec![range_check_on_synthetic_sel])
});
self.range_checks
.selectors
.insert((range, x, y), multi_col_selector);
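Both hunks above add the same "range_check_on_sel" gate: when the multi-column selector is active it enforces sel * prod_{i=0}^{len-1}(sel - i) = 0, which can only hold when the synthetic selector takes one of the len legal column indices. This is the range check referred to by the "syn-sel should be range-checked when overflow" commit. A plain-integer sketch of that vanishing polynomial, with field elements replaced by i64 purely for illustration:

// The degree-len product used by the "range_check_on_sel" gate, evaluated over
// plain integers instead of field elements.
fn range_check_poly(synthetic_sel: i64, len: i64) -> i64 {
    (0..len).fold(1, |acc, i| acc * (synthetic_sel - i))
}

fn main() {
    let len = 4;
    // The product vanishes exactly on the allowed selector values 0..len.
    assert!((0..len).all(|s| range_check_poly(s, len) == 0));
    // Out-of-range (overflowed) selector values leave the product non-zero.
    assert_ne!(range_check_poly(len, len), 0);
    assert_ne!(range_check_poly(-1, len), 0);
}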

View File

@@ -132,21 +132,16 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
(first_element, op_f.output[0])
}
///
/// calculates the column size given the number of rows and reserved blinding rows
pub fn cal_col_size(logrows: usize, reserved_blinding_rows: usize) -> usize {
2usize.pow(logrows as u32) - reserved_blinding_rows
}
///
pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
2usize.pow(bits as u32) - reserved_blinding_rows
}
}
///
pub fn num_cols_required(range_len: IntegerRep, col_size: usize) -> usize {
// number of cols needed to store the range
(range_len / (col_size as IntegerRep)) as usize + 1
(range_len / col_size as IntegerRep) as usize + 1
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Table<F> {
@@ -355,16 +350,11 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RangeCheck<F> {
integer_rep_to_felt(chunk * (self.col_size as IntegerRep) + self.range.0)
}
///
/// calculates the column size
pub fn cal_col_size(logrows: usize, reserved_blinding_rows: usize) -> usize {
2usize.pow(logrows as u32) - reserved_blinding_rows
}
///
pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
2usize.pow(bits as u32) - reserved_blinding_rows
}
/// get column index given input
pub fn get_col_index(&self, input: F) -> F {
// range is split up into chunks of size col_size, find the chunk that input is in
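The surviving helpers keep the lookup-table bookkeeping in one place: cal_col_size gives the usable rows per column (2^logrows minus the reserved blinding rows) and num_cols_required says how many such columns a range of a given length needs. A small worked example, with i64 standing in for the project's IntegerRep type:

// Usable rows in one column: 2^logrows minus the reserved blinding rows.
fn cal_col_size(logrows: usize, reserved_blinding_rows: usize) -> usize {
    2usize.pow(logrows as u32) - reserved_blinding_rows
}

// Number of columns needed to hold a range of `range_len` values.
fn num_cols_required(range_len: i64, col_size: usize) -> usize {
    (range_len / col_size as i64) as usize + 1
}

fn main() {
    let col_size = cal_col_size(17, 6); // 2^17 - 6 = 131066 usable rows
    // A range of 2^20 values spills across 9 columns of that size.
    assert_eq!(num_cols_required(1 << 20, col_size), 9);
}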

View File

@@ -11,6 +11,12 @@ pub enum GraphError {
/// Shape mismatch in circuit construction
#[error("invalid dimensions used for node {0} ({1})")]
InvalidDims(usize, String),
/// Non scalar power
#[error("we only support scalar powers")]
NonScalarPower,
/// Non scalar base for exponentiation
#[error("we only support scalar bases for exponentiation")]
NonScalarBase,
/// Wrong method was called to configure an op
#[error("wrong method was called to configure node {0} ({1})")]
WrongMethod(usize, String),
@@ -143,4 +149,7 @@ pub enum GraphError {
/// Invalid RunArg
#[error("invalid RunArgs: {0}")]
InvalidRunArgs(String),
/// Only nearest neighbor interpolation is supported
#[error("only nearest neighbor interpolation is supported")]
InvalidInterpolation,
}
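These new variants support the "node parsing should not panic" change: code paths in the ONNX importer that previously hit assert! or unimplemented! now return a typed GraphError instead. A minimal sketch of the pattern, assuming the thiserror derive implied by the #[error(...)] attributes above; parse_pow is a hypothetical helper, not an ezkl function.

use thiserror::Error;

#[derive(Debug, Error)]
enum GraphError {
    #[error("we only support scalar powers")]
    NonScalarPower,
    #[error("invalid dimensions used for node {0} ({1})")]
    InvalidDims(usize, String),
}

// Return an error instead of panicking when the exponent is not a scalar constant.
fn parse_pow(idx: usize, raw_values: &[f64]) -> Result<f64, GraphError> {
    match raw_values {
        [exponent] => Ok(*exponent),
        [] => Err(GraphError::InvalidDims(idx, "pow".to_string())),
        _ => Err(GraphError::NonScalarPower),
    }
}

fn main() {
    assert!(parse_pow(0, &[2.0]).is_ok());
    assert!(parse_pow(0, &[2.0, 3.0]).is_err());
}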

View File

@@ -44,11 +44,10 @@ use tract_onnx::tract_hir::{
tract_core::ops::cnn::{conv::KernelFormat, MaxPool, SumPool},
};
/// Quantizes an iterable of f32s to a [Tensor] of i32s using a fixed point representation.
/// Quantizes an iterable of f64 to a [Tensor] of IntegerRep using a fixed point representation.
/// Arguments
///
/// * `vec` - the vector to quantize.
/// * `dims` - the dimensionality of the resulting [Tensor].
/// * `elem` - the element to quantize.
/// * `shift` - offset used in the fixed point representation.
/// * `scale` - `2^scale` used in the fixed point representation.
pub fn quantize_float(
@@ -85,7 +84,7 @@ pub fn scale_to_multiplier(scale: crate::Scale) -> f64 {
f64::powf(2., scale as f64)
}
/// Converts a scale (log base 2) to a fixed point multiplier.
/// Converts a fixed point multiplier to a scale (log base 2).
pub fn multiplier_to_scale(mult: f64) -> crate::Scale {
mult.log2().round() as crate::Scale
}
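The corrected doc comments describe the fixed-point convention used throughout the importer: a real value is stored as an integer after scaling by 2^scale (plus an optional shift), scale_to_multiplier maps the log-base-2 scale to that multiplier, and multiplier_to_scale inverts it. A simplified sketch of the round trip, assuming the plain round(x * 2^scale + shift) form rather than the exact ezkl implementation (which also checks the result stays in the legal range):

// Simplified fixed-point quantization: x -> round(x * 2^scale + shift).
fn quantize(elem: f64, shift: f64, scale: i32) -> i64 {
    let multiplier = f64::powf(2.0, scale as f64); // scale_to_multiplier analogue
    (multiplier * elem + shift).round() as i64
}

// Inverse direction, used when reading quantized results back out.
fn dequantize(q: i64, shift: f64, scale: i32) -> f64 {
    let multiplier = f64::powf(2.0, scale as f64);
    (q as f64 - shift) / multiplier
}

fn main() {
    let (x, shift, scale) = (0.3_f64, 0.0, 7);
    let q = quantize(x, shift, scale); // 0.3 * 128 rounds to 38
    assert_eq!(q, 38);
    println!("roundtrip error: {}", (dequantize(q, shift, scale) - x).abs());
}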
@@ -312,6 +311,9 @@ pub fn new_op_from_onnx(
let mut deleted_indices = vec![];
let node = match node.op().name().as_ref() {
"ShiftLeft" => {
if inputs.len() != 2 {
return Err(GraphError::InvalidDims(idx, "shift left".to_string()));
};
// load shift amount
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
@@ -324,10 +326,13 @@ pub fn new_op_from_onnx(
out_scale: Some(input_scales[0] - raw_values[0] as i32),
})
} else {
return Err(GraphError::OpMismatch(idx, "ShiftLeft".to_string()));
return Err(GraphError::OpMismatch(idx, "shift left".to_string()));
}
}
"ShiftRight" => {
if inputs.len() != 2 {
return Err(GraphError::InvalidDims(idx, "shift right".to_string()));
};
// load shift amount
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
@@ -340,7 +345,7 @@ pub fn new_op_from_onnx(
out_scale: Some(input_scales[0] + raw_values[0] as i32),
})
} else {
return Err(GraphError::OpMismatch(idx, "ShiftRight".to_string()));
return Err(GraphError::OpMismatch(idx, "shift right".to_string()));
}
}
"MultiBroadcastTo" => {
@@ -363,7 +368,10 @@ pub fn new_op_from_onnx(
}
}
assert_eq!(input_ops.len(), 3, "Range requires 3 inputs");
if input_ops.len() != 3 {
return Err(GraphError::InvalidDims(idx, "range".to_string()));
}
let input_ops = input_ops
.iter()
.map(|x| x.get_constant().ok_or(GraphError::NonConstantRange))
@@ -419,6 +427,10 @@ pub fn new_op_from_onnx(
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(inputs.len() - 1);
if inputs[0].out_dims().is_empty() || inputs[0].out_dims()[0].len() <= axis {
return Err(GraphError::InvalidDims(idx, "gather".to_string()));
}
op = SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::Gather {
dim: axis,
constant_idx: Some(c.raw_values.map(|x| {
@@ -447,8 +459,17 @@ pub fn new_op_from_onnx(
"Topk" => {
let op = load_op::<Topk>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
if inputs.len() != 2 {
return Err(GraphError::InvalidDims(idx, "topk".to_string()));
};
// if param_visibility.is_public() {
let k = if let Some(c) = inputs[1].opkind().get_mutable_constant() {
if c.raw_values.len() != 1 {
return Err(GraphError::InvalidDims(idx, "topk".to_string()));
}
inputs[1].decrement_use();
deleted_indices.push(inputs.len() - 1);
c.raw_values.map(|x| x as usize)[0]
@@ -488,6 +509,10 @@ pub fn new_op_from_onnx(
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
if c.raw_values.is_empty() {
return Err(GraphError::InvalidDims(idx, "scatter elements".to_string()));
}
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterElements {
dim: axis,
constant_idx: Some(c.raw_values.map(|x| x as usize)),
@@ -522,6 +547,9 @@ pub fn new_op_from_onnx(
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
if c.raw_values.is_empty() {
return Err(GraphError::InvalidDims(idx, "scatter nd".to_string()));
}
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterND {
constant_idx: Some(c.raw_values.map(|x| x as usize)),
})
@@ -555,6 +583,9 @@ pub fn new_op_from_onnx(
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
if c.raw_values.is_empty() {
return Err(GraphError::InvalidDims(idx, "gather nd".to_string()));
}
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherND {
batch_dims,
indices: Some(c.raw_values.map(|x| x as usize)),
@@ -589,6 +620,9 @@ pub fn new_op_from_onnx(
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
if c.raw_values.is_empty() {
return Err(GraphError::InvalidDims(idx, "gather elements".to_string()));
}
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherElements {
dim: axis,
constant_idx: Some(c.raw_values.map(|x| x as usize)),
@@ -684,7 +718,9 @@ pub fn new_op_from_onnx(
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes: Vec<usize> = op.axes.into_iter().collect();
assert_eq!(axes.len(), 1, "only support argmax over one axis");
if axes.len() != 1 {
return Err(GraphError::InvalidDims(idx, "argmax".to_string()));
}
SupportedOp::Hybrid(HybridOp::ReduceArgMax { dim: axes[0] })
}
@@ -694,7 +730,9 @@ pub fn new_op_from_onnx(
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes: Vec<usize> = op.axes.into_iter().collect();
assert_eq!(axes.len(), 1, "only support argmin over one axis");
if axes.len() != 1 {
return Err(GraphError::InvalidDims(idx, "argmin".to_string()));
}
SupportedOp::Hybrid(HybridOp::ReduceArgMin { dim: axes[0] })
}
@@ -803,6 +841,9 @@ pub fn new_op_from_onnx(
}
}
"Recip" => {
if inputs.len() != 1 {
return Err(GraphError::InvalidDims(idx, "recip".to_string()));
};
let in_scale = input_scales[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
// If the input scale is larger than the params scale
@@ -846,6 +887,9 @@ pub fn new_op_from_onnx(
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Rsqrt" => {
if input_scales.len() != 1 {
return Err(GraphError::InvalidDims(idx, "rsqrt".to_string()));
};
let in_scale = input_scales[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
SupportedOp::Hybrid(HybridOp::Rsqrt {
@@ -933,7 +977,9 @@ pub fn new_op_from_onnx(
let op = load_op::<Cast>(node.op(), idx, node.op().name().to_string())?;
let dt = op.to;
assert_eq!(input_scales.len(), 1);
if input_scales.len() != 1 {
return Err(GraphError::InvalidDims(idx, "cast".to_string()));
};
match dt {
DatumType::Bool
@@ -983,6 +1029,11 @@ pub fn new_op_from_onnx(
if const_idx.len() == 1 {
let const_idx = const_idx[0];
if inputs.len() <= const_idx {
return Err(GraphError::InvalidDims(idx, "mul".to_string()));
}
if let Some(c) = inputs[const_idx].opkind().get_mutable_constant() {
if c.raw_values.len() == 1 && c.raw_values[0] < 1. {
// if not divisible by 2 then we need to add a range check
@@ -1057,6 +1108,9 @@ pub fn new_op_from_onnx(
return Err(GraphError::OpMismatch(idx, "softmax".to_string()));
}
};
if input_scales.len() != 1 {
return Err(GraphError::InvalidDims(idx, "softmax".to_string()));
}
let in_scale = input_scales[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
@@ -1096,22 +1150,42 @@ pub fn new_op_from_onnx(
pool_dims: kernel_shape.to_vec(),
})
}
"Ceil" => SupportedOp::Hybrid(HybridOp::Ceil {
scale: scale_to_multiplier(input_scales[0]).into(),
legs: run_args.decomp_legs,
}),
"Floor" => SupportedOp::Hybrid(HybridOp::Floor {
scale: scale_to_multiplier(input_scales[0]).into(),
legs: run_args.decomp_legs,
}),
"Round" => SupportedOp::Hybrid(HybridOp::Round {
scale: scale_to_multiplier(input_scales[0]).into(),
legs: run_args.decomp_legs,
}),
"RoundHalfToEven" => SupportedOp::Hybrid(HybridOp::RoundHalfToEven {
scale: scale_to_multiplier(input_scales[0]).into(),
legs: run_args.decomp_legs,
}),
"Ceil" => {
if input_scales.len() != 1 {
return Err(GraphError::InvalidDims(idx, "ceil".to_string()));
}
SupportedOp::Hybrid(HybridOp::Ceil {
scale: scale_to_multiplier(input_scales[0]).into(),
legs: run_args.decomp_legs,
})
}
"Floor" => {
if input_scales.len() != 1 {
return Err(GraphError::InvalidDims(idx, "floor".to_string()));
}
SupportedOp::Hybrid(HybridOp::Floor {
scale: scale_to_multiplier(input_scales[0]).into(),
legs: run_args.decomp_legs,
})
}
"Round" => {
if input_scales.len() != 1 {
return Err(GraphError::InvalidDims(idx, "round".to_string()));
}
SupportedOp::Hybrid(HybridOp::Round {
scale: scale_to_multiplier(input_scales[0]).into(),
legs: run_args.decomp_legs,
})
}
"RoundHalfToEven" => {
if input_scales.len() != 1 {
return Err(GraphError::InvalidDims(idx, "roundhalftoeven".to_string()));
}
SupportedOp::Hybrid(HybridOp::RoundHalfToEven {
scale: scale_to_multiplier(input_scales[0]).into(),
legs: run_args.decomp_legs,
})
}
"Sign" => SupportedOp::Linear(PolyOp::Sign),
"Pow" => {
// Extract the slope layer hyperparams from a const
@@ -1121,7 +1195,9 @@ pub fn new_op_from_onnx(
inputs[1].decrement_use();
deleted_indices.push(1);
if c.raw_values.len() > 1 {
unimplemented!("only support scalar pow")
return Err(GraphError::NonScalarPower);
} else if c.raw_values.is_empty() {
return Err(GraphError::InvalidDims(idx, "pow".to_string()));
}
let exponent = c.raw_values[0];
@@ -1138,7 +1214,9 @@ pub fn new_op_from_onnx(
inputs[0].decrement_use();
deleted_indices.push(0);
if c.raw_values.len() > 1 {
unimplemented!("only support scalar base")
return Err(GraphError::NonScalarBase);
} else if c.raw_values.is_empty() {
return Err(GraphError::InvalidDims(idx, "pow".to_string()));
}
let base = c.raw_values[0];
@@ -1148,10 +1226,14 @@ pub fn new_op_from_onnx(
base: base.into(),
})
} else {
unimplemented!("only support constant base or pow for now")
return Err(GraphError::InvalidDims(idx, "pow".to_string()));
}
}
"Div" => {
if inputs.len() != 2 {
return Err(GraphError::InvalidDims(idx, "div".to_string()));
}
let const_idx = inputs
.iter()
.enumerate()
@@ -1159,14 +1241,15 @@ pub fn new_op_from_onnx(
.map(|(i, _)| i)
.collect::<Vec<_>>();
if const_idx.len() > 1 {
if const_idx.len() > 1 || const_idx.is_empty() {
return Err(GraphError::InvalidDims(idx, "div".to_string()));
}
let const_idx = const_idx[0];
if const_idx != 1 {
unimplemented!("only support div with constant as second input")
return Err(GraphError::MisformedParams(
"only support div with constant as second input".to_string(),
));
}
if let Some(c) = inputs[const_idx].opkind().get_mutable_constant() {
@@ -1180,10 +1263,14 @@ pub fn new_op_from_onnx(
denom: denom.into(),
})
} else {
unimplemented!("only support non zero divisors of size 1")
return Err(GraphError::MisformedParams(
"only support non zero divisors of size 1".to_string(),
));
}
} else {
unimplemented!("only support div with constant as second input")
return Err(GraphError::MisformedParams(
"only support div with constant as second input".to_string(),
));
}
}
"Cube" => SupportedOp::Linear(PolyOp::Pow(3)),
@@ -1323,7 +1410,7 @@ pub fn new_op_from_onnx(
if !resize_node.contains("interpolator: Nearest")
&& !resize_node.contains("nearest: Floor")
{
unimplemented!("Only nearest neighbor interpolation is supported")
return Err(GraphError::InvalidInterpolation);
}
// check if optional scale factor is present
if inputs.len() != 2 && inputs.len() != 3 {
@@ -1427,6 +1514,10 @@ pub fn new_op_from_onnx(
SupportedOp::Linear(PolyOp::Reshape(output_shape))
}
"Flatten" => {
if inputs.len() != 1 || inputs[0].out_dims().is_empty() {
return Err(GraphError::InvalidDims(idx, "flatten".to_string()));
};
let new_dims: Vec<usize> = vec![inputs[0].out_dims()[0].iter().product::<usize>()];
SupportedOp::Linear(PolyOp::Flatten(new_dims))
}
@@ -1546,6 +1637,7 @@ pub fn homogenize_input_scales(
}
#[cfg(test)]
/// tests for the utility module
pub mod tests {
use super::*;

View File

@@ -24,9 +24,6 @@ use std::path::PathBuf;
pub use val::*;
pub use var::*;
#[cfg(feature = "metal")]
use instant::Instant;
use crate::{
circuit::utils,
fieldutils::{integer_rep_to_felt, IntegerRep},
@@ -40,8 +37,6 @@ use halo2_proofs::{
poly::Rotation,
};
use itertools::Itertools;
#[cfg(feature = "metal")]
use metal::{Device, MTLResourceOptions, MTLSize};
use std::error::Error;
use std::fmt::Debug;
use std::io::Read;
@@ -49,31 +44,6 @@ use std::iter::Iterator;
use std::ops::{Add, Deref, DerefMut, Div, Mul, Neg, Range, Sub};
use std::{cmp::max, ops::Rem};
#[cfg(feature = "metal")]
use std::collections::HashMap;
#[cfg(feature = "metal")]
const LIB_DATA: &[u8] = include_bytes!("metal/tensor_ops.metallib");
#[cfg(feature = "metal")]
lazy_static::lazy_static! {
static ref DEVICE: Device = Device::system_default().expect("no device found");
static ref LIB: metal::Library = DEVICE.new_library_with_data(LIB_DATA).unwrap();
static ref QUEUE: metal::CommandQueue = DEVICE.new_command_queue();
static ref PIPELINES: HashMap<String, metal::ComputePipelineState> = {
let mut map = HashMap::new();
for name in ["add", "sub", "mul"] {
let function = LIB.get_function(name, None).unwrap();
let pipeline = DEVICE.new_compute_pipeline_state_with_function(&function).unwrap();
map.insert(name.to_string(), pipeline);
}
map
};
}
/// The (inner) type of tensor elements.
pub trait TensorType: Clone + Debug + 'static {
/// Returns the zero value.
@@ -1404,10 +1374,6 @@ impl<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sync> Ad
let lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
#[cfg(feature = "metal")]
let res = metal_tensor_op(&lhs, &rhs, "add");
#[cfg(not(feature = "metal"))]
let res = {
let mut res: Tensor<T> = lhs
.par_iter()
@@ -1505,10 +1471,6 @@ impl<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sync> Su
let lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
#[cfg(feature = "metal")]
let res = metal_tensor_op(&lhs, &rhs, "sub");
#[cfg(not(feature = "metal"))]
let res = {
let mut res: Tensor<T> = lhs
.par_iter()
@@ -1576,10 +1538,6 @@ impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync> Mu
let lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
#[cfg(feature = "metal")]
let res = metal_tensor_op(&lhs, &rhs, "mul");
#[cfg(not(feature = "metal"))]
let res = {
let mut res: Tensor<T> = lhs
.par_iter()
@@ -1811,66 +1769,4 @@ mod tests {
let b = Tensor::<IntegerRep>::new(Some(&[1, 4]), &[2, 1]).unwrap();
assert_eq!(a.get_slice(&[0..2, 0..1]).unwrap(), b);
}
#[test]
#[cfg(feature = "metal")]
fn tensor_metal_int() {
let a = Tensor::<i64>::new(Some(&[1, 2, 3, 4]), &[2, 2]).unwrap();
let b = Tensor::<i64>::new(Some(&[1, 2, 3, 4]), &[2, 2]).unwrap();
let c = metal_tensor_op(&a, &b, "add");
assert_eq!(c, Tensor::new(Some(&[2, 4, 6, 8]), &[2, 2]).unwrap());
let c = metal_tensor_op(&a, &b, "sub");
assert_eq!(c, Tensor::new(Some(&[0, 0, 0, 0]), &[2, 2]).unwrap());
let c = metal_tensor_op(&a, &b, "mul");
assert_eq!(c, Tensor::new(Some(&[1, 4, 9, 16]), &[2, 2]).unwrap());
}
#[test]
#[cfg(feature = "metal")]
fn tensor_metal_felt() {
use halo2curves::bn256::Fr;
let a = Tensor::<Fr>::new(
Some(&[Fr::from(1), Fr::from(2), Fr::from(3), Fr::from(4)]),
&[2, 2],
)
.unwrap();
let b = Tensor::<Fr>::new(
Some(&[Fr::from(1), Fr::from(2), Fr::from(3), Fr::from(4)]),
&[2, 2],
)
.unwrap();
let c = metal_tensor_op(&a, &b, "add");
assert_eq!(
c,
Tensor::<Fr>::new(
Some(&[Fr::from(2), Fr::from(4), Fr::from(6), Fr::from(8)]),
&[2, 2],
)
.unwrap()
);
let c = metal_tensor_op(&a, &b, "sub");
assert_eq!(
c,
Tensor::<Fr>::new(
Some(&[Fr::from(0), Fr::from(0), Fr::from(0), Fr::from(0)]),
&[2, 2],
)
.unwrap()
);
let c = metal_tensor_op(&a, &b, "mul");
assert_eq!(
c,
Tensor::<Fr>::new(
Some(&[Fr::from(1), Fr::from(4), Fr::from(9), Fr::from(16)]),
&[2, 2],
)
.unwrap()
);
}
}
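With the Metal element-wise kernels deleted here (the GPU work now lives behind the MSM backend instead), Add, Sub and Mul on tensors always take the parallel CPU path that remains in this file. A stripped-down sketch of that path, assuming rayon and ignoring broadcasting:

use rayon::prelude::*;

// Element-wise addition over already-broadcast operands, parallelised with rayon.
fn elementwise_add(lhs: &[i64], rhs: &[i64]) -> Vec<i64> {
    lhs.par_iter()
        .zip(rhs.par_iter())
        .map(|(a, b)| a + b)
        .collect()
}

fn main() {
    assert_eq!(elementwise_add(&[1, 2, 3, 4], &[1, 2, 3, 4]), vec![2, 4, 6, 8]);
}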

View File

@@ -2795,7 +2795,10 @@ mod native_tests {
"--features",
"icicle",
];
#[cfg(not(feature = "icicle"))]
#[cfg(feature = "macos-metal")]
let args = ["build", "--release", "--bin", "ezkl", "--features", "macos-metal"];
// not macos-metal and not icicle
#[cfg(all(not(feature = "icicle"), not(feature = "macos-metal")))]
let args = ["build", "--release", "--bin", "ezkl"];
#[cfg(not(feature = "mv-lookup"))]
let args = [

View File

@@ -72,11 +72,10 @@ mod py_tests {
"torchtext==0.17.2",
"torchvision==0.17.2",
"pandas==2.2.1",
"numpy==1.26.4",
"seaborn==0.13.2",
"notebook==7.1.2",
"nbconvert==7.16.3",
"onnx==1.16.0",
"onnx==1.17.0",
"kaggle==1.6.8",
"py-solc-x==2.0.3",
"web3==7.5.0",
@@ -90,12 +89,13 @@ mod py_tests {
"xgboost==2.0.3",
"hummingbird-ml==0.4.11",
"lightgbm==4.3.0",
"numpy==1.26.4",
])
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new("pip")
.args(["install", "numpy==1.23"])
.args(["install", "numpy==1.26.4"])
.status()
.expect("failed to execute process");