Compare commits

1 commit

Author: dante
SHA1: be5d241b42
Message: fix: felt to IntegerRep overflow should panic
Date: 2025-02-11 21:25:15 -05:00

34 changed files with 394 additions and 1585 deletions

View File

@@ -29,7 +29,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa #v0.4.0
@@ -40,7 +40,7 @@ jobs:
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Install binaryen
run: |
set -e

View File

@@ -15,7 +15,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: nanoGPT Mock

View File

@@ -50,7 +50,7 @@ jobs:
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
@@ -115,7 +115,7 @@ jobs:
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy

View File

@@ -51,7 +51,7 @@ jobs:
steps:
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Checkout repo
@@ -119,27 +119,27 @@ jobs:
include:
- build: windows-msvc
os: windows-latest
rust: nightly-2025-02-17
rust: nightly-2024-07-18
target: x86_64-pc-windows-msvc
- build: macos
os: macos-13
rust: nightly-2025-02-17
rust: nightly-2024-07-18
target: x86_64-apple-darwin
- build: macos-aarch64
os: macos-13
rust: nightly-2025-02-17
rust: nightly-2024-07-18
target: aarch64-apple-darwin
- build: linux-musl
os: ubuntu-22.04
rust: nightly-2025-02-17
rust: nightly-2024-07-18
target: x86_64-unknown-linux-musl
- build: linux-gnu
os: ubuntu-22.04
rust: nightly-2025-02-17
rust: nightly-2024-07-18
target: x86_64-unknown-linux-gnu
- build: linux-aarch64
os: ubuntu-22.04
rust: nightly-2025-02-17
rust: nightly-2024-07-18
target: aarch64-unknown-linux-gnu
steps:

View File

@@ -30,7 +30,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@v1
@@ -50,7 +50,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Build
@@ -66,7 +66,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Docs
@@ -82,7 +82,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -107,7 +107,7 @@ jobs:
# persist-credentials: false
# - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
# with:
# toolchain: nightly-2025-02-17
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -117,6 +117,10 @@ jobs:
# - uses: mwilliamson/setup-wasmtime-action@bf814d7d8fc3c3a77dfe114bd9fb8a2c575f6ad6 #v2.0.0
# with:
# wasmtime-version: "3.0.1"
# - name: Install wasm32-wasi
# run: rustup target add wasm32-wasi
# - name: Install cargo-wasi
# run: cargo install cargo-wasi
# # - name: Matmul overflow (wasi)
# # run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# # - name: Conv overflow (wasi)
@@ -140,7 +144,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -150,6 +154,10 @@ jobs:
- uses: mwilliamson/setup-wasmtime-action@bf814d7d8fc3c3a77dfe114bd9fb8a2c575f6ad6 #v2.0.0
with:
wasmtime-version: "3.0.1"
- name: Install wasm32-wasi
run: rustup target add wasm32-wasi
- name: Install cargo-wasi
run: cargo install cargo-wasi
# - name: Matmul overflow (wasi)
# run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# - name: Conv overflow (wasi)
@@ -173,7 +181,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -183,6 +191,10 @@ jobs:
- uses: mwilliamson/setup-wasmtime-action@bf814d7d8fc3c3a77dfe114bd9fb8a2c575f6ad6 #v2.0.0
with:
wasmtime-version: "3.0.1"
- name: Install wasm32-wasi
run: rustup target add wasm32-wasi
- name: Install cargo-wasi
run: cargo install cargo-wasi
# - name: Matmul overflow (wasi)
# run: cargo wasi test matmul_col_ultra_overflow -- --include-ignored --nocapture
# - name: Conv overflow (wasi)
@@ -206,7 +218,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -226,7 +238,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa #v0.4.0
@@ -239,7 +251,7 @@ jobs:
- name: Install wasm32-unknown-unknown
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Run wasm verifier tests
# on mac:
# AR=/opt/homebrew/opt/llvm/bin/llvm-ar CC=/opt/homebrew/opt/llvm/bin/clang wasm-pack test --firefox --headless -- -Z build-std="panic_abort,std" --features web
@@ -255,7 +267,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -322,7 +334,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -342,7 +354,7 @@ jobs:
node-version: "18.12.1"
cache: "pnpm"
- name: "Add rust-src"
run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- name: Install dependencies for js tests and in-browser-evm-verifier package
run: |
pnpm install --frozen-lockfile
@@ -407,7 +419,7 @@ jobs:
# persist-credentials: false
# - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
# with:
# toolchain: nightly-2025-02-17
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa #v0.4.0
@@ -415,7 +427,7 @@ jobs:
# # Pin to version 0.12.1
# version: 'v0.12.1'
# - name: Add rust-src
# run: rustup component add rust-src --toolchain nightly-2025-02-17
# run: rustup component add rust-src --toolchain nightly-2024-07-18
# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
# with:
# persist-credentials: false
@@ -441,7 +453,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: jetli/wasm-pack-action@0d096b08b4e5a7de8c28de67e11e945404e9eefa #v0.4.0
@@ -452,7 +464,7 @@ jobs:
run: rustup target add wasm32-unknown-unknown
- name: Add rust-src
run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
with:
persist-credentials: false
@@ -522,11 +534,11 @@ jobs:
# persist-credentials: false
# - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
# with:
# toolchain: nightly-2025-02-17
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - name: Add rust-src
# run: rustup component add rust-src --toolchain nightly-2025-02-17-x86_64-unknown-linux-gnu
# run: rustup component add rust-src --toolchain nightly-2024-07-18-x86_64-unknown-linux-gnu
# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2
# - uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
# with:
@@ -560,7 +572,7 @@ jobs:
persist-credentials: false
- uses: dtolnay/rust-toolchain@4f94fbe7e03939b0e674bcc9ca609a16088f63ff #nightly branch, TODO: update when required
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -580,7 +592,7 @@ jobs:
# persist-credentials: false
# - uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
# with:
# toolchain: nightly-2025-02-17
# toolchain: nightly-2024-07-18
# override: true
# components: rustfmt, clippy
# - uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -601,7 +613,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -622,7 +634,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -647,7 +659,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -671,7 +683,7 @@ jobs:
python-version: "3.12"
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Install cmake
@@ -701,7 +713,7 @@ jobs:
python-version: "3.12"
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -752,7 +764,7 @@ jobs:
python-version: "3.11"
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -807,7 +819,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- uses: baptiste0928/cargo-install@91c5da15570085bcde6f4d7aed98cb82d6769fd3 #v3.3.0
@@ -815,7 +827,7 @@ jobs:
crate: cargo-nextest
locked: true
- name: Run ios tests
run: CARGO_BUILD_TARGET=aarch64-apple-darwin RUSTUP_TOOLCHAIN=nightly-2025-02-17-aarch64-apple-darwin cargo test --test ios_integration_tests --features ios-bindings-test --no-default-features
run: CARGO_BUILD_TARGET=aarch64-apple-darwin RUSTUP_TOOLCHAIN=nightly-2024-07-18-aarch64-apple-darwin cargo test --test ios_integration_tests --features ios-bindings-test --no-default-features
swift-package-tests:
permissions:
@@ -829,7 +841,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy
- name: Build EzklCoreBindings

View File

@@ -17,7 +17,7 @@ jobs:
persist-credentials: false
- uses: actions-rs/toolchain@b2417cde72dcf67f306c0ae8e0828a81bf0b189f #v1.0.6
with:
toolchain: nightly-2025-02-17
toolchain: nightly-2024-07-18
override: true
components: rustfmt, clippy

View File

@@ -3,7 +3,7 @@ cargo-features = ["profile-rustflags"]
[package]
name = "ezkl"
version = "0.0.0"
edition = "2024"
edition = "2021"
default-run = "ezkl"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

View File

@@ -22,7 +22,7 @@ fn generate_test_data(size: usize, zero_probability: f64) -> Vec<ValType> {
let mut rng = rand::thread_rng();
(0..size)
.map(|_i| {
if rng.r#gen::<f64>() < zero_probability {
if rng.gen::<f64>() < zero_probability {
ValType::Constant(F::ZERO)
} else {
ValType::Constant(F::ONE) // Or some other non-zero value
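
The `rng.gen` / `rng.r#gen` pairs in this diff and the `edition = "2024"` / `edition = "2021"` pair in Cargo.toml above are two sides of the same change: `gen` became a reserved keyword in the Rust 2024 edition, so edition-2024 code must call rand's method through a raw identifier (the `expr_2021` macro fragment specifier appearing later in the tensor macro belongs to the same migration). A minimal sketch, assuming rand 0.8:

```rust
// Under edition 2021, `gen` is an ordinary identifier; under edition
// 2024 it is a keyword, so the same call must be written `rng.r#gen()`.
use rand::Rng;

fn generate_test_data(size: usize, zero_probability: f64) -> Vec<f64> {
    let mut rng = rand::thread_rng();
    (0..size)
        .map(|_| {
            // edition-2021 spelling; edition 2024 would need rng.r#gen::<f64>()
            if rng.gen::<f64>() < zero_probability {
                0.0
            } else {
                1.0 // or some other non-zero value
            }
        })
        .collect()
}
```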

View File

@@ -1,52 +1,20 @@
# EZKL Security Note: Quantization-Activated Model Backdoors
# EZKL Security Note: Quantization-Induced Model Backdoors
## Model backdoors and provenance
> Note: this only affects a situation where a party separate from an application's developer has access to the model's weights and can modify them. This is a common scenario in adversarial machine learning research, but can be less common in real-world applications. If you're building your models in-house and deploying them yourself, this is less of a concern. If you're building a permissionless system where anyone can submit models, this is more of a concern.
Machine learning models inherently suffer from robustness issues, which can lead to various
kinds of attacks, from backdoors to evasion attacks. These vulnerabilities are a direct byproduct of how machine learning models learn and cannot be remediated.
Models processed through EZKL's quantization step can harbor backdoors that are dormant in the original full-precision model but activate during quantization. These backdoors force specific outputs when triggered, with impact varying by application.
We say a model has a backdoor whenever a specific attacker-chosen trigger in the input leads
to the model misbehaving. For instance, if we have an image classifier discriminating cats from dogs, the ability to turn any image of a cat into an image classified as a dog by changing a specific pixel pattern constitutes a backdoor.
Key Factors:
Backdoors can be introduced using many different vectors. An attacker can introduce a
backdoor using traditional security vulnerabilities. For instance, they could directly alter the file containing model weights or dynamically hack the Python code of the model. In addition, backdoors can be introduced by the training data through a process known as poisoning. In this case, an attacker adds malicious data points to the dataset before the model is trained so that the model learns to associate the backdoor trigger with the intended misbehavior.
- Larger models increase attack feasibility through more parameter capacity
- Smaller quantization scales facilitate attacks by allowing greater weight modifications
- Rebase ratio of 1 enables exploitation of convolutional layer consistency
All these vectors constitute a whole range of provenance challenges, as any component of an
AI system can virtually be an entrypoint for a backdoor. Although provenance is already a
concern with traditional code, the issue is exacerbated with AI, as retraining a model is
cost-prohibitive. It is thus impractical to translate the “recompile it yourself” thinking to AI.
Limitations:
## Quantization activated backdoors
Backdoors are a generic concern in AI that is outside the scope of EZKL. However, EZKL may
activate a specific subset of backdoors. Several academic papers have demonstrated the
possibility, both in theory and in practice, of implanting undetectable and inactive backdoors in a full-precision model that can be reactivated by quantization.
An external attacker may trick the user of an application running EZKL into loading a model
containing a quantization backdoor. This backdoor is active in the resulting model and circuit but not in the full-precision model supplied to EZKL, compromising the integrity of the target application and the resulting proof.
### When is this a concern for me as a user?
Any untrusted component in your AI stack may be a backdoor vector. In practice, the most
sensitive parts include:
- Datasets downloaded from the web or containing crowdsourced data
- Models downloaded from the web even after finetuning
- Untrusted software dependencies (well-known frameworks such as PyTorch can typically
be considered trusted)
- Any component loaded through an unsafe serialization format, such as Pickle.
Because backdoors are inherent to ML and cannot be eliminated, reviewing the provenance of
these sensitive components is especially important.
### Responsibilities of the user and EZKL
As EZKL cannot prevent backdoored models from being used, it is the responsibility of the user to review the provenance of all the components in their AI stack to ensure that no backdoor could have been implanted. EZKL shall not be held responsible for misleading prediction proofs resulting from using a backdoored model or for any harm caused to a system or its users due to a misbehaving model.
### Limitations:
- Attack effectiveness depends on calibration settings and internal rescaling operations.
- Attack effectiveness depends on calibration settings and internal rescaling operations.
- Further research needed on backdoor persistence through witness/proof stages.
- Can be mitigated by evaluating the quantized model (using `ezkl gen-witness`), rather than relying on the evaluation of the original model in pytorch or onnx-runtime as difference in evaluation could reveal a backdoor.
- Can be mitigated by evaluating the quantized model (using `ezkl gen-witness`), rather than relying on the evaluation of the original model.
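To make the `gen-witness` mitigation above concrete: run the same inputs through the quantized circuit and the full-precision model, and flag systematic output gaps. A minimal sketch of the comparison step only (the two evaluation harnesses are assumed, not shown):

```rust
/// Returns true when every quantized output is within `tol` of its
/// full-precision reference; a consistent gap on particular inputs is
/// evidence that quantization changed the model's behavior.
fn outputs_agree(reference: &[f64], quantized: &[f64], tol: f64) -> bool {
    reference.len() == quantized.len()
        && reference
            .iter()
            .zip(quantized)
            .all(|(r, q)| (r - q).abs() <= tol)
}
```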
References:

View File

@@ -1,7 +1,7 @@
import ezkl
project = 'ezkl'
release = '20.2.6'
release = '0.0.0'
version = release

View File

@@ -373,19 +373,15 @@
"outputs": [],
"source": [
"# Set image size.\n",
"IMAGE_WIDTH = 64\n",
"IMAGE_HEIGHT = 64\n",
"IMAGE_WIDTH = 112\n",
"IMAGE_HEIGHT = 112\n",
"IMAGE_SIZE=(IMAGE_WIDTH, IMAGE_HEIGHT)\n",
"\n",
"# Create training transform with TrivialAugment\n",
"train_transform = transforms.Compose([\n",
" transforms.Resize(IMAGE_SIZE),\n",
" transforms.TrivialAugmentWide(),\n",
" transforms.RandomHorizontalFlip(),\n",
" transforms.RandomRotation(10),\n",
" transforms.ColorJitter(brightness=0.2, contrast=0.2),\n",
" transforms.ToTensor(),\n",
" ])\n",
" transforms.ToTensor()])\n",
"\n",
"# Create testing transform (no data augmentation)\n",
"test_transform = transforms.Compose([\n",
@@ -428,7 +424,7 @@
},
{
"cell_type": "code",
"execution_count": 74,
"execution_count": 26,
"metadata": {},
"outputs": [],
"source": [
@@ -438,55 +434,25 @@
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
"device\n",
"\n",
"# Improved CNN-based image classifier\n",
"# # Creating a CNN-based image classifier.\n",
"class ImageClassifier(nn.Module):\n",
" def __init__(self):\n",
" super().__init__()\n",
" \n",
" # First convolutional block with batch normalization and LeakyReLU\n",
" self.conv_layer_1 = nn.Sequential(\n",
" nn.Conv2d(3, 6, 3, padding=1), # Moderate increase from 4 to 6\n",
" nn.BatchNorm2d(6),\n",
" nn.LeakyReLU(0.1),\n",
" nn.MaxPool2d(2)\n",
" )\n",
" \n",
" # Second convolutional block with batch normalization and LeakyReLU\n",
" nn.Conv2d(3, 4, 3, padding=1),\n",
" nn.ReLU(),\n",
" nn.MaxPool2d(2))\n",
" self.conv_layer_2 = nn.Sequential(\n",
" nn.Conv2d(6, 8, 3, padding=1), # Moderate increase from 4 to 8\n",
" nn.BatchNorm2d(8),\n",
" nn.LeakyReLU(0.1),\n",
" nn.MaxPool2d(2)\n",
" )\n",
" \n",
" # For a 64x64 input, after 2 MaxPool2d(2) layers, the spatial dimensions are 16x16\n",
" # With 8 channels, the flattened size is 16*16*8 = 2048\n",
" \n",
" # Classifier with dropout\n",
" nn.Conv2d(4, 4, 3, padding=1),\n",
" nn.ReLU(),\n",
" nn.MaxPool2d(2))\n",
" self.classifier = nn.Sequential(\n",
" nn.Flatten(),\n",
" nn.Dropout(0.25), # Add dropout for regularization\n",
" nn.Linear(in_features=16*16*8, out_features=2)\n",
" )\n",
" \n",
" # For residual connection\n",
" self.downsample = nn.Sequential(\n",
" nn.Conv2d(3, 8, 1, stride=4), # Match spatial dimensions (64x64 -> 16x16)\n",
" nn.BatchNorm2d(8)\n",
" )\n",
" \n",
" def forward(self, x):\n",
" # Save input for residual connection\n",
" identity = self.downsample(x)\n",
" \n",
" nn.Flatten(),\n",
" nn.Linear(in_features=3136, out_features=2))\n",
" def forward(self, x: torch.Tensor):\n",
" x = self.conv_layer_1(x)\n",
" x = self.conv_layer_2(x)\n",
" \n",
" # Add residual connection\n",
" x = x + identity\n",
" \n",
" x = self.classifier(x)\n",
" \n",
" return x\n",
"# Instantiate an object.\n",
"model = ImageClassifier().to(device)\n"
@@ -560,7 +526,7 @@
},
{
"cell_type": "code",
"execution_count": 77,
"execution_count": 29,
"metadata": {},
"outputs": [],
"source": [
@@ -696,17 +662,15 @@
"torch.manual_seed(42) \n",
"torch.cuda.manual_seed(42)\n",
"\n",
"# Set number of epochs (change to 1000 for better results)\n",
"# Set number of epochs\n",
"NUM_EPOCHS = 25\n",
"# NUM_EPOCHS = 1000\n",
"\n",
"\n",
"# Setup loss function and optimizer\n",
"loss_fn = nn.CrossEntropyLoss()\n",
"optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)\n",
"\n",
"# Start the timer\n",
"from timeit import default_timer as timer\n",
"from timeit import default_timer as timer \n",
"start_time = timer()\n",
"\n",
"# Train model_0\n",
@@ -731,7 +695,7 @@
},
{
"cell_type": "code",
"execution_count": 94,
"execution_count": 78,
"metadata": {},
"outputs": [],
"source": [
@@ -866,7 +830,7 @@
},
{
"cell_type": "code",
"execution_count": 98,
"execution_count": 86,
"metadata": {},
"outputs": [],
"source": [
@@ -1052,6 +1016,7 @@
"source": [
"import os\n",
"\n",
"\n",
"res = await ezkl.create_evm_verifier()\n",
"\n",
"assert res == True"
@@ -1137,7 +1102,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
"version": "3.12.7"
}
},
"nbformat": 4,

View File

@@ -1 +0,0 @@
{"run_args":{"input_scale":7,"param_scale":7,"scale_rebase_multiplier":1,"lookup_range":[-32768,32768],"logrows":17,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private","rebase_frac_zero_constants":false,"check_mode":"UNSAFE","commitment":"KZG","decomp_base":16384,"decomp_legs":2,"bounded_log_lookup":false,"ignore_range_check_inputs_outputs":false},"num_rows":54,"total_assignments":109,"total_const_size":4,"total_dynamic_col_size":0,"max_dynamic_input_len":0,"num_dynamic_lookups":0,"num_shuffles":0,"total_shuffle_col_size":0,"model_instance_shapes":[[1,1]],"model_output_scales":[7],"model_input_scales":[7],"module_sizes":{"polycommit":[],"poseidon":[0,[0]]},"required_lookups":[],"required_range_checks":[[-1,1],[0,16383]],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null,"timestamp":1739396322131,"input_types":["F32"],"output_types":["F32"]}

File diff suppressed because one or more lines are too long

View File

@@ -1,3 +1,3 @@
[toolchain]
channel = "nightly-2025-02-17"
channel = "nightly-2024-07-18"
components = ["rustfmt", "clippy"]

View File

@@ -28,8 +28,6 @@ use std::env;
#[tokio::main(flavor = "current_thread")]
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
pub async fn main() {
use log::debug;
let args = Cli::parse();
if let Some(generator) = args.generator {
@@ -44,7 +42,7 @@ pub async fn main() {
} else {
info!("Running with CPU");
}
debug!(
info!(
"command: \n {}",
&command.as_json().to_colored_json_auto().unwrap()
);

View File

@@ -1009,7 +1009,7 @@ fn gen_random_data(
/// bool
///
#[pyfunction(signature = (
data = String::from(DEFAULT_CALIBRATION_FILE),
data = PathBuf::from(DEFAULT_CALIBRATION_FILE),
model = PathBuf::from(DEFAULT_MODEL),
settings = PathBuf::from(DEFAULT_SETTINGS),
target = CalibrationTarget::default(), // default is "resources
@@ -1021,7 +1021,7 @@ fn gen_random_data(
#[gen_stub_pyfunction]
fn calibrate_settings(
py: Python,
data: String,
data: PathBuf,
model: PathBuf,
settings: PathBuf,
target: CalibrationTarget,
@@ -1076,7 +1076,7 @@ fn calibrate_settings(
/// Python object containing the witness values
///
#[pyfunction(signature = (
data=String::from(DEFAULT_DATA),
data=PathBuf::from(DEFAULT_DATA),
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
output=PathBuf::from(DEFAULT_WITNESS),
vk_path=None,
@@ -1085,7 +1085,7 @@ fn calibrate_settings(
#[gen_stub_pyfunction]
fn gen_witness(
py: Python,
data: String,
data: PathBuf,
model: PathBuf,
output: Option<PathBuf>,
vk_path: Option<PathBuf>,
@@ -1754,7 +1754,7 @@ fn create_evm_vka(
/// bool
///
#[pyfunction(signature = (
input_data=String::from(DEFAULT_DATA),
input_data=PathBuf::from(DEFAULT_DATA),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE_DA),
abi_path=PathBuf::from(DEFAULT_VERIFIER_DA_ABI),
@@ -1763,7 +1763,7 @@ fn create_evm_vka(
#[gen_stub_pyfunction]
fn create_evm_data_attestation(
py: Python,
input_data: String,
input_data: PathBuf,
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
@@ -1824,7 +1824,7 @@ fn create_evm_data_attestation(
#[gen_stub_pyfunction]
fn setup_test_evm_witness(
py: Python,
data_path: String,
data_path: PathBuf,
compiled_circuit_path: PathBuf,
test_data: PathBuf,
input_source: PyTestDataSource,
@@ -1902,7 +1902,7 @@ fn deploy_evm(
fn deploy_da_evm(
py: Python,
addr_path: PathBuf,
input_data: String,
input_data: PathBuf,
settings_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,

View File

@@ -21,10 +21,7 @@ pub enum BaseOp {
/// Matches a [BaseOp] to an operation over inputs
impl BaseOp {
/// forward func for non-accumulating operations
/// # Panics
/// Panics if called on an accumulating operation
/// # Examples
/// forward func
pub fn nonaccum_f<
T: TensorType + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Neg<Output = T>,
>(
@@ -40,9 +37,7 @@ impl BaseOp {
}
}
/// forward func for accumulating operations
/// # Panics
/// Panics if called on a non-accumulating operation
/// forward func
pub fn accum_f<
T: TensorType + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Neg<Output = T>,
>(

File diff suppressed because it is too large

View File

@@ -159,8 +159,6 @@ impl std::str::FromStr for InputType {
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
impl From<DatumType> for InputType {
/// # Panics
/// Panics if the datum type is not supported
fn from(datum_type: DatumType) -> Self {
match datum_type {
DatumType::Bool => InputType::Bool,
@@ -319,8 +317,13 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
}
impl<
F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Serialize + for<'de> Deserialize<'de>,
> Op<F> for Constant<F>
F: PrimeField
+ TensorType
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>,
> Op<F> for Constant<F>
{
fn as_any(&self) -> &dyn Any {
self

View File

@@ -49,7 +49,7 @@ pub enum PolyOp {
},
Downsample {
axis: usize,
stride: isize,
stride: usize,
modulo: usize,
},
DeConv {
@@ -108,8 +108,13 @@ pub enum PolyOp {
}
impl<
F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Serialize + for<'de> Deserialize<'de>,
> Op<F> for PolyOp
F: PrimeField
+ TensorType
+ PartialOrd
+ std::hash::Hash
+ Serialize
+ for<'de> Deserialize<'de>,
> Op<F> for PolyOp
{
/// Returns a reference to the Any trait.
fn as_any(&self) -> &dyn Any {
@@ -183,8 +188,7 @@ impl<
} => {
format!(
"DECONV (stride={:?}, padding={:?}, output_padding={:?}, group={}, data_format={:?}, kernel_format={:?})",
stride, padding, output_padding, group, data_format, kernel_format
)
stride, padding, output_padding, group, data_format, kernel_format)
}
PolyOp::Concat { axis } => format!("CONCAT (axis={})", axis),
PolyOp::Slice { axis, start, end } => {

View File

@@ -1,6 +1,6 @@
use alloy::primitives::Address as H160;
use clap::{Command, Parser, Subcommand};
use clap_complete::{Generator, Shell, generate};
use clap_complete::{generate, Generator, Shell};
#[cfg(feature = "python-bindings")]
use pyo3::{conversion::FromPyObject, exceptions::PyValueError, prelude::*};
use serde::{Deserialize, Serialize};
@@ -8,7 +8,7 @@ use std::path::PathBuf;
use std::str::FromStr;
use tosubcommand::{ToFlags, ToSubcommand};
use crate::{Commitments, RunArgs, pfsys::ProofType};
use crate::{pfsys::ProofType, Commitments, RunArgs};
use crate::circuit::CheckMode;
use crate::graph::TestDataSource;
@@ -360,13 +360,8 @@ pub fn get_styles() -> clap::builder::Styles {
}
/// Print completions for the given generator
pub fn print_completions<G: Generator>(r#gen: G, cmd: &mut Command) {
generate(
r#gen,
cmd,
cmd.get_name().to_string(),
&mut std::io::stdout(),
);
pub fn print_completions<G: Generator>(gen: G, cmd: &mut Command) {
generate(gen, cmd, cmd.get_name().to_string(), &mut std::io::stdout());
}
#[allow(missing_docs)]
@@ -401,9 +396,8 @@ pub enum Commands {
/// Generates the witness from an input file.
GenWitness {
/// The path to the .json data file
/// You can also pass the input data as a string, eg. --data '{"input_data": [1.0,2.0,3.0]}' directly and skip the file
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
data: Option<String>,
data: Option<PathBuf>,
/// The path to the compiled model file (generated using the compile-circuit command)
#[arg(short = 'M', long, default_value = DEFAULT_COMPILED_CIRCUIT, value_hint = clap::ValueHint::FilePath)]
compiled_circuit: Option<PathBuf>,
@@ -435,7 +429,7 @@ pub enum Commands {
/// The path to the .onnx model file
#[arg(short = 'M', long, default_value = DEFAULT_MODEL, value_hint = clap::ValueHint::FilePath)]
model: Option<PathBuf>,
/// The path to the .json data file to output
/// The path to the .json data file
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
data: Option<PathBuf>,
/// Hand-written parser for graph variables, eg. batch_size=1
@@ -448,9 +442,8 @@ pub enum Commands {
/// Calibrates the proving scale, lookup bits and logrows from a circuit settings file.
CalibrateSettings {
/// The path to the .json calibration data file.
/// You can also pass the input data as a string, eg. --data '{"input_data": [1.0,2.0,3.0]}' directly and skip the file
#[arg(short = 'D', long, default_value = DEFAULT_CALIBRATION_FILE, value_hint = clap::ValueHint::FilePath)]
data: Option<String>,
data: Option<PathBuf>,
/// The path to the .onnx model file
#[arg(short = 'M', long, default_value = DEFAULT_MODEL, value_hint = clap::ValueHint::FilePath)]
model: Option<PathBuf>,
@@ -633,9 +626,8 @@ pub enum Commands {
#[command(arg_required_else_help = true)]
SetupTestEvmData {
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
/// You can also pass the input data as a string, eg. --data '{"input_data": [1.0,2.0,3.0]}' directly and skip the file
#[arg(short = 'D', long, value_hint = clap::ValueHint::FilePath)]
data: Option<String>,
data: Option<PathBuf>,
/// The path to the compiled model file (generated using the compile-circuit command)
#[arg(short = 'M', long, value_hint = clap::ValueHint::FilePath)]
compiled_circuit: Option<PathBuf>,
@@ -661,9 +653,8 @@ pub enum Commands {
#[arg(long, value_hint = clap::ValueHint::Other)]
addr: H160Flag,
/// The path to the .json data file.
/// You can also pass the input data as a string, eg. --data '{"input_data": [1.0,2.0,3.0]}' directly and skip the file
#[arg(short = 'D', long, value_hint = clap::ValueHint::FilePath)]
data: Option<String>,
data: Option<PathBuf>,
/// RPC URL for an Ethereum node, if None will use Anvil but WON'T persist state
#[arg(short = 'U', long, value_hint = clap::ValueHint::Url)]
rpc_url: Option<String>,
@@ -780,7 +771,7 @@ pub enum Commands {
/// view functions that return the data that the network
/// ingests as inputs.
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
data: Option<String>,
data: Option<PathBuf>,
/// The path to the witness file. This is needed for proof swapping for kzg commitments.
#[arg(short = 'W', long, default_value = DEFAULT_WITNESS, value_hint = clap::ValueHint::FilePath)]
witness: Option<PathBuf>,
@@ -875,9 +866,8 @@ pub enum Commands {
#[command(name = "deploy-evm-da")]
DeployEvmDataAttestation {
/// The path to the .json data file, which should include both the network input (possibly private) and the network output (public input to the proof)
/// You can also pass the input data as a string, eg. --data '{"input_data": [1.0,2.0,3.0]}' directly and skip the file
#[arg(short = 'D', long, default_value = DEFAULT_DATA, value_hint = clap::ValueHint::FilePath)]
data: Option<String>,
data: Option<PathBuf>,
/// The path to load circuit settings .json file from (generated using the gen-settings command)
#[arg(long, default_value = DEFAULT_SETTINGS, value_hint = clap::ValueHint::FilePath)]
settings_path: Option<PathBuf>,

View File

@@ -383,7 +383,7 @@ pub async fn deploy_contract_via_solidity(
///
pub async fn deploy_da_verifier_via_solidity(
settings_path: PathBuf,
input: String,
input: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<&str>,
runs: usize,
@@ -391,7 +391,7 @@ pub async fn deploy_da_verifier_via_solidity(
) -> Result<H160, EthError> {
let (client, client_address) = setup_eth_backend(rpc_url, private_key).await?;
let input = GraphData::from_str(&input).map_err(|_| EthError::GraphData)?;
let input = GraphData::from_path(input).map_err(|_| EthError::GraphData)?;
let settings = GraphSettings::load(&settings_path).map_err(|_| EthError::GraphSettings)?;
@@ -688,10 +688,10 @@ fn parse_call_to_account(call_to_account: CallToAccount) -> Result<ParsedCallToA
pub async fn update_account_calls(
addr: H160,
input: String,
input: PathBuf,
rpc_url: Option<&str>,
) -> Result<(), EthError> {
let input = GraphData::from_str(&input).map_err(|_| EthError::GraphData)?;
let input = GraphData::from_path(input).map_err(|_| EthError::GraphData)?;
// The data that will be stored in the test contracts that will eventually be read from.
let mut calls_to_accounts = vec![];

View File

@@ -1,6 +1,5 @@
use crate::EZKL_BUF_CAPACITY;
use crate::circuit::CheckMode;
use crate::circuit::region::RegionSettings;
use crate::circuit::CheckMode;
use crate::commands::CalibrationTarget;
use crate::eth::{
deploy_contract_via_solidity, deploy_da_verifier_via_solidity, fix_da_multi_sol,
@@ -13,21 +12,21 @@ use crate::graph::{GraphCircuit, GraphSettings, GraphWitness, Model};
use crate::graph::{TestDataSource, TestSources};
use crate::pfsys::evm::aggregation_kzg::{AggregationCircuit, PoseidonTranscript};
use crate::pfsys::{
ProofSplitCommit, create_proof_circuit, swap_proof_commitments_polycommit, verify_proof_circuit,
create_keys, load_pk, load_vk, save_params, save_pk, Snark, StrategyType, TranscriptType,
};
use crate::pfsys::{
Snark, StrategyType, TranscriptType, create_keys, load_pk, load_vk, save_params, save_pk,
create_proof_circuit, swap_proof_commitments_polycommit, verify_proof_circuit, ProofSplitCommit,
};
use crate::pfsys::{save_vk, srs::*};
use crate::tensor::TensorError;
use crate::EZKL_BUF_CAPACITY;
use crate::{commands::*, EZKLError};
use crate::{Commitments, RunArgs};
use crate::{EZKLError, commands::*};
use colored::Colorize;
#[cfg(unix)]
use gag::Gag;
use halo2_proofs::dev::VerifyFailure;
use halo2_proofs::plonk::{self, Circuit};
use halo2_proofs::poly::VerificationStrategy;
use halo2_proofs::poly::commitment::{CommitmentScheme, Params};
use halo2_proofs::poly::commitment::{ParamsProver, Verifier};
use halo2_proofs::poly::ipa::commitment::{IPACommitmentScheme, ParamsIPA};
@@ -40,6 +39,7 @@ use halo2_proofs::poly::kzg::strategy::AccumulatorStrategy as KZGAccumulatorStra
use halo2_proofs::poly::kzg::{
commitment::ParamsKZG, strategy::SingleStrategy as KZGSingleStrategy,
};
use halo2_proofs::poly::VerificationStrategy;
use halo2_proofs::transcript::{EncodedChallenge, TranscriptReadBuffer};
use halo2_solidity_verifier;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
@@ -50,12 +50,12 @@ use instant::Instant;
use itertools::Itertools;
use log::debug;
use log::{info, trace, warn};
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde::Serialize;
use snark_verifier::loader::native::NativeLoader;
use snark_verifier::system::halo2::Config;
use snark_verifier::system::halo2::compile;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use snark_verifier::system::halo2::Config;
use std::fs::File;
use std::io::BufWriter;
use std::io::{Cursor, Write};
@@ -516,9 +516,7 @@ fn update_ezkl_binary(version: &Option<String>) -> Result<String, EZKLError> {
.status()
.is_err()
{
log::warn!(
"bash is not installed on this system, trying to run the install script with sh (may fail)"
);
log::warn!("bash is not installed on this system, trying to run the install script with sh (may fail)");
"sh"
} else {
"bash"
@@ -727,7 +725,7 @@ pub(crate) fn table(model: PathBuf, run_args: RunArgs) -> Result<String, EZKLErr
pub(crate) async fn gen_witness(
compiled_circuit_path: PathBuf,
data: String,
data: PathBuf,
output: Option<PathBuf>,
vk_path: Option<PathBuf>,
srs_path: Option<PathBuf>,
@@ -735,7 +733,7 @@ pub(crate) async fn gen_witness(
// these aren't real values so the sanity checks are mostly meaningless
let mut circuit = GraphCircuit::load(compiled_circuit_path)?;
let data = GraphData::from_str(&data)?;
let data: GraphData = GraphData::from_path(data)?;
let settings = circuit.settings().clone();
let vk = if let Some(vk) = vk_path {
@@ -878,7 +876,7 @@ pub(crate) fn gen_random_data(
let mut tensor = TractTensor::zero::<f32>(sizes).unwrap();
let slice = tensor.as_slice_mut::<f32>().unwrap();
slice.iter_mut().for_each(|x| *x = rng.r#gen());
slice.iter_mut().for_each(|x| *x = rng.gen());
tensor.cast_to_dt(datum_type).unwrap().into_owned()
}
@@ -1046,7 +1044,7 @@ impl AccuracyResults {
#[allow(clippy::too_many_arguments)]
pub(crate) async fn calibrate(
model_path: PathBuf,
data: String,
data: PathBuf,
settings_path: PathBuf,
target: CalibrationTarget,
lookup_safety_margin: f64,
@@ -1060,7 +1058,7 @@ pub(crate) async fn calibrate(
use crate::fieldutils::IntegerRep;
let data = GraphData::from_str(&data)?;
let data = GraphData::from_path(data)?;
// load the pre-generated settings
let settings = GraphSettings::load(&settings_path)?;
// now retrieve the run args
@@ -1524,7 +1522,7 @@ pub(crate) async fn create_evm_data_attestation(
settings_path: PathBuf,
sol_code_path: PathBuf,
abi_path: PathBuf,
input: String,
input: PathBuf,
witness: Option<PathBuf>,
) -> Result<String, EZKLError> {
#[allow(unused_imports)]
@@ -1538,7 +1536,7 @@ pub(crate) async fn create_evm_data_attestation(
// if input is not provided, we just instantiate dummy input data
let data =
GraphData::from_str(&input).unwrap_or_else(|_| GraphData::new(DataSource::File(vec![])));
GraphData::from_path(input).unwrap_or_else(|_| GraphData::new(DataSource::File(vec![])));
// The number of input and output instances we attest to for the single call data attestation
let mut input_len = None;
@@ -1627,7 +1625,7 @@ pub(crate) async fn create_evm_data_attestation(
}
pub(crate) async fn deploy_da_evm(
data: String,
data: PathBuf,
settings_path: PathBuf,
sol_code_path: PathBuf,
rpc_url: Option<String>,
@@ -1870,7 +1868,7 @@ pub(crate) fn setup(
}
pub(crate) async fn setup_test_evm_witness(
data_path: String,
data_path: PathBuf,
compiled_circuit_path: PathBuf,
test_data: PathBuf,
rpc_url: Option<String>,
@@ -1879,7 +1877,7 @@ pub(crate) async fn setup_test_evm_witness(
) -> Result<String, EZKLError> {
use crate::graph::TestOnChainData;
let mut data = GraphData::from_str(&data_path)?;
let mut data = GraphData::from_path(data_path)?;
let mut circuit = GraphCircuit::load(compiled_circuit_path)?;
// if both input and output are from files fail
@@ -1907,7 +1905,7 @@ pub(crate) async fn setup_test_evm_witness(
use crate::pfsys::ProofType;
pub(crate) async fn test_update_account_calls(
addr: H160Flag,
data: String,
data: PathBuf,
rpc_url: Option<String>,
) -> Result<String, EZKLError> {
use crate::eth::update_account_calls;

View File

@@ -19,6 +19,11 @@ pub fn integer_rep_to_felt<F: PrimeField>(x: IntegerRep) -> F {
/// Converts a PrimeField element to an f64.
pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
if x > F::from_u128(IntegerRep::MAX as u128) {
if x == -F::from_u128(IntegerRep::MAX as u128) - F::ONE {
return IntegerRep::MIN as f64;
} else if x < -F::from_u128(IntegerRep::MAX as u128) - F::ONE {
panic!("Felt value out of range for conversion to integer rep");
}
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
let lower_128: u128 = u128::from_le_bytes(negtmp[..16].try_into().unwrap());
@@ -31,11 +36,13 @@ pub fn felt_to_f64<F: PrimeField + PartialOrd + Field>(x: F) -> f64 {
}
}
/// Converts a PrimeField element to an i64.
/// Converts a PrimeField element to an integer rep.
pub fn felt_to_integer_rep<F: PrimeField + PartialOrd + Field>(x: F) -> IntegerRep {
if x > F::from_u128(IntegerRep::MAX as u128) {
if x == -F::from_u128(IntegerRep::MAX as u128) - F::ONE {
return IntegerRep::MIN;
} else if x < -F::from_u128(IntegerRep::MAX as u128) - F::ONE {
panic!("Felt value out of range for conversion to integer rep");
}
let rep = (-x).to_repr();
let negtmp: &[u8] = rep.as_ref();
@@ -70,6 +77,13 @@ mod test {
assert_eq!(res, F::from(131072));
}
#[test]
#[should_panic]
fn felttointegerrep_overflow() {
let fieldx: F = integer_rep_to_felt::<F>(IntegerRep::MIN) - F::ONE;
let _xf: IntegerRep = felt_to_integer_rep::<F>(fieldx);
}
#[test]
fn felttointegerrep() {
for x in -(2_i128.pow(16))..(2_i128.pow(16)) {
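
To see the boundary rule this fix enforces in isolation: `IntegerRep` values are embedded into the field as `x` for non-negative values and `p - |x|` for negative ones, so everything strictly between `IntegerRep::MAX` and `p - IntegerRep::MAX - 1` has no valid preimage and must panic rather than wrap, while exactly `p - MAX - 1` decodes to `IntegerRep::MIN`. A standalone sketch, modeling the field as `u128` arithmetic modulo a stand-in prime (2^128 − 159) and `IntegerRep` as `i64` — both assumptions, not the real BN254 scalar field or EZKL's actual types:

```rust
// Stand-in modulus: the largest prime below 2^128, not the real field.
const P: u128 = 340_282_366_920_938_463_463_374_607_431_768_211_297;
type IntegerRep = i64;

fn felt_to_integer_rep(x: u128) -> IntegerRep {
    let max = IntegerRep::MAX as u128;
    if x <= max {
        x as IntegerRep // non-negative embedding
    } else if x == P - max - 1 {
        IntegerRep::MIN // -(MAX + 1), special-cased to avoid signed overflow
    } else if x > P - max - 1 {
        -((P - x) as IntegerRep) // negative embedding: x = P - |v|
    } else {
        // the gap (MAX, P - MAX - 1): no IntegerRep maps here
        panic!("felt value out of range for conversion to integer rep");
    }
}

fn main() {
    assert_eq!(felt_to_integer_rep(0), 0);
    assert_eq!(felt_to_integer_rep(P - 1), -1);
    assert_eq!(felt_to_integer_rep(IntegerRep::MAX as u128), IntegerRep::MAX);
    // felt_to_integer_rep(IntegerRep::MAX as u128 + 1) would panic,
    // mirroring the felttointegerrep_overflow test above
}
```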

View File

@@ -528,27 +528,6 @@ impl GraphData {
}
}
/// Loads graph input data from a string, first seeing if it is a file path or JSON data
/// If it is a file path, it will load the data from the file
/// Otherwise, it will attempt to parse the string as JSON data
///
/// # Arguments
/// * `data` - String containing the input data
/// # Returns
/// A new GraphData instance containing the loaded data
pub fn from_str(data: &str) -> Result<Self, GraphError> {
let graph_input = serde_json::from_str(data);
match graph_input {
Ok(graph_input) => {
return Ok(graph_input);
}
Err(_) => {
let path = std::path::PathBuf::from(data);
GraphData::from_path(path)
}
}
}
/// Loads graph input data from a file
///
/// # Arguments
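
The removed `from_str` helper above makes the old calling convention explicit: try the argument as inline JSON first, then fall back to treating it as a file path, which is why the `--data` flag could take either `'{"input_data": [1.0,2.0,3.0]}'` or a filename on the `String` side of this diff. A self-contained sketch with a stand-in type (`GraphInput` here is illustrative, not EZKL's `GraphData`):

```rust
use serde::Deserialize;
use std::path::Path;

#[derive(Deserialize)]
struct GraphInput {
    input_data: Vec<f64>, // stand-in for the real GraphData shape
}

/// Parse inline JSON first; on failure, treat the string as a file path.
fn load_graph_input(data: &str) -> Result<GraphInput, String> {
    if let Ok(parsed) = serde_json::from_str::<GraphInput>(data) {
        return Ok(parsed);
    }
    let raw = std::fs::read_to_string(Path::new(data)).map_err(|e| e.to_string())?;
    serde_json::from_str(&raw).map_err(|e| e.to_string())
}
```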

View File

@@ -1,14 +1,14 @@
use super::errors::GraphError;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use super::VarScales;
use super::errors::GraphError;
use super::{Rescaled, SupportedOp, Visibility};
use crate::circuit::Op;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::circuit::hybrid::HybridOp;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::circuit::lookup::LookupOp;
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use crate::circuit::poly::PolyOp;
use crate::circuit::Op;
use crate::fieldutils::IntegerRep;
use crate::tensor::{Tensor, TensorError, TensorType};
use halo2curves::bn256::Fr as Fp;
@@ -22,7 +22,6 @@ use std::sync::Arc;
use tract_onnx::prelude::{DatumType, Node as OnnxNode, TypedFact, TypedOp};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::tract_core::ops::{
Downsample,
array::{
Gather, GatherElements, GatherNd, MultiBroadcastTo, OneHot, ScatterElements, ScatterNd,
Slice, Topk,
@@ -32,6 +31,7 @@ use tract_onnx::tract_core::ops::{
einsum::EinSum,
element_wise::ElementWiseOp,
nn::{LeakyRelu, Reduce, Softmax},
Downsample,
};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tract_onnx::tract_hir::{
@@ -1398,7 +1398,7 @@ pub fn new_op_from_onnx(
SupportedOp::Linear(PolyOp::Downsample {
axis: downsample_node.axis,
stride: downsample_node.stride,
stride: downsample_node.stride as usize,
modulo: downsample_node.modulo,
})
}

View File

@@ -17,16 +17,16 @@ use crate::{Commitments, EZKL_BUF_CAPACITY, EZKL_KEY_FORMAT};
use clap::ValueEnum;
use halo2_proofs::circuit::Value;
use halo2_proofs::plonk::{
Circuit, ProvingKey, VerifyingKey, create_proof, keygen_pk, keygen_vk_custom, verify_proof,
create_proof, keygen_pk, keygen_vk_custom, verify_proof, Circuit, ProvingKey, VerifyingKey,
};
use halo2_proofs::poly::VerificationStrategy;
use halo2_proofs::poly::commitment::{CommitmentScheme, Params, ParamsProver, Prover, Verifier};
use halo2_proofs::poly::ipa::commitment::IPACommitmentScheme;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::VerificationStrategy;
use halo2_proofs::transcript::{EncodedChallenge, TranscriptReadBuffer, TranscriptWriterBuffer};
use halo2curves::CurveAffine;
use halo2curves::ff::{FromUniformBytes, PrimeField, WithSmallOrderMulGroup};
use halo2curves::serde::SerdeObject;
use halo2curves::CurveAffine;
use instant::Instant;
use log::{debug, info, trace};
#[cfg(not(feature = "det-prove"))]
@@ -51,9 +51,6 @@ use pyo3::types::PyDictMethods;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
/// Converts a string to a `SerdeFormat`.
/// # Panics
/// Panics if the provided `s` is not a valid `SerdeFormat` (i.e. not one of "processed", "raw-bytes-unchecked", or "raw-bytes").
fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
match s {
"processed" => halo2_proofs::SerdeFormat::Processed,
@@ -324,7 +321,7 @@ where
}
#[cfg(feature = "python-bindings")]
use pyo3::{PyObject, Python, ToPyObject, types::PyDict};
use pyo3::{types::PyDict, PyObject, Python, ToPyObject};
#[cfg(feature = "python-bindings")]
impl<F: PrimeField + SerdeObject + Serialize, C: CurveAffine + Serialize> ToPyObject for Snark<F, C>
where
@@ -348,9 +345,9 @@ where
}
impl<
F: PrimeField + SerdeObject + Serialize + FromUniformBytes<64> + DeserializeOwned,
C: CurveAffine + Serialize + DeserializeOwned,
> Snark<F, C>
F: PrimeField + SerdeObject + Serialize + FromUniformBytes<64> + DeserializeOwned,
C: CurveAffine + Serialize + DeserializeOwned,
> Snark<F, C>
where
C::Scalar: Serialize + DeserializeOwned,
C::ScalarExt: Serialize + DeserializeOwned,

View File

@@ -27,7 +27,7 @@ pub use var::*;
use crate::{
circuit::utils,
fieldutils::{IntegerRep, integer_rep_to_felt},
fieldutils::{integer_rep_to_felt, IntegerRep},
graph::Visibility,
};
@@ -62,7 +62,7 @@ pub trait TensorType: Clone + Debug + 'static {
}
macro_rules! tensor_type {
($rust_type:ty, $tensor_type:ident, $zero:expr_2021, $one:expr_2021) => {
($rust_type:ty, $tensor_type:ident, $zero:expr, $one:expr) => {
impl TensorType for $rust_type {
fn zero() -> Option<Self> {
Some($zero)
@@ -415,7 +415,7 @@ impl<T: Clone + TensorType + PrimeField> Tensor<T> {
Err(_) => {
return Err(TensorError::FileLoadError(
"Failed to read tensor".to_string(),
));
))
}
}
}
@@ -926,9 +926,6 @@ impl<T: Clone + TensorType> Tensor<T> {
));
}
self.dims = vec![];
}
if self.dims() == &[0] && new_dims.iter().product::<usize>() == 1 {
self.dims = Vec::from(new_dims);
} else {
let product = if new_dims != [0] {
new_dims.iter().product::<usize>()
@@ -1107,10 +1104,6 @@ impl<T: Clone + TensorType> Tensor<T> {
let mut output = self.clone();
output.reshape(shape)?;
return Ok(output);
} else if self.dims() == &[0] && shape.iter().product::<usize>() == 1 {
let mut output = self.clone();
output.reshape(shape)?;
return Ok(output);
}
if self.dims().len() > shape.len() {
@@ -1261,7 +1254,7 @@ impl<T: Clone + TensorType> Tensor<T> {
None => {
return Err(TensorError::DimError(
"Cannot get last element of empty tensor".to_string(),
));
))
}
};
@@ -1286,7 +1279,7 @@ impl<T: Clone + TensorType> Tensor<T> {
None => {
return Err(TensorError::DimError(
"Cannot get first element of empty tensor".to_string(),
));
))
}
};
@@ -1699,8 +1692,8 @@ impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync + P
lhs.par_iter_mut()
.zip(rhs)
.map(|(o, r)| match T::zero() {
Some(zero) => {
.map(|(o, r)| {
if let Some(zero) = T::zero() {
if r != zero {
*o = o.clone() % r;
Ok(())
@@ -1709,10 +1702,11 @@ impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync + P
"Cannot divide by zero in remainder".to_string(),
))
}
} else {
Err(TensorError::InvalidArgument(
"Undefined zero value".to_string(),
))
}
_ => Err(TensorError::InvalidArgument(
"Undefined zero value".to_string(),
)),
})
.collect::<Result<Vec<_>, _>>()?;

View File

@@ -535,101 +535,30 @@ pub fn mult<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::S
/// let result = downsample(&x, 1, 2, 2).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[3, 6]), &[2, 1]).unwrap();
/// assert_eq!(result, expected);
/// let x = Tensor::<IntegerRep>::new(
/// Some(&[1, 2, 3, 4, 5, 6]),
/// &[2, 3],
/// ).unwrap();
///
/// // Test case 1: Negative stride along dimension 0
/// // This should flip the order along dimension 0
/// let result = downsample(&x, 0, -1, 0).unwrap();
/// let expected = Tensor::<IntegerRep>::new(
/// Some(&[4, 5, 6, 1, 2, 3]), // Flipped order of rows
/// &[2, 3]
/// ).unwrap();
/// assert_eq!(result, expected);
///
/// // Test case 2: Negative stride along dimension 1
/// // This should flip the order along dimension 1
/// let result = downsample(&x, 1, -1, 0).unwrap();
/// let expected = Tensor::<IntegerRep>::new(
/// Some(&[3, 2, 1, 6, 5, 4]), // Flipped order of columns
/// &[2, 3]
/// ).unwrap();
/// assert_eq!(result, expected);
///
/// // Test case 3: Negative stride with stride magnitude > 1
/// // This should both skip and flip
/// let result = downsample(&x, 1, -2, 0).unwrap();
/// let expected = Tensor::<IntegerRep>::new(
/// Some(&[3, 1, 6, 4]), // Take every 2nd element in reverse
/// &[2, 2]
/// ).unwrap();
/// assert_eq!(result, expected);
///
/// // Test case 4: Negative stride with non-zero modulo
/// // This should start at (size - 1 - modulo) and reverse
/// let result = downsample(&x, 1, -2, 1).unwrap();
/// let expected = Tensor::<IntegerRep>::new(
/// Some(&[2, 5]), // Start at second element from end, take every 2nd in reverse
/// &[2, 1]
/// ).unwrap();
/// assert_eq!(result, expected);
///
/// // Create a larger test case for more complex downsampling
/// let y = Tensor::<IntegerRep>::new(
/// Some(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
/// &[3, 4],
/// ).unwrap();
///
/// // Test case 5: Negative stride with modulo on larger tensor
/// let result = downsample(&y, 1, -2, 1).unwrap();
/// let expected = Tensor::<IntegerRep>::new(
/// Some(&[3, 1, 7, 5, 11, 9]), // Start at one after reverse, take every 2nd
/// &[3, 2]
/// ).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn downsample<T: TensorType + Send + Sync>(
input: &Tensor<T>,
dim: usize,
stride: isize, // Changed from usize to isize to support negative strides
stride: usize,
modulo: usize,
) -> Result<Tensor<T>, TensorError> {
// Handle negative stride case
if stride == 0 {
return Err(TensorError::DimMismatch(
"downsample stride cannot be zero".to_string(),
));
}
let stride_abs = stride.unsigned_abs();
let mut output_shape = input.dims().to_vec();
// now downsample along axis dim offset by modulo, rounding up (+1 if remainder is non-zero)
let remainder = (input.dims()[dim] - modulo) % stride;
let div = (input.dims()[dim] - modulo) / stride;
output_shape[dim] = div + (remainder > 0) as usize;
let mut output = Tensor::<T>::new(None, &output_shape)?;
if modulo >= input.dims()[dim] {
if modulo > input.dims()[dim] {
return Err(TensorError::DimMismatch("downsample".to_string()));
}
// Calculate output shape based on the absolute value of stride
let remainder = (input.dims()[dim] - modulo) % stride_abs;
let div = (input.dims()[dim] - modulo) / stride_abs;
output_shape[dim] = div + (remainder > 0) as usize;
let mut output = Tensor::<T>::new(None, &output_shape)?;
// Calculate indices based on stride direction
// now downsample along axis dim offset by modulo
let indices = (0..output_shape.len())
.map(|i| {
if i == dim {
let mut index = vec![0; output_shape[i]];
for (j, idx) in index.iter_mut().enumerate() {
if stride > 0 {
// Positive stride: move forward from modulo
*idx = j * stride_abs + modulo;
} else {
// Negative stride: move backward from (size - 1 - modulo)
*idx = (input.dims()[dim] - 1 - modulo) - j * stride_abs;
}
for (i, idx) in index.iter_mut().enumerate() {
*idx = i * stride + modulo;
}
index
} else {
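
On the side of the diff with `stride: usize`, the index rule is simply `idx = i * stride + modulo` along the chosen axis, with output length `ceil((len - modulo) / stride)`. A one-dimensional sketch of that rule (a simplification of the Tensor version above, not the library API):

```rust
/// 1-D downsample: keep elements at indices i * stride + modulo.
/// Output length is ceil((len - modulo) / stride), matching the
/// `div + (remainder > 0)` computation in the diff above.
fn downsample_1d<T: Clone>(input: &[T], stride: usize, modulo: usize) -> Result<Vec<T>, String> {
    if stride == 0 {
        return Err("downsample stride cannot be zero".to_string());
    }
    if modulo > input.len() {
        return Err("downsample modulo out of range".to_string());
    }
    Ok(input[modulo..].iter().step_by(stride).cloned().collect())
}

fn main() {
    let x = [1, 2, 3, 4, 5, 6];
    assert_eq!(downsample_1d(&x, 2, 0).unwrap(), vec![1, 3, 5]);
    assert_eq!(downsample_1d(&x, 2, 1).unwrap(), vec![2, 4, 6]);
    assert_eq!(downsample_1d(&x, 3, 0).unwrap(), vec![1, 4]);
}
```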

View File

@@ -1342,11 +1342,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
/// Gets the total number of elements in the tensor
pub fn len(&self) -> usize {
match self {
ValTensor::Value { dims, inner, .. } => {
ValTensor::Value { dims, .. } => {
if !dims.is_empty() && (dims != &[0]) {
dims.iter().product::<usize>()
} else if dims.is_empty() {
inner.inner.len()
} else {
0
}

View File

@@ -2,7 +2,7 @@ use std::collections::HashSet;
use log::{debug, error, warn};
use crate::circuit::{CheckMode, region::ConstantsMap};
use crate::circuit::{region::ConstantsMap, CheckMode};
use super::*;
/// A wrapper around Halo2's Column types that represents a tensor of variables in the circuit.
@@ -403,10 +403,7 @@ impl VarTensor {
let mut assigned_coord = 0;
let mut res: ValTensor<F> = match values {
ValTensor::Instance { .. } => {
error!(
"assignment with omissions is not supported on instance columns. increase K if you require more rows."
);
Err(halo2_proofs::plonk::Error::Synthesis)
unimplemented!("cannot assign instance to advice columns with omissions")
}
ValTensor::Value { inner: v, .. } => Ok::<ValTensor<F>, halo2_proofs::plonk::Error>(
v.enum_map(|coord, k| {
@@ -572,13 +569,8 @@ impl VarTensor {
constants: &mut ConstantsMap<F>,
) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
match values {
ValTensor::Instance { .. } => {
error!(
"duplication is not supported on instance columns. increase K if you require more rows."
);
Err(halo2_proofs::plonk::Error::Synthesis)
}
ValTensor::Value { inner: v, dims, .. } => {
ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
ValTensor::Value { inner: v, dims , ..} => {
let duplication_freq = if single_inner_col {
self.col_size()
} else {
@@ -591,20 +583,21 @@ impl VarTensor {
self.num_inner_cols()
};
let duplication_offset = if single_inner_col { row } else { offset };
let duplication_offset = if single_inner_col {
row
} else {
offset
};
// duplicates every nth element to adjust for column overflow
let mut res: ValTensor<F> = v
.duplicate_every_n(duplication_freq, num_repeats, duplication_offset)
.unwrap()
.into();
let mut res: ValTensor<F> = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap().into();
let constants_map = res.create_constants_map();
constants.extend(constants_map);
let total_used_len = res.len();
res.remove_every_n(duplication_freq, num_repeats, duplication_offset)
.unwrap();
res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
res.reshape(dims).unwrap();
res.set_scale(values.scale());
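For a rough model of what `duplicate_every_n` is doing here, consider this simplified sketch over plain vectors (a hypothetical helper; the real tensor version also takes `num_repeats` and an offset, and returns a `Result`):

```rust
// Sketch: when a run of values spills across fixed-height columns, repeat the
// last value of each column at the top of the next one, so an equality
// constraint can stitch the two cells together. `freq` plays the role of
// duplication_freq (the column height).
fn duplicate_every_n<T: Clone>(vals: &[T], freq: usize) -> Vec<T> {
    let mut out: Vec<T> = Vec::new();
    for v in vals {
        // crossing into a new column: repeat the previous column's last value
        if !out.is_empty() && out.len() % freq == 0 {
            let last = out.last().unwrap().clone();
            out.push(last);
        }
        out.push(v.clone());
    }
    out
}

fn main() {
    // column height 3: [1, 2, 3 | 3, 4, 5] -- the 3 is duplicated at the boundary
    assert_eq!(duplicate_every_n(&[1, 2, 3, 4, 5], 3), vec![1, 2, 3, 3, 4, 5]);
}
```

`remove_every_n` then strips exactly those boundary copies again, which is why `total_used_len` is recorded before the removal.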
@@ -634,13 +627,9 @@ impl VarTensor {
constants: &mut ConstantsMap<F>,
) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
match values {
-ValTensor::Instance { .. } => {
-    error!(
-        "duplication is not supported on instance columns. increase K if you require more rows."
-    );
-    Err(halo2_proofs::plonk::Error::Synthesis)
-}
-ValTensor::Value { inner: v, dims, .. } => {
+ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
+ValTensor::Value { inner: v, dims , ..} => {
let duplication_freq = self.block_size();
let num_repeats = self.num_inner_cols();
@@ -648,31 +637,17 @@ impl VarTensor {
let duplication_offset = offset;
// duplicates every nth element to adjust for column overflow
-let v = v
-    .duplicate_every_n(duplication_freq, num_repeats, duplication_offset)
-    .map_err(|e| {
-        error!("Error duplicating values: {:?}", e);
-        halo2_proofs::plonk::Error::Synthesis
-    })?;
+let v = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
let mut res: ValTensor<F> = {
    v.enum_map(|coord, k| {
-        let cell =
-            self.assign_value(region, offset, k.clone(), coord, constants)?;
+        let cell = self.assign_value(region, offset, k.clone(), coord, constants)?;
        Ok::<_, halo2_proofs::plonk::Error>(cell)
-    })?
-    .into()
-};
+    })?.into()};
let total_used_len = res.len();
-res.remove_every_n(duplication_freq, num_repeats, duplication_offset)
-    .map_err(|e| {
-        error!("Error duplicating values: {:?}", e);
-        halo2_proofs::plonk::Error::Synthesis
-    })?;
+res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
-res.reshape(dims).map_err(|e| {
-    error!("Error duplicating values: {:?}", e);
-    halo2_proofs::plonk::Error::Synthesis
-})?;
+res.reshape(dims).unwrap();
res.set_scale(values.scale());
Ok((res, total_used_len))
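The `-` side of these hunks converts tensor errors into a circuit-synthesis error instead of panicking via `unwrap()`. The pattern factors out to something like the following sketch (assuming the `halo2_proofs` and `log` crates are in scope; `to_synthesis_err` is a hypothetical helper, not part of the crate):

```rust
// Sketch of the (-) side's error handling: log the underlying error, then
// surface a synthesis error to the caller instead of panicking.
fn to_synthesis_err<T, E: std::fmt::Debug>(
    res: Result<T, E>,
) -> Result<T, halo2_proofs::plonk::Error> {
    res.map_err(|e| {
        log::error!("Error duplicating values: {:?}", e);
        halo2_proofs::plonk::Error::Synthesis
    })
}
```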
@@ -706,71 +681,61 @@ impl VarTensor {
let mut prev_cell = None;
match values {
-ValTensor::Instance { .. } => {
-    error!(
-        "duplication is not supported on instance columns. increase K if you require more rows."
-    );
-    Err(halo2_proofs::plonk::Error::Synthesis)
-}
-ValTensor::Value { inner: v, dims, .. } => {
+ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
+ValTensor::Value { inner: v, dims , ..} => {
let duplication_freq = self.col_size();
let num_repeats = 1;
let duplication_offset = row;
// duplicates every nth element to adjust for column overflow
-let v = v
-    .duplicate_every_n(duplication_freq, num_repeats, duplication_offset)
-    .unwrap();
-let mut res: ValTensor<F> = v
-    .enum_map(|coord, k| {
-        let step = self.num_inner_cols();
-        let cell =
-            self.assign_value(region, offset, k.clone(), coord * step, constants)?;
-        let (x, y, z) = self.cartesian_coord(offset + coord * step);
-        if matches!(check_mode, CheckMode::SAFE) && coord > 0 && z == 0 && y == 0 {
-            // assert that duplication occurred correctly
-            assert_eq!(
-                Into::<IntegerRep>::into(k.clone()),
-                Into::<IntegerRep>::into(v[coord - 1].clone())
-            );
-        };
+let step = self.num_inner_cols();
+let v = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
+let mut res: ValTensor<F> =
+    v.enum_map(|coord, k| {
+        let (x, y, z) = self.cartesian_coord(offset + coord * step);
+        if matches!(check_mode, CheckMode::SAFE) && coord > 0 && z == 0 && y == 0 {
+            // assert that duplication occurred correctly
+            assert_eq!(Into::<IntegerRep>::into(k.clone()), Into::<IntegerRep>::into(v[coord - 1].clone()));
+        };
+        let cell = self.assign_value(region, offset, k.clone(), coord * step, constants)?;
        let at_end_of_column = z == duplication_freq - 1;
        let at_beginning_of_column = z == 0;
        if at_end_of_column {
            // if we are at the end of the column, we need to copy the cell to the next column
            prev_cell = Some(cell.clone());
        } else if coord > 0 && at_beginning_of_column {
            if let Some(prev_cell) = prev_cell.as_ref() {
                let cell = if let Some(cell) = cell.cell() {
                    cell
                } else {
-                    error!("Error getting cell: {:?}", (x, y));
+                    error!("Error getting cell: {:?}", (x,y));
                    return Err(halo2_proofs::plonk::Error::Synthesis);
                };
                let prev_cell = if let Some(prev_cell) = prev_cell.cell() {
                    prev_cell
                } else {
-                    error!("Error getting prev cell: {:?}", (x, y));
+                    error!("Error getting prev cell: {:?}", (x,y));
                    return Err(halo2_proofs::plonk::Error::Synthesis);
                };
-                region.constrain_equal(prev_cell, cell)?;
+                region.constrain_equal(prev_cell,cell)?;
            } else {
                error!("Previous cell was not set");
                return Err(halo2_proofs::plonk::Error::Synthesis);
            }
        }
        Ok(cell)
-    })?
-    .into();
+    })?.into();
let total_used_len = res.len();
-res.remove_every_n(duplication_freq, num_repeats, duplication_offset)
-    .unwrap();
+res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
res.reshape(dims).unwrap();
res.set_scale(values.scale());
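For intuition about the `(x, y, z)` triple used in the SAFE-mode check, here is one plausible layout model (hypothetical function and parameters; ezkl's actual `cartesian_coord` may arrange the arithmetic differently):

```rust
// Sketch: map a linear assignment offset onto (block x, inner column y, row z)
// for a layout of `num_inner_cols` columns, each `col_size` rows tall.
fn cartesian_coord(linear: usize, num_inner_cols: usize, col_size: usize) -> (usize, usize, usize) {
    let x = linear / (num_inner_cols * col_size); // which block of columns
    let y = linear % num_inner_cols;              // which inner column
    let z = (linear / num_inner_cols) % col_size; // row within the column
    (x, y, z)
}

fn main() {
    // with 2 inner columns of 4 rows, offset 8 starts a fresh block
    assert_eq!(cartesian_coord(8, 2, 4), (1, 0, 0));
}
```

Under such a layout, `y == 0 && z == 0` marks the first cell of a fresh block, which is exactly where a duplicated value must equal its predecessor, hence the `assert_eq!` against `v[coord - 1]`.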
@@ -806,30 +771,21 @@ impl VarTensor {
VarTensor::Advice { inner: advices, .. } => {
ValType::PrevAssigned(region.assign_advice(|| "k", advices[x][y], z, || v)?)
}
-_ => {
-    error!("VarTensor was not initialized");
-    return Err(halo2_proofs::plonk::Error::Synthesis);
-}
+_ => unimplemented!(),
},
// Handle copying previously assigned value
ValType::PrevAssigned(v) => match &self {
VarTensor::Advice { inner: advices, .. } => {
ValType::PrevAssigned(v.copy_advice(|| "k", region, advices[x][y], z)?)
}
-_ => {
-    error!("VarTensor was not initialized");
-    return Err(halo2_proofs::plonk::Error::Synthesis);
-}
+_ => unimplemented!(),
},
// Handle copying previously assigned constant
ValType::AssignedConstant(v, val) => match &self {
VarTensor::Advice { inner: advices, .. } => {
ValType::AssignedConstant(v.copy_advice(|| "k", region, advices[x][y], z)?, val)
}
-_ => {
-    error!("VarTensor was not initialized");
-    return Err(halo2_proofs::plonk::Error::Synthesis);
-}
+_ => unimplemented!(),
},
// Handle assigning evaluated value
ValType::AssignedValue(v) => match &self {
@@ -838,10 +794,7 @@ impl VarTensor {
.assign_advice(|| "k", advices[x][y], z, || v)?
.evaluate(),
),
-_ => {
-    error!("VarTensor was not initialized");
-    return Err(halo2_proofs::plonk::Error::Synthesis);
-}
+_ => unimplemented!(),
},
// Handle constant value assignment with caching
ValType::Constant(v) => {

View File

@@ -1,12 +1,12 @@
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
#[cfg(test)]
mod native_tests {
// use ezkl::circuit::table::RESERVED_BLINDING_ROWS_PAD;
-use ezkl::Commitments;
use ezkl::graph::input::{FileSource, FileSourceInner, GraphData};
use ezkl::graph::{DataSource, GraphSettings, GraphWitness};
use ezkl::pfsys::Snark;
+use ezkl::Commitments;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2curves::bn256::Bn256;
use lazy_static::lazy_static;
@@ -522,7 +522,7 @@ mod native_tests {
use crate::native_tests::run_js_tests;
use crate::native_tests::render_circuit;
use crate::native_tests::model_serialization_different_binaries;
use tempdir::TempDir;
use ezkl::Commitments;
@@ -2293,12 +2293,7 @@ mod native_tests {
.expect("failed to execute process");
if status.success() {
-log::error!(
-    "Verification unexpectedly succeeded for modified proof {}. Flipped bit {} in byte {}",
-    i,
-    random_bit,
-    random_byte
-);
+log::error!("Verification unexpectedly succeeded for modified proof {}. Flipped bit {} in byte {}", i, random_bit, random_byte);
}
assert!(
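The surrounding test tampers with a serialized proof and expects verification to fail; a minimal sketch of that mutation, assuming the `rand` crate (illustrative, not the test's exact code):

```rust
use rand::Rng;

// Flip one random bit of a serialized proof; a sound verifier must reject it.
fn flip_random_bit(proof: &mut [u8]) -> (usize, usize) {
    let mut rng = rand::thread_rng();
    let random_byte = rng.gen_range(0..proof.len());
    let random_bit = rng.gen_range(0..8);
    proof[random_byte] ^= 1 << random_bit;
    (random_byte, random_bit)
}

fn main() {
    let mut proof = vec![0u8; 32];
    let (byte, bit) = flip_random_bit(&mut proof);
    assert_eq!(proof[byte], 1 << bit);
}
```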

View File

@@ -46,9 +46,7 @@ mod py_tests {
assert!(status.success());
});
// set VOICE_DATA_DIR environment variable
-unsafe {
-    std::env::set_var("VOICE_DATA_DIR", format!("{}", voice_data_dir));
-}
+std::env::set_var("VOICE_DATA_DIR", format!("{}", voice_data_dir));
}
fn download_catdog_data() {
@@ -65,9 +63,7 @@ mod py_tests {
assert!(status.success());
});
// set CATDOG_DATA_DIR environment variable
-unsafe {
-    std::env::set_var("CATDOG_DATA_DIR", format!("{}", cat_and_dog_data_dir));
-}
+std::env::set_var("CATDOG_DATA_DIR", format!("{}", cat_and_dog_data_dir));
}
fn setup_py_env() {
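The `unsafe` blocks on the `-` side of the two hunks above most likely reflect `std::env::set_var` becoming an `unsafe` function in the Rust 2024 edition: mutating the process environment can race with concurrent reads from other threads. A minimal sketch of the newer calling convention (the path value is illustrative):

```rust
fn main() {
    let voice_data_dir = "/tmp/voice-data"; // illustrative path
    // SAFETY: called before any threads that might read the environment are spawned.
    unsafe {
        std::env::set_var("VOICE_DATA_DIR", voice_data_dir);
    }
    assert_eq!(std::env::var("VOICE_DATA_DIR").unwrap(), voice_data_dir);
}
```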