From f4481509051158d35a0dd098560644b47c019991 Mon Sep 17 00:00:00 2001 From: dante <45801863+alexander-camuto@users.noreply.github.com> Date: Sat, 25 Mar 2023 01:14:07 +0000 Subject: [PATCH] feat: integrate accumulated args into graph (#174) * feat: accum pow * feat: accum pack * feat: pairwise rescaled ops * fix examples * feat: integrate accumulated args into graph * rm max rotations argument * update readme * feat: reuse base gate config * matmul relu benches * nil rotation lookup and range * Update rust.yml * clippy cleanup * doc strings * rm init dot * fix doc tests * single lookup as default * Update integration_tests.rs * Update rust.yml * split SRS generation 20/23 --- .github/workflows/rust.yml | 24 +- Cargo.toml | 20 + README.md | 2 - benches/accum_affine.rs | 12 +- benches/accum_conv.rs | 18 +- benches/accum_dot.rs | 11 +- benches/accum_matmul.rs | 12 +- benches/accum_matmul_relu.rs | 130 ++++ benches/accum_pack.rs | 108 +++ benches/accum_sum.rs | 11 +- benches/accum_sumpool.rs | 18 +- benches/add.rs | 14 +- benches/affine.rs | 15 +- benches/conv.rs | 10 +- benches/dot.rs | 14 +- benches/matmul.rs | 14 +- benches/pack.rs | 112 +++ benches/pairwise_add.rs | 9 +- benches/pairwise_pow.rs | 108 +++ benches/pow.rs | 112 +++ benches/range.rs | 2 +- benches/relu.rs | 59 +- benches/sum.rs | 20 +- benches/sumpool.rs | 6 +- examples/conv2d_mnist/main.rs | 163 ++--- examples/default_run_args.json | 1 - examples/mlp_4d.rs | 60 +- examples/onnx/1l_average/input.json | 32 +- src/bin/ezkl.rs | 4 +- src/circuit/{accumulated => base}/layouts.rs | 277 +++++-- src/circuit/base/mod.rs | 476 +++++++++++++ .../{accumulated/mod.rs => base/tests.rs} | 674 +++++++----------- src/circuit/{polynomial.rs => fused.rs} | 10 +- src/circuit/lookup.rs | 104 +-- src/circuit/mod.rs | 7 +- src/circuit/range.rs | 30 +- src/circuit/utils.rs | 5 +- src/commands.rs | 10 +- src/eth.rs | 78 +- src/execute.rs | 43 +- src/graph/mod.rs | 7 +- src/graph/model.rs | 589 ++++----------- src/graph/node.rs | 64 +- src/graph/vars.rs | 40 +- src/pfsys/mod.rs | 6 +- src/tensor/mod.rs | 8 +- src/tensor/ops.rs | 51 +- src/tensor/val.rs | 276 ++++--- src/tensor/var.rs | 72 +- tests/integration_tests.rs | 121 ++-- tests/wasi_integration_tests.rs | 41 +- 51 files changed, 2366 insertions(+), 1744 deletions(-) create mode 100644 benches/accum_matmul_relu.rs create mode 100644 benches/accum_pack.rs create mode 100644 benches/pack.rs create mode 100644 benches/pairwise_pow.rs create mode 100644 benches/pow.rs rename src/circuit/{accumulated => base}/layouts.rs (71%) create mode 100644 src/circuit/base/mod.rs rename src/circuit/{accumulated/mod.rs => base/tests.rs} (60%) rename src/circuit/{polynomial.rs => fused.rs} (99%) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 9a83fa24..077cb80a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -69,6 +69,10 @@ jobs: run: cargo bench --verbose --bench matmul - name: Bench accum matmul run: cargo bench --verbose --bench accum_matmul + - name: Bench accum matmul relu + run: cargo bench --verbose --bench accum_matmul_relu + - name: Bench relu + run: cargo bench --verbose --bench relu - name: Bench dot run: cargo bench --verbose --bench dot - name: Bench accum dot @@ -93,6 +97,14 @@ jobs: run: cargo bench --verbose --bench sum - name: Bench accum sum run: cargo bench --verbose --bench accum_sum + - name: Bench pow + run: cargo bench --verbose --bench pow + - name: Bench pairwise pow + run: cargo bench --verbose --bench pairwise_pow + - name: Bench 
pack + run: cargo bench --verbose --bench pack + - name: Bench accum pack + run: cargo bench --verbose --bench accum_pack docs: runs-on: ubuntu-latest @@ -168,8 +180,8 @@ jobs: - name: Mock proving tests (public outputs) run: cargo test --release --verbose tests::mock_public_outputs_ -- --test-threads 32 - - name: Mock proving tests (public outputs and single lookup) - run: cargo test --release --verbose tests::mock_single_lookup_ -- --test-threads 32 + - name: Mock proving tests (public outputs and no single lookup) + run: cargo test --release --verbose tests::mock_non_single_lookup_ -- --test-threads 32 - name: Mock proving tests (public and packed outputs) run: cargo test --release --verbose packed_tests::mock_packed_outputs_ -- --test-threads 32 - name: Mock proving tests (public inputs) @@ -197,8 +209,8 @@ jobs: run: rustup target add wasm32-wasi - name: Mock proving tests (WASI) (public outputs) run: cargo test --release --verbose tests_wasi::mock_public_outputs_ -- --test-threads 32 - - name: Mock proving tests (WASI) (public outputs and single lookup) - run: cargo test --release --verbose tests_wasi::mock_single_lookup_ -- --test-threads 32 + - name: Mock proving tests (WASI) (public outputs and no single lookup) + run: cargo test --release --verbose tests_wasi::mock_non_single_lookup_ -- --test-threads 32 - name: Mock proving tests (WASI) (public and packed outputs) run: cargo test --release --verbose packed_tests_wasi::mock_packed_outputs_ -- --test-threads 32 - name: Mock proving tests (WASI) (public inputs) @@ -241,8 +253,8 @@ jobs: run: rustup target add wasm32-wasi - name: KZG prove and verify tests run: cargo test --release --verbose tests::kzg_prove_and_verify_ -- --test-threads 8 - - name: KZG prove and verify tests (single lookup) - run: cargo test --release --verbose tests::kzg_prove_and_verify_single_lookup_ -- --test-threads 8 + - name: KZG prove and verify tests (no single lookup) + run: cargo test --release --verbose tests::kzg_prove_and_verify_non_single_lookup_ -- --test-threads 8 prove-and-verify-aggr-tests: runs-on: self-hosted diff --git a/Cargo.toml b/Cargo.toml index b81cf3b4..dd47a116 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,6 +82,22 @@ harness = false name = "pairwise_add" harness = false +[[bench]] +name = "pow" +harness = false + +[[bench]] +name = "pairwise_pow" +harness = false + +[[bench]] +name = "pack" +harness = false + +[[bench]] +name = "accum_pack" +harness = false + [[bench]] name = "matmul" harness = false @@ -112,6 +128,10 @@ harness = false name = "relu" harness = false +[[bench]] +name = "accum_matmul_relu" +harness = false + [[bench]] name = "range" harness = false diff --git a/README.md b/README.md index 15f515fc..059e2465 100644 --- a/README.md +++ b/README.md @@ -261,7 +261,6 @@ Options: --public-inputs Flags whether inputs are public --public-outputs Flags whether outputs are public --public-params Flags whether params are public - -M, --max-rotations Flags to set maximum rotations [default: 512] --pack-base Base used to pack the public-inputs to the circuit. set ( > 1) to pack instances as a single int. Useful when verifying on the EVM. Note that this will often break for very long inputs. Use with caution, still experimental. 
[default: 1] --single-lookup Use a single argument for all lookups -h, --help Print help @@ -309,7 +308,6 @@ For an example of such a file see `examples/default_run_args.json`: "public_inputs": false, "public_outputs": true, "public_params": false, - "max_rotations": 512 } ``` diff --git a/benches/accum_affine.rs b/benches/accum_affine.rs index 0e001cb1..f127fbf7 100644 --- a/benches/accum_affine.rs +++ b/benches/accum_affine.rs @@ -1,5 +1,5 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::accumulated::*; +use ezkl_lib::circuit::base::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -35,14 +35,13 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, (len + 1) * len, vec![len, len], true, 100000); + let a = VarTensor::new_advice(cs, K, (len + 1) * len, vec![len, len], true); - let b = VarTensor::new_advice(cs, K, (len + 1) * len, vec![len + 1, len], true, 100000); + let b = VarTensor::new_advice(cs, K, (len + 1) * len, vec![len + 1, len], true); - let output = - VarTensor::new_advice(cs, K, (len + 2) * len, vec![len, 1, len + 2], true, 100000); + let output = VarTensor::new_advice(cs, K, (len + 2) * len, vec![len, 1, len + 2], true); - Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE) } fn synthesize( @@ -101,6 +100,7 @@ fn runaffine(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/accum_conv.rs b/benches/accum_conv.rs index eb212429..854f23b3 100644 --- a/benches/accum_conv.rs +++ b/benches/accum_conv.rs @@ -1,5 +1,5 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::accumulated::*; +use ezkl_lib::circuit::base::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::create_keys; @@ -42,20 +42,13 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = 10; - let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 1000000); + let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); - let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 1000000); + let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); - let output = VarTensor::new_advice( - cs, - K, - (len + 1) * len, - vec![len, 1, len + 1], - true, - 10000000, - ); + let output = VarTensor::new_advice(cs, K, (len + 1) * len, vec![len, 1, len + 1], true); - Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE) } fn synthesize( @@ -132,6 +125,7 @@ fn runcnvrl(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/accum_dot.rs b/benches/accum_dot.rs index 7685eb25..6d849c37 100644 --- a/benches/accum_dot.rs +++ b/benches/accum_dot.rs @@ -1,5 +1,5 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::accumulated::*; +use ezkl_lib::circuit::base::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ 
-35,11 +35,11 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, len, vec![len], true, 1024); - let b = VarTensor::new_advice(cs, K, len, vec![len], true, 1024); - let output = VarTensor::new_advice(cs, K, len, vec![len + 1], true, 1024); + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let b = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![len + 1], true); - Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE) } fn synthesize( @@ -93,6 +93,7 @@ fn rundot(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/accum_matmul.rs b/benches/accum_matmul.rs index a13acfcd..2bfac5a2 100644 --- a/benches/accum_matmul.rs +++ b/benches/accum_matmul.rs @@ -1,5 +1,5 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::accumulated::*; +use ezkl_lib::circuit::base::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -35,14 +35,13 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 100000); + let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); - let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 100000); + let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); - let output = - VarTensor::new_advice(cs, K, (len + 1) * len, vec![len, 1, len + 1], true, 100000); + let output = VarTensor::new_advice(cs, K, (len + 1) * len, vec![len, 1, len + 1], true); - Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE) } fn synthesize( @@ -98,6 +97,7 @@ fn runmatmul(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/accum_matmul_relu.rs b/benches/accum_matmul_relu.rs new file mode 100644 index 00000000..d158d2c2 --- /dev/null +++ b/benches/accum_matmul_relu.rs @@ -0,0 +1,130 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use ezkl_lib::circuit::base::*; +use ezkl_lib::circuit::lookup::{Config as LookupConfig, Op as LookupOp}; + +use ezkl_lib::commands::TranscriptType; +use ezkl_lib::execute::create_proof_circuit_kzg; +use ezkl_lib::pfsys::{create_keys, gen_srs}; +use ezkl_lib::tensor::*; +use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme; +use halo2_proofs::poly::kzg::strategy::SingleStrategy; +use halo2_proofs::{ + arithmetic::Field, + circuit::{Layouter, SimpleFloorPlanner, Value}, + plonk::{Circuit, ConstraintSystem, Error}, +}; +use halo2curves::bn256::{Bn256, Fr}; +use rand::rngs::OsRng; +use std::marker::PhantomData; + +const BITS: usize = 8; +static mut LEN: usize = 4; +const K: usize = 16; + +#[derive(Clone)] +struct MyCircuit { + inputs: [ValTensor; 2], + _marker: PhantomData, +} + +// A columnar ReLu MLP +#[derive(Clone)] +struct MyConfig { + base_config: BaseConfig, + l1: LookupConfig, +} + +impl Circuit for MyCircuit { + type Config = MyConfig; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { 
+ self.clone() + } + + fn configure(cs: &mut ConstraintSystem) -> Self::Config { + let len = unsafe { LEN }; + + let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); + + let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); + + let output = VarTensor::new_advice(cs, K, (len + 1) * len, vec![len, 1, len + 1], true); + + // sets up a new Divide by table + let l1 = LookupConfig::configure(cs, &a, &output, BITS, &[LookupOp::ReLU { scale: 1 }]); + + let base_config = BaseConfig::configure(cs, &[a, b], &output, CheckMode::UNSAFE); + MyConfig { base_config, l1 } + } + + fn synthesize( + &self, + mut config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + let output = config + .base_config + .layout(&mut layouter, &self.inputs, 0, Op::Matmul) + .unwrap(); + let _output = config.l1.layout(&mut layouter, &output).unwrap(); + Ok(()) + } +} + +fn runmatmul(c: &mut Criterion) { + let mut group = c.benchmark_group("accum_matmul"); + let params = gen_srs::>(17); + for &len in [4, 32].iter() { + unsafe { + LEN = len; + }; + + let mut a = Tensor::from((0..len * len).map(|_| Value::known(Fr::random(OsRng)))); + a.reshape(&[len, len]); + + // parameters + let mut b = Tensor::from((0..len).map(|_| Value::known(Fr::random(OsRng)))); + b.reshape(&[len, 1]); + + let circuit = MyCircuit { + inputs: [ValTensor::from(a), ValTensor::from(b)], + _marker: PhantomData, + }; + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| { + b.iter(|| { + create_keys::, Fr, MyCircuit>(&circuit, ¶ms) + .unwrap(); + }); + }); + + let pk = + create_keys::, Fr, MyCircuit>(&circuit, ¶ms).unwrap(); + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| { + b.iter(|| { + let prover = create_proof_circuit_kzg( + circuit.clone(), + ¶ms, + vec![], + &pk, + TranscriptType::Blake, + SingleStrategy::new(¶ms), + CheckMode::UNSAFE, + ); + prover.unwrap(); + }); + }); + } + group.finish(); +} + +criterion_group! 
{ + name = benches; + config = Criterion::default().with_plots(); + targets = runmatmul +} +criterion_main!(benches); diff --git a/benches/accum_pack.rs b/benches/accum_pack.rs new file mode 100644 index 00000000..10cf19af --- /dev/null +++ b/benches/accum_pack.rs @@ -0,0 +1,108 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use ezkl_lib::circuit::base::*; +use ezkl_lib::commands::TranscriptType; +use ezkl_lib::execute::create_proof_circuit_kzg; +use ezkl_lib::pfsys::{create_keys, gen_srs}; +use ezkl_lib::tensor::*; +use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme; +use halo2_proofs::poly::kzg::strategy::SingleStrategy; +use halo2_proofs::{ + arithmetic::Field, + circuit::{Layouter, SimpleFloorPlanner, Value}, + plonk::{Circuit, ConstraintSystem, Error}, +}; +use halo2curves::bn256::{Bn256, Fr}; +use rand::rngs::OsRng; +use std::marker::PhantomData; + +static mut LEN: usize = 4; +const K: usize = 16; + +#[derive(Clone)] +struct MyCircuit { + inputs: [ValTensor; 1], + _marker: PhantomData, +} + +impl Circuit for MyCircuit { + type Config = BaseConfig; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { + self.clone() + } + + fn configure(cs: &mut ConstraintSystem) -> Self::Config { + let len = unsafe { LEN }; + + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let b = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![len + 1], true); + + Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE) + } + + fn synthesize( + &self, + mut config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + config + .layout(&mut layouter, &self.inputs, 0, Op::Pack(2, 1)) + .unwrap(); + Ok(()) + } +} + +fn runpack(c: &mut Criterion) { + let mut group = c.benchmark_group("accum_pack"); + let params = gen_srs::>(17); + for &len in [16, 512].iter() { + unsafe { + LEN = len; + }; + + // parameters + let a = Tensor::from((0..len).map(|_| Value::known(Fr::random(OsRng)))); + + let circuit = MyCircuit { + inputs: [ValTensor::from(a)], + _marker: PhantomData, + }; + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| { + b.iter(|| { + create_keys::, Fr, MyCircuit>(&circuit, ¶ms) + .unwrap(); + }); + }); + + let pk = + create_keys::, Fr, MyCircuit>(&circuit, ¶ms).unwrap(); + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| { + b.iter(|| { + let prover = create_proof_circuit_kzg( + circuit.clone(), + ¶ms, + vec![], + &pk, + TranscriptType::Blake, + SingleStrategy::new(¶ms), + CheckMode::UNSAFE, + ); + prover.unwrap(); + }); + }); + } + group.finish(); +} + +criterion_group! 
{ + name = benches; + config = Criterion::default().with_plots(); + targets = runpack +} +criterion_main!(benches); diff --git a/benches/accum_sum.rs b/benches/accum_sum.rs index 4005452c..24a9509e 100644 --- a/benches/accum_sum.rs +++ b/benches/accum_sum.rs @@ -1,5 +1,5 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::accumulated::*; +use ezkl_lib::circuit::base::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -35,11 +35,11 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, len, vec![len], true, 1024); - let b = VarTensor::new_advice(cs, K, len, vec![len], true, 1024); - let output = VarTensor::new_advice(cs, K, len, vec![len + 1], true, 1024); + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let b = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![len + 1], true); - Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE) } fn synthesize( @@ -91,6 +91,7 @@ fn runsum(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/accum_sumpool.rs b/benches/accum_sumpool.rs index cbf32272..3cfa6f10 100644 --- a/benches/accum_sumpool.rs +++ b/benches/accum_sumpool.rs @@ -1,5 +1,5 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::accumulated::*; +use ezkl_lib::circuit::base::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::create_keys; @@ -37,20 +37,13 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = 10; - let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 1000000); + let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); - let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 1000000); + let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); - let output = VarTensor::new_advice( - cs, - K, - (len + 1) * len, - vec![len, 1, len + 1], - true, - 10000000, - ); + let output = VarTensor::new_advice(cs, K, (len + 1) * len, vec![len, 1, len + 1], true); - Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + Self::Config::configure(cs, &[a, b], &output, CheckMode::UNSAFE) } fn synthesize( @@ -115,6 +108,7 @@ fn runsumpool(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/add.rs b/benches/add.rs index d84059f0..d28ad3ec 100644 --- a/benches/add.rs +++ b/benches/add.rs @@ -1,5 +1,7 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::polynomial::*; +use ezkl_lib::circuit::base::CheckMode; +#[allow(deprecated)] +use ezkl_lib::circuit::fused::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -35,9 +37,9 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, len, vec![len], true, 512); - let b = VarTensor::new_advice(cs, K, len, vec![len], 
true, 512); - let output = VarTensor::new_advice(cs, K, len, vec![len], true, 512); + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let b = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![len], true); let add_node = Node { op: Op::Add, input_order: vec![InputType::Input(0), InputType::Input(1)], @@ -59,7 +61,8 @@ impl Circuit for MyCircuit { fn runadd(c: &mut Criterion) { let mut group = c.benchmark_group("add"); let params = gen_srs::>(17); - for &len in [16].iter() { + { + let &len = &16; unsafe { LEN = len; }; @@ -95,6 +98,7 @@ fn runadd(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/affine.rs b/benches/affine.rs index 867f77f8..081a0cab 100644 --- a/benches/affine.rs +++ b/benches/affine.rs @@ -1,5 +1,6 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::polynomial::*; +use ezkl_lib::circuit::base::CheckMode; +use ezkl_lib::circuit::fused::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -35,10 +36,10 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let input = VarTensor::new_advice(cs, K, len, vec![len], true, 512); - let kernel = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 512); - let bias = VarTensor::new_advice(cs, K, len, vec![len], true, 512); - let output = VarTensor::new_advice(cs, K, len, vec![len], true, 512); + let input = VarTensor::new_advice(cs, K, len, vec![len], true); + let kernel = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); + let bias = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![len], true); // tells the config layer to add an affine op to a circuit gate let affine_node = Node { op: Op::Affine, @@ -65,7 +66,8 @@ impl Circuit for MyCircuit { fn runaffine(c: &mut Criterion) { let mut group = c.benchmark_group("affine"); let params = gen_srs::>(17); - for &len in [4].iter() { + { + let &len = &4; unsafe { LEN = len; }; @@ -108,6 +110,7 @@ fn runaffine(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/conv.rs b/benches/conv.rs index c5452cb0..419da934 100644 --- a/benches/conv.rs +++ b/benches/conv.rs @@ -1,9 +1,11 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use ezkl_lib::circuit::base::CheckMode; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::gen_srs; use ezkl_lib::tensor::*; -use ezkl_lib::{circuit::polynomial::*, pfsys::create_keys}; +#[allow(deprecated)] +use ezkl_lib::{circuit::fused::*, pfsys::create_keys}; use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme; use halo2_proofs::poly::kzg::strategy::SingleStrategy; use halo2_proofs::{ @@ -51,7 +53,6 @@ impl Circuit for MyCircuit { IN_CHANNELS * IMAGE_HEIGHT * IMAGE_WIDTH, vec![IN_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH], true, - 512, ); let kernel = VarTensor::new_advice( cs, @@ -59,17 +60,15 @@ impl Circuit for MyCircuit { OUT_CHANNELS * IN_CHANNELS * KERNEL_HEIGHT * KERNEL_WIDTH, vec![OUT_CHANNELS, IN_CHANNELS, KERNEL_HEIGHT, KERNEL_WIDTH], true, - 512, ); - let bias = VarTensor::new_advice(cs, K, OUT_CHANNELS, vec![OUT_CHANNELS], 
true, 512); + let bias = VarTensor::new_advice(cs, K, OUT_CHANNELS, vec![OUT_CHANNELS], true); let output = VarTensor::new_advice( cs, K, OUT_CHANNELS * output_height * output_width, vec![OUT_CHANNELS, output_height, output_width], true, - 512, ); // tells the config layer to add a conv op to a circuit gate @@ -158,6 +157,7 @@ fn runcnvrl(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/dot.rs b/benches/dot.rs index 36c1795d..cfb1af98 100644 --- a/benches/dot.rs +++ b/benches/dot.rs @@ -1,5 +1,7 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::polynomial::*; +use ezkl_lib::circuit::base::CheckMode; +#[allow(deprecated)] +use ezkl_lib::circuit::fused::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -35,9 +37,9 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, len, vec![len], true, 512); - let b = VarTensor::new_advice(cs, K, len, vec![len], true, 512); - let output = VarTensor::new_advice(cs, K, len, vec![1], true, 512); + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let b = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![1], true); let dot_node = Node { op: Op::Dot, input_order: vec![InputType::Input(0), InputType::Input(1)], @@ -59,7 +61,8 @@ impl Circuit for MyCircuit { fn rundot(c: &mut Criterion) { let mut group = c.benchmark_group("dot"); let params = gen_srs::>(17); - for &len in [16].iter() { + { + let &len = &16; unsafe { LEN = len; }; @@ -95,6 +98,7 @@ fn rundot(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/matmul.rs b/benches/matmul.rs index 1f36661b..ee552fd1 100644 --- a/benches/matmul.rs +++ b/benches/matmul.rs @@ -1,5 +1,7 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::polynomial::*; +use ezkl_lib::circuit::base::CheckMode; +#[allow(deprecated)] +use ezkl_lib::circuit::fused::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -35,9 +37,9 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 512); - let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 512); - let output = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 512); + let a = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); + let b = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); + let output = VarTensor::new_advice(cs, K, len * len, vec![len, len], true); let dot_node = Node { op: Op::Matmul, input_order: vec![InputType::Input(0), InputType::Input(1)], @@ -59,7 +61,8 @@ impl Circuit for MyCircuit { fn runmatmul(c: &mut Criterion) { let mut group = c.benchmark_group("matmul"); let params = gen_srs::>(17); - for &len in [4].iter() { + { + let &len = &4; unsafe { LEN = len; }; @@ -97,6 +100,7 @@ fn runmatmul(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/pack.rs 
b/benches/pack.rs new file mode 100644 index 00000000..231c570f --- /dev/null +++ b/benches/pack.rs @@ -0,0 +1,112 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use ezkl_lib::circuit::base::CheckMode; +#[allow(deprecated)] +use ezkl_lib::circuit::fused::*; +use ezkl_lib::commands::TranscriptType; +use ezkl_lib::execute::create_proof_circuit_kzg; +use ezkl_lib::pfsys::{create_keys, gen_srs}; +use ezkl_lib::tensor::*; +use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme; +use halo2_proofs::poly::kzg::strategy::SingleStrategy; +use halo2_proofs::{ + arithmetic::Field, + circuit::{Layouter, SimpleFloorPlanner, Value}, + plonk::{Circuit, ConstraintSystem, Error}, +}; +use halo2curves::bn256::{Bn256, Fr}; +use rand::rngs::OsRng; +use std::marker::PhantomData; + +static mut LEN: usize = 4; +const K: usize = 16; + +#[derive(Clone)] +struct MyCircuit { + inputs: [ValTensor; 1], + _marker: PhantomData, +} + +impl Circuit for MyCircuit { + type Config = Config; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { + self.clone() + } + + fn configure(cs: &mut ConstraintSystem) -> Self::Config { + let len = unsafe { LEN }; + + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, 1, vec![1], true); + let pack_node = Node { + op: Op::Pack(2, 1), + input_order: vec![InputType::Input(0)], + }; + + Self::Config::configure(cs, &[a], &output, &[pack_node]) + } + + fn synthesize( + &self, + mut config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + config.layout(&mut layouter, &self.inputs).unwrap(); + Ok(()) + } +} + +fn runpack(c: &mut Criterion) { + let mut group = c.benchmark_group("pack"); + let params = gen_srs::>(17); + { + let &len = &16; + unsafe { + LEN = len; + }; + + // parameters + let a = Tensor::from((0..len).map(|_| Value::known(Fr::random(OsRng)))); + + let circuit = MyCircuit { + inputs: [ValTensor::from(a)], + _marker: PhantomData, + }; + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| { + b.iter(|| { + create_keys::, Fr, MyCircuit>(&circuit, ¶ms) + .unwrap(); + }); + }); + + let pk = + create_keys::, Fr, MyCircuit>(&circuit, ¶ms).unwrap(); + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| { + b.iter(|| { + let prover = create_proof_circuit_kzg( + circuit.clone(), + ¶ms, + vec![], + &pk, + TranscriptType::Blake, + SingleStrategy::new(¶ms), + CheckMode::SAFE, + ); + prover.unwrap(); + }); + }); + } + group.finish(); +} + +criterion_group! 
{ + name = benches; + config = Criterion::default().with_plots(); + targets = runpack +} +criterion_main!(benches); diff --git a/benches/pairwise_add.rs b/benches/pairwise_add.rs index 117850fb..87dd0965 100644 --- a/benches/pairwise_add.rs +++ b/benches/pairwise_add.rs @@ -1,5 +1,5 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::accumulated::*; +use ezkl_lib::circuit::base::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -35,9 +35,9 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, len, vec![len], true, 1024); - let b = VarTensor::new_advice(cs, K, len, vec![len], true, 1024); - let output = VarTensor::new_advice(cs, K, len, vec![len + 1], true, 1024); + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let b = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![len + 1], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -93,6 +93,7 @@ fn runadd(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::SAFE, ); prover.unwrap(); }); diff --git a/benches/pairwise_pow.rs b/benches/pairwise_pow.rs new file mode 100644 index 00000000..7574f777 --- /dev/null +++ b/benches/pairwise_pow.rs @@ -0,0 +1,108 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use ezkl_lib::circuit::base::*; +use ezkl_lib::commands::TranscriptType; +use ezkl_lib::execute::create_proof_circuit_kzg; +use ezkl_lib::pfsys::{create_keys, gen_srs}; +use ezkl_lib::tensor::*; +use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme; +use halo2_proofs::poly::kzg::strategy::SingleStrategy; +use halo2_proofs::{ + arithmetic::Field, + circuit::{Layouter, SimpleFloorPlanner, Value}, + plonk::{Circuit, ConstraintSystem, Error}, +}; +use halo2curves::bn256::{Bn256, Fr}; +use rand::rngs::OsRng; +use std::marker::PhantomData; + +static mut LEN: usize = 4; +const K: usize = 16; + +#[derive(Clone)] +struct MyCircuit { + inputs: [ValTensor; 1], + _marker: PhantomData, +} + +impl Circuit for MyCircuit { + type Config = BaseConfig; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { + self.clone() + } + + fn configure(cs: &mut ConstraintSystem) -> Self::Config { + let len = unsafe { LEN }; + + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let b = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![len + 1], true); + + Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + } + + fn synthesize( + &self, + mut config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + config + .layout(&mut layouter, &self.inputs, 0, Op::Pow(4)) + .unwrap(); + Ok(()) + } +} + +fn runpow(c: &mut Criterion) { + let mut group = c.benchmark_group("pairwise_pow"); + let params = gen_srs::>(17); + for &len in [16, 512].iter() { + unsafe { + LEN = len; + }; + + // parameters + let a = Tensor::from((0..len).map(|_| Value::known(Fr::random(OsRng)))); + + let circuit = MyCircuit { + inputs: [ValTensor::from(a)], + _marker: PhantomData, + }; + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| { + b.iter(|| { + create_keys::, Fr, MyCircuit>(&circuit, 
¶ms) + .unwrap(); + }); + }); + + let pk = + create_keys::, Fr, MyCircuit>(&circuit, ¶ms).unwrap(); + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| { + b.iter(|| { + let prover = create_proof_circuit_kzg( + circuit.clone(), + ¶ms, + vec![], + &pk, + TranscriptType::Blake, + SingleStrategy::new(¶ms), + CheckMode::SAFE, + ); + prover.unwrap(); + }); + }); + } + group.finish(); +} + +criterion_group! { + name = benches; + config = Criterion::default().with_plots(); + targets = runpow +} +criterion_main!(benches); diff --git a/benches/pow.rs b/benches/pow.rs new file mode 100644 index 00000000..84a606f1 --- /dev/null +++ b/benches/pow.rs @@ -0,0 +1,112 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use ezkl_lib::circuit::base::CheckMode; +#[allow(deprecated)] +use ezkl_lib::circuit::fused::*; +use ezkl_lib::commands::TranscriptType; +use ezkl_lib::execute::create_proof_circuit_kzg; +use ezkl_lib::pfsys::{create_keys, gen_srs}; +use ezkl_lib::tensor::*; +use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme; +use halo2_proofs::poly::kzg::strategy::SingleStrategy; +use halo2_proofs::{ + arithmetic::Field, + circuit::{Layouter, SimpleFloorPlanner, Value}, + plonk::{Circuit, ConstraintSystem, Error}, +}; +use halo2curves::bn256::{Bn256, Fr}; +use rand::rngs::OsRng; +use std::marker::PhantomData; + +static mut LEN: usize = 4; +const K: usize = 16; + +#[derive(Clone)] +struct MyCircuit { + inputs: [ValTensor; 1], + _marker: PhantomData, +} + +impl Circuit for MyCircuit { + type Config = Config; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { + self.clone() + } + + fn configure(cs: &mut ConstraintSystem) -> Self::Config { + let len = unsafe { LEN }; + + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![len], true); + let pow_node = Node { + op: Op::Pow(4), + input_order: vec![InputType::Input(0)], + }; + + Self::Config::configure(cs, &[a], &output, &[pow_node]) + } + + fn synthesize( + &self, + mut config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + config.layout(&mut layouter, &self.inputs).unwrap(); + Ok(()) + } +} + +fn runpow(c: &mut Criterion) { + let mut group = c.benchmark_group("pow"); + let params = gen_srs::>(17); + { + let &len = &16; + unsafe { + LEN = len; + }; + + // parameters + let a = Tensor::from((0..len).map(|_| Value::known(Fr::random(OsRng)))); + + let circuit = MyCircuit { + inputs: [ValTensor::from(a)], + _marker: PhantomData, + }; + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| { + b.iter(|| { + create_keys::, Fr, MyCircuit>(&circuit, ¶ms) + .unwrap(); + }); + }); + + let pk = + create_keys::, Fr, MyCircuit>(&circuit, ¶ms).unwrap(); + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("prove", len), &len, |b, &_| { + b.iter(|| { + let prover = create_proof_circuit_kzg( + circuit.clone(), + ¶ms, + vec![], + &pk, + TranscriptType::Blake, + SingleStrategy::new(¶ms), + CheckMode::UNSAFE, + ); + prover.unwrap(); + }); + }); + } + group.finish(); +} + +criterion_group! 
{ + name = benches; + config = Criterion::default().with_plots(); + targets = runpow +} +criterion_main!(benches); diff --git a/benches/range.rs b/benches/range.rs index b691e9c7..def302c7 100644 --- a/benches/range.rs +++ b/benches/range.rs @@ -33,7 +33,7 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; let advices = (0..2) - .map(|_| VarTensor::new_advice(cs, K, len, vec![len], true, 512)) + .map(|_| VarTensor::new_advice(cs, K, len, vec![len], true)) .collect_vec(); RangeCheckConfig::configure(cs, &advices[0], &advices[1], RANGE) diff --git a/benches/relu.rs b/benches/relu.rs index 85962381..b1b7edf9 100644 --- a/benches/relu.rs +++ b/benches/relu.rs @@ -1,36 +1,40 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::lookup::{Config, Op}; +use ezkl_lib::circuit::base::CheckMode; +use ezkl_lib::circuit::lookup::*; +use ezkl_lib::commands::TranscriptType; +use ezkl_lib::execute::create_proof_circuit_kzg; +use ezkl_lib::pfsys::{create_keys, gen_srs}; use ezkl_lib::tensor::*; -use halo2_proofs::dev::MockProver; +use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme; +use halo2_proofs::poly::kzg::strategy::SingleStrategy; use halo2_proofs::{ - arithmetic::FieldExt, circuit::{Layouter, SimpleFloorPlanner, Value}, plonk::{Circuit, ConstraintSystem, Error}, }; -use halo2curves::pasta::Fp as F; +use halo2curves::bn256::{Bn256, Fr}; use rand::Rng; const BITS: usize = 8; static mut LEN: usize = 4; -const K: usize = 10; +const K: usize = 16; #[derive(Clone)] -struct NLCircuit { - pub input: ValTensor, +struct NLCircuit { + pub input: ValTensor, } -impl Circuit for NLCircuit { - type Config = Config; +impl Circuit for NLCircuit { + type Config = Config; type FloorPlanner = SimpleFloorPlanner; fn without_witnesses(&self) -> Self { self.clone() } - fn configure(cs: &mut ConstraintSystem) -> Self::Config { + fn configure(cs: &mut ConstraintSystem) -> Self::Config { unsafe { let advices = (0..2) - .map(|_| VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512)) + .map(|_| VarTensor::new_advice(cs, K, LEN, vec![LEN], true)) .collect::>(); let nl = Op::ReLU { scale: 128 }; @@ -42,7 +46,7 @@ impl Circuit for NLCircuit { fn synthesize( &self, mut config: Self::Config, - mut layouter: impl Layouter, // layouter is our 'write buffer' for the circuit + mut layouter: impl Layouter, // layouter is our 'write buffer' for the circuit ) -> Result<(), Error> { config.layout(&mut layouter, &self.input).unwrap(); @@ -54,24 +58,43 @@ fn runrelu(c: &mut Criterion) { let mut group = c.benchmark_group("relu"); let mut rng = rand::thread_rng(); - + let params = gen_srs::>(17); for &len in [4, 8, 16, 32, 64].iter() { unsafe { LEN = len; }; - let input: Tensor> = + let input: Tensor> = Tensor::::from((0..len).map(|_| rng.gen_range(0..10))).into(); - let circuit = NLCircuit:: { + let circuit = NLCircuit { input: ValTensor::from(input.clone()), }; group.throughput(Throughput::Elements(len as u64)); - group.bench_with_input(BenchmarkId::from_parameter(len), &len, |b, &_| { + group.bench_with_input(BenchmarkId::new("pk", len), &len, |b, &_| { b.iter(|| { - let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap(); - prover.assert_satisfied(); + create_keys::, Fr, NLCircuit>(&circuit, ¶ms) + .unwrap(); + }); + }); + + let pk = + create_keys::, Fr, NLCircuit>(&circuit, ¶ms).unwrap(); + + group.throughput(Throughput::Elements(len as u64)); + group.bench_with_input(BenchmarkId::new("prove", 
len), &len, |b, &_| { + b.iter(|| { + let prover = create_proof_circuit_kzg( + circuit.clone(), + ¶ms, + vec![], + &pk, + TranscriptType::Blake, + SingleStrategy::new(¶ms), + CheckMode::SAFE, + ); + prover.unwrap(); }); }); } diff --git a/benches/sum.rs b/benches/sum.rs index dc21747e..b7bd1868 100644 --- a/benches/sum.rs +++ b/benches/sum.rs @@ -1,5 +1,6 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; -use ezkl_lib::circuit::polynomial::*; +use ezkl_lib::circuit::base::CheckMode; +use ezkl_lib::circuit::fused::*; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::{create_keys, gen_srs}; @@ -20,7 +21,7 @@ const K: usize = 16; #[derive(Clone)] struct MyCircuit { - inputs: [ValTensor; 2], + inputs: [ValTensor; 1], _marker: PhantomData, } @@ -35,15 +36,14 @@ impl Circuit for MyCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let len = unsafe { LEN }; - let a = VarTensor::new_advice(cs, K, len, vec![len], true, 512); - let b = VarTensor::new_advice(cs, K, len, vec![len], true, 512); - let output = VarTensor::new_advice(cs, K, len, vec![1], true, 512); + let a = VarTensor::new_advice(cs, K, len, vec![len], true); + let output = VarTensor::new_advice(cs, K, len, vec![1], true); let sum_node = Node { op: Op::Sum, input_order: vec![InputType::Input(0)], }; - Self::Config::configure(cs, &[a, b], &output, &[sum_node]) + Self::Config::configure(cs, &[a], &output, &[sum_node]) } fn synthesize( @@ -59,7 +59,8 @@ impl Circuit for MyCircuit { fn runsum(c: &mut Criterion) { let mut group = c.benchmark_group("sum"); let params = gen_srs::>(17); - for &len in [16].iter() { + { + let &len = &16; unsafe { LEN = len; }; @@ -67,10 +68,8 @@ fn runsum(c: &mut Criterion) { // parameters let a = Tensor::from((0..len).map(|_| Value::known(Fr::random(OsRng)))); - let b = Tensor::from((0..len).map(|_| Value::known(Fr::random(OsRng)))); - let circuit = MyCircuit { - inputs: [ValTensor::from(a), ValTensor::from(b)], + inputs: [ValTensor::from(a)], _marker: PhantomData, }; @@ -95,6 +94,7 @@ fn runsum(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/benches/sumpool.rs b/benches/sumpool.rs index cfc05c3a..ebf348b0 100644 --- a/benches/sumpool.rs +++ b/benches/sumpool.rs @@ -1,9 +1,10 @@ use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use ezkl_lib::circuit::base::CheckMode; use ezkl_lib::commands::TranscriptType; use ezkl_lib::execute::create_proof_circuit_kzg; use ezkl_lib::pfsys::gen_srs; use ezkl_lib::tensor::*; -use ezkl_lib::{circuit::polynomial::*, pfsys::create_keys}; +use ezkl_lib::{circuit::fused::*, pfsys::create_keys}; use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme; use halo2_proofs::poly::kzg::strategy::SingleStrategy; use halo2_proofs::{ @@ -44,7 +45,6 @@ impl Circuit for MyCircuit { IN_CHANNELS * IMAGE_HEIGHT * IMAGE_WIDTH, vec![IN_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH], true, - 512, ); let output = VarTensor::new_advice( @@ -53,7 +53,6 @@ impl Circuit for MyCircuit { 3 * output_height * output_width, vec![3, output_height, output_width], true, - 512, ); // tells the config layer to add a conv op to a circuit gate @@ -121,6 +120,7 @@ fn runsumpool(c: &mut Criterion) { &pk, TranscriptType::Blake, SingleStrategy::new(¶ms), + CheckMode::UNSAFE, ); prover.unwrap(); }); diff --git a/examples/conv2d_mnist/main.rs b/examples/conv2d_mnist/main.rs index 
ba571c2b..6a49ea62 100644 --- a/examples/conv2d_mnist/main.rs +++ b/examples/conv2d_mnist/main.rs @@ -1,10 +1,9 @@ +use ezkl_lib::circuit::base::{BaseConfig as PolyConfig, CheckMode, Op as PolyOp}; use ezkl_lib::circuit::lookup::{Config as LookupConfig, Op as LookupOp}; -use ezkl_lib::circuit::polynomial::{ - Config as PolyConfig, InputType as PolyInputType, Node as PolyNode, Op as PolyOp, -}; use ezkl_lib::fieldutils; use ezkl_lib::fieldutils::i32_to_felt; use ezkl_lib::tensor::*; +use halo2_proofs::dev::MockProver; use halo2_proofs::{ arithmetic::FieldExt, circuit::{Layouter, SimpleFloorPlanner, Value}, @@ -29,12 +28,11 @@ use halo2curves::pasta::vesta; use halo2curves::pasta::Fp as F; use mnist::*; use rand::rngs::OsRng; -use std::cmp::max; use std::time::Instant; mod params; -const K: usize = 17; +const K: usize = 20; #[derive(Clone)] struct Config< @@ -55,10 +53,9 @@ struct Config< Value: TensorType, { // this will be a conv layer - l0: PolyConfig, - l1: LookupConfig, + layer_config: PolyConfig, + relu: LookupConfig, // this will be an affine layer - l2: PolyConfig, public_output: Column, } @@ -142,94 +139,31 @@ where // Here we wire together the layers by using the output advice in each layer as input advice in the next (not with copying / equality). // This can be automated but we will sometimes want skip connections, etc. so we need the flexibility. fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let output_height = (IMAGE_HEIGHT + 2 * PADDING - KERNEL_HEIGHT) / STRIDE + 1; - let output_width = (IMAGE_WIDTH + 2 * PADDING - KERNEL_WIDTH) / STRIDE + 1; + let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let params = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); - let input = VarTensor::new_advice( - cs, - K, - max(IN_CHANNELS * IMAGE_HEIGHT * IMAGE_WIDTH, LEN), - vec![IN_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH], - true, - 512, - ); - let kernel = VarTensor::new_advice( - cs, - K, - max( - OUT_CHANNELS * IN_CHANNELS * KERNEL_HEIGHT * KERNEL_WIDTH, - CLASSES * LEN, - ), - vec![OUT_CHANNELS, IN_CHANNELS, KERNEL_HEIGHT, KERNEL_WIDTH], - true, - 512, - ); + println!("INPUT COL {:#?}", input); - let bias = VarTensor::new_advice( + let layer_config = PolyConfig::configure( cs, - K, - max(OUT_CHANNELS, CLASSES), - vec![OUT_CHANNELS], - true, - 512, - ); - let output = VarTensor::new_advice( - cs, - K, - max(OUT_CHANNELS * output_height * output_width, LEN), - vec![OUT_CHANNELS, output_height, output_width], - true, - 512, - ); - - // tells the config layer to add a conv op to a circuit gate - let conv_node = PolyNode { - op: PolyOp::Conv { - padding: (PADDING, PADDING), - stride: (STRIDE, STRIDE), - }, - input_order: vec![ - PolyInputType::Input(0), - PolyInputType::Input(1), - PolyInputType::Input(2), - ], - }; - - let l0 = PolyConfig::configure( - cs, - &[input.clone(), kernel.clone(), bias.clone()], + &[input.clone(), params], &output, - &[conv_node], + CheckMode::SAFE, ); let input = input.reshape(&[LEN]); let output = output.reshape(&[LEN]); - let l1 = + let relu = LookupConfig::configure(cs, &input, &output, BITS, &[LookupOp::ReLU { scale: 32 }]); - // tells the config layer to add an affine op to the circuit gate - let affine_node = PolyNode { - op: PolyOp::Affine, - input_order: vec![ - PolyInputType::Input(0), - PolyInputType::Input(1), - PolyInputType::Input(2), - ], - }; - - let kernel = kernel.reshape(&[CLASSES, LEN]); - let bias = bias.reshape(&[CLASSES]); - let output = 
output.reshape(&[CLASSES]); - - let l2 = PolyConfig::configure(cs, &[input, kernel, bias], &output, &[affine_node]); let public_output: Column = cs.instance_column(); cs.enable_equality(public_output); Config { - l0, - l1, - l2, + layer_config, + relu, public_output, } } @@ -240,7 +174,7 @@ where mut layouter: impl Layouter, ) -> Result<(), Error> { let x = config - .l0 + .layer_config .layout( &mut layouter, &[ @@ -248,21 +182,33 @@ where self.l0_params[0].clone(), self.l0_params[1].clone(), ], + 0, + PolyOp::Conv { + padding: (PADDING, PADDING), + stride: (STRIDE, STRIDE), + }, ) .unwrap(); - let mut x = config.l1.layout(&mut layouter, &x).unwrap(); + let mut x = config.relu.layout(&mut layouter, &x).unwrap(); x.flatten(); let l2out = config - .l2 + .layer_config .layout( &mut layouter, &[x, self.l2_params[0].clone(), self.l2_params[1].clone()], + 0, + PolyOp::Affine, ) .unwrap(); match l2out { - ValTensor::PrevAssigned { inner: v, dims: _ } => v - .enum_map(|i, x| layouter.constrain_instance(x.cell(), config.public_output, i)) + ValTensor::Value { inner: v, dims: _ } => v + .enum_map(|i, x| match x { + ValType::PrevAssigned(v) => { + layouter.constrain_instance(v.cell(), config.public_output, i) + } + _ => panic!(), + }) .unwrap(), _ => panic!("Should be assigned"), }; @@ -328,7 +274,7 @@ pub fn runconv() { .flatten() .flatten() .map(|fl| { - let dx = (fl as f32) * 32_f32; + let dx = fl * 32_f32; let rounded = dx.round(); let integral: i32 = unsafe { rounded.to_int_unchecked() }; let felt = fieldutils::i32_to_felt(integral); @@ -346,14 +292,17 @@ pub fn runconv() { ) .into(); - let l2_biases: ValTensor = Tensor::>::from(myparams.biases.into_iter().map(|fl| { - let dx = fl * 32_f32; - let rounded = dx.round(); - let integral: i32 = unsafe { rounded.to_int_unchecked() }; - let felt = fieldutils::i32_to_felt(integral); - Value::known(felt) - })) - .into(); + let mut l2_biases: ValTensor = + Tensor::>::from(myparams.biases.into_iter().map(|fl| { + let dx = fl * 32_f32; + let rounded = dx.round(); + let integral: i32 = unsafe { rounded.to_int_unchecked() }; + let felt = fieldutils::i32_to_felt(integral); + Value::known(felt) + })) + .into(); + + l2_biases.reshape(&[l2_biases.len(), 1]).unwrap(); let mut l2_weights: ValTensor = Tensor::>::from(myparams.weights.into_iter().flatten().map(|fl| { @@ -410,18 +359,36 @@ pub fn runconv() { .into(); let pi_inner: Tensor = public_input.map(i32_to_felt::); + + println!("MOCK PROVING"); + let now = Instant::now(); + let prover = MockProver::run( + K as u32, + &circuit, + vec![pi_inner.clone().into_iter().collect()], + ) + .unwrap(); + prover.assert_satisfied(); + println!("MOCK PROVING took {}", now.elapsed().as_secs()); + let pi_for_real_prover: &[&[&[F]]] = &[&[&pi_inner]]; // Real proof + println!("SRS GENERATION"); + let now = Instant::now(); let params: ParamsIPA = ParamsIPA::new(K as u32); + println!("SRS GENERATION took {}", now.elapsed().as_secs()); let empty_circuit = circuit.without_witnesses(); // Initialize the proving key + println!("VK GENERATION"); let now = Instant::now(); let vk = keygen_vk(¶ms, &empty_circuit).expect("keygen_vk should not fail"); - println!("VK took {}", now.elapsed().as_secs()); + println!("VK GENERATION took {}", now.elapsed().as_secs()); + println!("PK GENERATION"); let now = Instant::now(); let pk = keygen_pk(¶ms, vk, &empty_circuit).expect("keygen_pk should not fail"); - println!("PK took {}", now.elapsed().as_secs()); + println!("PK GENERATION took {}", now.elapsed().as_secs()); + println!("PROOF GENERATION"); 
let now = Instant::now(); let mut transcript = Blake2bWrite::<_, _, Challenge255<_>>::init(vec![]); let mut rng = OsRng; @@ -436,7 +403,7 @@ pub fn runconv() { .expect("proof generation should not fail"); let proof = transcript.finalize(); //println!("{:?}", proof); - println!("Proof took {}", now.elapsed().as_secs()); + println!("PROOF GENERATION took {}", now.elapsed().as_secs()); let now = Instant::now(); let strategy = SingleStrategy::new(¶ms); let mut transcript = Blake2bRead::<_, _, Challenge255<_>>::init(&proof[..]); diff --git a/examples/default_run_args.json b/examples/default_run_args.json index 6a0484ea..7821f748 100644 --- a/examples/default_run_args.json +++ b/examples/default_run_args.json @@ -6,6 +6,5 @@ "public_inputs": false, "public_outputs": true, "public_params": false, - "max_rotations": 512, "single_lookup": false } \ No newline at end of file diff --git a/examples/mlp_4d.rs b/examples/mlp_4d.rs index d59a494c..4303a958 100644 --- a/examples/mlp_4d.rs +++ b/examples/mlp_4d.rs @@ -1,8 +1,6 @@ use eq_float::F32; +use ezkl_lib::circuit::base::{BaseConfig as PolyConfig, CheckMode, Op as PolyOp}; use ezkl_lib::circuit::lookup::{Config as LookupConfig, Op as LookupOp}; -use ezkl_lib::circuit::polynomial::{ - Config as PolyConfig, InputType as PolyInputType, Node as PolyNode, Op as PolyOp, -}; use ezkl_lib::fieldutils::i32_to_felt; use ezkl_lib::tensor::*; use halo2_proofs::dev::MockProver; @@ -18,9 +16,8 @@ const K: usize = 15; // A columnar ReLu MLP #[derive(Clone)] struct MyConfig { - l0: PolyConfig, + layer_config: PolyConfig, l1: LookupConfig, - l2: PolyConfig, l3: LookupConfig, l4: LookupConfig, public_output: Column, @@ -53,30 +50,18 @@ impl Circuit // Here we wire together the layers by using the output advice in each layer as input advice in the next (not with copying / equality). // This can be automated but we will sometimes want skip connections, etc. so we need the flexibility. 
fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true, 512); - let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let params = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); // tells the config layer to add an affine op to the circuit gate - let affine_node = PolyNode { - op: PolyOp::Affine, - input_order: vec![ - PolyInputType::Input(0), - PolyInputType::Input(1), - PolyInputType::Input(2), - ], - }; - let l0 = PolyConfig::::configure( + let layer_config = PolyConfig::::configure( cs, - &[input.clone(), kernel.clone(), bias.clone()], + &[input.clone(), params], &output, - &[affine_node.clone()], + CheckMode::SAFE, ); - let l2 = - PolyConfig::::configure(cs, &[input.clone(), kernel, bias], &output, &[affine_node]); - // sets up a new ReLU table and resuses it for l1 and l3 non linearities let [l1, l3]: [LookupConfig; 2] = LookupConfig::configure_multiple( cs, @@ -102,9 +87,8 @@ impl Circuit cs.enable_equality(public_output); MyConfig { - l0, + layer_config, l1, - l2, l3, l4, public_output, @@ -117,7 +101,7 @@ impl Circuit mut layouter: impl Layouter, ) -> Result<(), Error> { let x = config - .l0 + .layer_config .layout( &mut layouter, &[ @@ -125,21 +109,31 @@ impl Circuit self.l0_params[0].clone(), self.l0_params[1].clone(), ], + 0, + PolyOp::Affine, ) .unwrap(); - let x = config.l1.layout(&mut layouter, &x).unwrap(); + let mut x = config.l1.layout(&mut layouter, &x).unwrap(); + x.reshape(&[x.dims()[0], 1]).unwrap(); let x = config - .l2 + .layer_config .layout( &mut layouter, &[x, self.l2_params[0].clone(), self.l2_params[1].clone()], + 0, + PolyOp::Affine, ) .unwrap(); let x = config.l3.layout(&mut layouter, &x).unwrap(); let x = config.l4.layout(&mut layouter, &x).unwrap(); match x { - ValTensor::PrevAssigned { inner: v, dims: _ } => v - .enum_map(|i, x| layouter.constrain_instance(x.cell(), config.public_output, i)) + ValTensor::Value { inner: v, dims: _ } => v + .enum_map(|i, x| match x { + ValType::PrevAssigned(v) => { + layouter.constrain_instance(v.cell(), config.public_output, i) + } + _ => panic!(), + }) .unwrap(), _ => panic!("Should be assigned"), }; @@ -155,7 +149,7 @@ pub fn runmlp() { ) .unwrap() .into(); - let l0_bias: Tensor> = Tensor::::new(Some(&[0, 0, 0, 1]), &[4]) + let l0_bias: Tensor> = Tensor::::new(Some(&[0, 0, 0, 1]), &[4, 1]) .unwrap() .into(); @@ -166,10 +160,10 @@ pub fn runmlp() { .unwrap() .into(); // input data, with 1 padding to allow for bias - let input: Tensor> = Tensor::::new(Some(&[-30, -21, 11, 40]), &[4]) + let input: Tensor> = Tensor::::new(Some(&[-30, -21, 11, 40]), &[4, 1]) .unwrap() .into(); - let l2_bias: Tensor> = Tensor::::new(Some(&[0, 0, 0, 1]), &[4]) + let l2_bias: Tensor> = Tensor::::new(Some(&[0, 0, 0, 1]), &[4, 1]) .unwrap() .into(); diff --git a/examples/onnx/1l_average/input.json b/examples/onnx/1l_average/input.json index eba05fe0..8b83148f 100644 --- a/examples/onnx/1l_average/input.json +++ b/examples/onnx/1l_average/input.json @@ -1,11 +1,4 @@ { - "input_shapes": [ - [ - 1, - 5, - 5 - ] - ], "input_data": [ [ 0.1, @@ -35,17 +28,24 @@ 0.1 ] ], + "input_shapes": [ + [ + 1, + 5, + 5 + ] + ], "output_data": [ [ - 0.914, - 0.914, - 0.914, - 0.914, - 
0.914, - 0.914, - 0.914, - 0.914, - 0.914 + 0.9140625, + 0.9140625, + 0.9140625, + 0.9140625, + 0.9140625, + 0.9140625, + 0.9140625, + 0.9140625, + 0.9140625 ] ] } \ No newline at end of file diff --git a/src/bin/ezkl.rs b/src/bin/ezkl.rs index cecc2df1..1897db22 100644 --- a/src/bin/ezkl.rs +++ b/src/bin/ezkl.rs @@ -49,7 +49,7 @@ pub fn format(buf: &mut Formatter, record: &Record<'_>) -> Result<(), std::fmt:: buf, "{} {}", prefix_token(&record.level()), - format!("{}", record.args()).replace("\n", &sep), + format!("{}", record.args()).replace('\n', &sep), ) } @@ -64,7 +64,7 @@ pub fn init_logger() { prefix_token(&record.level()), start.elapsed().as_secs(), record.metadata().target(), - format!("{}", record.args()).replace("\n", &format!("\n{} ", " | ".white().bold())), + format!("{}", record.args()).replace('\n', &format!("\n{} ", " | ".white().bold())), ) }); builder.target(env_logger::Target::Stdout); diff --git a/src/circuit/accumulated/layouts.rs b/src/circuit/base/layouts.rs similarity index 71% rename from src/circuit/accumulated/layouts.rs rename to src/circuit/base/layouts.rs index 3ab5814f..6701293d 100644 --- a/src/circuit/accumulated/layouts.rs +++ b/src/circuit/base/layouts.rs @@ -8,9 +8,9 @@ use crate::{ tensor::{ ops::{ accumulated, add, affine as non_accum_affine, convolution as non_accum_conv, - dot as non_accum_dot, matmul as non_accum_matmul, mult, - scale_and_shift as ref_scale_and_shift, sub, sum as non_accum_sum, - sumpool as non_accum_sumpool, + dot as non_accum_dot, matmul as non_accum_matmul, mult, pack as non_accum_pack, + rescale as ref_rescaled, scale_and_shift as ref_scale_and_shift, sub, + sum as non_accum_sum, sumpool as non_accum_sumpool, }, Tensor, TensorError, }, @@ -18,10 +18,7 @@ use crate::{ use super::*; -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. +/// Dot product accumulated layout pub fn dot( config: &mut BaseConfig, layouter: &mut impl Layouter, @@ -45,7 +42,7 @@ pub fn dot( } // Now we can assign the dot product - let accumulated_dot = accumulated::dot(&inputs) + let accumulated_dot = accumulated::dot(&[inputs[0].clone(), inputs[1].clone()]) .expect("accum poly: dot op failed") .into(); let output = config @@ -57,7 +54,7 @@ pub fn dot( if y == 0 { config .selectors - .get(&BaseOp::InitDot) + .get(&BaseOp::Mult) .unwrap() .enable(&mut region, offset + y)?; } else { @@ -74,7 +71,7 @@ pub fn dot( .expect("accum poly: failed to fetch last elem"); if matches!(config.check_mode, CheckMode::SAFE) { - let safe_dot = non_accum_dot(&inputs.iter().map(|x| x).collect()) + let safe_dot = non_accum_dot(&inputs.iter().collect()) .map_err(|_| halo2_proofs::plonk::Error::Synthesis)?; assert_eq!( @@ -95,10 +92,7 @@ pub fn dot( Ok(ValTensor::from(t)) } -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. 
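The pattern the `dot` layout above constrains can be seen in a minimal sketch, with plain i64 values standing in for field elements: the Mult gate pins row 0 to out[0] = a[0] * b[0], and the Dot gate pins every later row to out[i] = out[i-1] + a[i] * b[i], so the last cell of the running column is the full dot product.

fn accumulated_dot(a: &[i64], b: &[i64]) -> Vec<i64> {
    let mut out = Vec::with_capacity(a.len());
    for i in 0..a.len() {
        // row 0: product only (Mult gate); later rows: previous cell plus product (Dot gate)
        let prev = if i == 0 { 0 } else { out[i - 1] };
        out.push(prev + a[i] * b[i]);
    }
    out
}

fn main() {
    let (a, b) = ([1i64, 2, 3, 4], [5i64, 6, 7, 8]);
    let acc = accumulated_dot(&a, &b);
    let naive: i64 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
    assert_eq!(*acc.last().unwrap(), naive); // both equal 70
}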
+/// Sum accumulated layout pub fn sum( config: &mut BaseConfig, layouter: &mut impl Layouter, @@ -109,9 +103,9 @@ pub fn sum( || "assign inputs", |mut region| { let input = utils::value_muxer( - &config.inputs[0], + &config.inputs[1], &{ - let res = config.inputs[0].assign(&mut region, offset, &values[0])?; + let res = config.inputs[1].assign(&mut region, offset, &values[0])?; res.map(|e| e.value_field().evaluate()) }, &values[0], @@ -168,10 +162,7 @@ pub fn sum( Ok(ValTensor::from(t)) } -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. +/// Pairwise (elementwise) op layout pub fn pairwise( config: &mut BaseConfig, layouter: &mut impl Layouter, @@ -235,10 +226,7 @@ pub fn pairwise( Ok(ValTensor::from(t)) } -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. +/// Matrix multiplication accumulated layout pub fn matmul( config: &mut BaseConfig, layouter: &mut impl Layouter, @@ -301,9 +289,8 @@ pub fn matmul( inputs[1].transpose_2d().unwrap(); // now perform matrix multiplication on the processed tensors - let accumulated_matmul = - accumulated::matmul(&vec![inputs[0].clone(), inputs[1].clone()]) - .expect("accum poly: matmul op failed"); + let accumulated_matmul = accumulated::matmul(&[inputs[0].clone(), inputs[1].clone()]) + .expect("accum poly: matmul op failed"); let output = config .output @@ -321,7 +308,7 @@ pub fn matmul( } else { config .selectors - .get(&BaseOp::InitDot) + .get(&BaseOp::Mult) .unwrap() .enable(&mut region, offset + y)?; } @@ -364,17 +351,20 @@ pub fn matmul( Ok(ValTensor::from(t)) } -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. +/// Affine operation accumulated layout pub fn affine( config: &mut BaseConfig, layouter: &mut impl Layouter, values: &[ValTensor; 3], offset: usize, ) -> Result, Box> { - let (mut input, kernel, bias) = (values[0].clone(), values[1].clone(), values[2].clone()); + let (mut input, kernel, mut bias) = (values[0].clone(), values[1].clone(), values[2].clone()); + if input.dims().len() == 1 { + input.reshape(&[input.len(), 1])?; + } + if bias.dims().len() == 1 { + bias.reshape(&[bias.len(), 1])?; + } input.pad_row_ones()?; let params = kernel.append_to_row(bias)?; @@ -405,10 +395,7 @@ pub fn affine( Ok(last_elem) } -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. +/// Sumpool accumulated layout pub fn sumpool( config: &mut BaseConfig, layouter: &mut impl Layouter, @@ -441,6 +428,10 @@ pub fn sumpool( }); last_elem.reshape(&[&[image_channels], shape].concat())?; + // if values.len() == 1 { + // panic!() + // } + if matches!(config.check_mode, CheckMode::SAFE) { // during key generation this will be 0 so we use this as a flag to check // TODO: this isn't very safe and would be better to get the phase directly @@ -461,10 +452,7 @@ pub fn sumpool( Ok(last_elem) } -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. 
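The `affine` layout above reduces to the accumulated matmul by padding the input column with a trailing 1 and appending the bias as an extra kernel column. A worked sketch of that identity with small illustrative integers (the concrete values are assumptions, not taken from the patch):

fn main() {
    let w = [[1i64, 2], [3, 4]]; // kernel
    let b = [10i64, 20]; // bias
    let x = [5i64, 6]; // input

    // params = [W | b] (append_to_row), x1 = [x; 1] (pad_row_ones)
    let params = [[1i64, 2, 10], [3, 4, 20]];
    let x1 = [5i64, 6, 1];

    for i in 0..2 {
        let affine = w[i][0] * x[0] + w[i][1] * x[1] + b[i];
        let matmul: i64 = (0..3).map(|j| params[i][j] * x1[j]).sum();
        assert_eq!(affine, matmul); // rows evaluate to 27 and 59
    }
}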
+/// Convolution accumulated layout pub fn conv( config: &mut BaseConfig, layouter: &mut impl Layouter, @@ -473,9 +461,6 @@ pub fn conv( stride: (usize, usize), offset: usize, ) -> Result, Box> { - // assert!(stride.0 == 1); - // assert!(stride.1 == 1); - let has_bias = values.len() == 3; let (image, kernel) = (values[0].clone(), values[1].clone()); @@ -519,18 +504,11 @@ pub fn conv( stride.1, )?; - // expanded_kernel.downsample(0, stride.0)?; - // expanded_kernel.downsample(0, stride.1)?; - - // println!("{:?}", expanded_kernel.dims()); - // println!("{:?}", padded_image.dims()); - let mut res = if has_bias { let mut tiled_bias = values[2].clone(); if (tiled_bias.dims().len() != 1) || (tiled_bias.dims()[0] != kernel.dims()[0]) { return Err(Box::new(TensorError::DimMismatch("conv bias".to_string()))); } - // TODO: don't know if this correct tiled_bias.repeat_rows(vert_slides * horz_slides)?; tiled_bias.flatten(); tiled_bias.reshape(&[tiled_bias.dims()[0], 1])?; @@ -573,11 +551,148 @@ pub fn conv( Ok(res) } +/// Power accumulated layout +pub fn pow( + config: &mut BaseConfig, + layouter: &mut impl Layouter, + values: &[ValTensor; 1], + exponent: u32, + offset: usize, +) -> Result, Box> { + let mut t = values[0].clone(); -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. + for _ in 1..exponent { + t = pairwise( + config, + layouter, + &[t, values[0].clone()], + offset, + BaseOp::Mult, + )?; + } + + if matches!(config.check_mode, CheckMode::SAFE) { + // during key generation this will be 0 so we use this as a flag to check + // TODO: this isn't very safe and would be better to get the phase directly + let is_assigned = !Into::>::into(t.get_inner()?) + .iter() + .all(|&x| x == 0); + if is_assigned { + let safe_pow = values[0] + .get_inner() + .unwrap() + .pow(exponent) + .map_err(|_| halo2_proofs::plonk::Error::Synthesis)?; + + assert_eq!( + Into::>::into(t.get_inner()?), + Into::>::into(safe_pow), + ) + } + } + + Ok(t) +} + +/// Rescaled op accumulated layout +pub fn rescale( + config: &mut BaseConfig, + layouter: &mut impl Layouter, + values: &[ValTensor; 1], + scales: &[(usize, usize)], + offset: usize, +) -> Result>, Box> { + let mut rescaled_inputs = vec![]; + for (i, ri) in values.iter().enumerate() { + let num_elems = ri.dims().iter().product::(); + let mult = Value::known(F::from(scales[i].1 as u64)); + let mult_tensor = Tensor::new(Some(&vec![mult; num_elems]), ri.dims())?; + let scaled_input = pairwise( + config, + layouter, + &[ri.clone(), mult_tensor.into()], + offset, + BaseOp::Mult, + )?; + if matches!(config.check_mode, CheckMode::SAFE) { + // during key generation this will be 0 so we use this as a flag to check + // TODO: this isn't very safe and would be better to get the phase directly + let is_assigned = !Into::>::into(scaled_input.clone().get_inner()?) 
+ .iter() + .all(|&x| x == 0); + if is_assigned { + let safe_rescale = ref_rescaled(&ri.get_inner().unwrap(), scales[i].1) + .map_err(|_| halo2_proofs::plonk::Error::Synthesis)?; + + assert_eq!( + Into::>::into(scaled_input.get_inner()?), + Into::>::into(safe_rescale), + ) + } + } + rescaled_inputs.push(scaled_input); + } + + Ok(rescaled_inputs) +} + +/// Pack accumulated layout +pub fn pack( + config: &mut BaseConfig, + layouter: &mut impl Layouter, + values: &[ValTensor; 1], + base: u32, + scale: u32, + offset: usize, +) -> Result, Box> { + let mut t = values[0].clone(); + t.flatten(); + + // these unwraps should never ever fail if the TensorTypes are correctly implemented + // if anything we want these to hard fail if not implemented + let mut base_t = ::zero().unwrap(); + for _ in 0..base { + base_t += ::one().unwrap(); + } + let mut accum_base = vec![]; + let base_tensor = Tensor::new(Some(&[base_t]), &[1])?; + for i in 0..t.dims().iter().product::() { + accum_base.push(Value::known(base_tensor.pow((i as u32) * (scale + 1))?[0])); + } + + let base_tensor = Tensor::new(Some(&accum_base), &[accum_base.len()])?; + + let base_prod = pairwise( + config, + layouter, + &[t, base_tensor.into()], + offset, + BaseOp::Mult, + )?; + + let res = sum(config, layouter, &[base_prod], offset)?; + + if matches!(config.check_mode, CheckMode::SAFE) { + // during key generation this will be 0 so we use this as a flag to check + // TODO: this isn't very safe and would be better to get the phase directly + let is_assigned = !Into::>::into(res.get_inner()?) + .iter() + .all(|&x| x == 0); + if is_assigned { + let safe_pow = non_accum_pack(&values[0].get_inner()?, Value::known(base_t), scale) + .map_err(|_| halo2_proofs::plonk::Error::Synthesis)?; + + assert_eq!( + Into::>::into(res.get_inner()?), + Into::>::into(safe_pow), + ) + } + } + + Ok(res) +} + +/// Dummy (no constraints) reshape layout pub fn reshape( values: &[ValTensor; 1], new_dims: &[usize], @@ -587,20 +702,52 @@ pub fn reshape( Ok(t) } -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter. +/// Identity constraint. Usually used to constrain an instance column to an advice so the returned cells / values can be operated upon. pub fn identity( + config: &mut BaseConfig, + layouter: &mut impl Layouter, values: &[ValTensor; 1], + offset: usize, ) -> Result, Box> { - Ok(values[0].clone()) + let t = match layouter.assign_region( + || "identity", + |mut region| { + let inp = utils::value_muxer( + &config.inputs[0], + &{ + // always an advice + let res = config.inputs[1].assign(&mut region, offset, &values[0])?; + res.map(|e| e.value_field().evaluate()) + }, + &values[0], + ); + + let output = config + .output + .assign(&mut region, offset, &inp.clone().into())?; + + for i in 0..inp.len() { + let (_, y) = config.inputs[0].cartesian_coord(i); + config + .selectors + .get(&BaseOp::Identity) + .unwrap() + .enable(&mut region, offset + y)?; + } + + Ok(output) + }, + ) { + Ok(a) => a, + Err(e) => { + return Err(Box::new(e)); + } + }; + + Ok(ValTensor::from(t)) } -/// Assigns variables to the regions created when calling `configure`. -/// # Arguments -/// * `values` - The explicit values to the operations. -/// * `layouter` - A Halo2 Layouter.
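A worked sketch of what the `pack` layout above computes, assuming plain u64 arithmetic in place of field elements: each flattened element is multiplied by base^(i * (scale + 1)) via the Mult gate and the products are collapsed by the accumulated sum, so the whole tensor packs into a single value.

fn pack(values: &[u64], base: u64, scale: u32) -> u64 {
    values
        .iter()
        .enumerate()
        .map(|(i, v)| v * base.pow(i as u32 * (scale + 1)))
        .sum()
}

fn main() {
    // with base 2 and scale 1: 1*2^0 + 2*2^2 + 3*2^4 + 4*2^6 = 313
    assert_eq!(pack(&[1, 2, 3, 4], 2, 1), 313);
}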
+/// Scale and shift accumulated layout pub fn scale_and_shift( config: &mut BaseConfig, layouter: &mut impl Layouter, @@ -614,7 +761,7 @@ pub fn scale_and_shift( if matches!(config.check_mode, CheckMode::SAFE) { // during key generation this will be 0 so we use this as a flag to check // TODO: this isn't very safe and would be better to get the phase directly - let is_assigned = !Into::>::into(res.clone().get_inner()?) + let is_assigned = !Into::>::into(res.get_inner()?) .iter() .all(|&x| x == 0); if is_assigned { diff --git a/src/circuit/base/mod.rs b/src/circuit/base/mod.rs new file mode 100644 index 00000000..b1b2e879 --- /dev/null +++ b/src/circuit/base/mod.rs @@ -0,0 +1,476 @@ +/// Layouts for specific functions (composed of base ops) +pub mod layouts; + +/// Tests +#[cfg(test)] +mod tests; + +use halo2_proofs::{ + circuit::Layouter, + plonk::{ConstraintSystem, Constraints, Expression, Selector}, +}; +use halo2curves::FieldExt; +use itertools::Itertools; +use log::trace; +use serde::{Deserialize, Serialize}; + +use crate::tensor::{self, Tensor, TensorError, TensorType, ValTensor, VarTensor}; +use std::{ + collections::BTreeMap, + error::Error, + fmt, + marker::PhantomData, + ops::{Add, Mul, Sub}, +}; + +#[allow(missing_docs)] +/// An enum representing the operations that can be used to express more complex operations via accumulation +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum BaseOp { + Dot, + Identity, + Add, + Mult, + Sub, + Sum, +} + +#[allow(missing_docs)] +/// An enum representing activating the sanity checks we can perform on the accumulated arguments +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] +pub enum CheckMode { + SAFE, + UNSAFE, +} + +impl From for CheckMode { + fn from(value: String) -> Self { + match value.to_lowercase().as_str() { + "safe" => CheckMode::SAFE, + "unsafe" => CheckMode::UNSAFE, + _ => panic!("not a valid checkmode"), + } + } +} + +/// Matches a [BaseOp] to an operation over inputs +impl BaseOp { + /// forward func + pub fn f + Sub + Mul>( + &self, + inputs: (T, T, T), + ) -> T { + let (a, b, m) = inputs; + match &self { + BaseOp::Dot => a * b + m, + BaseOp::Add => a + b, + BaseOp::Identity => b, + BaseOp::Sum => b + m, + BaseOp::Sub => a - b, + BaseOp::Mult => a * b, + } + } + + fn as_str(&self) -> &'static str { + match self { + BaseOp::Identity => "IDENTITY", + BaseOp::Dot => "DOT", + BaseOp::Add => "ADD", + BaseOp::Sub => "SUB", + BaseOp::Mult => "MULT", + BaseOp::Sum => "SUM", + } + } + fn query_offset_rng(&self) -> (i32, usize) { + match self { + BaseOp::Identity => (0, 1), + BaseOp::Dot => (-1, 2), + BaseOp::Add => (0, 1), + BaseOp::Sub => (0, 1), + BaseOp::Mult => (0, 1), + BaseOp::Sum => (-1, 2), + } + } + fn num_inputs(&self) -> usize { + match self { + BaseOp::Identity => 1, + BaseOp::Dot => 2, + BaseOp::Add => 2, + BaseOp::Sub => 2, + BaseOp::Mult => 2, + BaseOp::Sum => 1, + } + } + fn constraint_idx(&self) -> usize { + match self { + BaseOp::Identity => 0, + BaseOp::Dot => 1, + BaseOp::Add => 0, + BaseOp::Sub => 0, + BaseOp::Mult => 0, + BaseOp::Sum => 1, + } + } +} + +impl fmt::Display for BaseOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +#[allow(missing_docs)] +/// An enum representing the operations that can be used to express more complex operations via accumulation +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum Op { + Dot, + Matmul, + Affine, + Conv { + padding: (usize, usize), + 
stride: (usize, usize), + }, + SumPool { + padding: (usize, usize), + stride: (usize, usize), + kernel_shape: (usize, usize), + }, + Add, + Sub, + Mult, + Identity, + Reshape(Vec), + Flatten(Vec), + BatchNorm, + ScaleAndShift, + Pad(usize, usize), + Sum, + Pow(u32), + Pack(u32, u32), + GlobalSumPool, + Rescaled { + inner: Box, + scale: Vec<(usize, usize)>, + }, +} + +impl Op { + /// Matches an [Op] to an operation in the `tensor::ops` module. + pub fn f + Sub + Mul>( + &self, + mut inputs: Vec>, + ) -> Result, TensorError> { + match &self { + Op::Identity => Ok(inputs[0].clone()), + Op::Reshape(new_dims) => { + let mut t = inputs[0].clone(); + t.reshape(new_dims); + Ok(t) + } + Op::Flatten(new_dims) => { + let mut t = inputs[0].clone(); + t.reshape(new_dims); + Ok(t) + } + Op::Pad(dim1, dim2) => { + if 1 != inputs.len() { + return Err(TensorError::DimMismatch("pad inputs".to_string())); + } + tensor::ops::pad(&inputs[0], (*dim1, *dim2)) + } + Op::Add => tensor::ops::add(&inputs), + Op::Sub => tensor::ops::sub(&inputs), + Op::Mult => tensor::ops::mult(&inputs), + Op::Affine => tensor::ops::affine(&inputs), + Op::BatchNorm => tensor::ops::scale_and_shift(&inputs), + Op::ScaleAndShift => tensor::ops::scale_and_shift(&inputs), + Op::Matmul => tensor::ops::matmul(&inputs), + Op::Dot => tensor::ops::dot(&inputs.iter().collect()), + Op::Conv { padding, stride } => tensor::ops::convolution(&inputs, *padding, *stride), + Op::SumPool { + padding, + stride, + kernel_shape, + } => tensor::ops::sumpool(&inputs[0], *padding, *stride, *kernel_shape), + Op::Pack(base, scale) => { + if 1 != inputs.len() { + return Err(TensorError::DimMismatch("pack inputs".to_string())); + } + // these unwraps should never ever fail if the TensorTypes are correctly implemented + // if anything we want these to hard fail if not implemented + let mut base_t = T::zero().unwrap(); + for _ in 0..*base { + base_t = base_t + T::one().unwrap(); + } + tensor::ops::pack(&inputs[0], base_t, *scale) + } + Op::Pow(u) => { + if 1 != inputs.len() { + return Err(TensorError::DimMismatch("pow inputs".to_string())); + } + inputs[0].pow(*u) + } + Op::Sum => { + if 1 != inputs.len() { + return Err(TensorError::DimMismatch("sum inputs".to_string())); + } + tensor::ops::sum(&inputs[0]) + } + Op::Rescaled { inner, scale } => { + if scale.len() != inputs.len() { + return Err(TensorError::DimMismatch("rescaled inputs".to_string())); + } + + let mut rescaled_inputs = vec![]; + for (i, ri) in inputs.iter_mut().enumerate() { + rescaled_inputs.push(tensor::ops::rescale(ri, scale[i].1)?); + } + Ok(inner.f(rescaled_inputs)?)
+ } + Op::GlobalSumPool => unreachable!(), + } + } +} + +impl fmt::Display for Op { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Op::Identity => write!(f, "identity"), + Op::Reshape(new_dims) => write!(f, "reshape to {:?}", new_dims), + Op::Flatten(new_dims) => write!(f, "flatten to {:?}", new_dims), + Op::Pad(dim1, dim2) => write!(f, "padding: ({:?}, {:?})", dim1, dim2), + Op::Add => write!(f, "add"), + Op::Sub => write!(f, "sub"), + Op::Sum => write!(f, "sum"), + Op::Mult => write!(f, "mult"), + Op::Matmul => write!(f, "matmul"), + Op::Dot => write!(f, "dot"), + Op::Pack(base, _) => write!(f, "pack with base {:?}", base), + Op::Affine => write!(f, "affine"), + Op::BatchNorm => write!(f, "batchnorm"), + Op::ScaleAndShift => write!(f, "scale & shift"), + Op::Conv { padding, stride } => { + write!(f, "conv w/ padding: {:?}, stride: {:?}", padding, stride) + } + Op::SumPool { + padding, + stride, + kernel_shape, + } => { + write!( + f, + "avg pl w/ padding: {:?}, stride: {:?}, kernel shape: {:?}", + padding, stride, kernel_shape, + ) + } + Op::GlobalSumPool => write!(f, "globalsumpool"), + Op::Pow(s) => write!(f, "pow {}", s), + Op::Rescaled { inner, scale } => { + write!( + f, + "{} w/ scalings: {:?}", + **inner, + scale.iter().map(|e| e.1).collect_vec() + ) + } + } + } +} + +/// Configuration for an accumulated arg. +#[derive(Clone, Debug)] +pub struct BaseConfig { + /// the inputs to the accumulated operations. + pub inputs: Vec, + /// the (currently singular) output of the accumulated operations. + pub output: VarTensor, + /// [Selectors] generated when configuring the layer. We use a BTreeMap as we expect to configure many base gates. + pub selectors: BTreeMap, + /// Activate sanity checks + pub check_mode: CheckMode, + _marker: PhantomData, +} + +impl BaseConfig { + /// Configures [BaseOp]s for a given [ConstraintSystem]. + /// # Arguments + /// * `inputs` - The explicit inputs to the operations. + /// * `output` - The variable representing the (currently singular) output of the operations. + /// * `check_mode` - Whether to run sanity checks (SAFE) on the assigned values during layout, or skip them (UNSAFE).
+ pub fn configure( + meta: &mut ConstraintSystem, + inputs: &[VarTensor; 2], + output: &VarTensor, + check_mode: CheckMode, + ) -> Self { + // setup a selector per base op + let mut selectors = BTreeMap::new(); + for input in inputs { + // we don't support multiple columns rn + assert!(input.num_cols() == 1); + } + selectors.insert(BaseOp::Add, meta.selector()); + selectors.insert(BaseOp::Sub, meta.selector()); + selectors.insert(BaseOp::Dot, meta.selector()); + selectors.insert(BaseOp::Sum, meta.selector()); + selectors.insert(BaseOp::Mult, meta.selector()); + selectors.insert(BaseOp::Identity, meta.selector()); + + let config = Self { + selectors, + inputs: inputs.to_vec(), + output: output.clone(), + check_mode, + _marker: PhantomData, + }; + + for (base_op, selector) in config.selectors.iter() { + meta.create_gate(base_op.as_str(), |meta| { + let selector = meta.query_selector(*selector); + + let mut qis = vec![Expression::::zero().unwrap(); 2]; + for (i, q_i) in qis + .iter_mut() + .enumerate() + .take(2) + .skip(2 - base_op.num_inputs()) + { + *q_i = config.inputs[i] + .query_rng(meta, 0, 1) + .expect("accum: input query failed")[0] + .clone() + } + + // Get output expressions for each input channel + let (offset, rng) = base_op.query_offset_rng(); + + let expected_output: Tensor> = config + .output + .query_rng(meta, offset, rng) + .expect("poly: output query failed"); + + let res = base_op.f((qis[0].clone(), qis[1].clone(), expected_output[0].clone())); + + let constraints = vec![expected_output[base_op.constraint_idx()].clone() - res]; + + Constraints::with_selector(selector, constraints) + }); + } + + config + } + + /// Assigns variables to the regions created when calling `configure`. + /// # Arguments + /// * `values` - The explicit values to the operations. + /// * `layouter` - A Halo2 Layouter. + /// * `offset` - Offset to assign. + /// * `op` - The operation being represented. + pub fn layout( + &mut self, + layouter: &mut impl Layouter, + values: &[ValTensor], + offset: usize, + op: Op, + ) -> Result, Box> { + let mut cp_values = vec![]; + for v in values.iter() { + if let ValTensor::Instance { .. 
} = v { + cp_values.push(layouts::identity(self, layouter, &[v.clone()], offset)?); + } else { + cp_values.push(v.clone()); + } + } + trace!("laying out {}", op); + match op { + Op::Dot => layouts::dot(self, layouter, cp_values[..].try_into()?, offset), + Op::Sum => layouts::sum(self, layouter, cp_values[..].try_into()?, offset), + Op::Matmul => layouts::matmul(self, layouter, cp_values[..].try_into()?, offset), + Op::Affine => layouts::affine(self, layouter, cp_values[..].try_into()?, offset), + Op::Conv { padding, stride } => layouts::conv( + self, + layouter, + cp_values[..].try_into()?, + padding, + stride, + offset, + ), + Op::SumPool { + padding, + stride, + kernel_shape, + } => layouts::sumpool( + self, + layouter, + cp_values[..].try_into()?, + padding, + stride, + kernel_shape, + offset, + ), + Op::Add => layouts::pairwise( + self, + layouter, + cp_values[..].try_into()?, + offset, + BaseOp::Add, + ), + Op::Sub => layouts::pairwise( + self, + layouter, + cp_values[..].try_into()?, + offset, + BaseOp::Sub, + ), + Op::Mult => layouts::pairwise( + self, + layouter, + cp_values[..].try_into()?, + offset, + BaseOp::Mult, + ), + Op::Identity => layouts::identity(self, layouter, cp_values[..].try_into()?, offset), + Op::Reshape(d) | Op::Flatten(d) => layouts::reshape(cp_values[..].try_into()?, &d), + Op::BatchNorm => { + layouts::scale_and_shift(self, layouter, cp_values[..].try_into()?, offset) + } + Op::ScaleAndShift => { + layouts::scale_and_shift(self, layouter, cp_values[..].try_into()?, offset) + } + Op::Pad(p1, p2) => { + if values.len() != 1 { + return Err(Box::new(TensorError::DimError)); + } + let mut input = cp_values[0].clone(); + input.pad((p1, p2))?; + Ok(input) + } + Op::Pow(exp) => layouts::pow(self, layouter, cp_values[..].try_into()?, exp, offset), + Op::Pack(base, scale) => layouts::pack( + self, + layouter, + cp_values[..].try_into()?, + base, + scale, + offset, + ), + Op::Rescaled { inner, scale } => { + if scale.len() != values.len() { + return Err(Box::new(TensorError::DimMismatch( + "rescaled inputs".to_string(), + ))); + } + + let res = &layouts::rescale( + self, + layouter, + cp_values[..].try_into()?, + &scale, + offset, + )?[..]; + self.layout(layouter, res, offset, *inner) + } + Op::GlobalSumPool => unreachable!(), + } + } +} diff --git a/src/circuit/accumulated/mod.rs b/src/circuit/base/tests.rs similarity index 60% rename from src/circuit/accumulated/mod.rs rename to src/circuit/base/tests.rs index 781d02b6..1adf21e7 100644 --- a/src/circuit/accumulated/mod.rs +++ b/src/circuit/base/tests.rs @@ -1,296 +1,29 @@ -/// Layouts for specific functions (composed of base ops) -pub mod layouts; - +use crate::circuit::base::*; use halo2_proofs::{ - circuit::Layouter, - plonk::{ConstraintSystem, Constraints, Expression, Selector}, + arithmetic::FieldExt, + circuit::{Layouter, SimpleFloorPlanner, Value}, + dev::MockProver, + plonk::{Circuit, ConstraintSystem, Error}, }; -use halo2curves::FieldExt; - -use crate::tensor::{Tensor, TensorError, TensorType, ValTensor, VarTensor}; -use std::{ - collections::BTreeMap, - error::Error, - fmt, - marker::PhantomData, - ops::{Add, Mul, Sub}, -}; - -#[allow(missing_docs)] -/// An enum representing the operations that can be used to express more complex operations via accumulation -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum BaseOp { - Dot, - InitDot, - Identity, - Add, - Mult, - Sub, - Sum, -} - -#[allow(missing_docs)] -/// An enum representing activating the sanity checks we can perform on the 
accumulated arguments -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum CheckMode { - SAFE, - UNSAFE, -} - -/// Matches a [BaseOp] to an operation over inputs -impl BaseOp { - /// forward func - pub fn f + Sub + Mul>( - &self, - inputs: (T, T, T), - ) -> T { - let (a, b, m) = inputs; - match &self { - BaseOp::InitDot => a * b, - BaseOp::Dot => a * b + m, - BaseOp::Add => a + b, - BaseOp::Identity => a, - BaseOp::Sum => a + m, - BaseOp::Sub => a - b, - BaseOp::Mult => a * b, - } - } - - fn as_str(&self) -> &'static str { - match self { - BaseOp::InitDot => "INITDOT", - BaseOp::Identity => "IDENTITY", - BaseOp::Dot => "DOT", - BaseOp::Add => "ADD", - BaseOp::Sub => "SUB", - BaseOp::Mult => "MULT", - BaseOp::Sum => "SUM", - } - } - fn query_offset_rng(&self) -> (i32, usize) { - match self { - BaseOp::InitDot => (0, 1), - BaseOp::Identity => (0, 1), - BaseOp::Dot => (-1, 2), - BaseOp::Add => (0, 1), - BaseOp::Sub => (0, 1), - BaseOp::Mult => (0, 1), - BaseOp::Sum => (-1, 2), - } - } - fn num_inputs(&self) -> usize { - match self { - BaseOp::InitDot => 2, - BaseOp::Identity => 1, - BaseOp::Dot => 2, - BaseOp::Add => 2, - BaseOp::Sub => 2, - BaseOp::Mult => 2, - BaseOp::Sum => 1, - } - } - fn constraint_idx(&self) -> usize { - match self { - BaseOp::InitDot => 0, - BaseOp::Identity => 0, - BaseOp::Dot => 1, - BaseOp::Add => 0, - BaseOp::Sub => 0, - BaseOp::Mult => 0, - BaseOp::Sum => 1, - } - } -} - -impl fmt::Display for BaseOp { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.as_str()) - } -} - -#[allow(missing_docs)] -/// An enum representing the operations that can be used to express more complex operations via accumulation -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum Op { - Dot, - Matmul, - Affine, - Conv { - padding: (usize, usize), - stride: (usize, usize), - }, - SumPool { - padding: (usize, usize), - stride: (usize, usize), - kernel_shape: (usize, usize), - }, - Add, - Sub, - Mult, - Identity, - Reshape(Vec), - Flatten(Vec), - BatchNorm, - ScaleAndShift, - Pad(usize, usize), - Sum, -} - -/// Configuration for an accumulated arg. -#[derive(Clone, Debug)] -pub struct BaseConfig { - /// the inputs to the accumulated operations. - pub inputs: Vec, - /// the (currently singular) output of the accumulated operations. - pub output: VarTensor, - /// [Selectors] generated when configuring the layer. We use a BTreeMap as we expect to configure many base gates. - pub selectors: BTreeMap, - /// Activate sanity checks - pub check_mode: CheckMode, - _marker: PhantomData, -} - -impl BaseConfig { - /// Configures the sequence of operations into a circuit gate. - /// # Arguments - /// * `inputs` - The explicit inputs to the operations. - /// * `output` - The variable representing the (currently singular) output of the operations. 
- pub fn configure( - meta: &mut ConstraintSystem, - inputs: &[VarTensor; 2], - output: &VarTensor, - check_mode: CheckMode, - ) -> Self { - // setup a selector per base op - let mut selectors = BTreeMap::new(); - for input in inputs { - // we don't support multiple columns rn - assert!(input.num_cols() == 1); - } - selectors.insert(BaseOp::Add, meta.selector()); - selectors.insert(BaseOp::Sub, meta.selector()); - selectors.insert(BaseOp::Dot, meta.selector()); - selectors.insert(BaseOp::Sum, meta.selector()); - selectors.insert(BaseOp::Mult, meta.selector()); - selectors.insert(BaseOp::InitDot, meta.selector()); - selectors.insert(BaseOp::Identity, meta.selector()); - - let config = Self { - selectors, - inputs: inputs.to_vec(), - output: output.clone(), - check_mode, - _marker: PhantomData, - }; - - for (base_op, selector) in config.selectors.iter() { - meta.create_gate(base_op.as_str(), |meta| { - let selector = meta.query_selector(*selector); - - let mut qis = vec![Expression::::zero().unwrap(); 2]; - for i in 0..base_op.num_inputs() { - qis[i] = config.inputs[i] - .query_rng(meta, 0, 1) - .expect("accum: input query failed")[0] - .clone() - } - - // Get output expressions for each input channel - let (offset, rng) = base_op.query_offset_rng(); - - let expected_output: Tensor> = config - .output - .query_rng(meta, offset, rng) - .expect("poly: output query failed"); - - let res = base_op.f((qis[0].clone(), qis[1].clone(), expected_output[0].clone())); - - let constraints = vec![expected_output[base_op.constraint_idx()].clone() - res]; - - Constraints::with_selector(selector, constraints) - }); - } - - config - } - - /// Assigns variables to the regions created when calling `configure`. - /// # Arguments - /// * `values` - The explicit values to the operations. - /// * `layouter` - A Halo2 Layouter. - /// * `offset` - Offset to assign. 
- pub fn layout( - &mut self, - layouter: &mut impl Layouter, - values: &[ValTensor], - offset: usize, - op: Op, - ) -> Result, Box> { - match op { - Op::Dot => layouts::dot(self, layouter, values.try_into()?, offset), - Op::Sum => layouts::sum(self, layouter, values.try_into()?, offset), - Op::Matmul => layouts::matmul(self, layouter, values.try_into()?, offset), - Op::Affine => layouts::affine(self, layouter, values.try_into()?, offset), - Op::Conv { padding, stride } => { - layouts::conv(self, layouter, values.try_into()?, padding, stride, offset) - } - Op::SumPool { - padding, - stride, - kernel_shape, - } => layouts::sumpool( - self, - layouter, - values.try_into()?, - padding, - stride, - kernel_shape, - offset, - ), - Op::Add => layouts::pairwise(self, layouter, values.try_into()?, offset, BaseOp::Add), - Op::Sub => layouts::pairwise(self, layouter, values.try_into()?, offset, BaseOp::Sub), - Op::Mult => layouts::pairwise(self, layouter, values.try_into()?, offset, BaseOp::Mult), - Op::Identity => layouts::identity(values.try_into()?), - Op::Reshape(d) | Op::Flatten(d) => layouts::reshape(values.try_into()?, &d), - Op::BatchNorm => layouts::scale_and_shift(self, layouter, values.try_into()?, offset), - Op::ScaleAndShift => { - layouts::scale_and_shift(self, layouter, values.try_into()?, offset) - } - Op::Pad(p1, p2) => { - if values.len() != 1 { - return Err(Box::new(TensorError::DimError)); - } - let mut input = values[0].clone(); - input.pad((p1, p2))?; - Ok(input) - } - } - } -} +use halo2curves::pasta::pallas; +use halo2curves::pasta::Fp as F; +use rand::rngs::OsRng; +use std::marker::PhantomData; #[cfg(test)] -mod matmul_test { +mod matmul { use super::*; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 9; const LEN: usize = 3; #[derive(Clone)] - struct AffineCircuit { + struct MatmulCircuit { inputs: [ValTensor; 2], _marker: PhantomData, } - impl Circuit for AffineCircuit { + impl Circuit for MatmulCircuit { type Config = BaseConfig; type FloorPlanner = SimpleFloorPlanner; @@ -299,9 +32,9 @@ mod matmul_test { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true, 512); - let b = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, 1, LEN], true, 512); + let a = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true); + let b = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true); + let output = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, 1, LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -326,7 +59,7 @@ mod matmul_test { let mut w = Tensor::from((0..LEN).map(|i| Value::known(F::from((i + 1) as u64)))); w.reshape(&[LEN, 1]); - let circuit = AffineCircuit:: { + let circuit = MatmulCircuit:: { inputs: [ValTensor::from(a), ValTensor::from(w)], _marker: PhantomData, }; @@ -337,17 +70,8 @@ mod matmul_test { } #[cfg(test)] -mod dottest { +mod dot { use super::*; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 4; const 
LEN: usize = 4; @@ -367,9 +91,9 @@ mod dottest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -404,17 +128,8 @@ mod dottest { } #[cfg(test)] -mod sumtest { +mod sum { use super::*; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 4; const LEN: usize = 4; @@ -434,9 +149,9 @@ mod sumtest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -469,31 +184,20 @@ mod sumtest { } #[cfg(test)] -mod batchnormtest { - use std::marker::PhantomData; +mod batchnorm { use super::*; - use crate::tensor::Tensor; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 9; const LEN: usize = 3; #[derive(Clone)] - struct AffineCircuit { + struct BNCircuit { inputs: [ValTensor; 3], _marker: PhantomData, } - impl Circuit for AffineCircuit { + impl Circuit for BNCircuit { type Config = BaseConfig; type FloorPlanner = SimpleFloorPlanner; @@ -502,9 +206,9 @@ mod batchnormtest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -532,7 +236,7 @@ mod batchnormtest { let mut x = Tensor::from((0..LEN).map(|i| Value::known(F::from((i + 1) as u64)))); x.reshape(&[LEN]); - let circuit = AffineCircuit:: { + let circuit = BNCircuit:: { inputs: [ValTensor::from(x), ValTensor::from(w), ValTensor::from(b)], _marker: PhantomData, }; @@ -543,20 +247,10 @@ mod batchnormtest { } #[cfg(test)] -mod affinetest { +mod affine { use std::marker::PhantomData; use super::*; - use crate::tensor::Tensor; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 9; const 
LEN: usize = 3; @@ -576,10 +270,9 @@ mod affinetest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true, 512); - let b = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true, 512); - let output = - VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, 1, LEN], true, 512); + let a = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true); + let b = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true); + let output = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, 1, LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -618,17 +311,8 @@ mod affinetest { } #[cfg(test)] -mod compositiontest { +mod composition { use super::*; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 9; const LEN: usize = 4; @@ -648,9 +332,9 @@ mod compositiontest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -692,20 +376,10 @@ mod compositiontest { } #[cfg(test)] -mod convtest { - use std::marker::PhantomData; +mod conv { + use halo2_proofs::arithmetic::Field; use super::*; - use crate::tensor::Tensor; - use halo2_proofs::{ - arithmetic::{Field, FieldExt}, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - use rand::rngs::OsRng; const K: usize = 22; const LEN: usize = 100; @@ -725,10 +399,9 @@ mod convtest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true, 100000); - let b = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true, 100000); - let output = - VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, 1, LEN], true, 100000); + let a = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true); + let b = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true); + let output = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, 1, LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -821,22 +494,12 @@ mod convtest { } #[cfg(test)] -mod sumpooltest { - use std::marker::PhantomData; +mod sumpool { + use halo2_proofs::arithmetic::Field; use super::*; - use crate::tensor::Tensor; - use halo2_proofs::{ - arithmetic::{Field, FieldExt}, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - use rand::rngs::OsRng; - const K: usize = 22; + const K: usize = 20; const LEN: usize = 100; #[derive(Clone)] @@ -854,10 +517,9 @@ mod sumpooltest { } fn configure(cs: &mut ConstraintSystem) -> 
Self::Config { - let a = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true, 100000); - let b = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true, 100000); - let output = - VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, 1, LEN], true, 100000); + let a = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true); + let b = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, LEN], true); + let output = VarTensor::new_advice(cs, K, (LEN + 1) * LEN, vec![LEN + 1, 1, LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -872,8 +534,8 @@ mod sumpooltest { &self.inputs.clone(), 0, Op::SumPool { - padding: (1, 1), - stride: (2, 2), + padding: (0, 0), + stride: (1, 1), kernel_shape: (3, 3), }, ) @@ -885,8 +547,8 @@ mod sumpooltest { #[test] fn sumpoolcircuit() { let image_height = 5; - let image_width = 7; - let in_channels = 3; + let image_width = 5; + let in_channels = 1; let mut image = Tensor::from( (0..in_channels * image_height * image_width) @@ -905,17 +567,8 @@ mod sumpooltest { } #[cfg(test)] -mod addtest { +mod add { use super::*; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 4; const LEN: usize = 4; @@ -935,9 +588,9 @@ mod addtest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -972,17 +625,8 @@ mod addtest { } #[cfg(test)] -mod subtest { +mod sub { use super::*; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 4; const LEN: usize = 4; @@ -1002,9 +646,9 @@ mod subtest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -1039,17 +683,8 @@ mod subtest { } #[cfg(test)] -mod multtest { +mod mult { use super::*; - use halo2_proofs::{ - arithmetic::FieldExt, - circuit::{Layouter, SimpleFloorPlanner, Value}, - dev::MockProver, - plonk::{Circuit, ConstraintSystem, Error}, - }; - // use halo2curves::pasta::pallas; - use halo2curves::pasta::Fp as F; - // use rand::rngs::OsRng; const K: usize = 4; const LEN: usize = 4; @@ -1069,9 +704,9 @@ mod multtest { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let b = 
VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) } @@ -1104,3 +739,180 @@ mod multtest { prover.assert_satisfied(); } } + +#[cfg(test)] +mod pow { + use super::*; + + const K: usize = 8; + const LEN: usize = 4; + + #[derive(Clone)] + struct MyCircuit { + inputs: [ValTensor; 1], + _marker: PhantomData, + } + + impl Circuit for MyCircuit { + type Config = BaseConfig; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { + self.clone() + } + + fn configure(cs: &mut ConstraintSystem) -> Self::Config { + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + + Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + } + + fn synthesize( + &self, + mut config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + let _ = config + .layout(&mut layouter, &self.inputs.clone(), 0, Op::Pow(5)) + .unwrap(); + Ok(()) + } + } + + #[test] + fn powcircuit() { + // parameters + let a = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1)))); + + let circuit = MyCircuit:: { + inputs: [ValTensor::from(a)], + _marker: PhantomData, + }; + + let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap(); + prover.assert_satisfied(); + } +} + +#[cfg(test)] +mod pack { + use super::*; + + const K: usize = 8; + const LEN: usize = 4; + + #[derive(Clone)] + struct MyCircuit { + inputs: [ValTensor; 1], + _marker: PhantomData, + } + + impl Circuit for MyCircuit { + type Config = BaseConfig; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { + self.clone() + } + + fn configure(cs: &mut ConstraintSystem) -> Self::Config { + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + + Self::Config::configure(cs, &[a, b], &output, CheckMode::SAFE) + } + + fn synthesize( + &self, + mut config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + let _ = config + .layout(&mut layouter, &self.inputs.clone(), 0, Op::Pack(2, 1)) + .unwrap(); + Ok(()) + } + } + + #[test] + fn packcircuit() { + // parameters + let a = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1)))); + + let circuit = MyCircuit:: { + inputs: [ValTensor::from(a)], + _marker: PhantomData, + }; + + let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap(); + prover.assert_satisfied(); + } +} + +#[cfg(test)] +mod rescaled { + use super::*; + + const K: usize = 8; + const LEN: usize = 4; + + #[derive(Clone)] + struct MyCircuit { + inputs: [ValTensor; 1], + _marker: PhantomData, + } + + impl Circuit for MyCircuit { + type Config = BaseConfig; + type FloorPlanner = SimpleFloorPlanner; + + fn without_witnesses(&self) -> Self { + self.clone() + } + + fn configure(cs: &mut ConstraintSystem) -> Self::Config { + let a = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let b = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + + Self::Config::configure(cs, 
&[a, b], &output, CheckMode::SAFE) + } + + fn synthesize( + &self, + mut config: Self::Config, + mut layouter: impl Layouter, + ) -> Result<(), Error> { + let _ = config + .layout( + &mut layouter, + &self.inputs.clone(), + 0, + Op::Rescaled { + inner: Box::new(Op::Sum), + scale: vec![(0, 5)], + }, + ) + .unwrap(); + Ok(()) + } + } + + #[test] + fn rescaledcircuit() { + // parameters + let mut a = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1)))); + a.reshape(&[LEN, 1]); + + let circuit = MyCircuit:: { + inputs: [ValTensor::from(a)], + _marker: PhantomData, + }; + + let prover = MockProver::run(K as u32, &circuit, vec![]).unwrap(); + prover.assert_satisfied(); + } +} diff --git a/src/circuit/polynomial.rs b/src/circuit/fused.rs similarity index 99% rename from src/circuit/polynomial.rs rename to src/circuit/fused.rs index 2195e9f2..459f0c7e 100644 --- a/src/circuit/polynomial.rs +++ b/src/circuit/fused.rs @@ -122,7 +122,7 @@ impl Op { Op::BatchNorm => scale_and_shift(&inputs), Op::ScaleAndShift => scale_and_shift(&inputs), Op::Matmul => matmul(&inputs), - Op::Dot => dot(&inputs.iter().map(|x| x).collect()), + Op::Dot => dot(&inputs.iter().collect()), Op::Conv { padding, stride } => convolution(&inputs, *padding, *stride), Op::SumPool { padding, @@ -362,10 +362,10 @@ mod tests { } fn configure(cs: &mut ConstraintSystem) -> Self::Config { - let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true, 512); - let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); - let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512); + let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true); + let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); + let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true); // tells the config layer to add an affine op to a circuit gate let affine_node = Node { op: Op::Affine, diff --git a/src/circuit/lookup.rs b/src/circuit/lookup.rs index 22eedd5b..00eb7b64 100644 --- a/src/circuit/lookup.rs +++ b/src/circuit/lookup.rs @@ -66,6 +66,17 @@ impl Op { } } + fn as_str(&self) -> &'static str { + match self { + Op::Div { .. } => "DIV", + Op::ReLU { .. } => "RELU", + Op::LeakyReLU { .. } => "LEAKY_RELU", + Op::PReLU { .. } => "PRELU", + Op::Sigmoid { .. } => "SIGMOID", + Op::Sqrt { .. } => "SQRT", + } + } + /// a value which is always in the table pub fn default_pair(&self) -> (F, F) { let x = vec![0_i128].into_iter().into(); @@ -203,50 +214,45 @@ impl Config { ) -> Self { let qlookup = cs.complex_selector(); - let _ = (0..input.dims().iter().product::()) - .map(|i| { - let _ = cs.lookup("lk", |cs| { - let qlookup = cs.query_selector(qlookup); - let not_qlookup = Expression::Constant(::one()) - qlookup.clone(); - let default_x = ::zero(); - let mut default_y = vec![0_i128].into_iter().into(); - for nl in table.borrow().nonlinearities.clone() { - default_y = nl.f(default_y) - } - let default_y: F = i128_to_felt(default_y[0]); - let (x, y) = input.cartesian_coord(i); - vec![ - ( - match &input { - VarTensor::Advice { inner: advices, .. } => { - qlookup.clone() - * cs.query_advice(advices[x], Rotation(y as i32)) - + not_qlookup.clone() * default_x - } - VarTensor::Fixed { inner: fixed, .. 
} => { - qlookup.clone() * cs.query_fixed(fixed[x], Rotation(y as i32)) - + not_qlookup.clone() * default_x - } - }, - table.borrow().table_input, - ), - ( - match &output { - VarTensor::Advice { inner: advices, .. } => { - qlookup * cs.query_advice(advices[x], Rotation(y as i32)) - + not_qlookup * default_y - } - VarTensor::Fixed { inner: fixed, .. } => { - qlookup * cs.query_fixed(fixed[x], Rotation(y as i32)) - + not_qlookup * default_y - } - }, - table.borrow().table_output, - ), - ] - }); - }) - .collect::>(); + let _ = cs.lookup(table.borrow().nonlinearities[0].as_str(), |cs| { + let qlookup = cs.query_selector(qlookup); + let not_qlookup = Expression::Constant(::one()) - qlookup.clone(); + let default_x = ::zero(); + let mut default_y = vec![0_i128].into_iter().into(); + for nl in table.borrow().nonlinearities.clone() { + default_y = nl.f(default_y) + } + let default_y: F = i128_to_felt(default_y[0]); + let (x, y) = input.cartesian_coord(0); + vec![ + ( + match &input { + VarTensor::Advice { inner: advices, .. } => { + qlookup.clone() * cs.query_advice(advices[x], Rotation(y as i32)) + + not_qlookup.clone() * default_x + } + VarTensor::Fixed { inner: fixed, .. } => { + qlookup.clone() * cs.query_fixed(fixed[x], Rotation(y as i32)) + + not_qlookup.clone() * default_x + } + }, + table.borrow().table_input, + ), + ( + match &output { + VarTensor::Advice { inner: advices, .. } => { + qlookup * cs.query_advice(advices[x], Rotation(y as i32)) + + not_qlookup * default_y + } + VarTensor::Fixed { inner: fixed, .. } => { + qlookup * cs.query_fixed(fixed[x], Rotation(y as i32)) + + not_qlookup * default_y + } + }, + table.borrow().table_output, + ), + ] + }); Self { input: input.clone(), @@ -337,7 +343,6 @@ impl Config { }; if (currently_filled_buffer + values_len) == buffer_capacity { - self.qlookup.enable(&mut region, 0)?; // can now safely unwrap let mut region_offset = values_len; for (input, output) in self.input_buffer.iter().zip(&self.output_buffer) { @@ -353,15 +358,18 @@ impl Config { self.input.assign(&mut region, 0, values)?; } }; + for i in 0..buffer_capacity { + self.qlookup.enable(&mut region, i)?; + } }; - self.input_buffer.push(w.clone()); + self.input_buffer.push(w); // constrain the calculated output to a column Ok(ValTensor::from(self.output.assign( &mut region, 0, - &ValTensor::from(output.clone()), + &ValTensor::from(output), )?)) }, ) { @@ -404,7 +412,7 @@ mod tests { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let advices = (0..2) - .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true, 512)) + .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true)) .collect::>(); let nl = Op::ReLU { scale: 1 }; diff --git a/src/circuit/mod.rs b/src/circuit/mod.rs index 22deda6b..9f354be7 100644 --- a/src/circuit/mod.rs +++ b/src/circuit/mod.rs @@ -1,10 +1,11 @@ use crate::tensor::*; /// Structs and methods for configuring and assigning "accumulated" polynomial constraints to a gate within a Halo2 circuit. -pub mod accumulated; +pub mod base; +/// Structs and methods for configuring and assigning polynomial constraints to a gate within a Halo2 circuit. +#[deprecated(note = "please use `base` module instead")] +pub mod fused; /// Element-wise operations using lookup tables. pub mod lookup; -/// Structs and methods for configuring and assigning polynomial constraints to a gate within a Halo2 circuit. -pub mod polynomial; /// A layer for range checks using polynomials. pub mod range; /// Utility functions for building gates. 
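The rewritten lookup above replaces the per-element lookup arguments with a single argument gated by one complex selector. A minimal sketch of that gating, with integers standing in for field expressions and `lookup_pair`/`nl` as illustrative names: when the selector is on, the row's (input, output) pair must appear in the nonlinearity table; when it is off, the pair collapses to the default (0, nl(0)), which is always present in the table.

fn lookup_pair(q: i64, x: i64, y: i64, nl: fn(i64) -> i64) -> (i64, i64) {
    // q is 0 or 1; when the row is unused the default pair is selected instead
    let not_q = 1 - q;
    let (default_x, default_y) = (0, nl(0));
    (q * x + not_q * default_x, q * y + not_q * default_y)
}

fn main() {
    let relu = |v: i64| v.max(0);
    assert_eq!(lookup_pair(1, -3, relu(-3), relu), (-3, 0)); // live row gets checked against the table
    assert_eq!(lookup_pair(0, -3, 0, relu), (0, 0)); // disabled row falls back to the defaults
}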
diff --git a/src/circuit/range.rs b/src/circuit/range.rs index 1c0a47a8..cfb51029 100644 --- a/src/circuit/range.rs +++ b/src/circuit/range.rs @@ -44,11 +44,13 @@ impl RangeCheckConfig { // v | 1 let q = cs.query_selector(config.selector); - let witnessed = input.query(cs, 0).expect("range: failed to query input"); + let witnessed = input + .query_rng(cs, 0, 1) + .expect("range: failed to query input"); // Get output expressions for each input channel let expected = expected - .query(cs, 0) + .query_rng(cs, 0, 1) .expect("range: failed to query expected value"); // Given a range R and a value v, returns the expression @@ -85,14 +87,16 @@ impl RangeCheckConfig { |mut region| { let offset = 0; - // Enable q_range_check - self.selector.enable(&mut region, offset)?; - // assigns the instance to the advice. self.input.assign(&mut region, offset, &input)?; self.expected.assign(&mut region, offset, &output)?; + for i in 0..input.len() { + // Enable q_range_check + self.selector.enable(&mut region, offset + i)?; + } + Ok(()) }, ) { @@ -135,7 +139,7 @@ mod tests { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let advices = (0..2) - .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true, 512)) + .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true)) .collect_vec(); let input = &advices[0]; let expected = &advices[1]; @@ -147,15 +151,11 @@ mod tests { config: Self::Config, mut layouter: impl Layouter, ) -> Result<(), Error> { - config - .layout( - layouter.namespace(|| "assign value"), - self.input.clone(), - self.output.clone(), - ) - .unwrap(); - - Ok(()) + config.layout( + layouter.namespace(|| "assign value"), + self.input.clone(), + self.output.clone(), + ) } } diff --git a/src/circuit/utils.rs b/src/circuit/utils.rs index ce60bd10..1f744919 100644 --- a/src/circuit/utils.rs +++ b/src/circuit/utils.rs @@ -15,7 +15,10 @@ pub fn value_muxer( ValTensor::Value { inner: val, dims: _, - } => val.clone(), + } => val.map(|x| match x { + ValType::Value(x) => x, + _ => unimplemented!(), + }), _ => unimplemented!(), }, } diff --git a/src/commands.rs b/src/commands.rs index d144b2f5..80f85f37 100644 --- a/src/commands.rs +++ b/src/commands.rs @@ -10,6 +10,8 @@ use std::fs::File; use std::io::{stdin, stdout, Read, Write}; use std::path::PathBuf; +use crate::circuit::base::CheckMode; + #[allow(missing_docs)] #[derive(ValueEnum, Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum TranscriptType { @@ -65,16 +67,16 @@ pub struct RunArgs { /// Flags whether params are public #[arg(long, default_value = "false")] pub public_params: bool, - /// Flags to set maximum rotations - #[arg(short = 'M', long, default_value = "512")] - pub max_rotations: usize, /// Base used to pack the public-inputs to the circuit. (value > 1) to pack instances as a single int. /// Useful when verifying on the EVM. Note that this will often break for very long inputs. Use with caution, still experimental. 
    #[arg(long, default_value = "1")]
     pub pack_base: u32,
     /// use a single argument for all lookups
-    #[arg(long, default_value = "false")]
+    #[arg(long, default_value = "true", action = clap::ArgAction::Set)]
     pub single_lookup: bool,
+    /// The [CheckMode] to use when proving: "safe" mock-proves the circuit and verifies the generated proof as sanity checks, "unsafe" skips both
+    #[arg(long, default_value = "safe")]
+    pub check_mode: CheckMode,
 }
 
 const EZKLCONF: &str = "EZKLCONF";
diff --git a/src/eth.rs b/src/eth.rs
index f69eb2f4..56bec4dc 100644
--- a/src/eth.rs
+++ b/src/eth.rs
@@ -336,24 +336,22 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> {
     // Count the number of pub inputs
     let mut start = None;
     let mut end = None;
-    let mut i = 0;
-    for line in reader.lines() {
+    for (i, line) in reader.lines().enumerate() {
         let line = line?;
         if line.trim().starts_with("mstore(0x20") {
-            start = Some(i);
+            start = Some(i as u32);
         }
         if line.trim().starts_with("mstore(0x0") {
-            end = Some(i);
+            end = Some(i as u32);
             break;
         }
-        i += 1;
     }
 
-    let num_pubinputs = if start.is_none() {
-        0
+    let num_pubinputs = if let Some(s) = start {
+        end.unwrap() - s
     } else {
-        end.unwrap() - start.unwrap()
+        0
     };
 
     let mut max_pubinputs_addr = 0;
@@ -372,9 +370,9 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> {
         }
 
         let m = calldata_pattern.captures(&line);
-        if m.is_some() {
-            let calldata_and_addr = m.as_ref().unwrap().get(1).unwrap().as_str();
-            let addr = m.unwrap().get(2).unwrap().as_str();
+        if let Some(m) = m {
+            let calldata_and_addr = m.get(1).unwrap().as_str();
+            let addr = m.get(2).unwrap().as_str();
             let addr_as_num = u32::from_str_radix(addr.strip_prefix("0x").unwrap(), 16)?;
 
             if addr_as_num <= max_pubinputs_addr {
@@ -393,9 +391,9 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> {
         }
 
         let m = mstore8_pattern.captures(&line);
-        if m.is_some() {
-            let mstore = m.as_ref().unwrap().get(1).unwrap().as_str();
-            let addr = m.unwrap().get(2).unwrap().as_str();
+        if let Some(m) = m {
+            let mstore = m.get(1).unwrap().as_str();
+            let addr = m.get(2).unwrap().as_str();
             let addr_as_num = u32::from_str_radix(addr, 10)?;
             let transcript_addr = format!("{:#x}", addr_as_num);
             transcript_addrs.push(addr_as_num);
@@ -406,9 +404,9 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> {
         }
 
         let m = mstoren_pattern.captures(&line);
-        if m.is_some() {
-            let mstore = m.as_ref().unwrap().get(1).unwrap().as_str();
-            let addr = m.unwrap().get(2).unwrap().as_str();
+        if let Some(m) = m {
+            let mstore = m.get(1).unwrap().as_str();
+            let addr = m.get(2).unwrap().as_str();
             let addr_as_num = u32::from_str_radix(addr, 10)?;
             let transcript_addr = format!("{:#x}", addr_as_num);
             transcript_addrs.push(addr_as_num);
@@ -419,10 +417,10 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> {
         }
 
         let m = modexp_pattern.captures(&line);
-        if m.is_some() {
-            let modexp = m.as_ref().unwrap().get(1).unwrap().as_str();
-            let start_addr = m.as_ref().unwrap().get(2).unwrap().as_str();
-            let result_addr = m.unwrap().get(3).unwrap().as_str();
+        if let Some(m) = m {
+            let modexp = m.get(1).unwrap().as_str();
+            let start_addr = m.get(2).unwrap().as_str();
+            let result_addr = m.get(3).unwrap().as_str();
             let start_addr_as_num =
                 u32::from_str_radix(start_addr.strip_prefix("0x").unwrap(), 16)?;
             let result_addr_as_num =
@@ -441,10 +439,10 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> {
         }
 
         let m = ecmul_pattern.captures(&line);
-        if m.is_some() {
-            let ecmul = m.as_ref().unwrap().get(1).unwrap().as_str();
-            let start_addr = m.as_ref().as_ref().unwrap().get(2).unwrap().as_str();
-            let result_addr =
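
The new check_mode flag is consumed further down (src/pfsys), where CheckMode::SAFE gates both a MockProver dry run and a verification pass over the freshly generated proof. The real enum lives in src/circuit/base; a sketch inferred from the "safe" default string and the SAFE comparisons in this patch:

    use clap::ValueEnum;
    use serde::{Deserialize, Serialize};

    /// Whether to run expensive sanity checks while proving (inferred sketch;
    /// the actual definition is in src/circuit/base).
    #[allow(non_camel_case_types)]
    #[derive(ValueEnum, Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
    pub enum CheckMode {
        /// mock-prove the circuit and verify the generated proof
        SAFE,
        /// skip both checks
        UNSAFE,
    }
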
m.unwrap().get(3).unwrap().as_str(); + if let Some(m) = m { + let ecmul = m.get(1).unwrap().as_str(); + let start_addr = m.get(2).unwrap().as_str(); + let result_addr = m.get(3).unwrap().as_str(); let start_addr_as_num = u32::from_str_radix(start_addr.strip_prefix("0x").unwrap(), 16)?; let result_addr_as_num = @@ -464,10 +462,10 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> { } let m = ecadd_pattern.captures(&line); - if m.is_some() { - let ecadd = m.as_ref().unwrap().get(1).unwrap().as_str(); - let start_addr = m.as_ref().unwrap().get(2).unwrap().as_str(); - let result_addr = m.unwrap().get(3).unwrap().as_str(); + if let Some(m) = m { + let ecadd = m.get(1).unwrap().as_str(); + let start_addr = m.get(2).unwrap().as_str(); + let result_addr = m.get(3).unwrap().as_str(); let start_addr_as_num = u32::from_str_radix(start_addr.strip_prefix("0x").unwrap(), 16)?; let result_addr_as_num = @@ -487,10 +485,10 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> { } let m = ecpairing_pattern.captures(&line); - if m.is_some() { - let ecpairing = m.as_ref().unwrap().get(1).unwrap().as_str(); - let start_addr = m.as_ref().unwrap().get(2).unwrap().as_str(); - let result_addr = m.unwrap().get(3).unwrap().as_str(); + if let Some(m) = m { + let ecpairing = m.get(1).unwrap().as_str(); + let start_addr = m.get(2).unwrap().as_str(); + let result_addr = m.get(3).unwrap().as_str(); let start_addr_as_num = u32::from_str_radix(start_addr.strip_prefix("0x").unwrap(), 16)?; let result_addr_as_num = @@ -510,9 +508,9 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> { } let m = mstore_pattern.captures(&line); - if m.is_some() { - let mstore = m.as_ref().unwrap().get(1).unwrap().as_str(); - let addr = m.as_ref().unwrap().get(2).unwrap().as_str(); + if let Some(m) = m { + let mstore = m.get(1).unwrap().as_str(); + let addr = m.get(2).unwrap().as_str(); let addr_as_num = u32::from_str_radix(addr, 16)?; let transcript_addr = format!("{:#x}", addr_as_num); transcript_addrs.push(addr_as_num); @@ -523,9 +521,9 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> { } let m = keccak_pattern.captures(&line); - if m.is_some() { - let keccak = m.as_ref().unwrap().get(1).unwrap().as_str(); - let addr = m.as_ref().unwrap().get(2).unwrap().as_str(); + if let Some(m) = m { + let keccak = m.get(1).unwrap().as_str(); + let addr = m.get(2).unwrap().as_str(); let addr_as_num = u32::from_str_radix(addr.strip_prefix("0x").unwrap(), 16)?; let transcript_addr = format!("{:#x}", addr_as_num); transcript_addrs.push(addr_as_num); @@ -584,5 +582,5 @@ pub fn fix_verifier_sol(input_file: PathBuf) -> Result> { write!(write, "{}", line).unwrap(); } writeln!(write, "}} return success; }} }}")?; - return Ok(contract); + Ok(contract) } diff --git a/src/execute.rs b/src/execute.rs index a7cdf47e..daae86d9 100644 --- a/src/execute.rs +++ b/src/execute.rs @@ -1,3 +1,4 @@ +use crate::circuit::base::CheckMode; use crate::commands::{Cli, Commands, StrategyType, TranscriptType}; #[cfg(not(target_arch = "wasm32"))] use crate::eth::{deploy_verifier, fix_verifier_sol, send_proof, verify_proof_via_solidity}; @@ -99,6 +100,7 @@ pub fn create_proof_circuit_kzg< pk: &ProvingKey, transcript: TranscriptType, strategy: Strategy, + check_mode: CheckMode, ) -> Result, Box> { match transcript { TranscriptType::EVM => create_proof_circuit::< @@ -111,20 +113,22 @@ pub fn create_proof_circuit_kzg< _, EvmTranscript, EvmTranscript, - >(circuit, public_inputs, params, pk, strategy) - .map_err(Box::::from), - TranscriptType::Poseidon => 
create_proof_circuit::< - KZGCommitmentScheme<_>, - Fr, - _, - ProverGWC<_>, - VerifierGWC<_>, - _, - _, - PoseidonTranscript, - PoseidonTranscript, - >(circuit, public_inputs, params, pk, strategy) + >(circuit, public_inputs, params, pk, strategy, check_mode) .map_err(Box::::from), + TranscriptType::Poseidon => { + create_proof_circuit::< + KZGCommitmentScheme<_>, + Fr, + _, + ProverGWC<_>, + VerifierGWC<_>, + _, + _, + PoseidonTranscript, + PoseidonTranscript, + >(circuit, public_inputs, params, pk, strategy, check_mode) + .map_err(Box::::from) + } TranscriptType::Blake => create_proof_circuit::< KZGCommitmentScheme<_>, Fr, @@ -135,7 +139,7 @@ pub fn create_proof_circuit_kzg< Challenge255<_>, Blake2bWrite<_, _, _>, Blake2bRead<_, _, _>, - >(circuit, public_inputs, params, pk, strategy) + >(circuit, public_inputs, params, pk, strategy, check_mode) .map_err(Box::::from), } } @@ -169,7 +173,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { } Commands::Table { model: _ } => { let om = Model::from_ezkl_conf(cli)?; - info!("{}", Table::new(om.nodes.flatten())); + info!("{}", Table::new(om.nodes.iter())); } #[cfg(feature = "render")] Commands::RenderCircuit { @@ -242,6 +246,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { let num_instance = public_inputs.iter().map(|x| x.len()).collect(); let mut params: ParamsKZG = load_params::>(params_path.to_path_buf())?; + info!("downsizing params to {} logrows", cli.args.logrows); if cli.args.logrows < params.k() { params.downsize(cli.args.logrows); } @@ -295,6 +300,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { let (circuit, public_inputs) = prepare_model_circuit_and_public_input(&data, &cli)?; let mut params: ParamsKZG = load_params::>(params_path.to_path_buf())?; + info!("downsizing params to {} logrows", cli.args.logrows); if cli.args.logrows < params.k() { params.downsize(cli.args.logrows); } @@ -315,6 +321,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { &pk, transcript, strategy, + cli.args.check_mode, )? } StrategyType::Accum => { @@ -326,6 +333,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { &pk, transcript, strategy, + cli.args.check_mode, )? } }; @@ -348,6 +356,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { // the K used for the aggregation circuit let mut params: ParamsKZG = load_params::>(params_path.to_path_buf())?; + info!("downsizing params to {} logrows", cli.args.logrows); if cli.args.logrows < params.k() { params.downsize(cli.args.logrows); } @@ -355,6 +364,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { let mut snarks = vec![]; // the K used when generating the application snark proof. 
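
Every proving path now loads a stored SRS and shrinks it to the circuit's size, which is also why the test harness below generates both a K=20 and a K=23 SRS up front. One nit: the info! line added here fires even when no downsizing happens; a guarded helper keeps the log accurate (sketch, assuming the downsize/k methods of the halo2 fork used by this crate):

    use halo2_proofs::poly::{commitment::Params, kzg::commitment::ParamsKZG};
    use halo2curves::bn256::Bn256;
    use log::info;

    /// Shrink `params` to `logrows` only if it is larger; downsizing is
    /// one-way, so a smaller SRS can never be grown to fit a bigger circuit.
    fn maybe_downsize(params: &mut ParamsKZG<Bn256>, logrows: u32) {
        if logrows < params.k() {
            info!("downsizing params to {} logrows", logrows);
            params.downsize(logrows);
        }
    }
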
we assume K is homogenous across snarks to aggregate let mut params_app = params.clone(); + info!("downsizing app params to {} logrows", app_logrows); if app_logrows < params.k() { params_app.downsize(app_logrows); } @@ -385,6 +395,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { &agg_pk, transcript, AccumulatorStrategy::new(¶ms), + cli.args.check_mode, )?; info!("Aggregation proof took {}", now.elapsed().as_secs()); @@ -401,6 +412,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { } => { let mut params: ParamsKZG = load_params::>(params_path)?; + info!("downsizing params to {} logrows", cli.args.logrows); if cli.args.logrows < params.k() { params.downsize(cli.args.logrows); } @@ -427,6 +439,7 @@ pub async fn run(cli: Cli) -> Result<(), Box> { } => { let mut params: ParamsKZG = load_params::>(params_path)?; + info!("downsizing params to {} logrows", cli.args.logrows); if cli.args.logrows < params.k() { params.downsize(cli.args.logrows); } diff --git a/src/graph/mod.rs b/src/graph/mod.rs index 9e0de1c4..5ecbebd6 100644 --- a/src/graph/mod.rs +++ b/src/graph/mod.rs @@ -87,19 +87,14 @@ impl Circuit for ModelCircuit { fn configure(cs: &mut ConstraintSystem) -> Self::Config { let model = Model::from_arg().expect("model should load from args"); - let advice_shapes = model.advice_shapes(); - let fixed_shapes = model.fixed_shapes(); - // for now the number of instances corresponds to the number of graph / model outputs let instance_shapes = model.instance_shapes(); let mut vars = ModelVars::new( cs, model.run_args.logrows as usize, - model.run_args.max_rotations, - advice_shapes, - fixed_shapes, instance_shapes.clone(), + model.visibility.clone(), ); info!( "number of advices used: {:?}", diff --git a/src/graph/model.rs b/src/graph/model.rs index 5b00c897..786330df 100644 --- a/src/graph/model.rs +++ b/src/graph/model.rs @@ -1,15 +1,12 @@ use super::node::*; use super::vars::*; use super::GraphError; +use crate::circuit::base::BaseConfig as PolyConfig; +use crate::circuit::base::CheckMode; +use crate::circuit::base::Op as PolyOp; use crate::circuit::lookup::Config as LookupConfig; use crate::circuit::lookup::Op as LookupOp; use crate::circuit::lookup::Table as LookupTable; -use crate::circuit::polynomial::Config as PolyConfig; -use crate::circuit::polynomial::InputType as PolyInputType; -use crate::circuit::polynomial::Node as PolyNode; -use crate::circuit::polynomial::Op as PolyOp; - -// use crate::circuit::polynomial::InputType as PolyInputType; use crate::circuit::range::*; use crate::commands::RunArgs; @@ -17,8 +14,9 @@ use crate::commands::{Cli, Commands}; use crate::graph::scale_to_multiplier; use crate::tensor::TensorType; use crate::tensor::{Tensor, ValTensor, VarTensor}; +use anyhow::Context; //use clap::Parser; -use anyhow::{Context, Error as AnyError}; +use anyhow::Error as AnyError; use core::panic; use halo2_proofs::{ arithmetic::FieldExt, @@ -29,8 +27,7 @@ use itertools::Itertools; use log::{debug, info, trace}; use std::cell::RefCell; use std::cmp::max; -use std::cmp::min; -use std::collections::{BTreeMap, HashSet}; +use std::collections::BTreeMap; use std::error::Error; use std::path::Path; use std::rc::Rc; @@ -62,7 +59,7 @@ pub struct ModelConfig { /// (optional) range checked outputs of the model graph pub range_checks: Vec>, /// (optional) packed outputs of the model graph - pub packed_outputs: Vec>, + pub packed_outputs: Vec>>>, /// A wrapper for holding all columns that will be assigned to by the model pub vars: ModelVars, } @@ -109,12 +106,12 @@ impl Model { let om = 
Model { model: model.clone(), run_args, - nodes: Self::assign_execution_buckets(nodes)?, + nodes, mode, visibility, }; - debug!("{}", Table::new(om.nodes.flatten()).to_string()); + debug!("{}", Table::new(om.nodes.iter()).to_string()); Ok(om) } @@ -238,53 +235,54 @@ impl Model { info!("configuring model"); let mut results = BTreeMap::new(); let mut tables = BTreeMap::new(); + let mut base_gates = BTreeMap::new(); - for (bucket, bucket_nodes) in self.nodes.0.iter() { - trace!("configuring bucket: {:?}", bucket); - let non_op_nodes: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| n.opkind.is_const() || n.opkind.is_input()) - .collect(); - if !non_op_nodes.is_empty() { - for (i, node) in non_op_nodes { - let config = self.conf_non_op_node(node)?; - results.insert(*i, config); - } + let non_op_nodes: BTreeMap<&usize, &Node> = self + .nodes + .iter() + .filter(|(_, n)| n.opkind.is_const() || n.opkind.is_input()) + .collect(); + if !non_op_nodes.is_empty() { + for (i, node) in non_op_nodes { + let config = self.conf_non_op_node(node)?; + results.insert(*i, config); } + } - let lookup_ops: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| n.opkind.is_lookup()) - .collect(); + let lookup_ops: BTreeMap<&usize, &Node> = self + .nodes + .iter() + .filter(|(_, n)| n.opkind.is_lookup()) + .collect(); - if !lookup_ops.is_empty() { - for (i, node) in lookup_ops { - let config = if !self.run_args.single_lookup { - // assume a single input - let input_len = node.in_dims[0].iter().product(); - self.conf_lookup(node, input_len, meta, vars, &mut tables)? - } else { - self.reuse_lookup_conf(*i, node, &results, meta, vars, &mut tables)? - }; - results.insert(*i, config); - } + if !lookup_ops.is_empty() { + for (i, node) in lookup_ops { + let config = if !self.run_args.single_lookup { + // assume a single input + let input_len = node.in_dims[0].iter().product(); + self.conf_lookup(node, input_len, meta, vars, &mut tables)? + } else { + self.reuse_lookup_conf(*i, node, &results, meta, vars, &mut tables)? 
+ }; + results.insert(*i, config); } + } - // preserves ordering - let poly_ops: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| n.opkind.is_poly()) - .collect(); - // preserves ordering - if !poly_ops.is_empty() { - let config = self.conf_poly_ops(&poly_ops, meta, vars)?; - results.insert(**poly_ops.keys().max().unwrap(), config); + // preserves ordering + let poly_ops: BTreeMap<&usize, &Node> = self + .nodes + .iter() + .filter(|(_, n)| n.opkind.is_poly()) + .collect(); + // preserves ordering + if !poly_ops.is_empty() { + for (i, node) in poly_ops { + let config = self.conf_poly_ops(node, meta, vars, &mut base_gates)?; + results.insert(*i, config); let mut display: String = "Poly nodes: ".to_string(); - for idx in poly_ops.keys().map(|k| **k).sorted() { - let node = &self.nodes.filter(idx); - display.push_str(&format!("| {} ({:?}) | ", idx, node.opkind)); - } + display.push_str(&format!("| {} ({:?}) | ", i, node.opkind)); + trace!("{}", display); } } @@ -294,7 +292,7 @@ impl Model { if self.visibility.output.is_public() { if self.run_args.pack_base > 1 { info!("packing outputs..."); - packed_outputs = self.pack_outputs(meta, vars, self.output_shapes()); + packed_outputs = self.pack_outputs(meta, vars, &mut base_gates); range_checks = self.range_check_outputs( meta, vars, @@ -319,24 +317,27 @@ impl Model { &self, meta: &mut ConstraintSystem, vars: &mut ModelVars, - output_shapes: Vec>, - ) -> Vec> { + base_gates: &mut BTreeMap>>>, + ) -> Vec>>> { let mut configs = vec![]; - for s in &output_shapes { - let input = vars.advices[0].reshape(s); - let output = vars.advices[1].reshape(&[1]); - - // tells the config layer to add a pack op to the circuit gate - let pack_node = PolyNode { - op: PolyOp::Pack(self.run_args.pack_base, self.run_args.scale), - input_order: vec![PolyInputType::Input(0)], + for _ in self.output_shapes() { + let config = match base_gates.get(&false) { + Some(config) => config.clone(), + None => { + let config = Rc::new(RefCell::new(PolyConfig::::configure( + meta, + &[vars.advices[0].clone(), vars.advices[1].clone()], + &vars.advices[2], + CheckMode::SAFE, + ))); + base_gates.insert(false, config.clone()); + config + } }; - - let config = PolyConfig::::configure(meta, &[input.clone()], &output, &[pack_node]); - configs.push(config); } + configs } @@ -354,19 +355,16 @@ impl Model { let mut conf = None; // iterate in reverse order so we get the last relevant op for (_, prev_config) in prev_configs.iter().rev() { - match prev_config { - NodeConfig::Lookup { config, .. } => { - // check if there's a config for the same op - if config.borrow().table.borrow().nonlinearities == vec![op.clone()] { - conf = Some(NodeConfig::Lookup { - config: config.clone(), - inputs: node.inputs.iter().map(|e| e.node).collect(), - }); + if let NodeConfig::Lookup { config, .. } = prev_config { + // check if there's a config for the same op + if config.borrow().table.borrow().nonlinearities == vec![op.clone()] { + conf = Some(NodeConfig::Lookup { + config: config.clone(), + inputs: node.inputs.iter().map(|e| e.node).collect(), + }); - break; - } + break; } - _ => {} } } let conf = match conf { @@ -438,98 +436,60 @@ impl Model { /// * `vars` - [ModelVars] for the model. 
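
The heart of this refactor: instead of configuring one polynomial config per op (or per bucket), a single base gate is configured lazily and shared across all poly ops, keyed only on whether the op reads public (fixed) parameters. Stripped to its essentials, the get-or-insert pattern looks like this (BaseConfig stands in for the real, generic circuit::base config type):

    use std::{cell::RefCell, collections::BTreeMap, rc::Rc};

    struct BaseConfig; // stand-in for the real, generic config type

    /// Return the shared gate for this key, configuring it at most once.
    fn get_or_configure(
        gates: &mut BTreeMap<bool, Rc<RefCell<BaseConfig>>>,
        uses_fixed: bool,
    ) -> Rc<RefCell<BaseConfig>> {
        match gates.get(&uses_fixed) {
            Some(config) => config.clone(),
            None => {
                let config = Rc::new(RefCell::new(BaseConfig /* ::configure(...) */));
                gates.insert(uses_fixed, config.clone());
                config
            }
        }
    }

The BTreeMap entry API (gates.entry(uses_fixed).or_insert_with(...).clone()) would express the same thing more compactly.
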
fn conf_poly_ops( &self, - nodes: &BTreeMap<&usize, &Node>, + node: &Node, meta: &mut ConstraintSystem, vars: &mut ModelVars, + base_gates: &mut BTreeMap>>>, ) -> Result, Box> { - let mut input_nodes: BTreeMap<(&usize, &PolyOp), Vec> = BTreeMap::new(); - - for (i, e) in nodes.iter() { - let key = ( - *i, - match &e.opkind { - OpKind::Poly(f) => f, - _ => { - return Err(Box::new(GraphError::WrongMethod(e.idx, e.opkind.clone()))); - } - }, - ); - let value = e - .inputs - .iter() - .map(|i| self.nodes.filter(i.node)) - .collect_vec(); - input_nodes.insert(key, value); - } - - // This works because retain only keeps items for which the predicate returns true, and - // insert only returns true if the item was not previously present in the set. - // Since the vector is traversed in order, we end up keeping just the first occurrence of each item. - let mut seen = HashSet::new(); - let mut advice_idx = 0; - let mut fixed_idx = 0; - // impose an execution order here - let inputs_to_layer: Vec<(usize, VarTensor)> = input_nodes + let input_nodes = node + .inputs .iter() - .flat_map(|x| { - x.1.iter() - .filter(|i| !nodes.contains_key(&i.idx) && seen.insert(i.idx)) - .map(|f| { - let s = f.out_dims.clone(); - if f.opkind.is_const() && self.visibility.params.is_public() { - let vars = (f.idx, vars.fixed[fixed_idx].reshape(&s)); - fixed_idx += 1; - vars - } else { - let vars = (f.idx, vars.advices[advice_idx].reshape(&s)); - advice_idx += 1; - vars - } - }) - .collect_vec() - }) + .map(|i| self.nodes.get(&i.node).unwrap()) .collect_vec(); - let output_shape = self.nodes.filter(**nodes.keys().max().unwrap()).out_dims; - // output node - let output = &vars.advices[advice_idx].reshape(&output_shape); + let input_idx = input_nodes.iter().map(|f| f.idx).collect_vec(); - let mut inter_counter = 0; - let fused_nodes: Vec = input_nodes + let fixed_flag = !input_nodes .iter() - .map(|(op, e)| { - let order = e - .iter() - .map(|n| { - if !nodes.contains_key(&n.idx) { - PolyInputType::Input( - inputs_to_layer.iter().position(|r| r.0 == n.idx).unwrap(), - ) - } else { - inter_counter += 1; - PolyInputType::Inter(inter_counter - 1) - } - }) - .collect_vec(); - PolyNode { - op: op.1.clone(), - input_order: order, - } - }) - .collect_vec(); + .filter(|f| f.opkind.is_const() && self.visibility.params.is_public()) + .collect_vec() + .is_empty(); - let inputs = inputs_to_layer.iter(); - - let config = NodeConfig::Poly { - config: PolyConfig::configure( - meta, - &inputs.clone().map(|x| x.1.clone()).collect_vec(), - output, - &fused_nodes, - ), - inputs: inputs.map(|x| x.0).collect_vec(), + let config = match base_gates.get(&fixed_flag) { + Some(config) => { + trace!("reusing base gate config"); + config.clone() + } + None => { + let inputs: [VarTensor; 2] = if fixed_flag { + [vars.fixed[0].clone(), vars.advices[1].clone()] + } else { + [vars.advices[0].clone(), vars.advices[1].clone()] + }; + // output node + let output_shape = &node.out_dims; + let output = &vars.advices[2].reshape(output_shape); + let config = Rc::new(RefCell::new(PolyConfig::configure( + meta, + inputs.into_iter().collect_vec()[..].try_into()?, + output, + CheckMode::SAFE, + ))); + base_gates.insert(fixed_flag, config.clone()); + config + } }; - Ok(config) + + if let OpKind::Poly(op) = &node.opkind { + let config = NodeConfig::Poly { + config, + inputs: input_idx, + op: op.clone(), + }; + Ok(config) + } else { + panic!() + } } /// Configures a lookup table based operation. 
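
One small nit in the new conf_poly_ops above: collecting the filtered iterator into a Vec only to test is_empty allocates for nothing. An equivalent, allocation-free drop-in (same semantics, sketched against the surrounding names):

    // true when any input is a public constant, in which case the op's
    // first operand comes from a fixed column rather than an advice column
    let fixed_flag = input_nodes
        .iter()
        .any(|f| f.opkind.is_const() && self.visibility.params.is_public());
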
These correspond to operations that are represented in @@ -624,7 +584,12 @@ impl Model { // pack outputs if need be for (i, packed_output) in config.packed_outputs.iter_mut().enumerate() { info!("packing outputs..."); - outputs[i] = packed_output.layout(layouter, &outputs[i..i + 1])?; + outputs[i] = packed_output.borrow_mut().layout( + layouter, + &outputs[i..i + 1], + 0, + PolyOp::Pack(self.run_args.pack_base, self.run_args.scale), + )?; // only use with mock prover if matches!(self.mode, Mode::Mock) { trace!("------------ packed output {:?}", outputs[i].show()); @@ -667,13 +632,14 @@ impl Model { // The node kind and the config should be the same. let res = match config.clone() { NodeConfig::Poly { - mut config, + config, inputs: idx, + op, } => { let values: Vec> = idx .iter() .map(|i| { - let node = &self.nodes.filter(*i); + let node = &self.nodes.get(i).unwrap(); match node.opkind { OpKind::Const => { let val = node @@ -688,7 +654,7 @@ impl Model { }) .collect_vec(); - Some(config.layout(layouter, &values)?) + Some(config.borrow_mut().layout(layouter, &values, 0, op)?) } NodeConfig::Lookup { config, @@ -713,53 +679,6 @@ impl Model { Ok(res) } - /// Iterates over Nodes and assigns execution buckets to them. Each bucket holds either: - /// a) independent lookup operations (i.e operations that don't feed into one another so can be processed in parallel). - /// b) operations that can be fused together, i.e the output of one op might feed into another. - /// The logic for bucket assignment is thus: we assign all data intake nodes to the 0 bucket. - /// We iterate over each node in turn. If the node is a polynomial op, assign to it the maximum bucket of it's inputs. - /// If the node is a lookup table, assign to it the maximum bucket of it's inputs incremented by 1. - /// # Arguments - /// - /// * `nodes` - [BTreeMap] of (node index, [Node]) pairs. - pub fn assign_execution_buckets( - mut nodes: BTreeMap, - ) -> Result { - info!("assigning configuration buckets to operations"); - - let mut bucketed_nodes = NodeGraph(BTreeMap::, BTreeMap>::new()); - - for (_, node) in nodes.iter_mut() { - let mut prev_buckets = vec![]; - for n in node - .inputs - .iter() - .filter(|n| !bucketed_nodes.filter(n.node).opkind.is_const()) - { - match bucketed_nodes.filter(n.node).bucket { - Some(b) => prev_buckets.push(b), - None => { - return Err(GraphError::MissingNode(n.node)); - } - } - } - let prev_bucket: Option<&usize> = prev_buckets.iter().max(); - - match &node.opkind { - OpKind::Input => node.bucket = Some(0), - OpKind::Const => node.bucket = None, - OpKind::Poly(_) => node.bucket = Some(*prev_bucket.unwrap()), - OpKind::Lookup(_) => node.bucket = Some(prev_bucket.unwrap() + 1), - op => { - return Err(GraphError::WrongMethod(node.idx, op.clone())); - } - } - bucketed_nodes.insert(node.bucket, node.idx, node.clone()); - } - - Ok(bucketed_nodes) - } - /// Get a linear extension of the model (an evaluation order), for example to feed to circuit construction. /// Note that this order is not stable over multiple reloads of the model. For example, it will freely /// interchange the order of evaluation of fixed parameters. 
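
The Pack op used in the layout above folds a tensor of outputs into one field element so it can be exposed as a single public instance; the reference implementation is the pack helper further down in src/tensor/ops.rs, which weights element i by base^(i * (scale + 1)). A worked integer example of the same arithmetic:

    /// packed = sum_i x[i] * base^(i * (scale + 1))
    fn pack_i128(x: &[i128], base: i128, scale: u32) -> i128 {
        x.iter()
            .enumerate()
            .map(|(i, v)| v * base.pow((i as u32) * (scale + 1)))
            .sum()
    }

    // pack_i128(&[1, 0, 1, 1], 2, 0) == 1*1 + 0*2 + 1*4 + 1*8 == 13

With base > 1 and a scale matching the circuit's fixed-point scale, distinct quantized outputs map to distinct packed values as long as each element stays below base^(scale + 1), which is why the CLI help warns this often breaks for very long inputs.
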
For example weight could have id 1 on one load, @@ -794,7 +713,7 @@ impl Model { self.model .inputs .iter() - .map(|o| self.nodes.filter(o.node).out_dims) + .map(|o| self.nodes.get(&o.node).unwrap().out_dims.clone()) .collect_vec() } @@ -809,7 +728,7 @@ impl Model { self.model .outputs .iter() - .map(|o| self.nodes.filter(o.node).out_dims) + .map(|o| self.nodes.get(&o.node).unwrap().out_dims.clone()) .collect_vec() } @@ -817,148 +736,18 @@ impl Model { pub fn get_output_scales(&self) -> Vec { let output_nodes = self.model.outputs.iter(); output_nodes - .map(|o| self.nodes.filter(o.node).out_scale) + .map(|o| self.nodes.get(&o.node).unwrap().out_scale) .collect_vec() } - /// Max parameter sizes (i.e trainable weights) across the computational graph - pub fn max_params_poly(&self) -> Vec { - let mut maximum_sizes = vec![]; - for (_, bucket_nodes) in self.nodes.0.iter() { - let fused_ops: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| n.opkind.is_poly()) - .collect(); - - let params = fused_ops - .iter() - .flat_map(|(_, n)| n.inputs.iter().map(|o| o.node).collect_vec()) - // here we remove intermediary calculation / nodes within the layer - .filter(|id| { - !fused_ops.contains_key(id) && self.nodes.filter(*id).opkind.is_const() - }) - .unique() - .collect_vec(); - - for (i, id) in params.iter().enumerate() { - let param_size = self.nodes.filter(*id).out_dims.iter().product(); - if i >= maximum_sizes.len() { - // we've already ascertained this is a param node so out_dims = parameter shape - maximum_sizes.push(param_size) - } else { - maximum_sizes[i] = max(maximum_sizes[i], param_size); - } - } - } - maximum_sizes - } - - /// Maximum number of input variables in fused layers - pub fn max_vars_and_params_poly(&self) -> Vec { - let mut maximum_sizes = vec![]; - for (_, bucket_nodes) in self.nodes.0.iter() { - let poly_ops: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| n.opkind.is_poly()) - .collect(); - - let inputs = poly_ops - .iter() - .flat_map(|(_, n)| n.inputs.iter().map(|o| o.node).collect_vec()) - // here we remove intermediary calculation / nodes within the layer - .filter(|id| !poly_ops.contains_key(id)) - .unique() - .collect_vec(); - - for (i, id) in inputs.iter().enumerate() { - let input_size = self.nodes.filter(*id).out_dims.iter().product(); - if i >= maximum_sizes.len() { - // we've already ascertained this is the input node so out_dims = input shape - maximum_sizes.push(input_size) - } else { - maximum_sizes[i] = max(maximum_sizes[i], input_size); - } - } - - // handle output variables - let max_id = poly_ops.keys().max(); - // is None if the bucket is empty - if let Some(m) = max_id { - let output_size = self.nodes.filter(**m).out_dims.iter().product(); - if inputs.len() == maximum_sizes.len() { - maximum_sizes.push(output_size) - } else { - let output_idx = inputs.len(); - // set last entry to be the output column - maximum_sizes[output_idx] = max(maximum_sizes[output_idx], output_size); - } - }; - } - // add 1 for layer output - maximum_sizes - } - - /// Maximum of non params variable sizes in fused layers - pub fn max_vars_poly(&self) -> Vec { - let mut maximum_sizes = vec![]; - for (_, bucket_nodes) in self.nodes.0.iter() { - let fused_ops: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| n.opkind.is_poly()) - .collect(); - - let inputs = fused_ops - .iter() - .flat_map(|(_, n)| n.inputs.iter().map(|o| o.node).collect_vec()) - // here we remove intermediary calculation / nodes within the layer - .filter(|id| { 
- !fused_ops.contains_key(id) && !self.nodes.filter(*id).opkind.is_const() - }) - .unique() - .collect_vec(); - - for (i, id) in inputs.iter().enumerate() { - let input_size = self.nodes.filter(*id).out_dims.iter().product(); - if i >= maximum_sizes.len() { - // we've already ascertained this is the input node so out_dims = input shape - maximum_sizes.push(input_size) - } else { - maximum_sizes[i] = max(maximum_sizes[i], input_size); - } - } - - // handle output variables - let max_id = fused_ops.keys().max(); - // None if the bucket is empty - if let Some(m) = max_id { - let output_size = self.nodes.filter(**m).out_dims.iter().product(); - if inputs.len() == maximum_sizes.len() { - maximum_sizes.push(output_size) - } else { - let output_idx = inputs.len(); - // set last entry to be the output column - maximum_sizes[output_idx] = max(maximum_sizes[output_idx], output_size); - } - }; - } - - // add 1 for layer output - maximum_sizes - } - /// Total number of variables in lookup layers pub fn num_vars_lookup_op(&self, lookup_op: &LookupOp) -> Vec { let mut count = BTreeMap::::new(); - for (_, bucket_nodes) in self.nodes.0.iter() { - let lookup_ops: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| (n.opkind == OpKind::Lookup(lookup_op.clone()))) - .collect(); - - for (_, n) in lookup_ops { + for (_, n) in self.nodes.iter() { + if n.opkind == OpKind::Lookup(lookup_op.clone()) { match &n.opkind { OpKind::Lookup(op) => { - let elem = count.get_mut(&op); + let elem = count.get_mut(op); // handle output variables let output_size: usize = n.out_dims.iter().product(); let input_size = output_size; @@ -986,79 +775,6 @@ impl Model { vec![num_inputs, num_outputs] } - /// Total number of variables in lookup layers - pub fn num_vars_lookup(&self) -> Vec { - let mut count = BTreeMap::::new(); - for (_, bucket_nodes) in self.nodes.0.iter() { - let lookup_ops: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| n.opkind.is_lookup()) - .collect(); - - for (_, n) in lookup_ops { - match &n.opkind { - OpKind::Lookup(op) => { - let elem = count.get_mut(&op); - // handle output variables - let output_size: usize = n.out_dims.iter().product(); - let input_size = output_size; - match elem { - None => { - count.insert(op.clone(), (input_size, output_size)); - } - Some(m) => { - m.0 += input_size; - m.1 += output_size; - } - } - } - // should never reach here - _ => panic!(), - } - } - } - // now get the max across all ops - let (mut num_inputs, mut num_outputs) = (0, 0); - for (_, v) in count.iter() { - num_inputs = max(num_inputs, v.0); - num_outputs = max(num_outputs, v.1); - } - vec![num_inputs, num_outputs] - } - - /// Maximum variable sizes in lookup layers - pub fn max_vars_lookup(&self) -> Vec { - let mut maximum_sizes = vec![]; - for (_, bucket_nodes) in self.nodes.0.iter() { - let lookup_ops: BTreeMap<&usize, &Node> = bucket_nodes - .iter() - .filter(|(_, n)| n.opkind.is_lookup()) - .collect(); - - for (_, n) in lookup_ops { - // lookups currently only accept a single input var - for (j, dims) in n.in_dims.iter().enumerate() { - let input_size = dims.iter().product(); - if j >= maximum_sizes.len() { - maximum_sizes.push(input_size) - } else { - maximum_sizes[j] = max(maximum_sizes[j], input_size); - } - } - // handle output variables - let output_size = n.out_dims.iter().product(); - if (n.in_dims.len()) == maximum_sizes.len() { - maximum_sizes.push(output_size) - } else { - let output_idx = n.in_dims.len(); - // set last entry to be the output column - 
maximum_sizes[output_idx] = max(maximum_sizes[output_idx], output_size); - } - } - } - maximum_sizes - } - /// Number of instances used by the circuit pub fn instance_shapes(&self) -> Vec> { // for now the number of instances corresponds to the number of graph / model outputs @@ -1071,41 +787,4 @@ impl Model { } instance_shapes } - - /// Number of advice used by the circuit - pub fn advice_shapes(&self) -> Vec { - // max sizes in lookup - let max_lookup_sizes = if self.run_args.single_lookup { - self.num_vars_lookup() - } else { - self.max_vars_lookup() - }; - let max_poly_sizes = if self.visibility.params.is_public() { - // max sizes for poly inputs - self.max_vars_poly() - } else { - // max sizes for poly inputs + params - self.max_vars_and_params_poly() - }; - - let mut advice_shapes = if max_poly_sizes.len() >= max_lookup_sizes.len() { - max_poly_sizes.clone() - } else { - max_lookup_sizes.clone() - }; - - for i in 0..min(max_poly_sizes.len(), max_lookup_sizes.len()) { - advice_shapes[i] = max(max_poly_sizes[i], max_lookup_sizes[i]); - } - advice_shapes - } - - /// Maximum sizes of fixed columns (and their sizes) used by the circuit - pub fn fixed_shapes(&self) -> Vec { - let mut fixed_shapes = vec![]; - if self.visibility.params.is_public() { - fixed_shapes = self.max_params_poly(); - } - fixed_shapes - } } diff --git a/src/graph/node.rs b/src/graph/node.rs index 562d0c43..8873d907 100644 --- a/src/graph/node.rs +++ b/src/graph/node.rs @@ -1,8 +1,8 @@ use super::utilities::{node_output_shapes, scale_to_multiplier, vector_to_quantized}; +use crate::circuit::base::BaseConfig as PolyConfig; +use crate::circuit::base::Op as PolyOp; use crate::circuit::lookup::Config as LookupConfig; use crate::circuit::lookup::Op as LookupOp; -use crate::circuit::polynomial::Config as PolyConfig; -use crate::circuit::polynomial::Op as PolyOp; use crate::graph::GraphError; use crate::tensor::Tensor; use crate::tensor::TensorType; @@ -12,7 +12,7 @@ use halo2_proofs::arithmetic::FieldExt; use itertools::Itertools; use log::{info, trace, warn}; use std::cell::RefCell; -use std::collections::{btree_map::Entry, BTreeMap}; +use std::collections::BTreeMap; use std::error::Error; use std::fmt; use std::ops::Deref; @@ -149,8 +149,9 @@ pub enum NodeConfig { inputs: Vec, }, Poly { - config: PolyConfig, + config: Rc>>, inputs: Vec, + op: PolyOp, }, Const, Input, @@ -159,58 +160,7 @@ pub enum NodeConfig { } /// Representation of an execution graph divided into execution 'buckets'. -#[derive(Clone, Default, Debug)] -pub struct NodeGraph(pub BTreeMap, BTreeMap>); - -impl NodeGraph { - /// Create an empty NodeGraph - pub fn new() -> Self { - NodeGraph(BTreeMap::new()) - } - - /// Insert the node with given tract `node_idx` and config at `idx` - pub fn insert(&mut self, idx: Option, node_idx: usize, config: Node) { - match self.0.entry(idx) { - Entry::Vacant(e) => { - e.insert(BTreeMap::from([(node_idx, config)])); - } - Entry::Occupied(mut e) => { - e.get_mut().insert(node_idx, config); - } - } - } - - /// Flattens the inner [BTreeMap] into a [Vec] of [Node]s. - pub fn flatten(&self) -> Vec { - let a = self - .0 - .clone() - .into_values() - .map(|d| d.into_values().collect()) - .collect::>>(); - let mut c: Vec = a - .iter() - .flatten() - .collect::>() - .iter() - .map(|e| (*e).clone()) - .collect(); - - c.sort_by_key(|v| v.idx); - c - } - - /// Retrieves a node, as specified by idx, from the Graph of bucketed nodes. 
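
The bucketed NodeGraph wrapper and its O(n) flatten-and-scan filter accessor are deleted just below in favor of a plain ordered map. The replacement type alias's parameters were lost in this patch's rendering; reconstructed from the surrounding self.nodes.get(&o.node) call sites, it reads as follows (Node is the crate's own node type):

    use std::collections::BTreeMap;

    /// Nodes keyed by node index; BTreeMap iteration preserves that order,
    /// which is what the configure/layout loops above rely on.
    pub type NodeGraph = BTreeMap<usize, Node>;

Every former nodes.filter(idx) lookup, which cloned the whole graph, becomes a cheap nodes.get(&idx).
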
- pub fn filter(&self, idx: usize) -> Node { - let a = self.flatten(); - let c = &a - .iter() - .filter(|i| i.idx == idx) - .cloned() - .collect::>()[0]; - c.clone() - } -} +pub type NodeGraph = BTreeMap; fn display_option(o: &Option) -> String { match o { @@ -1102,7 +1052,7 @@ impl Node { Node { idx, opkind: OpKind::Poly(PolyOp::Reshape(new_dims.clone())), - inputs: node.inputs.clone(), + inputs: node.inputs[0..1].to_vec(), in_dims: inputs.iter().map(|inp| inp.out_dims.clone()).collect(), out_dims: new_dims, in_scale: input_node.out_scale, diff --git a/src/graph/vars.rs b/src/graph/vars.rs index 79cbee2e..022b6734 100644 --- a/src/graph/vars.rs +++ b/src/graph/vars.rs @@ -10,7 +10,7 @@ use serde::Deserialize; use super::GraphError; /// Label Enum to track whether model input, model parameters, and model output are public or private -#[derive(Clone, Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub enum Visibility { /// Mark an item as private to the prover (not in the proof submitted for verification) Private, @@ -98,37 +98,19 @@ impl ModelVars { pub fn new( cs: &mut ConstraintSystem, logrows: usize, - max_rotations: usize, - // len is max number of advice, and each elem is the max advice size at that index - advice_dims: Vec, - // len is max number of fixed, and each elem is the max fixed size at that index - fixed_dims: Vec, instance_dims: Vec>, + visibility: VarVisibility, ) -> Self { - let advices = (0..advice_dims.len()) - .map(|i| { - VarTensor::new_advice( - cs, - logrows, - advice_dims[i], - vec![advice_dims[i]], - true, - max_rotations, - ) - }) - .collect_vec(); - let fixed = (0..fixed_dims.len()) - .map(|i| { - VarTensor::new_fixed( - cs, - logrows, - fixed_dims[i], - vec![fixed_dims[i]], - true, - max_rotations, - ) - }) + let advices = (0..3) + .map(|_| VarTensor::new_advice(cs, logrows, 1, vec![1], true)) .collect_vec(); + let mut fixed = vec![]; + if visibility.params == Visibility::Public { + fixed = (0..1) + .map(|_| VarTensor::new_fixed(cs, logrows, 1, vec![1], true)) + .collect_vec(); + } + // will be empty if instances dims has len 0 let instances = (0..instance_dims.len()) .map(|i| ValTensor::new_instance(cs, instance_dims[i].clone(), true)) .collect_vec(); diff --git a/src/pfsys/mod.rs b/src/pfsys/mod.rs index 5213eb5b..02bc147d 100644 --- a/src/pfsys/mod.rs +++ b/src/pfsys/mod.rs @@ -1,6 +1,7 @@ /// EVM related proving and verification pub mod evm; +use crate::circuit::base::CheckMode; use crate::commands::{data_path, Cli, RunArgs}; use crate::execute::ExecutionError; use crate::fieldutils::i128_to_felt; @@ -313,6 +314,7 @@ pub fn create_proof_circuit< params: &'params Scheme::ParamsProver, pk: &ProvingKey, strategy: Strategy, + check_mode: CheckMode, ) -> Result, Box> where C: Circuit, @@ -320,7 +322,7 @@ where Scheme::Scalar: SerdeObject, { // quickly mock prove as a sanity check - { + if check_mode == CheckMode::SAFE { debug!("running mock prover"); let prover = MockProver::run(params.k(), &circuit, instances.clone()) .map_err(Box::::from)?; @@ -361,7 +363,7 @@ where let checkable_pf = Snark::new(protocol, instances, proof); // sanity check that the generated proof is valid - { + if check_mode == CheckMode::SAFE { debug!("verifying generated proof"); let verifier_params = params.verifier_params(); verify_proof_circuit::( diff --git a/src/tensor/mod.rs b/src/tensor/mod.rs index 11845757..bbe32320 100644 --- a/src/tensor/mod.rs +++ b/src/tensor/mod.rs @@ -687,9 +687,9 @@ impl Tensor { rows.push(row); } let mut res = 
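
vars.rs now hard-codes the column budget instead of deriving it from the deleted max_* shape analyses. A hedged summary of the allocation this implies (not verbatim from vars.rs; generic bounds elided in the rendered diff are assumed to be F: FieldExt + TensorType):

    // Column budget after this patch:
    //   advices:   3        two base-gate operands plus one output,
    //                       reused across every op via rotations
    //   fixed:     0 or 1   exactly 1 iff visibility.params is Public
    //   instances: one per model output shape, for public outputs
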
vec![]; - for i in 0..self.dims[0] { + for row in rows.iter().take(self.dims[0]) { for _ in 0..n { - res.push(rows[i].clone()); + res.push(row.clone()); } } @@ -728,11 +728,11 @@ impl Tensor { Tensor::new(None, &[h_blocks * num_rows, w_blocks * num_cols])?; second_channels ]; - for j in 0..second_channels { + for (j, row_block) in row.iter_mut().enumerate().take(second_channels) { let mut r = self.get_slice(&[i..i + 1, j..j + 1])?; let dims = r.dims()[2..].to_vec(); r.reshape(&dims); - row[j] = r.doubly_blocked_toeplitz( + *row_block = r.doubly_blocked_toeplitz( h_blocks, w_blocks, num_rows, num_cols, h_stride, w_stride, )?; } diff --git a/src/tensor/ops.rs b/src/tensor/ops.rs index ef8f57f6..d48e7801 100644 --- a/src/tensor/ops.rs +++ b/src/tensor/ops.rs @@ -24,12 +24,12 @@ pub use std::ops::{Add, Div, Mul, Sub}; /// Some(&[0, 0]), /// &[2], /// ).unwrap(); -/// let result = affine(&vec![x, k, b]).unwrap(); +/// let result = affine(&[x, k, b]).unwrap(); /// let expected = Tensor::::new(Some(&[26, 7, 11, 3, 15, 3, 7, 2]), &[2, 4]).unwrap(); /// assert_eq!(result, expected); /// ``` pub fn affine + Add>( - inputs: &Vec>, + inputs: &[Tensor], ) -> Result, TensorError> { let (mut input, kernel, bias) = (inputs[0].clone(), inputs[1].clone(), inputs[2].clone()); if (inputs.len() != 3) @@ -81,12 +81,12 @@ pub fn affine + Add>( /// Some(&[2, 1, 2, 1, 1, 1]), /// &[2, 3], /// ).unwrap(); -/// let result = scale_and_shift(&vec![x, k, b]).unwrap(); +/// let result = scale_and_shift(&[x, k, b]).unwrap(); /// let expected = Tensor::::new(Some(&[6, 2, 6, 2, 2, 2]), &[2, 3]).unwrap(); /// assert_eq!(result, expected); /// ``` pub fn scale_and_shift + Add>( - inputs: &Vec>, + inputs: &[Tensor], ) -> Result, TensorError> { if (inputs.len() != 3) || (inputs[1].dims() != inputs[2].dims()) @@ -124,7 +124,7 @@ pub fn scale_and_shift + Add>( /// assert_eq!(result, expected); /// ``` pub fn matmul + Add>( - inputs: &Vec>, + inputs: &[Tensor], ) -> Result, TensorError> { let (a, b) = (inputs[0].clone(), inputs[1].clone()); @@ -176,7 +176,7 @@ pub fn matmul + Add>( /// Some(&[2, 3, 2, 1, 1, 1]), /// &[2, 3], /// ).unwrap(); -/// let result = add(&vec![x, k]).unwrap(); +/// let result = add(&[x, k]).unwrap(); /// let expected = Tensor::::new(Some(&[4, 4, 4, 2, 2, 2]), &[2, 3]).unwrap(); /// assert_eq!(result, expected); /// @@ -188,11 +188,11 @@ pub fn matmul + Add>( /// let k = Tensor::::new( /// Some(&[2]), /// &[1]).unwrap(); -/// let result = add(&vec![x, k]).unwrap(); +/// let result = add(&[x, k]).unwrap(); /// let expected = Tensor::::new(Some(&[4, 3, 4, 3, 3, 3]), &[2, 3]).unwrap(); /// assert_eq!(result, expected); /// ``` -pub fn add>(t: &Vec>) -> Result, TensorError> { +pub fn add>(t: &[Tensor]) -> Result, TensorError> { // calculate value of output let mut output: Tensor = t[0].clone(); @@ -220,7 +220,7 @@ pub fn add>(t: &Vec>) -> Result::new(Some(&[0, -2, 0, 0, 0, 0]), &[2, 3]).unwrap(); /// assert_eq!(result, expected); /// @@ -233,11 +233,11 @@ pub fn add>(t: &Vec>) -> Result::new(Some(&[0, -1, 0, -1, -1, -1]), &[2, 3]).unwrap(); /// assert_eq!(result, expected); /// ``` -pub fn sub>(t: &Vec>) -> Result, TensorError> { +pub fn sub>(t: &[Tensor]) -> Result, TensorError> { // calculate value of output let mut output: Tensor = t[0].clone(); @@ -265,7 +265,7 @@ pub fn sub>(t: &Vec>) -> Result::new(Some(&[4, 3, 4, 1, 1, 1]), &[2, 3]).unwrap(); /// assert_eq!(result, expected); /// @@ -277,11 +277,11 @@ pub fn sub>(t: &Vec>) -> Result::new( /// Some(&[2]), /// &[1]).unwrap(); -/// let result = 
mult(&vec![x, k]).unwrap(); +/// let result = mult(&[x, k]).unwrap(); /// let expected = Tensor::::new(Some(&[4, 2, 4, 2, 2, 2]), &[2, 3]).unwrap(); /// assert_eq!(result, expected); /// ``` -pub fn mult>(t: &Vec>) -> Result, TensorError> { +pub fn mult>(t: &[Tensor]) -> Result, TensorError> { // calculate value of output let mut output: Tensor = t[0].clone(); @@ -371,12 +371,12 @@ pub fn sum>(a: &Tensor) -> Result, /// Some(&[0]), /// &[1], /// ).unwrap(); -/// let result = convolution::(&vec![x, k, b], (0, 0), (1, 1)).unwrap(); +/// let result = convolution::(&[x, k, b], (0, 0), (1, 1)).unwrap(); /// let expected = Tensor::::new(Some(&[31, 16, 8, 26]), &[1, 2, 2]).unwrap(); /// assert_eq!(result, expected); /// ``` pub fn convolution + Add>( - inputs: &Vec>, + inputs: &[Tensor], padding: (usize, usize), stride: (usize, usize), ) -> Result, TensorError> { @@ -691,12 +691,12 @@ where { // base ^ (scale + tensor) let mut output = T::zero().unwrap(); - let base_tensor = Tensor::new(Some(&vec![base]), &[1])?; + let base_tensor = Tensor::new(Some(&[base]), &[1])?; for (i, a_i) in a.iter().enumerate() { let pow_value = &base_tensor.pow((i as u32) * (scale + 1))?[0]; output = output + pow_value.clone() * a_i.clone(); } - Ok(Tensor::new(Some(&vec![output]), &[1])?) + Tensor::new(Some(&[output]), &[1]) } // --------------------------------------------------------------------------------------------------------- @@ -902,12 +902,12 @@ pub mod accumulated { /// Some(&[25, 35]), /// &[2], /// ).unwrap(); - /// assert_eq!(dot(&vec![x, y]).unwrap(), expected); + /// assert_eq!(dot(&[x, y]).unwrap(), expected); /// ``` pub fn dot + Add>( - inputs: &Vec>, + inputs: &[Tensor; 2], ) -> Result, TensorError> { - if (inputs.len() != 2) || (inputs[0].clone().len() != inputs[1].clone().len()) { + if inputs[0].clone().len() != inputs[1].clone().len() { return Err(TensorError::DimMismatch("dot".to_string())); } let (a, b): (Tensor, Tensor) = (inputs[0].clone(), inputs[1].clone()); @@ -974,16 +974,15 @@ pub mod accumulated { /// Some(&[2, 1, 2, 1, 1, 1]), /// &[2, 3], /// ).unwrap(); - /// let result = matmul(&vec![k, x]).unwrap(); + /// let result = matmul(&[k, x]).unwrap(); /// let expected = Tensor::::new(Some(&[10, 12, 18, 5, 7, 10]), &[2, 1, 3]).unwrap(); /// assert_eq!(result, expected); /// ``` pub fn matmul + Add>( - inputs: &Vec>, + inputs: &[Tensor; 2], ) -> Result, TensorError> { let (a, b) = (inputs[0].clone(), inputs[1].clone()); - if (inputs.len() != 2) - || (a.dims()[a.dims().len() - 1] != b.dims()[a.dims().len() - 2]) + if (a.dims()[a.dims().len() - 1] != b.dims()[a.dims().len() - 2]) || (a.dims()[0..a.dims().len() - 2] != b.dims()[0..a.dims().len() - 2]) { return Err(TensorError::DimMismatch("matmul".to_string())); @@ -1007,7 +1006,7 @@ pub mod accumulated { .map(|&d| d..(d + 1)) .collect::>(); col[coord.len() - 2] = 0..b.dims()[coord.len() - 2]; - let dot_transcript = dot(&vec![a.get_slice(&row[0..])?, b.get_slice(&col[0..])?])?; + let dot_transcript = dot(&[a.get_slice(&row[0..])?, b.get_slice(&col[0..])?])?; transcripts.push(dot_transcript); } let mut output = Tensor::new(Some(&transcripts), &[transcripts.len()])?.combine()?; diff --git a/src/tensor/val.rs b/src/tensor/val.rs index c934a942..4b707ceb 100644 --- a/src/tensor/val.rs +++ b/src/tensor/val.rs @@ -1,5 +1,81 @@ use super::{ops::pad, *}; -use halo2_proofs::plonk::Instance; +use halo2_proofs::{arithmetic::Field, plonk::Instance}; +use log::warn; + +#[derive(Debug, Clone)] +/// +pub enum ValType { + /// value + Value(Value), + /// 
assigned value + AssignedValue(Value>), + /// previously assigned value + PrevAssigned(AssignedCell), +} + +impl From> for i32 { + fn from(val: ValType) -> Self { + match val { + ValType::Value(v) => { + let mut output = 0_i32; + let mut i = 0; + v.map(|y| { + let e = felt_to_i32(y); + output = e; + i += 1; + }); + output + } + ValType::AssignedValue(v) => { + let mut output = 0_i32; + let mut i = 0; + v.evaluate().map(|y| { + let e = felt_to_i32(y); + output = e; + i += 1; + }); + output + } + ValType::PrevAssigned(v) => { + let mut output = 0_i32; + let mut i = 0; + v.value().map(|y| { + let e = felt_to_i32(*y); + output = e; + i += 1; + }); + output + } + } + } +} + +impl From> for ValType { + fn from(t: Value) -> ValType { + ValType::Value(t) + } +} + +impl From>> for ValType { + fn from(t: Value>) -> ValType { + ValType::AssignedValue(t) + } +} + +impl From> for ValType { + fn from(t: AssignedCell) -> ValType { + ValType::PrevAssigned(t) + } +} + +impl TensorType for ValType { + fn zero() -> Option { + Some(ValType::Value(Value::known(::zero()))) + } + fn one() -> Option { + Some(ValType::Value(Value::known(::one()))) + } +} /// A wrapper around a [Tensor] where the inner type is one of Halo2's [`Value`], [`Value>`], [`AssignedCell, F>`]. /// This enum is generally used to assign values to variables / advices already configured in a Halo2 circuit (usually represented as a [VarTensor]). /// For instance can represent pre-trained neural network weights; or a known input to a network. @@ -8,24 +84,10 @@ pub enum ValTensor { /// A tensor of [Value], each containing a field element Value { /// Underlying [Tensor]. - inner: Tensor>, + inner: Tensor>, /// Vector of dimensions of the tensor. dims: Vec, }, - /// A tensor of [Value], each containing a ratio of field elements, which may be evaluated to produce plain field elements. - AssignedValue { - /// Underlying [Tensor]. - inner: Tensor>>, - /// Vector of dimensions of the [Tensor]. - dims: Vec, - }, - /// A tensor of AssignedCells, with data both a value and the matrix cell to which it is assigned. - PrevAssigned { - /// Underlying [Tensor]. - inner: Tensor>, - /// Vector of dimensions of the [Tensor]. 
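
ValType's declaration above lost its generics in rendering; reconstructed from its From impls and the three ValTensor variants it collapses (hedged on the exact trait bounds, which elsewhere in the crate are F: FieldExt + TensorType, with TensorType being the crate's own trait):

    use halo2_proofs::{
        arithmetic::FieldExt,
        circuit::{AssignedCell, Value},
        plonk::Assigned,
    };

    /// One element of a ValTensor, in any of its three assignment states.
    #[derive(Debug, Clone)]
    pub enum ValType<F: FieldExt + TensorType> {
        /// a plain witness value
        Value(Value<F>),
        /// an assigned, possibly rational value, evaluated on demand
        AssignedValue(Value<Assigned<F>>),
        /// a cell already assigned in a previous region, to be copy-constrained
        PrevAssigned(AssignedCell<F, F>),
    }

Collapsing the three ValTensor variants into a single Value variant over this enum is what lets one tensor mix fresh witnesses with previously assigned cells; the new VarTensor::assign match in src/tensor/var.rs below dispatches on it per element.
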
- dims: Vec, - }, /// A tensor backed by an [Instance] column Instance { /// [Instance] @@ -35,10 +97,19 @@ pub enum ValTensor { }, } +impl From>> for ValTensor { + fn from(t: Tensor>) -> ValTensor { + ValTensor::Value { + inner: t.map(|x| x), + dims: t.dims().to_vec(), + } + } +} + impl From>> for ValTensor { fn from(t: Tensor>) -> ValTensor { ValTensor::Value { - inner: t.clone(), + inner: t.map(|x| x.into()), dims: t.dims().to_vec(), } } @@ -46,8 +117,8 @@ impl From>> for ValTensor { impl From>>> for ValTensor { fn from(t: Tensor>>) -> ValTensor { - ValTensor::AssignedValue { - inner: t.clone(), + ValTensor::Value { + inner: t.map(|x| x.into()), dims: t.dims().to_vec(), } } @@ -55,8 +126,8 @@ impl From>>> for ValTensor impl From>> for ValTensor { fn from(t: Tensor>) -> ValTensor { - ValTensor::PrevAssigned { - inner: t.clone(), + ValTensor::Value { + inner: t.map(|x| x.into()), dims: t.dims().to_vec(), } } @@ -78,30 +149,17 @@ impl ValTensor { let mut integer_evals: Vec = vec![]; match self { ValTensor::Value { inner: v, dims: _ } => { - let _ = v.map(|vaf| { - // we have to push to an externally created vector or else vaf.map() returns an evaluation wrapped in Value<> (which we don't want) - vaf.map(|f| { + // we have to push to an externally created vector or else vaf.map() returns an evaluation wrapped in Value<> (which we don't want) + let _ = v.map(|vaf| match vaf { + ValType::Value(v) => v.map(|f| { integer_evals.push(crate::fieldutils::felt_to_i128(f)); - }) - }); - } - ValTensor::AssignedValue { inner: v, dims: _ } => { - let _ = v.map(|vaf| { - // we have to push to an externally created vector or else vaf.map() returns an evaluation wrapped in Value<> (which we don't want) - vaf.map(|f| { + }), + ValType::AssignedValue(v) => v.map(|f| { integer_evals.push(crate::fieldutils::felt_to_i128(f.evaluate())); - }) - }); - } - ValTensor::PrevAssigned { inner: v, dims: _ } => { - // convert assigned cells to Value> so we can extract the inner field element - let w_vaf: Tensor>> = v.map(|acaf| (acaf).value_field()); - - let _ = w_vaf.map(|vaf| { - // we have to push to an externally created vector or else vaf.map() returns an evaluation wrapped in Value<> (which we don't want) - vaf.map(|f| { + }), + ValType::PrevAssigned(v) => v.value_field().map(|f| { integer_evals.push(crate::fieldutils::felt_to_i128(f.evaluate())); - }) + }), }); } _ => return Err(Box::new(TensorError::WrongMethod)), @@ -119,20 +177,6 @@ impl ValTensor { dims: slice.dims().to_vec(), } } - ValTensor::AssignedValue { inner: v, dims: _ } => { - let slice = v.get_slice(indices)?; - ValTensor::AssignedValue { - inner: slice.clone(), - dims: slice.dims().to_vec(), - } - } - ValTensor::PrevAssigned { inner: v, dims: _ } => { - let slice = v.get_slice(indices)?; - ValTensor::PrevAssigned { - inner: slice.clone(), - dims: slice.dims().to_vec(), - } - } _ => return Err(Box::new(TensorError::WrongMethod)), }; Ok(slice) @@ -145,14 +189,6 @@ impl ValTensor { v.transpose_2d()?; *d = v.dims().to_vec(); } - ValTensor::AssignedValue { inner: v, dims: d } => { - v.transpose_2d()?; - *d = v.dims().to_vec(); - } - ValTensor::PrevAssigned { inner: v, dims: d } => { - v.transpose_2d()?; - *d = v.dims().to_vec(); - } ValTensor::Instance { dims: d, .. 
} => { *d = vec![d.iter().product()]; } @@ -160,14 +196,16 @@ impl ValTensor { Ok(()) } - /// Transposes the inner tensor + /// Fetches the inner tensor as a [Tensor>] pub fn get_inner(&self) -> Result>, TensorError> { + warn!("using 'get_inner' in constraints can create soundness issues."); Ok(match self { - ValTensor::Value { inner: v, .. } => v.clone().into(), - ValTensor::AssignedValue { inner: v, .. } => v.map(|x| x.evaluate()).into(), - ValTensor::PrevAssigned { inner: v, .. } => { - v.map(|x| x.value_field().evaluate()).into() - } + ValTensor::Value { inner: v, .. } => v + .map(|x| match x { + ValType::Value(v) => v, + ValType::AssignedValue(v) => v.evaluate(), + ValType::PrevAssigned(v) => v.value_field().evaluate(), + }), ValTensor::Instance { .. } => return Err(TensorError::WrongMethod), }) } @@ -179,14 +217,6 @@ impl ValTensor { v.reshape(new_dims); *d = v.dims().to_vec(); } - ValTensor::AssignedValue { inner: v, dims: d } => { - v.reshape(new_dims); - *d = v.dims().to_vec(); - } - ValTensor::PrevAssigned { inner: v, dims: d } => { - v.reshape(new_dims); - *d = v.dims().to_vec(); - } ValTensor::Instance { dims: d, .. } => { if d.iter().product::() != new_dims.iter().product::() { return Err(Box::new(TensorError::DimError)); @@ -204,14 +234,6 @@ impl ValTensor { v.flatten(); *d = v.dims().to_vec(); } - ValTensor::AssignedValue { inner: v, dims: d } => { - v.flatten(); - *d = v.dims().to_vec(); - } - ValTensor::PrevAssigned { inner: v, dims: d } => { - v.flatten(); - *d = v.dims().to_vec(); - } ValTensor::Instance { dims: d, .. } => { *d = vec![d.iter().product()]; } @@ -225,14 +247,6 @@ impl ValTensor { *v = v.tile(n)?; *d = v.dims().to_vec(); } - ValTensor::AssignedValue { inner: v, dims: d } => { - *v = v.tile(n)?; - *d = v.dims().to_vec(); - } - ValTensor::PrevAssigned { inner: v, dims: d } => { - *v = v.tile(n)?; - *d = v.dims().to_vec(); - } ValTensor::Instance { .. } => { return Err(TensorError::WrongMethod); } @@ -247,14 +261,6 @@ impl ValTensor { *v = pad(v, padding)?; *d = v.dims().to_vec(); } - ValTensor::AssignedValue { inner: v, dims: d } => { - *v = pad(v, padding)?; - *d = v.dims().to_vec(); - } - ValTensor::PrevAssigned { inner: v, dims: d } => { - *v = pad(v, padding)?; - *d = v.dims().to_vec(); - } ValTensor::Instance { .. } => { return Err(TensorError::WrongMethod); } @@ -269,14 +275,6 @@ impl ValTensor { *v = v.repeat_rows(n)?; *d = v.dims().to_vec(); } - ValTensor::AssignedValue { inner: v, dims: d } => { - *v = v.repeat_rows(n)?; - *d = v.dims().to_vec(); - } - ValTensor::PrevAssigned { inner: v, dims: d } => { - *v = v.repeat_rows(n)?; - *d = v.dims().to_vec(); - } ValTensor::Instance { .. } => { return Err(TensorError::WrongMethod); } @@ -284,6 +282,15 @@ impl ValTensor { Ok(()) } + /// Calls `len` on the inner [Tensor]. + pub fn len(&self) -> usize { + match self { + ValTensor::Value { dims, .. } | ValTensor::Instance { dims, .. } => { + dims.iter().product::() + } + } + } + /// Calls `pad_row_ones` on the inner [Tensor]. pub fn pad_row_ones(&mut self) -> Result<(), TensorError> { match self { @@ -291,14 +298,6 @@ impl ValTensor { *v = v.pad_row_ones()?; *d = v.dims().to_vec(); } - ValTensor::AssignedValue { inner: v, dims: d } => { - *v = v.pad_row_ones()?; - *d = v.dims().to_vec(); - } - ValTensor::PrevAssigned { inner: v, dims: d } => { - *v = v.pad_row_ones()?; - *d = v.dims().to_vec(); - } ValTensor::Instance { .. 
} => { return Err(TensorError::WrongMethod); } @@ -323,18 +322,6 @@ impl ValTensor { )?; *d = v.dims().to_vec(); } - ValTensor::AssignedValue { inner: v, dims: d } => { - *v = v.multi_ch_doubly_blocked_toeplitz( - h_blocks, w_blocks, num_rows, num_cols, h_stride, w_stride, - )?; - *d = v.dims().to_vec(); - } - ValTensor::PrevAssigned { inner: v, dims: d } => { - *v = v.multi_ch_doubly_blocked_toeplitz( - h_blocks, w_blocks, num_rows, num_cols, h_stride, w_stride, - )?; - *d = v.dims().to_vec(); - } ValTensor::Instance { .. } => { return Err(TensorError::WrongMethod); } @@ -348,16 +335,8 @@ impl ValTensor { (ValTensor::Value { inner: v, .. }, ValTensor::Value { inner: v2, .. }) => { Ok(v.append_to_row(v2)?.into()) } - ( - ValTensor::AssignedValue { inner: v, .. }, - ValTensor::AssignedValue { inner: v2, .. }, - ) => Ok(v.append_to_row(v2)?.into()), - ( - ValTensor::PrevAssigned { inner: v, .. }, - ValTensor::PrevAssigned { inner: v2, .. }, - ) => Ok(v.append_to_row(v2)?.into()), _ => { - return Err(TensorError::WrongMethod); + Err(TensorError::WrongMethod) } } } @@ -368,14 +347,6 @@ impl ValTensor { (ValTensor::Value { inner: v1, .. }, ValTensor::Value { inner: v2, .. }) => { ValTensor::from(Tensor::new(Some(&[v1.clone(), v2]), &[2])?.combine()?) } - ( - ValTensor::AssignedValue { inner: v1, .. }, - ValTensor::AssignedValue { inner: v2, .. }, - ) => ValTensor::from(Tensor::new(Some(&[v1.clone(), v2]), &[2])?.combine()?), - ( - ValTensor::PrevAssigned { inner: v1, .. }, - ValTensor::PrevAssigned { inner: v2, .. }, - ) => ValTensor::from(Tensor::new(Some(&[v1.clone(), v2]), &[2])?.combine()?), _ => { return Err(TensorError::WrongMethod); } @@ -386,21 +357,14 @@ impl ValTensor { /// Returns the `dims` attribute of the [ValTensor]. pub fn dims(&self) -> &[usize] { match self { - ValTensor::Value { dims: d, .. } - | ValTensor::AssignedValue { dims: d, .. } - | ValTensor::PrevAssigned { dims: d, .. } - | ValTensor::Instance { dims: d, .. } => d, + ValTensor::Value { dims: d, .. } | ValTensor::Instance { dims: d, .. } => d, } } /// A [String] representation of the [ValTensor] for display, for example in showing intermediate values in a computational graph. pub fn show(&self) -> String { match self.clone() { - ValTensor::PrevAssigned { inner: v, dims: _ } => { - let r: Tensor = v.into(); - format!("PrevAssigned {:?}", r) - } ValTensor::Value { inner: v, dims: _ } => { - let r: Tensor = v.into(); + let r: Tensor = v.map(|x| x.into()); format!("Value {:?}", r) } _ => "ValTensor not PrevAssigned".into(), diff --git a/src/tensor/var.rs b/src/tensor/var.rs index fe620405..bd469b76 100644 --- a/src/tensor/var.rs +++ b/src/tensor/var.rs @@ -1,5 +1,4 @@ use super::*; -use std::cmp::min; /// A wrapper around Halo2's `Column` or `Column`. /// The wrapper allows for `VarTensor`'s dimensions to differ from that of the inner (wrapped) columns. /// The inner vector might, for instance, contain 3 Advice Columns. 
diff --git a/src/tensor/var.rs b/src/tensor/var.rs
index fe620405..bd469b76 100644
--- a/src/tensor/var.rs
+++ b/src/tensor/var.rs
@@ -1,5 +1,4 @@
 use super::*;
-use std::cmp::min;
 /// A wrapper around Halo2's `Column<Fixed>` or `Column<Advice>`.
 /// The wrapper allows for `VarTensor`'s dimensions to differ from that of the inner (wrapped) columns.
 /// The inner vector might, for instance, contain 3 Advice Columns. Each of those columns in turn
@@ -50,15 +49,14 @@ impl VarTensor {
         capacity: usize,
         dims: Vec<usize>,
         equality: bool,
-        max_rot: usize,
     ) -> Self {
         let base = 2u32;
-        let max_rows = min(
-            max_rot,
-            base.pow(k as u32) as usize - cs.blinding_factors() - 1,
-        );
+        let max_rows = base.pow(k as u32) as usize - cs.blinding_factors() - 1;
         let modulo = (capacity / max_rows) + 1;
         let mut advices = vec![];
+        if modulo > 1 {
+            unimplemented!("we'll be implementing multi-column variables in a future release but for now, increase K.")
+        }
         for _ in 0..modulo {
             let col = cs.advice_column();
             if equality {
@@ -88,13 +86,9 @@ impl VarTensor {
         capacity: usize,
         dims: Vec<usize>,
         equality: bool,
-        max_rot: usize,
     ) -> Self {
         let base = 2u32;
-        let max_rows = min(
-            max_rot,
-            base.pow(k as u32) as usize - cs.blinding_factors() - 1,
-        );
+        let max_rows = base.pow(k as u32) as usize - cs.blinding_factors() - 1;
         let modulo = (capacity / max_rows) + 1;
         let mut fixed = vec![];
         for _ in 0..modulo {
@@ -284,36 +278,34 @@ impl VarTensor {
             }
             _ => Err(halo2_proofs::plonk::Error::Synthesis),
         },
-        ValTensor::Value { inner: v, .. } => v.enum_map(|coord, k| match &self {
-            VarTensor::Fixed { inner: fixed, .. } => {
-                let (x, y) = self.cartesian_coord(offset + coord);
-
-                region.assign_fixed(|| "k", fixed[x], y, || k)
+        ValTensor::Value { inner: v, .. } => v.enum_map(|coord, k| {
+            let (x, y) = self.cartesian_coord(offset + coord);
+            if x > 0 {
+                unimplemented!("we'll be implementing multi-column variables in a future release but for now, increase K.")
             }
-            VarTensor::Advice { inner: advices, .. } => {
-                let (x, y) = self.cartesian_coord(offset + coord);
-                region.assign_advice(|| "k", advices[x], y, || k)
-            }
-        }),
-        ValTensor::PrevAssigned { inner: v, .. } => v.enum_map(|coord, xcell| match &self {
-            VarTensor::Advice { inner: advices, .. } => {
-                let (x, y) = self.cartesian_coord(offset + coord);
-                xcell.copy_advice(|| "k", region, advices[x], y)
-            }
-            _ => Err(halo2_proofs::plonk::Error::Synthesis),
-        }),
-        ValTensor::AssignedValue { inner: v, .. } => v.enum_map(|coord, k| match &self {
-            VarTensor::Fixed { inner: fixed, .. } => {
-                let (x, y) = self.cartesian_coord(offset + coord);
-                region
-                    .assign_fixed(|| "k", fixed[x], y, || k)
-                    .map(|a| a.evaluate())
-            }
-            VarTensor::Advice { inner: advices, .. } => {
-                let (x, y) = self.cartesian_coord(offset + coord);
-                region
-                    .assign_advice(|| "k", advices[x], y, || k)
-                    .map(|a| a.evaluate())
+            match k {
+                ValType::Value(v) => match &self {
+                    VarTensor::Fixed { inner: fixed, .. } => {
+                        region.assign_fixed(|| "k", fixed[x], y, || v)
+                    }
+                    VarTensor::Advice { inner: advices, .. } => {
+                        region.assign_advice(|| "k", advices[x], y, || v)
+                    }
+                },
+                ValType::PrevAssigned(v) => match &self {
+                    VarTensor::Advice { inner: advices, .. } => {
+                        v.copy_advice(|| "k", region, advices[x], y)
+                    }
+                    _ => Err(halo2_proofs::plonk::Error::Synthesis),
+                },
+                ValType::AssignedValue(v) => match &self {
+                    VarTensor::Fixed { inner: fixed, .. } => region
+                        .assign_fixed(|| "k", fixed[x], y, || v)
+                        .map(|a| a.evaluate()),
+                    VarTensor::Advice { inner: advices, .. } => region
+                        .assign_advice(|| "k", advices[x], y, || v)
+                        .map(|a| a.evaluate()),
+                },
             }
         }),
     }
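With the `--max-rotations` cap gone, a `VarTensor` is expected to fit in a single column: the usable rows are everything in a column of height 2^k except the blinding rows, and overflowing that now aborts instead of spilling into extra columns. A standalone sketch of the arithmetic, with `blinding_factors` standing in for `cs.blinding_factors()` (the value 6 below is purely illustrative):

// Rows available for witness data in one column of a 2^k-row circuit.
fn max_rows(k: u32, blinding_factors: usize) -> usize {
    2usize.pow(k) - blinding_factors - 1
}

// Column count as computed in the VarTensor constructors above.
fn columns_needed(capacity: usize, max_rows: usize) -> usize {
    (capacity / max_rows) + 1
}

fn main() {
    let rows = max_rows(20, 6); // 2^20 - 7 usable rows
    assert_eq!(columns_needed(rows - 1, rows), 1); // fits in one column
    assert_eq!(columns_needed(rows, rows), 2); // would hit unimplemented!()
}

Note the boundary behaviour visible in the patch: `(capacity / max_rows) + 1` already reports a second column when capacity equals `max_rows` exactly, so the remedy the error message suggests is to increase K.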
diff --git a/tests/integration_tests.rs b/tests/integration_tests.rs
index 7f0ab65a..74dbb91e 100644
--- a/tests/integration_tests.rs
+++ b/tests/integration_tests.rs
@@ -14,12 +14,24 @@ lazy_static! {
     fn init() {
         println!("using cargo target dir: {}", *CARGO_TARGET_DIR);
         build_ezkl();
+        let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
+            .args([
+                "-K=20",
+                "gen-srs",
+                &format!(
+                    "--params-path={}/kzg20.params",
+                    TEST_DIR.path().to_str().unwrap()
+                ),
+            ])
+            .status()
+            .expect("failed to execute process");
+        assert!(status.success());
         let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
             .args([
                 "-K=23",
                 "gen-srs",
                 &format!(
-                    "--params-path={}/kzg.params",
+                    "--params-path={}/kzg23.params",
                     TEST_DIR.path().to_str().unwrap()
                 ),
             ])
@@ -164,8 +176,8 @@ macro_rules! test_func {
         use crate::forward_pass;
         use crate::kzg_prove_and_verify;
         use crate::render_circuit;
-        use crate::mock_single_lookup;
-        use crate::kzg_prove_and_verify_single_lookup;
+        use crate::mock_non_single_lookup;
+        use crate::kzg_prove_and_verify_non_single_lookup;
 
         seq!(N in 0..=18 {
@@ -180,8 +192,8 @@
             }
 
             #(#[test_case(TESTS[N])])*
-            fn mock_single_lookup_(test: &str) {
-                mock_single_lookup(test.to_string());
+            fn mock_non_single_lookup_(test: &str) {
+                mock_non_single_lookup(test.to_string());
             }
 
             #(#[test_case(TESTS[N])])*
@@ -205,8 +217,8 @@
             }
 
             #(#[test_case(TESTS[N])])*
-            fn kzg_prove_and_verify_single_lookup_(test: &str) {
-                kzg_prove_and_verify_single_lookup(test.to_string());
+            fn kzg_prove_and_verify_non_single_lookup_(test: &str) {
+                kzg_prove_and_verify_non_single_lookup(test.to_string());
             }
         });
@@ -281,7 +293,7 @@ macro_rules! test_neg_examples {
     use crate::NEG_TESTS;
     use test_case::test_case;
     use crate::neg_mock as run;
-    use crate::neg_mock_single_lookup as run_single_lookup;
+    use crate::neg_mock_non_single_lookup as run_non_single_lookup;
     seq!(N in 0..=1 {
         #(#[test_case(NEG_TESTS[N])])*
         fn neg_examples_(test: (&str, &str)) {
@@ -289,8 +301,8 @@ macro_rules! test_neg_examples {
         }
 
         #(#[test_case(NEG_TESTS[N])])*
-        fn neg_examples_single_lookup_(test: (&str, &str)) {
-            run_single_lookup(test.0.to_string(), test.1.to_string());
+        fn neg_examples_non_single_lookup_(test: (&str, &str)) {
+            run_non_single_lookup(test.0.to_string(), test.1.to_string());
         }
     });
 }
@@ -309,7 +321,7 @@ fn neg_mock(example_name: String, counter_example: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", counter_example).as_str(),
@@ -322,12 +334,12 @@
 }
 
 // Mock prove (fast, but does not cover some potential issues)
-fn neg_mock_single_lookup(example_name: String, counter_example: String) {
+fn neg_mock_non_single_lookup(example_name: String, counter_example: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "--single-lookup",
-            "-K=17",
+            "--single-lookup=false",
+            "-K=20",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", counter_example).as_str(),
@@ -353,7 +365,7 @@ fn forward_pass(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "forward",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -376,7 +388,7 @@ fn forward_pass(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "mock",
             "-D",
             format!(
@@ -398,7 +410,7 @@ fn render_circuit(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "render-circuit",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -422,7 +434,7 @@ fn mock(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -439,7 +451,7 @@ fn mock_packed_outputs(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "--pack-base=2",
             "mock",
             "-D",
@@ -457,8 +469,7 @@ fn mock_everything(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
-            "--single-lookup",
+            "-K=20",
             "--public-inputs",
             "--pack-base=2",
             "mock",
@@ -473,12 +484,12 @@
 }
 
 // Mock prove (fast, but does not cover some potential issues)
-fn mock_single_lookup(example_name: String) {
+fn mock_non_single_lookup(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
-            "--single-lookup",
+            "-K=20",
+            "--single-lookup=false",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -497,7 +508,7 @@ fn mock_public_inputs(example_name: String) {
             "--public-inputs",
             "--public-outputs=false",
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -516,7 +527,7 @@ fn mock_public_params(example_name: String) {
             "--public-params",
             "--public-outputs=false",
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -533,7 +544,7 @@ fn kzg_aggr_prove_and_verify(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "prove",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -544,7 +555,7 @@ fn kzg_aggr_prove_and_verify(example_name: String) {
             "--vk-path",
             &format!("{}/{}.vk", TEST_DIR.path().to_str().unwrap(), example_name),
             &format!(
-                "--params-path={}/kzg.params",
+                "--params-path={}/kzg23.params",
                 TEST_DIR.path().to_str().unwrap()
             ),
             "--transcript=poseidon",
@@ -558,7 +569,7 @@ fn kzg_aggr_prove_and_verify(example_name: String) {
             "--bits=16",
             "-K=23",
             "aggregate",
-            "--app-logrows=17",
+            "--app-logrows=20",
             "-M",
             format!("./examples/onnx/{}/network.onnx", example_name).as_str(),
             "--aggregation-snarks",
@@ -578,7 +589,7 @@ fn kzg_aggr_prove_and_verify(example_name: String) {
                 example_name
             ),
             &format!(
-                "--params-path={}/kzg.params",
+                "--params-path={}/kzg23.params",
                 TEST_DIR.path().to_str().unwrap()
             ),
             "--transcript=blake",
@@ -589,7 +600,7 @@ fn kzg_aggr_prove_and_verify(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "verify-aggr",
             "--proof-path",
             &format!(
@@ -604,7 +615,7 @@ fn kzg_aggr_prove_and_verify(example_name: String) {
                 example_name
             ),
             &format!(
-                "--params-path={}/kzg.params",
+                "--params-path={}/kzg23.params",
                 TEST_DIR.path().to_str().unwrap()
             ),
             "--transcript=blake",
@@ -619,7 +630,7 @@ fn kzg_evm_aggr_prove_and_verify(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "prove",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -638,7 +649,7 @@ fn kzg_evm_aggr_prove_and_verify(example_name: String) {
                 example_name
             ),
             &format!(
-                "--params-path={}/kzg.params",
+                "--params-path={}/kzg23.params",
                 TEST_DIR.path().to_str().unwrap()
             ),
             "--transcript=poseidon",
@@ -652,7 +663,7 @@ fn kzg_evm_aggr_prove_and_verify(example_name: String) {
             "--bits=16",
             "-K=23",
             "aggregate",
-            "--app-logrows=17",
+            "--app-logrows=20",
             "-M",
             format!("./examples/onnx/{}/network.onnx", example_name).as_str(),
             "--aggregation-snarks",
@@ -680,7 +691,7 @@ fn kzg_evm_aggr_prove_and_verify(example_name: String) {
                 example_name
             ),
             &format!(
-                "--params-path={}/kzg.params",
+                "--params-path={}/kzg23.params",
                 TEST_DIR.path().to_str().unwrap()
             ),
             "--transcript=evm",
@@ -691,7 +702,7 @@ fn kzg_evm_aggr_prove_and_verify(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
        .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "create-evm-verifier-aggr",
             "--deployment-code-path",
             &format!(
@@ -700,7 +711,7 @@ fn kzg_evm_aggr_prove_and_verify(example_name: String) {
                 example_name
             ),
             &format!(
-                "--params-path={}/kzg.params",
+                "--params-path={}/kzg23.params",
                 TEST_DIR.path().to_str().unwrap()
             ),
             "--vk-path",
@@ -716,7 +727,7 @@ fn kzg_evm_aggr_prove_and_verify(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "verify-evm",
             "--proof-path",
             &format!(
@@ -737,12 +748,12 @@ fn kzg_evm_aggr_prove_and_verify(example_name: String) {
 }
 
 // prove-serialize-verify, the usual full path
-fn kzg_prove_and_verify_single_lookup(example_name: String) {
+fn kzg_prove_and_verify_non_single_lookup(example_name: String) {
     let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
         .args([
             "--bits=16",
-            "--single-lookup",
-            "-K=17",
+            "--single-lookup=false",
+            "-K=20",
             "prove",
"-D", format!("./examples/onnx/{}/input.json", example_name).as_str(), @@ -753,7 +764,7 @@ fn kzg_prove_and_verify_single_lookup(example_name: String) { "--vk-path", &format!("{}/{}.vk", TEST_DIR.path().to_str().unwrap(), example_name), &format!( - "--params-path={}/kzg.params", + "--params-path={}/kzg20.params", TEST_DIR.path().to_str().unwrap() ), "--transcript=blake", @@ -765,8 +776,8 @@ fn kzg_prove_and_verify_single_lookup(example_name: String) { let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR)) .args([ "--bits=16", - "--single-lookup", - "-K=17", + "--single-lookup=false", + "-K=20", "verify", "-M", format!("./examples/onnx/{}/network.onnx", example_name).as_str(), @@ -775,7 +786,7 @@ fn kzg_prove_and_verify_single_lookup(example_name: String) { "--vk-path", &format!("{}/{}.vk", TEST_DIR.path().to_str().unwrap(), example_name), &format!( - "--params-path={}/kzg.params", + "--params-path={}/kzg20.params", TEST_DIR.path().to_str().unwrap() ), "--transcript=blake", @@ -790,7 +801,7 @@ fn kzg_prove_and_verify(example_name: String) { let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR)) .args([ "--bits=16", - "-K=17", + "-K=20", "prove", "-D", format!("./examples/onnx/{}/input.json", example_name).as_str(), @@ -801,7 +812,7 @@ fn kzg_prove_and_verify(example_name: String) { "--vk-path", &format!("{}/{}.vk", TEST_DIR.path().to_str().unwrap(), example_name), &format!( - "--params-path={}/kzg.params", + "--params-path={}/kzg20.params", TEST_DIR.path().to_str().unwrap() ), "--transcript=blake", @@ -813,7 +824,7 @@ fn kzg_prove_and_verify(example_name: String) { let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR)) .args([ "--bits=16", - "-K=17", + "-K=20", "verify", "-M", format!("./examples/onnx/{}/network.onnx", example_name).as_str(), @@ -822,7 +833,7 @@ fn kzg_prove_and_verify(example_name: String) { "--vk-path", &format!("{}/{}.vk", TEST_DIR.path().to_str().unwrap(), example_name), &format!( - "--params-path={}/kzg.params", + "--params-path={}/kzg20.params", TEST_DIR.path().to_str().unwrap() ), "--transcript=blake", @@ -837,7 +848,7 @@ fn kzg_evm_prove_and_verify(example_name: String, with_solidity: bool) { let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR)) .args([ "--bits=16", - "-K=17", + "-K=20", "prove", "-D", format!("./examples/onnx/{}/input.json", example_name).as_str(), @@ -848,7 +859,7 @@ fn kzg_evm_prove_and_verify(example_name: String, with_solidity: bool) { "--vk-path", &format!("{}/{}.vk", TEST_DIR.path().to_str().unwrap(), example_name), &format!( - "--params-path={}/kzg.params", + "--params-path={}/kzg20.params", TEST_DIR.path().to_str().unwrap() ), "--transcript=evm", @@ -867,13 +878,13 @@ fn kzg_evm_prove_and_verify(example_name: String, with_solidity: bool) { ); let vk_arg = format!("{}/{}.vk", TEST_DIR.path().to_str().unwrap(), example_name); let param_arg = format!( - "--params-path={}/kzg.params", + "--params-path={}/kzg20.params", TEST_DIR.path().to_str().unwrap() ); let mut args = vec![ "--bits=16", - "-K=17", + "-K=20", "create-evm-verifier", "-D", input_arg.as_str(), @@ -902,7 +913,7 @@ fn kzg_evm_prove_and_verify(example_name: String, with_solidity: bool) { let mut args = vec![ "--bits=16", - "-K=17", + "-K=20", "verify-evm", "--proof-path", pf_arg.as_str(), diff --git a/tests/wasi_integration_tests.rs b/tests/wasi_integration_tests.rs index 5ea64a3c..794ce29b 100644 --- a/tests/wasi_integration_tests.rs +++ b/tests/wasi_integration_tests.rs @@ -96,7 +96,7 @@ macro_rules! 
diff --git a/tests/wasi_integration_tests.rs b/tests/wasi_integration_tests.rs
index 5ea64a3c..794ce29b 100644
--- a/tests/wasi_integration_tests.rs
+++ b/tests/wasi_integration_tests.rs
@@ -96,7 +96,7 @@ macro_rules! wasi_test_func {
     use crate::mock_public_inputs;
     use crate::mock_public_params;
     use crate::forward_pass;
-    use crate::mock_single_lookup;
+    use crate::mock_non_single_lookup;
 
     seq!(N in 0..=18 {
@@ -107,8 +107,8 @@ wasi_test_func {
         }
 
         #(#[test_case(TESTS[N])])*
-        fn mock_single_lookup_(test: &str) {
-            mock_single_lookup(test.to_string());
+        fn mock_non_single_lookup_(test: &str) {
+            mock_non_single_lookup(test.to_string());
         }
 
         #(#[test_case(TESTS[N])])*
@@ -141,7 +141,7 @@ macro_rules! wasi_test_neg_examples {
     use crate::NEG_TESTS;
     use test_case::test_case;
     use crate::neg_mock as run;
-    use crate::neg_mock_single_lookup as run_single_lookup;
+    use crate::neg_mock_non_single_lookup as run_non_single_lookup;
     seq!(N in 0..=1 {
         #(#[test_case(NEG_TESTS[N])])*
         fn neg_examples_(test: (&str, &str)) {
@@ -149,8 +149,8 @@ macro_rules! wasi_test_neg_examples {
         }
 
         #(#[test_case(NEG_TESTS[N])])*
-        fn neg_examples_single_lookup_(test: (&str, &str)) {
-            run_single_lookup(test.0.to_string(), test.1.to_string());
+        fn neg_examples_non_single_lookup_(test: (&str, &str)) {
+            run_non_single_lookup(test.0.to_string(), test.1.to_string());
         }
     });
 }
@@ -170,7 +170,7 @@ fn neg_mock(example_name: String, counter_example: String) {
         ".",
         "--",
         "--bits=16",
-        "-K=17",
+        "-K=20",
         "mock",
         "-D",
         format!("./examples/onnx/{}/input.json", counter_example).as_str(),
@@ -183,7 +183,7 @@
 }
 
 // Mock prove (fast, but does not cover some potential issues)
-fn neg_mock_single_lookup(example_name: String, counter_example: String) {
+fn neg_mock_non_single_lookup(example_name: String, counter_example: String) {
     let status = Command::new("wasmtime")
         .args([
             &format!("{}/wasm32-wasi/release/ezkl.wasm", *CARGO_TARGET_DIR),
             "--dir",
             ".",
             "--",
             "--bits=16",
-            "--single-lookup",
-            "-K=17",
+            "--single-lookup=false",
+            "-K=20",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", counter_example).as_str(),
@@ -213,7 +213,7 @@ fn forward_pass(example_name: String) {
         ".",
         "--",
         "--bits=16",
-        "-K=17",
+        "-K=20",
         "forward",
         "-D",
         format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -235,7 +235,7 @@ fn forward_pass(example_name: String) {
         ".",
         "--",
         "--bits=16",
-        "-K=17",
+        "-K=20",
         "mock",
         "-D",
         format!("./examples/onnx/{}/input_forward.json", example_name).as_str(),
@@ -256,7 +256,7 @@ fn mock(example_name: String) {
         ".",
         "--",
         "--bits=16",
-        "-K=17",
+        "-K=20",
         "mock",
         "-D",
         format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -277,7 +277,7 @@ fn mock_packed_outputs(example_name: String) {
         ".",
         "--",
         "--bits=16",
-        "-K=17",
+        "-K=20",
         "--pack-base=2",
         "mock",
         "-D",
@@ -299,8 +299,7 @@ fn mock_everything(example_name: String) {
         ".",
         "--",
         "--bits=16",
-        "-K=17",
-        "--single-lookup",
+        "-K=20",
         "--public-inputs",
         "--pack-base=2",
         "mock",
@@ -315,7 +314,7 @@ fn mock_everything(example_name: String) {
 }
 
 // Mock prove (fast, but does not cover some potential issues)
-fn mock_single_lookup(example_name: String) {
+fn mock_non_single_lookup(example_name: String) {
     let status = Command::new("wasmtime")
         .args([
             &format!("{}/wasm32-wasi/release/ezkl.wasm", *CARGO_TARGET_DIR),
             "--dir",
             ".",
             "--",
             "--bits=16",
-            "-K=17",
-            "--single-lookup",
+            "-K=20",
+            "--single-lookup=false",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -347,7 +346,7 @@ fn mock_public_inputs(example_name: String) {
             "--public-inputs",
             "--public-outputs=false",
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
@@ -370,7 +369,7 @@ fn mock_public_params(example_name: String) {
             "--public-params",
             "--public-outputs=false",
             "--bits=16",
-            "-K=17",
+            "-K=20",
             "mock",
             "-D",
             format!("./examples/onnx/{}/input.json", example_name).as_str(),
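The WASI suite applies the same K and flag updates, only routed through wasmtime instead of a native binary. Every command in these tests shares the prefix below; this helper is a sketch (not part of the patch), with a hypothetical `cargo_target_dir` parameter standing in for the `*CARGO_TARGET_DIR` static:

use std::process::Command;

// Run the wasm build of ezkl under wasmtime with the working directory
// mapped in, then append the per-test ezkl arguments after "--".
fn wasi_ezkl(cargo_target_dir: &str, extra_args: &[&str]) -> Command {
    let mut cmd = Command::new("wasmtime");
    cmd.arg(format!("{}/wasm32-wasi/release/ezkl.wasm", cargo_target_dir));
    cmd.args(["--dir", ".", "--"]);
    cmd.args(extra_args);
    cmd
}

Under that assumption, the updated mock test amounts to wasi_ezkl(dir, &["--bits=16", "-K=20", "mock", "-D", input_path]).status().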