Mirror of https://github.com/zkonduit/ezkl.git, synced 2026-01-13 16:27:59 -05:00

Compare commits (4 commits):

| SHA1 |
|---|
| e0d3f4f145 |
| bceac2fab5 |
| 04d7b5feaa |
| 45fd12a04f |
.github/workflows/rust.yml (vendored): 18 changed lines
@@ -427,21 +427,21 @@ jobs:
          crate: cargo-nextest
          locked: true
      - name: KZG prove and verify tests (kzg outputs)
-       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 2
+       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_kzg_output --features icicle --test-threads 1
      - name: KZG prove and verify tests (public outputs + column overflow)
-       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 2
+       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_::w --features icicle --test-threads 1
      - name: KZG prove and verify tests (public outputs + fixed params + column overflow)
-       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 2
+       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_with_overflow_fixed_params_ --features icicle --test-threads 1
      - name: KZG prove and verify tests (public outputs)
-       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 2
+       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
      - name: KZG prove and verify tests (public outputs + column overflow)
-       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 2
+       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_::t --features icicle --test-threads 1
      - name: KZG prove and verify tests (public inputs)
-       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 2
+       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_public_input --features icicle --test-threads 1
      - name: KZG prove and verify tests (fixed params)
-       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 2
+       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_fixed_params --features icicle --test-threads 1
      - name: KZG prove and verify tests (hashed outputs)
-       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 2
+       run: cargo nextest run --release --verbose tests::kzg_prove_and_verify_hashed --features icicle --test-threads 1

  fuzz-tests:
    runs-on: ubuntu-latest-32-cores
@@ -603,6 +603,8 @@ jobs:
        run: python -m venv .env; source .env/bin/activate; pip install -r requirements.txt;
      - name: Build python ezkl
        run: source .env/bin/activate; maturin develop --features python-bindings --release
+     - name: Div rebase
+       run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_div_rebase_
      - name: Public inputs
        run: source .env/bin/activate; cargo nextest run --release --verbose tests::accuracy_measurement_public_inputs_
      - name: fixed params
@@ -1,4 +1,5 @@
  use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
+ use ezkl::circuit::table::Range;
  use ezkl::circuit::*;

  use ezkl::circuit::lookup::LookupOp;
@@ -16,7 +17,7 @@ use halo2_proofs::{
  use halo2curves::bn256::{Bn256, Fr};
  use std::marker::PhantomData;

- const BITS: (i128, i128) = (-32768, 32768);
+ const BITS: Range = (-32768, 32768);
  static mut LEN: usize = 4;
  const K: usize = 16;

@@ -3,6 +3,7 @@ use ezkl::circuit::*;

  use ezkl::circuit::lookup::LookupOp;
  use ezkl::circuit::poly::PolyOp;
+ use ezkl::circuit::table::Range;
  use ezkl::pfsys::create_proof_circuit_kzg;
  use ezkl::pfsys::TranscriptType;
  use ezkl::pfsys::{create_keys, srs::gen_srs};
@@ -16,7 +17,7 @@ use halo2_proofs::{
  use halo2curves::bn256::{Bn256, Fr};
  use std::marker::PhantomData;

- const BITS: (i128, i128) = (-8180, 8180);
+ const BITS: Range = (-8180, 8180);
  static mut LEN: usize = 4;
  static mut K: usize = 16;

@@ -1,5 +1,6 @@
  use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
  use ezkl::circuit::region::RegionCtx;
+ use ezkl::circuit::table::Range;
  use ezkl::circuit::{ops::lookup::LookupOp, BaseConfig as Config, CheckMode};
  use ezkl::pfsys::create_proof_circuit_kzg;
  use ezkl::pfsys::TranscriptType;
@@ -14,7 +15,7 @@ use halo2_proofs::{
  use halo2curves::bn256::{Bn256, Fr};
  use rand::Rng;

- const BITS: (i128, i128) = (-32768, 32768);
+ const BITS: Range = (-32768, 32768);
  static mut LEN: usize = 4;
  const K: usize = 16;
@@ -19,7 +19,10 @@ use serde::{Deserialize, Serialize};

  use crate::{
      circuit::ops::base::BaseOp,
-     circuit::{table::Table, utils},
+     circuit::{
+         table::{Range, RangeCheck, Table},
+         utils,
+     },
      tensor::{Tensor, TensorType, ValTensor, VarTensor},
  };
  use std::{collections::BTreeMap, error::Error, marker::PhantomData};
@@ -176,6 +179,10 @@ pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> {
      pub lookup_selectors: BTreeMap<(LookupOp, usize, usize), Selector>,
      ///
      pub tables: BTreeMap<LookupOp, Table<F>>,
+     ///
+     pub range_checks: BTreeMap<Range, RangeCheck<F>>,
+     /// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many lookup ops.
+     pub range_check_selectors: BTreeMap<(Range, usize, usize), Selector>,
      /// Activate sanity checks
      pub check_mode: CheckMode,
      _marker: PhantomData<F>,
@@ -194,7 +201,9 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
      lookup_index: dummy_var,
      selectors: BTreeMap::new(),
      lookup_selectors: BTreeMap::new(),
+     range_check_selectors: BTreeMap::new(),
      tables: BTreeMap::new(),
+     range_checks: BTreeMap::new(),
      check_mode: CheckMode::SAFE,
      _marker: PhantomData,
  }
@@ -325,11 +334,13 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
      Self {
          selectors,
          lookup_selectors: BTreeMap::new(),
+         range_check_selectors: BTreeMap::new(),
          inputs: inputs.to_vec(),
          lookup_input: VarTensor::Empty,
          lookup_output: VarTensor::Empty,
          lookup_index: VarTensor::Empty,
          tables: BTreeMap::new(),
+         range_checks: BTreeMap::new(),
          output: output.clone(),
          check_mode,
          _marker: PhantomData,
@@ -344,7 +355,7 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
      input: &VarTensor,
      output: &VarTensor,
      index: &VarTensor,
-     lookup_range: (i128, i128),
+     lookup_range: Range,
      logrows: usize,
      nl: &LookupOp,
  ) -> Result<(), Box<dyn Error>>
@@ -482,6 +493,74 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
          Ok(())
      }

+     /// Configures and creates lookup selectors
+     #[allow(clippy::too_many_arguments)]
+     pub fn configure_range_check(
+         &mut self,
+         cs: &mut ConstraintSystem<F>,
+         input: &VarTensor,
+         range: Range,
+     ) -> Result<(), Box<dyn Error>>
+     where
+         F: Field,
+     {
+         let mut selectors = BTreeMap::new();
+
+         if !input.is_advice() {
+             return Err("wrong input type for lookup input".into());
+         }
+
+         // we borrow mutably twice so we need to do this dance
+
+         let range_check = if !self.range_checks.contains_key(&range) {
+             // as all tables have the same input we see if there's another table who's input we can reuse
+             let range_check = RangeCheck::<F>::configure(cs, range);
+             self.range_checks.insert(range, range_check.clone());
+             range_check
+         } else {
+             return Ok(());
+         };
+
+         for x in 0..input.num_blocks() {
+             for y in 0..input.num_inner_cols() {
+                 let single_col_sel = cs.complex_selector();
+
+                 cs.lookup("", |cs| {
+                     let mut res = vec![];
+                     let sel = cs.query_selector(single_col_sel);
+
+                     let input_query = match &input {
+                         VarTensor::Advice { inner: advices, .. } => {
+                             cs.query_advice(advices[x][y], Rotation(0))
+                         }
+                         _ => unreachable!(),
+                     };
+
+                     let default_x = range_check.get_first_element();
+
+                     let not_sel = Expression::Constant(F::ONE) - sel.clone();
+
+                     res.extend([(
+                         sel.clone() * input_query.clone()
+                             + not_sel.clone() * Expression::Constant(default_x),
+                         range_check.input,
+                     )]);
+
+                     res
+                 });
+                 selectors.insert((range, x, y), single_col_sel);
+             }
+         }
+         self.range_check_selectors.extend(selectors);
+         // if we haven't previously initialized the input/output, do so now
+         if let VarTensor::Empty = self.lookup_input {
+             debug!("assigning lookup input");
+             self.lookup_input = input.clone();
+         }
+
+         Ok(())
+     }
+
      /// layout_tables must be called before layout.
      pub fn layout_tables(&mut self, layouter: &mut impl Layouter<F>) -> Result<(), Box<dyn Error>> {
          for (i, table) in self.tables.values_mut().enumerate() {
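A minimal, self-contained sketch (not part of the diff) of the selector trick used in the hunk above: when the per-column selector is off, the expression `sel * input + (1 - sel) * default` collapses to the table's first element, so unassigned rows always look up a value that is guaranteed to be in the range-check table. The sketch just evaluates that expression over plain integers; `default_val` and `mux` are illustrative names, not ezkl APIs.

```rust
// Illustrative only: evaluates the selector-mux expression used by the
// range-check lookup over plain integers instead of halo2 Expressions.
fn mux(sel: i64, input: i64, default_val: i64) -> i64 {
    // sel is 0 or 1; when it is 0 the looked-up value is the table default.
    sel * input + (1 - sel) * default_val
}

fn main() {
    let default_val = -10; // first element of a (-10, 10) range-check table
    // enabled row: the actual witness value is looked up
    assert_eq!(mux(1, 7, default_val), 7);
    // disabled row: whatever sits in the cell is ignored, the default is looked up
    assert_eq!(mux(0, 12345, default_val), default_val);
    println!("selector mux behaves as expected");
}
```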
@@ -500,6 +579,20 @@ impl<F: PrimeField + TensorType + PartialOrd> BaseConfig<F> {
          Ok(())
      }

+     /// layout_range_checks must be called before layout.
+     pub fn layout_range_checks(
+         &mut self,
+         layouter: &mut impl Layouter<F>,
+     ) -> Result<(), Box<dyn Error>> {
+         for range_check in self.range_checks.values_mut() {
+             if !range_check.is_assigned {
+                 debug!("laying out range check for {:?}", range_check.range);
+                 range_check.layout(layouter)?;
+             }
+         }
+         Ok(())
+     }
+
      /// Assigns variables to the regions created when calling `configure`.
      /// # Arguments
      /// * `values` - The explicit values to the operations.

@@ -19,7 +19,7 @@ use super::{
  };
  use crate::{
      circuit::{ops::base::BaseOp, utils},
-     fieldutils::i128_to_felt,
+     fieldutils::{felt_to_i128, i128_to_felt},
      tensor::{
          get_broadcasted_shape,
          ops::{accumulated, add, mult, sub},
@@ -51,6 +51,66 @@ pub fn overflowed_len(starting_idx: usize, mut total_len: usize, column_len: usi
      total_len
  }

+ /// Div accumulated layout
+ pub fn div<F: PrimeField + TensorType + PartialOrd>(
+     config: &BaseConfig<F>,
+     region: &mut RegionCtx<F>,
+     value: &[ValTensor<F>; 1],
+     div: F,
+ ) -> Result<ValTensor<F>, Box<dyn Error>> {
+     let input = value[0].clone();
+     let input_dims = input.dims();
+
+     let range_check_bracket = felt_to_i128(div) - 1;
+
+     let mut divisor = Tensor::from(vec![ValType::Constant(div)].into_iter());
+     divisor.set_visibility(&crate::graph::Visibility::Fixed);
+     let divisor = region.assign(&config.inputs[1], &divisor.into())?;
+     region.increment(divisor.len());
+
+     let is_assigned = !input.any_unknowns()? && !divisor.any_unknowns()?;
+
+     let mut claimed_output: ValTensor<F> = if is_assigned {
+         let input_evals = input.get_int_evals()?;
+         let divisor_evals = divisor.get_int_evals()?;
+         tensor::ops::div(&[input_evals.clone(), divisor_evals.clone()])?
+             .iter()
+             .map(|x| Ok(Value::known(i128_to_felt(*x))))
+             .collect::<Result<Tensor<Value<F>>, Box<dyn Error>>>()?
+             .into()
+     } else {
+         Tensor::new(
+             Some(&vec![Value::<F>::unknown(); input.len()]),
+             &[input.len()],
+         )?
+         .into()
+     };
+     claimed_output.reshape(input_dims)?;
+
+     let product = pairwise(
+         config,
+         region,
+         &[claimed_output.clone(), divisor.clone()],
+         BaseOp::Mult,
+     )?;
+
+     let diff_with_input = pairwise(
+         config,
+         region,
+         &[product.clone(), input.clone()],
+         BaseOp::Sub,
+     )?;
+
+     range_check(
+         config,
+         region,
+         &[diff_with_input],
+         &(-range_check_bracket, range_check_bracket),
+     )?;
+
+     Ok(claimed_output)
+ }
+
  /// Dot product accumulated layout
  pub fn dot<F: PrimeField + TensorType + PartialOrd>(
      config: &BaseConfig<F>,
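A quick off-circuit sketch (illustrative, not ezkl code) of the argument the `div` layout above enforces: the prover supplies a claimed quotient `q`, the circuit multiplies it back by the divisor and range-checks `q*d - x` against `(-(d-1), d-1)`, which is exactly the slack left by integer division.

```rust
// Illustrative only: checks the relation that the in-circuit range check enforces.
fn check_claimed_quotient(x: i128, d: i128, q: i128) -> bool {
    let bracket = d - 1; // same bracket as `range_check_bracket` in the hunk above
    let diff = q * d - x;
    -bracket <= diff && diff <= bracket
}

fn main() {
    let (x, d) = (103_i128, 7_i128);
    let honest_q = x / d; // 14, truncating integer division
    assert!(check_claimed_quotient(x, d, honest_q));
    // a claim that is off by more than the rounding slack falls outside the
    // bracket and would fail the range check
    assert!(!check_claimed_quotient(x, d, honest_q + 2));
    println!("quotient check ok");
}
```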
@@ -2304,6 +2364,50 @@ pub fn enforce_equality<F: PrimeField + TensorType + PartialOrd>(
      Ok(output)
  }

+ /// layout for range check.
+ pub fn range_check<F: PrimeField + TensorType + PartialOrd>(
+     config: &BaseConfig<F>,
+     region: &mut RegionCtx<F>,
+     values: &[ValTensor<F>; 1],
+     range: &crate::circuit::table::Range,
+ ) -> Result<ValTensor<F>, Box<dyn Error>> {
+     // time the entire operation
+     let timer = instant::Instant::now();
+
+     let x = values[0].clone();
+
+     let w = region.assign(&config.lookup_input, &x)?;
+
+     let assigned_len = x.len();
+
+     let is_dummy = region.is_dummy();
+
+     if !is_dummy {
+         (0..assigned_len)
+             .map(|i| {
+                 let (x, y, z) = config
+                     .lookup_input
+                     .cartesian_coord(region.linear_coord() + i);
+                 let selector = config.range_check_selectors.get(&(range.clone(), x, y));
+                 region.enable(selector, z)?;
+                 Ok(())
+             })
+             .collect::<Result<Vec<_>, Box<dyn Error>>>()?;
+     }
+
+     region.increment(assigned_len);
+
+     let elapsed = timer.elapsed();
+     trace!(
+         "range check {:?} layout took {:?}, row: {:?}",
+         range,
+         elapsed,
+         region.row()
+     );
+
+     Ok(w)
+ }
+
  /// layout for nonlinearity check.
  pub fn nonlinearity<F: PrimeField + TensorType + PartialOrd>(
      config: &BaseConfig<F>,

@@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize};
  use std::error::Error;

  use crate::{
-     circuit::{layouts, utils},
+     circuit::{layouts, table::Range, utils},
      fieldutils::{felt_to_i128, i128_to_felt},
      graph::{multiplier_to_scale, scale_to_multiplier},
      tensor::{self, Tensor, TensorError, TensorType},
@@ -57,7 +57,7 @@ pub enum LookupOp {

  impl LookupOp {
      /// Returns the range of values that can be represented by the table
-     pub fn bit_range(max_len: usize) -> (i128, i128) {
+     pub fn bit_range(max_len: usize) -> Range {
          let range = (max_len - 1) as f64 / 2_f64;
          let range = range as i128;
          (-range, range)
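For concreteness, a worked example of `bit_range` (mirroring the body shown above, only the return type changes to the `Range` alias): a table with `max_len` usable rows is centered on zero, so 4096 rows give the bracket (-2047, 2047).

```rust
// Mirrors the `bit_range` body from the diff; standalone for illustration.
fn bit_range(max_len: usize) -> (i128, i128) {
    let range = (max_len - 1) as f64 / 2_f64;
    let range = range as i128;
    (-range, range)
}

fn main() {
    // (4096 - 1) / 2 = 2047.5, truncated to 2047
    assert_eq!(bit_range(4096), (-2047, 2047));
    // 2^17 rows minus a handful of reserved blinding rows
    assert_eq!(bit_range(131072 - 6), (-65532, 65532));
    println!("{:?}", bit_range(4096));
}
```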
@@ -10,6 +10,8 @@ use halo2curves::ff::PrimeField;

  use self::{lookup::LookupOp, region::RegionCtx};

+ use super::table::Range;
+
  ///
  pub mod base;
  ///
@@ -60,6 +62,11 @@ pub trait Op<F: PrimeField + TensorType + PartialOrd>: std::fmt::Debug + Send +
          vec![]
      }

+     /// Returns the range checks required by the operation.
+     fn required_range_checks(&self) -> Vec<Range> {
+         vec![]
+     }
+
      /// Returns true if the operation is an input.
      fn is_input(&self) -> bool {
          false
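The new `required_range_checks` hook follows the same pattern as `required_lookups`: a defaulted trait method that most ops leave empty and a few (such as `RebaseScale` later in this diff) override. A toy, self-contained sketch of that pattern, with made-up op names:

```rust
// Toy sketch of a defaulted trait hook; `Range`, `Relu` and `Rebase` here are
// stand-ins for illustration, not the real ezkl types.
type Range = (i128, i128);

trait Op {
    /// Most ops need no range checks, so the default is empty.
    fn required_range_checks(&self) -> Vec<Range> {
        vec![]
    }
}

struct Relu;
impl Op for Relu {}

struct Rebase {
    multiplier: i128,
}

impl Op for Rebase {
    fn required_range_checks(&self) -> Vec<Range> {
        // a rebase by `multiplier` leaves at most multiplier - 1 of slack
        let bracket = self.multiplier - 1;
        vec![(-bracket, bracket)]
    }
}

fn main() {
    assert!(Relu.required_range_checks().is_empty());
    assert_eq!(Rebase { multiplier: 128 }.required_range_checks(), vec![(-127, 127)]);
}
```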
@@ -290,12 +290,7 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<

  fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
      let scale = match self {
          PolyOp::MultiBroadcastTo { .. } => in_scales[0],
          PolyOp::Xor | PolyOp::Or | PolyOp::And | PolyOp::Not => 0,
          PolyOp::Neg => in_scales[0],
          PolyOp::MoveAxis { .. } => in_scales[0],
          PolyOp::Downsample { .. } => in_scales[0],
          PolyOp::Resize { .. } => in_scales[0],
          PolyOp::Iff => in_scales[1],
          PolyOp::Einsum { .. } => {
              let mut scale = in_scales[0];
@@ -339,14 +334,9 @@ impl<F: PrimeField + TensorType + PartialOrd + Serialize + for<'de> Deserialize<
              scale += in_scales[1];
              scale
          }
          PolyOp::Identity => in_scales[0],
          PolyOp::Reshape(_) | PolyOp::Flatten(_) => in_scales[0],
          PolyOp::Pad(_) => in_scales[0],
          PolyOp::Pow(pow) => in_scales[0] * (*pow as crate::Scale),
          PolyOp::Pack(_, _) => in_scales[0],
          PolyOp::GlobalSumPool => in_scales[0],
          PolyOp::Concat { axis: _ } => in_scales[0],
          PolyOp::Slice { .. } => in_scales[0],
          _ => in_scales[0],
      };
      Ok(scale)
  }
@@ -19,6 +19,9 @@ use crate::circuit::lookup::LookupOp;

  use super::Op;

+ /// The range of the lookup table.
+ pub type Range = (i128, i128);
+
  /// The safety factor for the range of the lookup table.
  pub const RANGE_MULTIPLIER: i128 = 2;
  /// The safety factor offset for the number of rows in the lookup table.
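As a hedged illustration of how the `Range` alias and the `RANGE_MULTIPLIER` safety factor are meant to interact (the exact call sites live elsewhere in the crate): an observed lookup-input bracket is widened by the multiplier before a table is sized for it.

```rust
// Illustrative only: pads an observed (min, max) bracket by a safety factor,
// in the spirit of RANGE_MULTIPLIER above. Not the exact ezkl call site.
type Range = (i128, i128);
const RANGE_MULTIPLIER: i128 = 2;

fn padded_range(observed: Range) -> Range {
    (observed.0 * RANGE_MULTIPLIER, observed.1 * RANGE_MULTIPLIER)
}

fn main() {
    let observed = (-1500, 900);
    assert_eq!(padded_range(observed), (-3000, 1800));
}
```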
@@ -91,7 +94,7 @@ pub struct Table<F: PrimeField> {
      /// Flags if table has been previously assigned to.
      pub is_assigned: bool,
      /// Number of bits used in lookup table.
-     pub range: (i128, i128),
+     pub range: Range,
      _marker: PhantomData<F>,
  }

@@ -129,7 +132,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
      }

      ///
-     pub fn num_cols_required(range: (i128, i128), col_size: usize) -> usize {
+     pub fn num_cols_required(range: Range, col_size: usize) -> usize {
          // double it to be safe
          let range_len = range.1 - range.0;
          // number of cols needed to store the range
@@ -141,7 +144,7 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
      /// Configures the table.
      pub fn configure(
          cs: &mut ConstraintSystem<F>,
-         range: (i128, i128),
+         range: Range,
          logrows: usize,
          nonlinearity: &LookupOp,
          preexisting_inputs: Option<Vec<TableColumn>>,
@@ -257,3 +260,86 @@ impl<F: PrimeField + TensorType + PartialOrd> Table<F> {
          Ok(())
      }
  }

+ /// Halo2 range check column
+ #[derive(Clone, Debug)]
+ pub struct RangeCheck<F: PrimeField> {
+     /// Input to table.
+     pub input: TableColumn,
+     /// selector cn
+     pub selector_constructor: SelectorConstructor<F>,
+     /// Flags if table has been previously assigned to.
+     pub is_assigned: bool,
+     /// Number of bits used in lookup table.
+     pub range: Range,
+     _marker: PhantomData<F>,
+ }
+
+ impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
+     /// get first_element of column
+     pub fn get_first_element(&self) -> F {
+         i128_to_felt(self.range.0)
+     }
+
+     ///
+     pub fn cal_col_size(logrows: usize, reserved_blinding_rows: usize) -> usize {
+         2usize.pow(logrows as u32) - reserved_blinding_rows
+     }
+
+     ///
+     pub fn cal_bit_range(bits: usize, reserved_blinding_rows: usize) -> usize {
+         2usize.pow(bits as u32) - reserved_blinding_rows
+     }
+ }
+
+ impl<F: PrimeField + TensorType + PartialOrd> RangeCheck<F> {
+     /// Configures the table.
+     pub fn configure(cs: &mut ConstraintSystem<F>, range: Range) -> RangeCheck<F> {
+         log::debug!("range check range: {:?}", range);
+
+         let inputs = cs.lookup_table_column();
+
+         RangeCheck {
+             input: inputs,
+             is_assigned: false,
+             selector_constructor: SelectorConstructor::new(2),
+             range,
+             _marker: PhantomData,
+         }
+     }
+
+     /// Assigns values to the constraints generated when calling `configure`.
+     pub fn layout(&mut self, layouter: &mut impl Layouter<F>) -> Result<(), Box<dyn Error>> {
+         if self.is_assigned {
+             return Err(Box::new(CircuitError::TableAlreadyAssigned));
+         }
+
+         let smallest = self.range.0;
+         let largest = self.range.1;
+
+         let inputs: Tensor<F> = Tensor::from(smallest..=largest).map(|x| i128_to_felt(x));
+
+         self.is_assigned = true;
+
+         layouter.assign_table(
+             || "range check table",
+             |mut table| {
+                 let _ = inputs
+                     .iter()
+                     .enumerate()
+                     .map(|(row_offset, input)| {
+                         table.assign_cell(
+                             || format!("rc_i_col row {}", row_offset),
+                             self.input,
+                             row_offset,
+                             || Value::known(*input),
+                         )?;
+                         Ok(())
+                     })
+                     .collect::<Result<Vec<()>, halo2_proofs::plonk::Error>>()?;
+                 Ok(())
+             },
+         )?;
+         Ok(())
+     }
+ }
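The table laid out above has exactly `largest - smallest + 1` rows (one per value in `smallest..=largest`), so a `Range` only fits in a column of `2^logrows` rows after the reserved blinding rows are subtracted. A small sketch of that sizing arithmetic, mirroring `cal_col_size`; the blinding-row count used here is an assumed example value.

```rust
// Mirrors the column-size arithmetic from the diff; standalone for illustration.
fn cal_col_size(logrows: usize, reserved_blinding_rows: usize) -> usize {
    2usize.pow(logrows as u32) - reserved_blinding_rows
}

fn rows_needed(range: (i128, i128)) -> usize {
    // one table row per value in smallest..=largest
    (range.1 - range.0 + 1) as usize
}

fn main() {
    let range = (-32768_i128, 32768_i128);
    let col_size = cal_col_size(17, 6); // 2^17 rows, 6 assumed blinding rows
    assert!(rows_needed(range) <= col_size); // 65537 <= 131066
    assert!(rows_needed(range) > cal_col_size(16, 6)); // would overflow a 2^16 column
}
```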
@@ -83,6 +83,8 @@ pub const DEFAULT_RENDER_VK_SEPERATELY: &str = "false";
  pub const DEFAULT_VK_SOL: &str = "vk.sol";
  /// Default VK abi path
  pub const DEFAULT_VK_ABI: &str = "vk.abi";
+ /// Default scale rebase multipliers for calibration
+ pub const DEFAULT_SCALE_REBASE_MULTIPLIERS: &str = "1,2,10";

  impl std::fmt::Display for TranscriptType {
      fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -323,9 +325,20 @@ pub enum Commands {
      /// Optional scales to specifically try for calibration. Example, --scales 0,4
      #[arg(long, value_delimiter = ',', allow_hyphen_values = true)]
      scales: Option<Vec<crate::Scale>>,
+     /// Optional scale rebase multipliers to specifically try for calibration. This is the multiplier at which we divide to return to the input scale. Example, --scale-rebase-multipliers 0,4
+     #[arg(
+         long,
+         value_delimiter = ',',
+         allow_hyphen_values = true,
+         default_value = DEFAULT_SCALE_REBASE_MULTIPLIERS
+     )]
+     scale_rebase_multiplier: Vec<u32>,
      /// max logrows to use for calibration, 26 is the max public SRS size
      #[arg(long)]
      max_logrows: Option<u32>,
+     // whether to fix the div_rebasing value truthiness during calibration. this changes how we rebase
+     #[arg(long)]
+     div_rebasing: Option<bool>,
  },

  /// Generates a dummy SRS

@@ -176,7 +176,9 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
      target,
      lookup_safety_margin,
      scales,
+     scale_rebase_multiplier,
      max_logrows,
+     div_rebasing,
  } => calibrate(
      model,
      data,
@@ -184,6 +186,8 @@ pub async fn run(command: Commands) -> Result<String, Box<dyn Error>> {
      target,
      lookup_safety_margin,
      scales,
+     scale_rebase_multiplier,
+     div_rebasing,
      max_logrows,
  )
  .map(|e| serde_json::to_string(&e).unwrap()),
@@ -715,8 +719,7 @@ impl AccuracyResults {
      let error = (original.clone() - calibrated.clone())?;
      let abs_error = error.map(|x| x.abs());
      let squared_error = error.map(|x| x.powi(2));
-     let percentage_error =
-         error.enum_map(|i, x| Ok::<_, TensorError>(x / original[i]))?;
+     let percentage_error = error.enum_map(|i, x| Ok::<_, TensorError>(x / original[i]))?;
      let abs_percentage_error = percentage_error.map(|x| x.abs());

      errors.extend(error.into_iter());
@@ -734,22 +737,22 @@ impl AccuracyResults {
      let median_error = errors[errors.len() / 2];
      let max_error = *errors
          .iter()
-         .max_by(|a, b| a.partial_cmp(b).unwrap())
+         .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
          .unwrap();
      let min_error = *errors
          .iter()
-         .min_by(|a, b| a.partial_cmp(b).unwrap())
+         .min_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
          .unwrap();

      let mean_abs_error = abs_errors.iter().sum::<f32>() / abs_errors.len() as f32;
      let median_abs_error = abs_errors[abs_errors.len() / 2];
      let max_abs_error = *abs_errors
          .iter()
-         .max_by(|a, b| a.partial_cmp(b).unwrap())
+         .max_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
          .unwrap();
      let min_abs_error = *abs_errors
          .iter()
-         .min_by(|a, b| a.partial_cmp(b).unwrap())
+         .min_by(|a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
          .unwrap();

      let mean_squared_error = squared_errors.iter().sum::<f32>() / squared_errors.len() as f32;
@@ -780,6 +783,8 @@ pub(crate) fn calibrate(
      target: CalibrationTarget,
      lookup_safety_margin: i128,
      scales: Option<Vec<crate::Scale>>,
+     scale_rebase_multiplier: Vec<u32>,
+     div_rebasing: Option<bool>,
      max_logrows: Option<u32>,
  ) -> Result<GraphSettings, Box<dyn Error>> {
      use std::collections::HashMap;
@@ -823,9 +828,13 @@ pub(crate) fn calibrate(
      }
  };

- let mut found_params: Vec<GraphSettings> = vec![];
+ let div_rebasing = if let Some(div_rebasing) = div_rebasing {
+     vec![div_rebasing]
+ } else {
+     vec![true, false]
+ };

- let scale_rebase_multiplier = [1, 2, 10];
+ let mut found_params: Vec<GraphSettings> = vec![];

  // 2 x 2 grid
  let range_grid = range
@@ -862,15 +871,21 @@ pub(crate) fn calibrate(
      .map(|(a, b)| (*a, *b))
      .collect::<Vec<((crate::Scale, crate::Scale), u32)>>();

+ let range_grid = range_grid
+     .iter()
+     .cartesian_product(div_rebasing.iter())
+     .map(|(a, b)| (*a, *b))
+     .collect::<Vec<(((crate::Scale, crate::Scale), u32), bool)>>();
+
  let mut forward_pass_res = HashMap::new();

  let pb = init_bar(range_grid.len() as u64);
  pb.set_message("calibrating...");

- for ((input_scale, param_scale), scale_rebase_multiplier) in range_grid {
+ for (((input_scale, param_scale), scale_rebase_multiplier), div_rebasing) in range_grid {
      pb.set_message(format!(
-         "input scale: {}, param scale: {}, scale rebase multiplier: {}",
-         input_scale, param_scale, scale_rebase_multiplier
+         "input scale: {}, param scale: {}, scale rebase multiplier: {}, div rebasing: {}",
+         input_scale, param_scale, scale_rebase_multiplier, div_rebasing
      ));

      #[cfg(unix)]
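A self-contained sketch of the grid the calibration loop now walks: every combination of (input scale, param scale), scale-rebase multiplier and the new div-rebasing flag. It uses plain iterators rather than the `itertools::cartesian_product` calls in the diff, and the candidate values are illustrative.

```rust
// Illustrative only: builds the same shape of search grid as the calibration
// code, ((input_scale, param_scale), rebase_multiplier, div_rebasing).
fn main() {
    let scales = [4_i32, 7];
    let rebase_multipliers = [1_u32, 2, 10];
    let div_rebasing = [true, false];

    let grid: Vec<((i32, i32), u32, bool)> = scales
        .iter()
        .flat_map(|&i| scales.iter().map(move |&p| (i, p)))
        .flat_map(|pair| rebase_multipliers.iter().map(move |&m| (pair, m)))
        .flat_map(|(pair, m)| div_rebasing.iter().map(move |&d| (pair, m, d)))
        .collect();

    // 2 x 2 scale pairs x 3 multipliers x 2 flags = 24 candidate settings
    assert_eq!(grid.len(), 24);
    for ((input_scale, param_scale), multiplier, div) in &grid[..3] {
        println!("input {input_scale}, param {param_scale}, rebase x{multiplier}, div_rebasing {div}");
    }
}
```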
@@ -890,6 +905,7 @@ pub(crate) fn calibrate(
      input_scale,
      param_scale,
      scale_rebase_multiplier,
+     div_rebasing,
      ..settings.run_args.clone()
  };

@@ -964,6 +980,7 @@ pub(crate) fn calibrate(
      let found_run_args = RunArgs {
          input_scale: new_settings.run_args.input_scale,
          param_scale: new_settings.run_args.param_scale,
+         div_rebasing: new_settings.run_args.div_rebasing,
          lookup_range: new_settings.run_args.lookup_range,
          logrows: new_settings.run_args.logrows,
          scale_rebase_multiplier: new_settings.run_args.scale_rebase_multiplier,
@@ -973,6 +990,7 @@ pub(crate) fn calibrate(
      let found_settings = GraphSettings {
          run_args: found_run_args,
          required_lookups: new_settings.required_lookups,
+         required_range_checks: new_settings.required_range_checks,
          model_output_scales: new_settings.model_output_scales,
          model_input_scales: new_settings.model_input_scales,
          num_rows: new_settings.num_rows,

@@ -23,7 +23,7 @@ use self::input::{FileSource, GraphData};
  use self::modules::{GraphModules, ModuleConfigs, ModuleForwardResult, ModuleSizes};
  use crate::circuit::lookup::LookupOp;
  use crate::circuit::modules::ModulePlanner;
- use crate::circuit::table::{Table, RESERVED_BLINDING_ROWS_PAD};
+ use crate::circuit::table::{Range, Table, RESERVED_BLINDING_ROWS_PAD};
  use crate::circuit::{CheckMode, InputType};
  use crate::fieldutils::felt_to_f64;
  use crate::pfsys::PrettyElements;
@@ -431,6 +431,8 @@ pub struct GraphSettings {
      pub module_sizes: ModuleSizes,
      /// required_lookups
      pub required_lookups: Vec<LookupOp>,
+     /// required range_checks
+     pub required_range_checks: Vec<Range>,
      /// check mode
      pub check_mode: CheckMode,
      /// ezkl version used
@@ -639,7 +641,7 @@ impl GraphCircuit {
      }

      // dummy module settings, must load from GraphData after
-     let mut settings = model.gen_params(run_args, CheckMode::UNSAFE)?;
+     let mut settings = model.gen_params(run_args, run_args.check_mode)?;

      let mut num_params = 0;
      if !model.const_shapes().is_empty() {
@@ -960,19 +962,20 @@ impl GraphCircuit {
      min_lookup_inputs: i128,
      max_lookup_inputs: i128,
      lookup_safety_margin: i128,
- ) -> (i128, i128) {
+ ) -> Range {
      let mut margin = (
          lookup_safety_margin * min_lookup_inputs,
          lookup_safety_margin * max_lookup_inputs,
      );
      if lookup_safety_margin == 1 {
-         margin.0 -= 1;
+         margin.0 += 1;
          margin.1 += 1;
      }

      margin
  }

- fn calc_num_cols(safe_range: (i128, i128), max_logrows: u32) -> usize {
+ fn calc_num_cols(safe_range: Range, max_logrows: u32) -> usize {
      let max_col_size = Table::<Fp>::cal_col_size(
          max_logrows as usize,
          Self::reserved_blinding_rows() as usize,
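The lookup bracket picked during calibration is essentially the observed min/max of the lookup inputs scaled by the safety margin (with a one-unit adjustment of the endpoints when the margin is 1, as in the hunk above). A worked sketch of the scaling part, with made-up observed values:

```rust
// Illustrative only: the core of the margin arithmetic is a per-side scaling
// of the observed lookup-input extremes by the safety factor.
fn scaled_margin(min_input: i128, max_input: i128, safety: i128) -> (i128, i128) {
    (safety * min_input, safety * max_input)
}

fn main() {
    // a model whose lookup inputs were observed in [-1000, 4000]
    assert_eq!(scaled_margin(-1000, 4000, 2), (-2000, 8000));
    // a safety margin of 1 keeps the observed bracket (the real code then
    // adjusts the endpoints by one unit)
    assert_eq!(scaled_margin(-1000, 4000, 1), (-1000, 4000));
}
```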
@@ -1456,6 +1459,7 @@ impl Circuit<Fp> for GraphCircuit {
      params.run_args.lookup_range,
      params.run_args.logrows as usize,
      params.required_lookups,
+     params.required_range_checks,
      params.check_mode,
  )
  .unwrap();

@@ -6,6 +6,7 @@ use super::GraphError;
  use super::GraphSettings;
  use crate::circuit::hybrid::HybridOp;
  use crate::circuit::region::RegionCtx;
+ use crate::circuit::table::Range;
  use crate::circuit::Input;
  use crate::circuit::InputType;
  use crate::circuit::Unknown;
@@ -240,6 +241,13 @@ impl NodeType {
          NodeType::SubGraph { model, .. } => model.required_lookups(),
      }
  }
+ /// Returns the lookups required by a graph
+ pub fn required_range_checks(&self) -> Vec<Range> {
+     match self {
+         NodeType::Node(n) => n.opkind.required_range_checks(),
+         NodeType::SubGraph { model, .. } => model.required_range_checks(),
+     }
+ }
  /// Returns the scales of the node's output.
  pub fn out_scales(&self) -> Vec<crate::Scale> {
      match self {
@@ -432,6 +440,15 @@ impl Model {
          .collect_vec()
  }

+ ///
+ fn required_range_checks(&self) -> Vec<Range> {
+     self.graph
+         .nodes
+         .values()
+         .flat_map(|n| n.required_range_checks())
+         .collect_vec()
+ }
+
  /// Creates a `Model` from a specified path to an Onnx file.
  /// # Arguments
  /// * `reader` - A reader for an Onnx file.
@@ -489,6 +506,8 @@ impl Model {

      // extract the requisite lookup ops from the model
      let mut lookup_ops: Vec<LookupOp> = self.required_lookups();
+     // extract the requisite lookup ops from the model
+     let mut range_checks: Vec<Range> = self.required_range_checks();

      // if we're using percentage tolerance, we need to add the necessary range check ops for it.

@@ -504,6 +523,9 @@ impl Model {
      let set: HashSet<_> = lookup_ops.drain(..).collect(); // dedup
      lookup_ops.extend(set.into_iter().sorted());

+     let set: HashSet<_> = range_checks.drain(..).collect(); // dedup
+     range_checks.extend(set.into_iter().sorted());
+
      Ok(GraphSettings {
          run_args: run_args.clone(),
          model_instance_shapes: instance_shapes,
@@ -511,6 +533,7 @@ impl Model {
          num_rows,
          total_assignments: linear_coord,
          required_lookups: lookup_ops,
+         required_range_checks: range_checks,
          model_output_scales: self.graph.get_output_scales()?,
          model_input_scales: self.graph.get_input_scales(),
          total_const_size,
@@ -611,7 +634,7 @@ impl Model {
          debug!("intermediate min lookup inputs: {}", min);
      }
      debug!(
-         "------------ output node int {}: {} \n ------------ float: {} \n ------------ max: {} \n ------------ min: {} ------------ scale: {}",
+         "------------ output node int {}: {} \n ------------ float: {} \n ------------ max: {} \n ------------ min: {} \n ------------ scale: {}",
          idx,
          res.output.map(crate::fieldutils::felt_to_i32).show(),
          res.output
@@ -1042,6 +1065,7 @@ impl Model {
      &run_args.param_visibility,
      i,
      symbol_values,
+     run_args.div_rebasing,
  )?;
  if let Some(ref scales) = override_input_scales {
      if let Some(inp) = n.opkind.get_input() {
@@ -1058,9 +1082,20 @@ impl Model {
      if scales.contains_key(&i) {
          let scale_diff = n.out_scale - scales[&i];
          n.opkind = if scale_diff > 0 {
-             RebaseScale::rebase(n.opkind, scales[&i], n.out_scale, 1)
+             RebaseScale::rebase(
+                 n.opkind,
+                 scales[&i],
+                 n.out_scale,
+                 1,
+                 run_args.div_rebasing,
+             )
          } else {
-             RebaseScale::rebase_up(n.opkind, scales[&i], n.out_scale)
+             RebaseScale::rebase_up(
+                 n.opkind,
+                 scales[&i],
+                 n.out_scale,
+                 run_args.div_rebasing,
+             )
          };
          n.out_scale = scales[&i];
      }
@@ -1155,9 +1190,10 @@ impl Model {
  pub fn configure(
      meta: &mut ConstraintSystem<Fp>,
      vars: &ModelVars<Fp>,
-     lookup_range: (i128, i128),
+     lookup_range: Range,
      logrows: usize,
      required_lookups: Vec<LookupOp>,
+     required_range_checks: Vec<Range>,
      check_mode: CheckMode,
  ) -> Result<PolyConfig<Fp>, Box<dyn Error>> {
      info!("configuring model");
@@ -1176,6 +1212,10 @@ impl Model {
      base_gate.configure_lookup(meta, input, output, index, lookup_range, logrows, &op)?;
  }

+ for range in required_range_checks {
+     base_gate.configure_range_check(meta, input, range)?;
+ }
+
  Ok(base_gate)
  }

@@ -1216,6 +1256,7 @@ impl Model {
  let instance_idx = vars.get_instance_idx();

  config.base.layout_tables(layouter)?;
+ config.base.layout_range_checks(layouter)?;

  let mut num_rows = 0;
  let mut linear_coord = 0;

@@ -132,6 +132,8 @@ pub struct RebaseScale {
      pub target_scale: i32,
      /// The original scale of the operation's inputs.
      pub original_scale: i32,
+     /// if true then the operation is a multiplicative division
+     pub div_rebasing: bool,
  }

  impl RebaseScale {
@@ -141,6 +143,7 @@ impl RebaseScale {
      global_scale: crate::Scale,
      op_out_scale: crate::Scale,
      scale_rebase_multiplier: u32,
+     div_rebasing: bool,
  ) -> SupportedOp {
      if (op_out_scale > (global_scale * scale_rebase_multiplier as i32))
          && !inner.is_constant()
@@ -154,6 +157,7 @@ impl RebaseScale {
      target_scale: op.target_scale,
      multiplier: op.multiplier * multiplier,
      original_scale: op.original_scale,
+     div_rebasing,
  })
  } else {
      SupportedOp::RebaseScale(RebaseScale {
@@ -161,6 +165,7 @@ impl RebaseScale {
          target_scale: global_scale * scale_rebase_multiplier as i32,
          multiplier,
          original_scale: op_out_scale,
+         div_rebasing,
      })
  }
  } else {
@@ -173,6 +178,7 @@ impl RebaseScale {
      inner: SupportedOp,
      target_scale: crate::Scale,
      op_out_scale: crate::Scale,
+     div_rebasing: bool,
  ) -> SupportedOp {
      if (op_out_scale < (target_scale)) && !inner.is_constant() && !inner.is_input() {
          let multiplier = scale_to_multiplier(op_out_scale - target_scale);
@@ -182,6 +188,7 @@ impl RebaseScale {
      target_scale: op.target_scale,
      multiplier: op.multiplier * multiplier,
      original_scale: op.original_scale,
+     div_rebasing,
  })
  } else {
      SupportedOp::RebaseScale(RebaseScale {
@@ -189,12 +196,22 @@ impl RebaseScale {
          target_scale,
          multiplier,
          original_scale: op_out_scale,
+         div_rebasing,
      })
  }
  } else {
      inner
  }
  }

+ /// Calculate the require range bracket for the operation
+ fn range_bracket(&self) -> i128 {
+     if self.div_rebasing {
+         0
+     } else {
+         self.multiplier as i128 - 1
+     }
+ }
  }

  impl Op<Fp> for RebaseScale {
@@ -203,19 +220,28 @@ impl Op<Fp> for RebaseScale {
  }
  fn f(&self, x: &[Tensor<Fp>]) -> Result<crate::circuit::ForwardResult<Fp>, TensorError> {
      let mut res = Op::<Fp>::f(&*self.inner, x)?;
-     let ri = res.output.map(felt_to_i128);
-     let rescaled = crate::tensor::ops::nonlinearities::const_div(&ri, self.multiplier);
-     res.output = rescaled.map(i128_to_felt);
-     res.intermediate_lookups.push(ri);
+     if self.div_rebasing {
+         let ri = res.output.map(felt_to_i128);
+         let rescaled = crate::tensor::ops::nonlinearities::const_div(&ri, self.multiplier);
+         res.output = rescaled.map(i128_to_felt);
+         res.intermediate_lookups.push(ri);
+     } else {
+         let ri = res.output.map(felt_to_i128);
+         let divisor = Tensor::from(vec![self.multiplier as i128].into_iter());
+         let rescaled = crate::tensor::ops::div(&[ri, divisor.clone()])?;
+         res.output = rescaled.map(i128_to_felt);
+         res.intermediate_lookups.extend([-divisor.clone(), divisor]);
+     }

      Ok(res)
  }

  fn as_string(&self) -> String {
      format!(
-         "REBASED (div={:?}) ({})",
+         "REBASED (div={:?}, div_r={}) ({})",
          self.multiplier,
+         self.div_rebasing,
          self.inner.as_string()
      )
  }
@@ -225,13 +251,24 @@ impl Op<Fp> for RebaseScale {
  }

  fn required_lookups(&self) -> Vec<LookupOp> {
-     let mut lookups = self.inner.required_lookups();
-     lookups.push(LookupOp::Div {
-         denom: crate::circuit::utils::F32(self.multiplier as f32),
-     });
+     let mut lookups: Vec<LookupOp> = self.inner.required_lookups();
+     if self.div_rebasing {
+         lookups.push(LookupOp::Div {
+             denom: crate::circuit::utils::F32(self.multiplier as f32),
+         });
+     }
      lookups
  }

+ fn required_range_checks(&self) -> Vec<crate::circuit::table::Range> {
+     let mut range_checks = self.inner.required_range_checks();
+     if !self.div_rebasing {
+         let bracket = self.range_bracket();
+         range_checks.push((-bracket, bracket));
+     }
+     range_checks
+ }
+
  fn layout(
      &self,
      config: &mut crate::circuit::BaseConfig<Fp>,
@@ -243,14 +280,23 @@ impl Op<Fp> for RebaseScale {
      .layout(config, region, values)?
      .ok_or("no layout")?;

-     Ok(Some(crate::circuit::layouts::nonlinearity(
-         config,
-         region,
-         &[original_res],
-         &LookupOp::Div {
-             denom: crate::circuit::utils::F32(self.multiplier as f32),
-         },
-     )?))
+     if !self.div_rebasing {
+         Ok(Some(crate::circuit::layouts::div(
+             config,
+             region,
+             &[original_res],
+             Fp::from(self.multiplier as u64),
+         )?))
+     } else {
+         Ok(Some(crate::circuit::layouts::nonlinearity(
+             config,
+             region,
+             &[original_res],
+             &LookupOp::Div {
+                 denom: crate::circuit::utils::F32(self.multiplier as f32),
+             },
+         )?))
+     }
  }

  fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
@@ -437,6 +483,10 @@ impl Op<Fp> for SupportedOp {
      self.as_op().required_lookups()
  }

+ fn required_range_checks(&self) -> Vec<crate::circuit::table::Range> {
+     self.as_op().required_range_checks()
+ }
+
  fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
      self.as_op().out_scale(in_scales)
  }
@@ -477,6 +527,7 @@ impl Tabled for Node {
      "inputs",
      "out_dims",
      "required_lookups",
+     "required_range_checks",
  ] {
      headers.push(std::borrow::Cow::Borrowed(i));
  }
@@ -498,6 +549,10 @@ impl Tabled for Node {
          .map(<LookupOp as Op<Fp>>::as_string)
          .collect_vec()
      )));
+     fields.push(std::borrow::Cow::Owned(format!(
+         "{:?}",
+         self.opkind.required_range_checks()
+     )));
      fields
  }
  }
@@ -527,6 +582,7 @@ impl Node {
      param_visibility: &Visibility,
      idx: usize,
      symbol_values: &SymbolValues,
+     div_rebasing: bool,
  ) -> Result<Self, Box<dyn Error>> {
      use log::warn;

@@ -631,7 +687,13 @@ impl Node {
      let mut out_scale = opkind.out_scale(in_scales.clone())?;
      // rescale the inputs if necessary to get consistent fixed points, we select the largest scale (highest precision)
      let global_scale = scales.get_max();
-     opkind = RebaseScale::rebase(opkind, global_scale, out_scale, scales.rebase_multiplier);
+     opkind = RebaseScale::rebase(
+         opkind,
+         global_scale,
+         out_scale,
+         scales.rebase_multiplier,
+         div_rebasing,
+     );

      out_scale = opkind.out_scale(in_scales)?;

@@ -71,7 +71,6 @@ pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i12
  pub fn dequantize(felt: Fp, scale: crate::Scale, shift: f64) -> f64 {
      let int_rep = crate::fieldutils::felt_to_i128(felt);
      let multiplier = scale_to_multiplier(scale);

      int_rep as f64 / multiplier - shift
  }

@@ -717,8 +716,8 @@ pub fn new_op_from_onnx(
      }
  }
  "Recip" => {
-     // Extract the slope layer hyperparams
      let in_scale = inputs[0].out_scales()[0];
+     // If the input scale is larger than the params scale
      let scale_diff = std::cmp::max(scales.input, scales.params) - inputs[0].out_scales()[0];
      let additional_scale = if scale_diff > 0 {
          scale_to_multiplier(scale_diff)
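To make the rebasing arithmetic above concrete: a fixed-point value at scale `s` represents `x / 2^s`, so dropping from scale `s1` to `s2` means dividing the integer by the multiplier `2^(s1 - s2)`; with the new non-lookup path that division is witnessed as a quotient and range-checked with bracket `multiplier - 1`. A small sketch with illustrative values (not ezkl APIs):

```rust
// Illustrative only: fixed-point rebasing by a power-of-two multiplier and the
// bracket used to range-check the witnessed quotient.
fn scale_to_multiplier(scale_diff: u32) -> i128 {
    1_i128 << scale_diff
}

fn main() {
    // value 3.25 at scale 10 -> integer representation 3328
    let x = (3.25 * 1024.0) as i128;
    // rebase from scale 10 down to scale 7: divide by 2^(10 - 7) = 8
    let multiplier = scale_to_multiplier(3);
    let q = x / multiplier; // 416, i.e. 3.25 at scale 7
    assert_eq!(q, 416);

    // the range-check bracket that pins q to the true quotient up to rounding
    let bracket = multiplier - 1;
    assert!((q * multiplier - x).abs() <= bracket);
}
```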
src/lib.rs: 12 changed lines
@@ -29,7 +29,7 @@
  //! A library for turning computational graphs, such as neural networks, into ZK-circuits.
  //!

- use circuit::Tolerance;
+ use circuit::{table::Range, CheckMode, Tolerance};
  use clap::Args;
  use graph::Visibility;
  use serde::{Deserialize, Serialize};
@@ -91,7 +91,7 @@ pub struct RunArgs {
      pub scale_rebase_multiplier: u32,
      /// The min and max elements in the lookup table input column
      #[arg(short = 'B', long, value_parser = parse_tuple::<i128>, default_value = "(-32768,32768)")]
-     pub lookup_range: (i128, i128),
+     pub lookup_range: Range,
      /// The log_2 number of rows
      #[arg(short = 'K', long, default_value = "17")]
      pub logrows: u32,
@@ -110,6 +110,12 @@ pub struct RunArgs {
      /// Flags whether params are public, private, hashed
      #[arg(long, default_value = "private")]
      pub param_visibility: Visibility,
+     #[arg(long, default_value = "false")]
+     /// Multiplicative division
+     pub div_rebasing: bool,
+     /// check mode (safe, unsafe, etc)
+     #[arg(long, default_value = "unsafe")]
+     pub check_mode: CheckMode,
  }

  impl Default for RunArgs {
@@ -126,6 +132,8 @@ impl Default for RunArgs {
      input_visibility: Visibility::Private,
      output_visibility: Visibility::Public,
      param_visibility: Visibility::Private,
+     div_rebasing: false,
+     check_mode: CheckMode::UNSAFE,
  }
  }
  }

@@ -146,7 +146,7 @@ struct PyRunArgs {
      #[pyo3(get, set)]
      pub scale_rebase_multiplier: u32,
      #[pyo3(get, set)]
-     pub lookup_range: (i128, i128),
+     pub lookup_range: crate::circuit::table::Range,
      #[pyo3(get, set)]
      pub logrows: u32,
      #[pyo3(get, set)]
@@ -159,6 +159,10 @@ struct PyRunArgs {
      pub param_visibility: Visibility,
      #[pyo3(get, set)]
      pub variables: Vec<(String, usize)>,
+     #[pyo3(get, set)]
+     pub div_rebasing: bool,
+     #[pyo3(get, set)]
+     pub check_mode: CheckMode,
  }

  /// default instantiation of PyRunArgs
@@ -185,6 +189,8 @@ impl From<PyRunArgs> for RunArgs {
      output_visibility: py_run_args.output_visibility,
      param_visibility: py_run_args.param_visibility,
      variables: py_run_args.variables,
+     div_rebasing: py_run_args.div_rebasing,
+     check_mode: py_run_args.check_mode,
  }
  }
  }
@@ -203,6 +209,8 @@ impl Into<PyRunArgs> for RunArgs {
      output_visibility: self.output_visibility,
      param_visibility: self.param_visibility,
      variables: self.variables,
+     div_rebasing: self.div_rebasing,
+     check_mode: self.check_mode,
  }
  }
  }
@@ -511,7 +519,9 @@ fn gen_settings(
      target = CalibrationTarget::default(), // default is "resources
      lookup_safety_margin = DEFAULT_LOOKUP_SAFETY_MARGIN.parse().unwrap(),
      scales = None,
+     scale_rebase_multiplier = DEFAULT_SCALE_REBASE_MULTIPLIERS.split(",").map(|x| x.parse().unwrap()).collect(),
      max_logrows = None,
+     div_rebasing = None,
  ))]
  fn calibrate_settings(
      data: PathBuf,
@@ -520,7 +530,9 @@ fn calibrate_settings(
      target: CalibrationTarget,
      lookup_safety_margin: i128,
      scales: Option<Vec<crate::Scale>>,
+     scale_rebase_multiplier: Vec<u32>,
      max_logrows: Option<u32>,
+     div_rebasing: Option<bool>,
  ) -> Result<bool, PyErr> {
      crate::execute::calibrate(
          model,
@@ -529,6 +541,8 @@ fn calibrate_settings(
      target,
      lookup_safety_margin,
      scales,
+     scale_rebase_multiplier,
+     div_rebasing,
      max_logrows,
  )
  .map_err(|e| {
@@ -30,11 +30,11 @@ use halo2_proofs::{
      poly::Rotation,
  };
  use itertools::Itertools;
- use std::cmp::max;
  use std::error::Error;
  use std::fmt::Debug;
  use std::iter::Iterator;
  use std::ops::{Add, Deref, DerefMut, Div, Mul, Neg, Range, Sub};
+ use std::{cmp::max, ops::Rem};
  use thiserror::Error;
  /// A wrapper for tensor related errors.
  #[derive(Debug, Error)]
@@ -1452,6 +1452,43 @@ impl<T: TensorType + Div<Output = T> + std::marker::Send + std::marker::Sync> Di
      }
  }

+ // implement remainder
+ impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync> Rem for Tensor<T> {
+     type Output = Result<Tensor<T>, TensorError>;
+
+     /// Elementwise remainder of a tensor with another tensor.
+     /// # Arguments
+     /// * `self` - Tensor
+     /// * `rhs` - Tensor
+     /// # Examples
+     /// ```
+     /// use ezkl::tensor::Tensor;
+     /// use std::ops::Rem;
+     /// let x = Tensor::<i32>::new(
+     ///     Some(&[4, 1, 4, 1, 1, 4]),
+     ///     &[2, 3],
+     /// ).unwrap();
+     /// let y = Tensor::<i32>::new(
+     ///     Some(&[2, 1, 2, 1, 1, 1]),
+     ///     &[2, 3],
+     /// ).unwrap();
+     /// let result = x.rem(y).unwrap();
+     /// let expected = Tensor::<i32>::new(Some(&[0, 0, 0, 0, 0, 0]), &[2, 3]).unwrap();
+     /// assert_eq!(result, expected);
+     /// ```
+     fn rem(self, rhs: Self) -> Self::Output {
+         let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
+         let mut lhs = self.expand(&broadcasted_shape).unwrap();
+         let rhs = rhs.expand(&broadcasted_shape).unwrap();
+
+         lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
+             *o = o.clone() % r;
+         });
+
+         Ok(lhs)
+     }
+ }
+
  /// Returns the broadcasted shape of two tensors
  /// ```
  /// use ezkl::tensor::get_broadcasted_shape;

@@ -950,8 +950,7 @@ pub fn neg<T: TensorType + Neg<Output = T> + std::marker::Send + std::marker::Sy
  /// Elementwise multiplies multiple tensors.
  /// # Arguments
  ///
- /// * `a` - Tensor
- /// * `b` - Tensor
+ /// * `t` - Tensors
  /// # Examples
  /// ```
  /// use ezkl::tensor::Tensor;
@@ -993,6 +992,45 @@ pub fn mult<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::S
      Ok(output)
  }

+ /// Divides multiple tensors.
+ /// # Arguments
+ /// * `t` - Tensors
+ /// # Examples
+ /// ```
+ /// use ezkl::tensor::Tensor;
+ /// use ezkl::tensor::ops::div;
+ /// let x = Tensor::<i128>::new(
+ ///     Some(&[2, 1, 2, 1, 1, 1]),
+ ///     &[2, 3],
+ /// ).unwrap();
+ /// let k = Tensor::<i128>::new(
+ ///     Some(&[2, 3, 2, 1, 1, 1]),
+ ///     &[2, 3],
+ /// ).unwrap();
+ /// let result = div(&[x, k]).unwrap();
+ /// let expected = Tensor::<i128>::new(Some(&[1, 0, 1, 1, 1, 1]), &[2, 3]).unwrap();
+ /// assert_eq!(result, expected);
+ /// ```
+ pub fn div<
+     T: TensorType
+         + Div<Output = T>
+         + Mul<Output = T>
+         + From<u64>
+         + std::marker::Send
+         + std::marker::Sync,
+ >(
+     t: &[Tensor<T>],
+ ) -> Result<Tensor<T>, TensorError> {
+     // calculate value of output
+     let mut output: Tensor<T> = t[0].clone();
+
+     for e in t[1..].iter() {
+         output = (output / e.clone())?;
+     }
+
+     Ok(output)
+ }
+
  /// Rescale a tensor with a const integer (similar to const_mult).
  /// # Arguments
  ///
@@ -512,13 +512,23 @@ mod native_tests {
      test_dir.close().unwrap();
  }

+ #(#[test_case(TESTS[N])])*
+ fn accuracy_measurement_div_rebase_(test: &str) {
+     crate::native_tests::init_binary();
+     crate::native_tests::setup_py_env();
+     let test_dir = TempDir::new(test).unwrap();
+     let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
+     accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6, true);
+     test_dir.close().unwrap();
+ }
+
  #(#[test_case(TESTS[N])])*
  fn accuracy_measurement_public_outputs_(test: &str) {
      crate::native_tests::init_binary();
      crate::native_tests::setup_py_env();
      let test_dir = TempDir::new(test).unwrap();
      let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
-     accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6);
+     accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6, false);
      test_dir.close().unwrap();
  }

@@ -528,7 +538,7 @@ mod native_tests {
      crate::native_tests::setup_py_env();
      let test_dir = TempDir::new(test).unwrap();
      let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
-     accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6);
+     accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6 , false);
      test_dir.close().unwrap();
  }

@@ -538,7 +548,7 @@ mod native_tests {
      crate::native_tests::setup_py_env();
      let test_dir = TempDir::new(test).unwrap();
      let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
-     accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6);
+     accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6, false);
      test_dir.close().unwrap();
  }

@@ -549,7 +559,7 @@ mod native_tests {
      crate::native_tests::setup_py_env();
      let test_dir = TempDir::new(test).unwrap();
      let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
-     accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 18.0);
+     accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 18.0, false);
      test_dir.close().unwrap();
  }

@@ -826,10 +836,10 @@ mod native_tests {
      let test_dir = TempDir::new(test).unwrap();
      env_logger::init();
      let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
-     kzg_prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, Some(vec![0,1]), true, "single");
+     kzg_prove_and_verify(path, test.to_string(), "safe", "private", "private", "public", 1, None, true, "single");
      #[cfg(not(feature = "icicle"))]
      run_js_tests(path, test.to_string(), "testWasm");
-     test_dir.close().unwrap();
+     // test_dir.close().unwrap();
  }

  #(#[test_case(WASM_TESTS[N])])*
@@ -839,7 +849,7 @@ mod native_tests {
      let test_dir = TempDir::new(test).unwrap();
      env_logger::init();
      let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
-     kzg_prove_and_verify(path, test.to_string(), "safe", "private", "fixed", "public", 1, Some(vec![0,1]), true, "single");
+     kzg_prove_and_verify(path, test.to_string(), "safe", "private", "fixed", "public", 1, None, true, "single");
      #[cfg(not(feature = "icicle"))]
      run_js_tests(path, test.to_string(), "testWasm");
      test_dir.close().unwrap();
@@ -855,7 +865,7 @@ mod native_tests {
      crate::native_tests::init_binary();
      let test_dir = TempDir::new(test).unwrap();
      let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
-     kzg_prove_and_verify(path, test.to_string(), "unsafe", "private", "fixed", "public", 1, Some(vec![0,6]), false, "single");
+     kzg_prove_and_verify(path, test.to_string(), "unsafe", "private", "fixed", "public", 1, None, false, "single");
      test_dir.close().unwrap();
  }

@@ -865,7 +875,7 @@ mod native_tests {
      crate::native_tests::init_binary();
      let test_dir = TempDir::new(test).unwrap();
      let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
-     mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", Some(vec![0,6]));
+     mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None);
      test_dir.close().unwrap();
  }
  });
@@ -883,7 +893,7 @@ mod native_tests {
  use test_case::test_case;
  use crate::native_tests::kzg_evm_prove_and_verify;
  use crate::native_tests::kzg_evm_prove_and_verify_render_seperately;

  use crate::native_tests::kzg_evm_on_chain_input_prove_and_verify;
  use crate::native_tests::kzg_evm_aggr_prove_and_verify;
  use crate::native_tests::kzg_fuzz;
@@ -1273,6 +1283,7 @@ mod native_tests {
      cal_target,
      scales_to_use,
      2,
+     false,
  );

  let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
@@ -1299,22 +1310,29 @@ mod native_tests {
      cal_target: &str,
      scales_to_use: Option<Vec<u32>>,
      num_inner_columns: usize,
+     div_rebasing: bool,
  ) {
+     let mut args = vec![
+         "gen-settings".to_string(),
+         "-M".to_string(),
+         format!("{}/{}/network.onnx", test_dir, example_name),
+         format!(
+             "--settings-path={}/{}/settings.json",
+             test_dir, example_name
+         ),
+         format!("--variables=batch_size={}", batch_size),
+         format!("--input-visibility={}", input_visibility),
+         format!("--param-visibility={}", param_visibility),
+         format!("--output-visibility={}", output_visibility),
+         format!("--num-inner-cols={}", num_inner_columns),
+     ];
+
+     if div_rebasing {
+         args.push("--div-rebasing".to_string());
+     };
+
      let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
-         .args([
-             "gen-settings",
-             "-M",
-             format!("{}/{}/network.onnx", test_dir, example_name).as_str(),
-             &format!(
-                 "--settings-path={}/{}/settings.json",
-                 test_dir, example_name
-             ),
-             &format!("--variables=batch_size={}", batch_size),
-             &format!("--input-visibility={}", input_visibility),
-             &format!("--param-visibility={}", param_visibility),
-             &format!("--output-visibility={}", output_visibility),
-             &format!("--num-inner-cols={}", num_inner_columns),
-         ])
+         .args(args)
          .stdout(std::process::Stdio::null())
          .status()
         .expect("failed to execute process");
@@ -1392,6 +1410,7 @@ mod native_tests {
      batch_size: usize,
      cal_target: &str,
      target_perc: f32,
+     div_rebasing: bool,
  ) {
      gen_circuit_settings_and_witness(
          test_dir,
@@ -1403,6 +1422,7 @@ mod native_tests {
      cal_target,
      None,
      2,
+     div_rebasing,
  );

  println!(
@@ -1661,6 +1681,7 @@ mod native_tests {
      target_str,
      scales_to_use,
      num_inner_columns,
+     false,
  );

  let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
@@ -1737,6 +1758,7 @@ mod native_tests {
      "resources",
      None,
      2,
+     false,
  );

  let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
@@ -2012,6 +2034,7 @@ mod native_tests {
      // we need the accuracy
      Some(vec![7, 8]),
      1,
+     false,
  );

  let model_path = format!("{}/{}/network.compiled", test_dir, example_name);
Binary file not shown.

@@ -1 +1,60 @@
- {"run_args":{"tolerance":{"val":0.0,"scale":1.0},"input_scale":0,"param_scale":0,"scale_rebase_multiplier":10,"lookup_range":[-2,0],"logrows":6,"num_inner_cols":2,"variables":[["batch_size",1]],"input_visibility":"Private","output_visibility":"Public","param_visibility":"Private"},"num_rows":16,"total_assignments":32,"total_const_size":8,"model_instance_shapes":[[1,4]],"model_output_scales":[0],"model_input_scales":[0],"module_sizes":{"kzg":[],"poseidon":[0,[0]]},"required_lookups":["ReLU"],"check_mode":"UNSAFE","version":"0.0.0","num_blinding_factors":null,"timestamp":1702474230544}
+ {
+   "run_args": {
+     "tolerance": {
+       "val": 0.0,
+       "scale": 1.0
+     },
+     "input_scale": 0,
+     "param_scale": 0,
+     "scale_rebase_multiplier": 10,
+     "lookup_range": [
+       -2,
+       0
+     ],
+     "logrows": 6,
+     "num_inner_cols": 2,
+     "variables": [
+       [
+         "batch_size",
+         1
+       ]
+     ],
+     "input_visibility": "Private",
+     "output_visibility": "Public",
+     "param_visibility": "Private",
+     "div_rebasing": false,
+     "check_mode": "UNSAFE"
+   },
+   "num_rows": 16,
+   "total_assignments": 32,
+   "total_const_size": 8,
+   "model_instance_shapes": [
+     [
+       1,
+       4
+     ]
+   ],
+   "model_output_scales": [
+     0
+   ],
+   "model_input_scales": [
+     0
+   ],
+   "module_sizes": {
+     "kzg": [],
+     "poseidon": [
+       0,
+       [
+         0
+       ]
+     ]
+   },
+   "required_lookups": [
+     "ReLU"
+   ],
+   "required_range_checks": [],
+   "check_mode": "UNSAFE",
+   "version": "0.0.0",
+   "num_blinding_factors": null,
+   "timestamp": 1702474230544
+ }