Compare commits

...

2 Commits

Author    SHA1          Message                                     Date
dante     523c77c912    feat: lookupless sqrt and rsqrt (#867)      2024-11-10 15:56:38 +00:00
dante     948e5cd4b9    chore: version proof and witness (#865)     2024-11-08 02:55:35 +00:00
20 changed files with 429 additions and 396 deletions

View File

@@ -240,6 +240,8 @@ jobs:
locked: true
# - name: The Worm Mock
# run: cargo nextest run --release --verbose tests::large_mock_::large_tests_5_expects -- --include-ignored
- name: public outputs and bounded lookup log
run: cargo nextest run --release --verbose tests::mock_bounded_lookup_log --test-threads 32
- name: public outputs and tolerance > 0
run: cargo nextest run --release --verbose tests::mock_tolerance_public_outputs_ --test-threads 32
- name: public outputs + batch size == 10

View File

@@ -648,7 +648,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.5"
"version": "3.12.7"
},
"orig_nbformat": 4
},

View File

@@ -171,7 +171,7 @@
"json.dump(data, open(cal_path, 'w'))\n",
"\n",
"\n",
"ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
"await ezkl.calibrate_settings(cal_path, model_path, settings_path, \"resources\")"
]
},
{
@@ -328,7 +328,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 26,
"id": "171702d3",
"metadata": {},
"outputs": [],
@@ -348,7 +348,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 27,
"id": "671dfdd5",
"metadata": {},
"outputs": [],
@@ -364,7 +364,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 28,
"id": "50eba2f4",
"metadata": {},
"outputs": [],
@@ -399,9 +399,9 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
}

View File

@@ -180,9 +180,6 @@ struct PyRunArgs {
/// list[tuple[str, int]]: Hand-written parser for graph variables, e.g. batch_size=1
pub variables: Vec<(String, usize)>,
#[pyo3(get, set)]
/// bool: Rebase the scale using lookup table for division instead of using a range check
pub div_rebasing: bool,
#[pyo3(get, set)]
/// bool: Should constants with 0.0 fraction be rebased to scale 0
pub rebase_frac_zero_constants: bool,
#[pyo3(get, set)]
@@ -227,7 +224,6 @@ impl From<PyRunArgs> for RunArgs {
output_visibility: py_run_args.output_visibility,
param_visibility: py_run_args.param_visibility,
variables: py_run_args.variables,
div_rebasing: py_run_args.div_rebasing,
rebase_frac_zero_constants: py_run_args.rebase_frac_zero_constants,
check_mode: py_run_args.check_mode,
commitment: Some(py_run_args.commitment.into()),
@@ -252,7 +248,6 @@ impl Into<PyRunArgs> for RunArgs {
output_visibility: self.output_visibility,
param_visibility: self.param_visibility,
variables: self.variables,
div_rebasing: self.div_rebasing,
rebase_frac_zero_constants: self.rebase_frac_zero_constants,
check_mode: self.check_mode,
commitment: self.commitment.into(),
@@ -878,8 +873,6 @@ fn gen_settings(
/// max_logrows: int
/// Optional max logrows to use for calibration
///
/// only_range_check_rebase: bool
/// Check ranges when rebasing
///
/// Returns
/// -------
@@ -894,7 +887,6 @@ fn gen_settings(
scales = None,
scale_rebase_multiplier = DEFAULT_SCALE_REBASE_MULTIPLIERS.split(",").map(|x| x.parse().unwrap()).collect(),
max_logrows = None,
only_range_check_rebase = DEFAULT_ONLY_RANGE_CHECK_REBASE.parse().unwrap(),
))]
fn calibrate_settings(
py: Python,
@@ -906,7 +898,6 @@ fn calibrate_settings(
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
max_logrows: Option<u32>,
only_range_check_rebase: bool,
) -> PyResult<Bound<'_, PyAny>> {
pyo3_asyncio::tokio::future_into_py(py, async move {
crate::execute::calibrate(
@@ -917,7 +908,6 @@ fn calibrate_settings(
lookup_safety_margin,
scales,
scale_rebase_multiplier,
only_range_check_rebase,
max_logrows,
)
.await

View File

@@ -16,7 +16,13 @@ pub enum HybridOp {
Ln {
scale: utils::F32,
},
Rsqrt {
input_scale: utils::F32,
output_scale: utils::F32,
},
Sqrt {
scale: utils::F32,
},
RoundHalfToEven {
scale: utils::F32,
legs: usize,
@@ -39,7 +45,6 @@ pub enum HybridOp {
},
Div {
denom: utils::F32,
use_range_check_for_int: bool,
},
ReduceMax {
axes: Vec<usize>,
@@ -116,6 +121,14 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
fn as_string(&self) -> String {
match self {
HybridOp::Rsqrt {
input_scale,
output_scale,
} => format!(
"RSQRT (input_scale={}, output_scale={})",
input_scale, output_scale
),
HybridOp::Sqrt { scale } => format!("SQRT(scale={})", scale),
HybridOp::Ln { scale } => format!("LN(scale={})", scale),
HybridOp::RoundHalfToEven { scale, legs } => {
format!("ROUND_HALF_TO_EVEN(scale={}, legs={})", scale, legs)
@@ -124,8 +137,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
HybridOp::Floor { scale, legs } => format!("FLOOR(scale={}, legs={})", scale, legs),
HybridOp::Round { scale, legs } => format!("ROUND(scale={}, legs={})", scale, legs),
HybridOp::Max => format!("MAX"),
HybridOp::Min => format!("MIN"),
HybridOp::Max => "MAX".to_string(),
HybridOp::Min => "MIN".to_string(),
HybridOp::Recip {
input_scale,
output_scale,
@@ -133,13 +146,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
"RECIP (input_scale={}, output_scale={})",
input_scale, output_scale
),
HybridOp::Div {
denom,
use_range_check_for_int,
} => format!(
"DIV (denom={}, use_range_check_for_int={})",
denom, use_range_check_for_int
),
HybridOp::Div { denom } => format!("DIV (denom={})", denom),
HybridOp::SumPool {
padding,
stride,
@@ -194,6 +201,19 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
values: &[ValTensor<F>],
) -> Result<Option<ValTensor<F>>, CircuitError> {
Ok(Some(match self {
HybridOp::Rsqrt {
input_scale,
output_scale,
} => layouts::rsqrt(
config,
region,
values[..].try_into()?,
*input_scale,
*output_scale,
)?,
HybridOp::Sqrt { scale } => {
layouts::sqrt(config, region, values[..].try_into()?, *scale)?
}
HybridOp::Ln { scale } => layouts::ln(config, region, values[..].try_into()?, *scale)?,
HybridOp::RoundHalfToEven { scale, legs } => {
layouts::round_half_to_even(config, region, values[..].try_into()?, *scale, *legs)?
@@ -233,13 +253,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
integer_rep_to_felt(input_scale.0 as i128),
integer_rep_to_felt(output_scale.0 as i128),
)?,
HybridOp::Div {
denom,
use_range_check_for_int,
..
} => {
if denom.0.fract() == 0.0 && *use_range_check_for_int {
layouts::loop_div(
HybridOp::Div { denom, .. } => {
if denom.0.fract() == 0.0 {
layouts::div(
config,
region,
values[..].try_into()?,
@@ -330,9 +346,15 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
| HybridOp::ReduceArgMax { .. }
| HybridOp::OneHot { .. }
| HybridOp::ReduceArgMin { .. } => 0,
HybridOp::Softmax { output_scale, .. } | HybridOp::Recip { output_scale, .. } => {
HybridOp::Recip { output_scale, .. } | HybridOp::Rsqrt { output_scale, .. } => {
multiplier_to_scale(output_scale.0 as f64)
}
HybridOp::Softmax {
output_scale,
input_scale,
..
} => multiplier_to_scale((output_scale.0 * input_scale.0) as f64),
HybridOp::Ln {
scale: output_scale,
} => 4 * multiplier_to_scale(output_scale.0 as f64),
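A quick sanity check on the new Softmax out_scale (a sketch only; it assumes multiplier_to_scale is the base-2 log of the fixed-point multiplier): with the softmax doctest's input_scale = 128 and output_scale = 128 * 128, the reported output scale moves from log2(128 * 128) = 14 to log2(128 * 128 * 128) = 21, which lines up with the roughly 128x larger expected values in the updated softmax doctest later in this diff (2734 * 128 = 349952, close to the new 350012).

fn main() {
    let input_scale: f64 = 128.0;
    let output_scale: f64 = 128.0 * 128.0;
    // old behaviour: scale derived from output_scale alone
    assert_eq!(output_scale.log2() as i32, 14);
    // new behaviour: scale derived from output_scale * input_scale
    assert_eq!((output_scale * input_scale).log2() as i32, 21);
}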

View File

@@ -29,41 +29,96 @@ use crate::{
use super::*;
use crate::circuit::ops::lookup::LookupOp;
/// Same as div but splits the division into N parts
pub(crate) fn loop_div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
/// Calculate the L1 distance between two tensors.
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::l1_distance;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// use ezkl::tensor::ValTensor;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(128,2));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
/// Some(&[1, 2, 3, 2, 3, 4, 3, 4, 5]),
/// &[3, 3],
/// ).unwrap());
/// let k = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
/// Some(&[1, 2, 3, 1, 2, 3, 1, 2, 3]),
/// &[3, 3],
/// ).unwrap());
/// let result = l1_distance::<Fp>(&dummy_config, &mut dummy_region, &[x, k]).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[0, 0, 0, 1, 1, 1, 2, 2, 2]), &[3, 3]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
/// ```
pub fn l1_distance<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
value: &[ValTensor<F>; 1],
divisor: F,
values: &[ValTensor<F>; 2],
) -> Result<ValTensor<F>, CircuitError> {
if divisor == F::ONE {
return Ok(value[0].clone());
}
let diff = pairwise(config, region, values, BaseOp::Sub)?;
let abs_diff = abs(config, region, &[diff])?;
// if integer val is divisible by 2, we can use a faster method and div > F::S
let mut divisor = divisor;
let mut num_parts = 1;
Ok(abs_diff)
}
while felt_to_integer_rep(divisor) % 2 == 0
&& felt_to_integer_rep(divisor) > (2_i128.pow(F::S - 4))
{
divisor = integer_rep_to_felt(felt_to_integer_rep(divisor) / 2);
num_parts += 1;
}
/// Determines whether, among a set of three tensors, the first is closest to a reference tensor.
/// Should only be used in the context of a monotonic function, like the product used in the division, recip, and sqrt arguments,
/// or the increasing powers of 2 in the ln argument, which is used to construct a convex error function.
fn optimum_convex_function<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
x: &ValTensor<F>,
f: impl Fn(&BaseConfig<F>, &mut RegionCtx<F>, &ValTensor<F>) -> Result<ValTensor<F>, CircuitError>,
) -> Result<(), CircuitError> {
let one = create_constant_tensor(F::from(1), 1);
let output = div(config, region, value, divisor)?;
if num_parts == 1 {
return Ok(output);
}
let f_x = f(config, region, x)?;
let divisor_int = 2_i128.pow(num_parts - 1);
let divisor_felt = integer_rep_to_felt(divisor_int);
if divisor_int <= 2_i128.pow(F::S - 3) {
div(config, region, &[output], divisor_felt)
} else {
// keep splitting the divisor until it satisfies the condition
loop_div(config, region, &[output], divisor_felt)
}
let x_plus_1 = pairwise(config, region, &[x.clone(), one.clone()], BaseOp::Add)?;
let f_x_plus_1 = f(config, region, &x_plus_1)?;
let x_minus_1 = pairwise(config, region, &[x.clone(), one.clone()], BaseOp::Sub)?;
let f_x_minus_1 = f(config, region, &x_minus_1)?;
// because the function is convex, the result should be the minimum of the three
// note that we offset x by 1 to get the neighbouring values
// f(x) <= f(x+1) and f(x) <= f(x-1)
// the result is 1 if the function is optimal solely because of the convexity of the function
// the distances can be equal but this is only possible if f(x) and f(x+1) are both optimal (or f(x) and f(x-1)).
let f_x_is_opt_rhs = less_equal(config, region, &[f_x.clone(), f_x_plus_1.clone()])?;
let f_x_is_opt_lhs = less_equal(config, region, &[f_x.clone(), f_x_minus_1.clone()])?;
let is_opt = and(config, region, &[f_x_is_opt_lhs, f_x_is_opt_rhs])?;
let mut comparison_unit = create_constant_tensor(F::ONE, is_opt.len());
comparison_unit.reshape(is_opt.dims())?;
// assert that the result is 1
enforce_equality(config, region, &[is_opt, comparison_unit])?;
Ok(())
}
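For intuition, the certificate above can be replayed on plain integers: for a convex error function f, a claimed point x is a global integer minimizer exactly when f(x) <= f(x+1) and f(x) <= f(x-1). A minimal out-of-circuit sketch (plain Rust, no ezkl types; the hard-coded f stands in for the in-circuit error closure):

// Convex error for a claimed division 7 / 2 at integer precision: f(y) = |2*y - 7|.
fn f(y: i128) -> i128 {
    (2 * y - 7).abs()
}

fn main() {
    let claimed = 4; // a valid rounding of 7 / 2
    // the two comparisons enforced by optimum_convex_function
    assert!(f(claimed) <= f(claimed + 1) && f(claimed) <= f(claimed - 1));
    // a wrong claim fails at least one of them
    let wrong = 2;
    assert!(!(f(wrong) <= f(wrong + 1) && f(wrong) <= f(wrong - 1)));
}

As the comment in the function notes, ties are possible: for 7 / 2 both 3 and 4 satisfy the certificate, which is exactly the case where two neighbouring claims are equally optimal.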
/// Asserts that the error (the L1 distance between the two tensors) is strictly less than some constant
pub fn diff_less_than<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 2],
constant: F,
) -> Result<(), CircuitError> {
let distance = l1_distance(config, region, values)?;
let constant = create_constant_tensor(constant, 1);
let is_less = less(config, region, &[distance.clone(), constant.clone()])?;
// assert the result is 1
let comparison_unit = create_constant_tensor(F::ONE, is_less.len());
enforce_equality(config, region, &[is_less, comparison_unit])?;
Ok(())
}
/// Div accumulated layout
@@ -80,13 +135,8 @@ pub(crate) fn div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
let input = value[0].clone();
let input_dims = input.dims();
let range_check_bracket = felt_to_integer_rep(div) / 2;
let divisor = create_constant_tensor(div, 1);
let divisor = region.assign(&config.custom_gates.inputs[1], &divisor)?;
region.increment(divisor.len());
let is_assigned = !input.any_unknowns()? && !divisor.any_unknowns()?;
let mut claimed_output: ValTensor<F> = if is_assigned {
@@ -117,19 +167,7 @@ pub(crate) fn div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
BaseOp::Mult,
)?;
let diff_with_input = pairwise(
config,
region,
&[product.clone(), input.clone()],
BaseOp::Sub,
)?;
range_check(
config,
region,
&[diff_with_input],
&(-range_check_bracket, range_check_bracket),
)?;
diff_less_than(config, region, &[input.clone(), product.clone()], div)?;
Ok(claimed_output)
}
@@ -145,19 +183,7 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
let input = value[0].clone();
let input_dims = input.dims();
let integer_input_scale = felt_to_integer_rep(input_scale);
let integer_output_scale = felt_to_integer_rep(output_scale);
// range_check_bracket is min of input_scale * output_scale and 2^F::S - 3
let range_check_len = std::cmp::min(integer_output_scale, 2_i128.pow(F::S - 4));
let input_scale_ratio = if range_check_len > 0 {
integer_rep_to_felt(integer_input_scale * integer_output_scale / range_check_len)
} else {
F::ONE
};
let range_check_bracket = range_check_len / 2;
let unit_scale = create_constant_tensor(output_scale * input_scale, 1);
let is_assigned = !input.any_unknowns()?;
@@ -183,25 +209,22 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?;
region.increment(claimed_output.len());
// this is now of scale 2 * scale
let product = pairwise(
config,
region,
&[claimed_output.clone(), input.clone()],
BaseOp::Mult,
)?;
// divide by input_scale
let rebased_div = loop_div(config, region, &[product], input_scale_ratio)?;
let zero_inverse_val =
tensor::ops::nonlinearities::zero_recip(felt_to_integer_rep(output_scale) as f64)[0];
let zero_inverse = create_constant_tensor(integer_rep_to_felt(zero_inverse_val), 1);
let equal_zero_mask = equals_zero(config, region, &[input.clone()])?;
let not_equal_zero_mask = not(config, region, &[equal_zero_mask.clone()])?;
let equal_inverse_mask = equals(config, region, &[claimed_output.clone(), zero_inverse])?;
let masked_unit_scale = pairwise(
config,
region,
&[unit_scale.clone(), not_equal_zero_mask.clone()],
BaseOp::Mult,
)?;
// assert the two masks are equal
enforce_equality(
config,
@@ -209,24 +232,135 @@ pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&[equal_zero_mask.clone(), equal_inverse_mask],
)?;
let unit_scale = create_constant_tensor(integer_rep_to_felt(range_check_len), 1);
let err_func = |config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
x: &ValTensor<F>|
-> Result<ValTensor<F>, CircuitError> {
let product = pairwise(config, region, &[x.clone(), input.clone()], BaseOp::Mult)?;
let unit_mask = pairwise(config, region, &[equal_zero_mask, unit_scale], BaseOp::Mult)?;
let distance = l1_distance(
config,
region,
&[product.clone(), masked_unit_scale.clone()],
)?;
Ok(distance)
};
// now add the unit mask to the rebased_div
let rebased_offset_div = pairwise(config, region, &[rebased_div, unit_mask], BaseOp::Add)?;
// at most the error should be in the original unit scale's range
range_check(
config,
region,
&[rebased_offset_div],
&(range_check_bracket, 3 * range_check_bracket),
)?;
optimum_convex_function(config, region, &claimed_output, err_func)?;
Ok(claimed_output)
}
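As a plain-integer illustration of the claim being verified here (an out-of-circuit sketch, not the library's code path): for a non-zero input x at scale s_in and output scale s_out, an honest prover witnesses roughly round(s_in * s_out / x), and the circuit accepts it because it minimizes the convex error |y * x - s_in * s_out|; the zero-input case is instead pinned to the zero_recip constant via the masks above.

fn err(y: i128, x: i128, target: i128) -> i128 {
    (y * x - target).abs()
}

fn main() {
    let (s_in, s_out, x) = (4i128, 4i128, 3i128);
    let target = s_in * s_out; // 16
    let claimed = ((target as f64) / (x as f64)).round() as i128; // round(16 / 3) = 5
    assert_eq!(claimed, 5);
    // the convexity certificate from optimum_convex_function
    assert!(err(claimed, x, target) <= err(claimed + 1, x, target));
    assert!(err(claimed, x, target) <= err(claimed - 1, x, target));
}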
/// Square root accumulated layout
/// # Example
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::sqrt;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// use ezkl::tensor::ValTensor;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(128,2));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
/// Some(&[1, 2, 3, 2, 3, 4, 3, 4, 9]),
/// &[3, 3],
/// ).unwrap());
/// let result = sqrt::<Fp>(&dummy_config, &mut dummy_region, &[x], 1.0.into()).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[1, 1, 2, 1, 2, 2, 2, 2, 3]), &[3, 3]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
/// ```
pub fn sqrt<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
value: &[ValTensor<F>; 1],
input_scale: utils::F32,
) -> Result<ValTensor<F>, CircuitError> {
let input = value[0].clone();
let input_dims = input.dims();
let unit_scale = create_constant_tensor(integer_rep_to_felt(input_scale.0 as IntegerRep), 1);
let is_assigned = !input.any_unknowns()?;
let mut claimed_output: ValTensor<F> = if is_assigned {
let input_evals = input.int_evals()?;
tensor::ops::nonlinearities::sqrt(&input_evals, input_scale.0 as f64)
.par_iter()
.map(|x| Value::known(integer_rep_to_felt(*x)))
.collect::<Tensor<Value<F>>>()
.into()
} else {
Tensor::new(
Some(&vec![Value::<F>::unknown(); input.len()]),
&[input.len()],
)?
.into()
};
claimed_output.reshape(input_dims)?;
let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?;
region.increment(claimed_output.len());
// force the output to be positive or zero
let claimed_output = abs(config, region, &[claimed_output.clone()])?;
// rescaled input
let rescaled_input = pairwise(config, region, &[input.clone(), unit_scale], BaseOp::Mult)?;
let err_func = |config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
x: &ValTensor<F>|
-> Result<ValTensor<F>, CircuitError> {
let product = pairwise(config, region, &[x.clone(), x.clone()], BaseOp::Mult)?;
let distance = l1_distance(config, region, &[product.clone(), rescaled_input.clone()])?;
Ok(distance)
};
optimum_convex_function(config, region, &claimed_output, err_func)?;
Ok(claimed_output)
}
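To make the doctest above concrete with plain integers (a sketch of the relation being enforced, not the circuit itself): with input_scale = 1 the claimed output y for an input x must minimize |y * y - x| over the integers, which is why sqrt(3) lands on 2 and sqrt(9) on 3 in the expected tensor.

fn err(y: i128, x: i128) -> i128 {
    (y * y - x).abs()
}

fn main() {
    // x = 3: y = 2 beats both neighbours (|4 - 3| = 1 vs |1 - 3| = 2 and |9 - 3| = 6)
    assert!(err(2, 3) <= err(1, 3) && err(2, 3) <= err(3, 3));
    // x = 9: y = 3 is exact
    assert!(err(3, 9) <= err(2, 9) && err(3, 9) <= err(4, 9));
}

For a general input_scale s the same argument applies to |y * y - s * x|, since the function first rescales the input by s.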
/// Reciprocal square root accumulated layout
/// # Example
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::rsqrt;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// use ezkl::tensor::ValTensor;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(128,2));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
/// Some(&[1, 2, 3, 2, 3, 4, 3, 4, 5]),
/// &[3, 3],
/// ).unwrap());
/// let result = rsqrt::<Fp>(&dummy_config, &mut dummy_region, &[x], 1.0.into(), 1.0.into()).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[1, 1, 1, 1, 1, 1, 1, 1, 1]), &[3, 3]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
/// ```
pub fn rsqrt<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
value: &[ValTensor<F>; 1],
input_scale: utils::F32,
output_scale: utils::F32,
) -> Result<ValTensor<F>, CircuitError> {
let sqrt = sqrt(config, region, value, input_scale)?;
let felt_output_scale = integer_rep_to_felt(output_scale.0 as IntegerRep);
let felt_input_scale = integer_rep_to_felt(input_scale.0 as IntegerRep);
let recip = recip(config, region, &[sqrt], felt_input_scale, felt_output_scale)?;
Ok(recip)
}
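The circuit-level rsqrt is thus just a composition: the sqrt layout at input_scale followed by the recip layout rebasing to output_scale. An out-of-circuit reference of that composition might look like the sketch below (the rounding is an assumption; the library's fixed-point helpers may differ in edge cases):

// Reference rsqrt at integer scales, mirroring the circuit's order of operations:
// first a claimed square root, then a claimed reciprocal of that root.
fn rsqrt_ref(x: i128, s_in: f64, s_out: f64) -> i128 {
    let root = ((x as f64) * s_in).sqrt().round(); // sqrt layout's claimed output
    ((s_in * s_out) / root).round() as i128        // recip layout's claimed output
}

fn main() {
    // matches the doctest above: with both scales at 1, inputs 1..=5 all map to 1
    for x in 1..=5 {
        assert_eq!(rsqrt_ref(x, 1.0, 1.0), 1);
    }
}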
/// Dot product of two tensors.
/// ```
/// use ezkl::tensor::Tensor;
@@ -1805,6 +1939,10 @@ pub fn sum<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
) -> Result<ValTensor<F>, CircuitError> {
if values[0].len() == 1 {
return Ok(values[0].clone());
}
region.flush()?;
// time this entire function run
let global_start = instant::Instant::now();
@@ -3102,7 +3240,7 @@ pub fn sumpool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
last_elem.reshape(&[&[batch_size, image_channels], shape].concat())?;
if normalized {
last_elem = loop_div(config, region, &[last_elem], F::from(kernel_len as u64))?;
last_elem = div(config, region, &[last_elem], F::from(kernel_len as u64))?;
}
Ok(last_elem)
}
@@ -4547,15 +4685,10 @@ pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
// first generate the claimed val
let mut input = values[0].clone();
println!("input {}", input.show());
let scale_as_felt = integer_rep_to_felt(scale.0.round() as IntegerRep);
let assigned_triple_scaled_as_felt_tensor = region.assign(
&config.custom_gates.inputs[1],
&create_constant_tensor(scale_as_felt * scale_as_felt * scale_as_felt, 1),
)?;
let triple_scaled_as_felt_tensor =
create_constant_tensor(scale_as_felt * scale_as_felt * scale_as_felt, 1);
// natural ln is log2(x) * ln(2)
let ln2 = utils::F32::from(2.0_f32.ln());
@@ -4564,10 +4697,8 @@ pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
integer_rep_to_felt((ln2.0 * scale.0).round() as IntegerRep),
1,
);
region.assign(&config.custom_gates.inputs[0], &ln2_tensor)?;
let unit = create_constant_tensor(integer_rep_to_felt(1), 1);
region.assign(&config.custom_gates.inputs[1], &unit)?;
region.increment(1);
let negative_one = create_constant_tensor(integer_rep_to_felt(-1), 1);
// 2. assign the image
if !input.all_prev_assigned() {
@@ -4639,27 +4770,6 @@ pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
&LookupOp::PowersOfTwo { scale },
)?;
// assert that the original input is closer to the claimed output than to the prior power of 2 and the next power of 2
let distance_to_prior = pairwise(
config,
region,
&[input.clone(), prior_pow2.clone()],
BaseOp::Sub,
)?;
// now take abs of the distance
let distance_to_prior_l1 = abs(config, region, &[distance_to_prior.clone()])?;
let distance_to_next = pairwise(
config,
region,
&[input.clone(), next_pow2.clone()],
BaseOp::Sub,
)?;
// now take abs of the distance
let distance_to_next_l1 = abs(config, region, &[distance_to_next.clone()])?;
let distance_to_claimed = pairwise(
config,
region,
@@ -4667,66 +4777,58 @@ pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
BaseOp::Sub,
)?;
// now take abs of the distance
let distance_to_claimed_l1 = abs(config, region, &[distance_to_claimed.clone()])?;
let abs_distance_to_claimed = abs(config, region, &[distance_to_claimed.clone()])?;
// can be less than or equal because we round up
let is_distance_to_prior_less = less_equal(
config,
region,
&[distance_to_claimed_l1.clone(), distance_to_prior_l1.clone()],
)?;
let abs_distance_to_next_pow2 =
l1_distance(config, region, &[input.clone(), next_pow2.clone()])?;
// should be strictly less because we round up
let is_distance_to_next_less = less(
config,
region,
&[distance_to_claimed_l1, distance_to_next_l1.clone()],
)?;
let abs_distance_to_prior_pow2 =
l1_distance(config, region, &[input.clone(), prior_pow2.clone()])?;
let is_distance_to_prior_less_and_distance_to_next_less = and(
// because we round up this can be equal
let is_closest_to_0: ValTensor<F> = less(
config,
region,
&[
is_distance_to_prior_less.clone(),
is_distance_to_next_less.clone(),
abs_distance_to_claimed.clone(),
abs_distance_to_next_pow2.clone(),
],
)?;
let mut comparison_unit = create_constant_tensor(
integer_rep_to_felt(1),
is_distance_to_prior_less_and_distance_to_next_less.len(),
);
let is_closest_to_1 = less(
config,
region,
&[
abs_distance_to_claimed.clone(),
abs_distance_to_prior_pow2.clone(),
],
)?;
comparison_unit.reshape(is_distance_to_prior_less_and_distance_to_next_less.dims())?;
let is_closest = and(
config,
region,
&[is_closest_to_0.clone(), is_closest_to_1.clone()],
)?;
// assigned unit
let mut comparison_unit = create_constant_tensor(integer_rep_to_felt(1), is_closest.len());
comparison_unit.reshape(is_closest.dims())?;
let assigned_unit = region.assign(&config.custom_gates.inputs[1], &comparison_unit)?;
region.increment(assigned_unit.len());
// assert that the values are truthy
enforce_equality(
config,
region,
&[
is_distance_to_prior_less_and_distance_to_next_less,
assigned_unit.clone(),
],
)?;
enforce_equality(config, region, &[is_closest, assigned_unit])?;
// get a linear interpolation now
let sign_of_distance_to_claimed = sign(config, region, &[distance_to_claimed.clone()])?;
let sign_of_distance_to_claimed_is_positive = equals(
let sign_of_distance_to_claimed_is_negative = equals(
config,
region,
&[sign_of_distance_to_claimed.clone(), assigned_unit.clone()],
&[sign_of_distance_to_claimed.clone(), negative_one.clone()],
)?;
let sign_of_distance_to_claimed_is_negative = not(
let sign_of_distance_to_claimed_is_positive = not(
config,
region,
&[sign_of_distance_to_claimed_is_positive.clone()],
&[sign_of_distance_to_claimed_is_negative.clone()],
)?;
let pow2_prior_to_claimed_distance = pairwise(
@@ -4802,10 +4904,7 @@ pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
let scaled_claimed_output = pairwise(
config,
region,
&[
claimed_output.clone(),
assigned_triple_scaled_as_felt_tensor,
],
&[claimed_output.clone(), triple_scaled_as_felt_tensor],
BaseOp::Mult,
)?;
@@ -4877,11 +4976,7 @@ pub fn round<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
let zero = ValType::Constant(F::ZERO);
let one = create_constant_tensor(integer_rep_to_felt(1), 1);
let assigned_one = region.assign(&config.custom_gates.inputs[1], &one)?;
let negative_one = create_constant_tensor(integer_rep_to_felt(-1), 1);
let assigned_negative_one = region.assign(&config.custom_gates.output, &negative_one)?;
region.increment(1);
// if scale is not exactly divisible by 2 we warn
if scale.0 % 2.0 != 0.0 {
@@ -4914,8 +5009,8 @@ pub fn round<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
let last_elem = sliced_input.last()?;
let sign = sliced_input.first()?;
let is_positive = equals(config, region, &[sign.clone(), assigned_one.clone()])?;
let is_negative = equals(config, region, &[sign, assigned_negative_one.clone()])?;
let is_positive = equals(config, region, &[sign.clone(), one.clone()])?;
let is_negative = equals(config, region, &[sign, negative_one.clone()])?;
let is_greater_than_midway = greater_equal(
config,
@@ -5032,9 +5127,6 @@ pub fn round_half_to_even<F: PrimeField + TensorType + PartialOrd + std::hash::H
integer_rep_to_felt((scale.0 / 2.0).round() as IntegerRep),
1,
);
let assigned_midway_point = region.assign(&config.custom_gates.inputs[1], &midway_point)?;
region.increment(1);
let dims = decomposition.dims().to_vec();
let first_dims = decomposition.dims().to_vec()[..decomposition.dims().len() - 1].to_vec();
@@ -5058,11 +5150,8 @@ pub fn round_half_to_even<F: PrimeField + TensorType + PartialOrd + std::hash::H
let penultimate_elem =
sliced_input.get_slice(&[sliced_input.len() - 2..sliced_input.len() - 1])?;
let is_equal_to_midway = equals(
config,
region,
&[last_elem.clone(), assigned_midway_point.clone()],
)?;
let is_equal_to_midway =
equals(config, region, &[last_elem.clone(), midway_point.clone()])?;
// penultimate_elem is equal to midway point and even, do nothing
let is_odd = nonlinearity(
config,
@@ -5077,11 +5166,8 @@ pub fn round_half_to_even<F: PrimeField + TensorType + PartialOrd + std::hash::H
&[is_odd.clone(), is_equal_to_midway.clone()],
)?;
let is_greater_than_midway = greater(
config,
region,
&[last_elem.clone(), assigned_midway_point.clone()],
)?;
let is_greater_than_midway =
greater(config, region, &[last_elem.clone(), midway_point.clone()])?;
// if the number is equal to the midway point and odd, increment; or if it is greater than the midway point
let is_odd_and_equal_to_midway_or_greater_than_midway = or(
@@ -5494,11 +5580,8 @@ pub(crate) fn percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>
input_felt_scale,
output_felt_scale,
)?;
// product of num * (1 / denom) = 2*output_scale
let percent = pairwise(config, region, &[input, inv_denom], BaseOp::Mult)?;
// rebase the percent to 2x the scale
loop_div(config, region, &[percent], input_felt_scale)
// product of num * (1 / denom) = input_scale * output_scale
pairwise(config, region, &[input, inv_denom], BaseOp::Mult)
}
/// Applies softmax
@@ -5522,7 +5605,7 @@ pub(crate) fn percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>
/// ).unwrap());
/// let result = softmax::<Fp>(&dummy_config, &mut dummy_region, &[x], 128.0.into(), (128.0 * 128.0).into()).unwrap();
/// // doubles the scale of the input
/// let expected = Tensor::<IntegerRep>::new(Some(&[2734, 2734, 2756, 2734, 2734, 2691]), &[2, 3]).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[350012, 350012, 352768, 350012, 350012, 344500]), &[2, 3]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
/// ```
pub fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
@@ -5602,17 +5685,8 @@ pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::
let int_scale = scale.0 as IntegerRep;
// felt scale
let felt_scale = integer_rep_to_felt(int_scale);
// range check len capped at 2^(S-3) and make it divisible 2
let range_check_bracket = std::cmp::min(
utils::F32(scale.0),
utils::F32(2_f32.powf((F::S - 5) as f32)),
)
.0;
let range_check_bracket_int = range_check_bracket as IntegerRep;
// input scale ratio we multiply by tol such that in the new scale range_check_len represents tol percent
let input_scale_ratio = ((scale.0.powf(2.0) / range_check_bracket) * tol) as IntegerRep / 2 * 2;
let input_scale_ratio = (scale.0 * tol) as IntegerRep / 2 * 2;
let recip = recip(
config,
@@ -5628,7 +5702,7 @@ pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::
let product = pairwise(config, region, &[diff, recip], BaseOp::Mult)?;
log::debug!("product: {}", product.show());
let rebased_product = loop_div(
let rebased_product = div(
config,
region,
&[product],
@@ -5637,10 +5711,5 @@ pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::
log::debug!("rebased_product: {}", rebased_product.show());
// check that it is within the tolerance range
range_check(
config,
region,
&[rebased_product],
&(-range_check_bracket_int, range_check_bracket_int),
)
range_check(config, region, &[rebased_product], &(-int_scale, int_scale))
}

View File

@@ -18,8 +18,6 @@ pub enum LookupOp {
IsOdd,
PowersOfTwo { scale: utils::F32 },
Ln { scale: utils::F32 },
Sqrt { scale: utils::F32 },
Rsqrt { scale: utils::F32 },
Sigmoid { scale: utils::F32 },
Exp { scale: utils::F32 },
Cos { scale: utils::F32 },
@@ -56,8 +54,6 @@ impl LookupOp {
LookupOp::IsOdd => "is_odd".to_string(),
LookupOp::Div { denom } => format!("div_{}", denom),
LookupOp::Sigmoid { scale } => format!("sigmoid_{}", scale),
LookupOp::Sqrt { scale } => format!("sqrt_{}", scale),
LookupOp::Rsqrt { scale } => format!("rsqrt_{}", scale),
LookupOp::Erf { scale } => format!("erf_{}", scale),
LookupOp::Exp { scale } => format!("exp_{}", scale),
LookupOp::Cos { scale } => format!("cos_{}", scale),
@@ -100,12 +96,6 @@ impl LookupOp {
LookupOp::Sigmoid { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sigmoid(&x, scale.into()))
}
LookupOp::Sqrt { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::sqrt(&x, scale.into()))
}
LookupOp::Rsqrt { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::rsqrt(&x, scale.into()))
}
LookupOp::Erf { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::erffunc(&x, scale.into()))
}
@@ -174,9 +164,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
LookupOp::Pow { a, scale } => format!("POW(scale={}, exponent={})", scale, a),
LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
LookupOp::Sqrt { scale } => format!("SQRT(scale={})", scale),
LookupOp::Erf { scale } => format!("ERF(scale={})", scale),
LookupOp::Rsqrt { scale } => format!("RSQRT(scale={})", scale),
LookupOp::Exp { scale } => format!("EXP(scale={})", scale),
LookupOp::Tan { scale } => format!("TAN(scale={})", scale),
LookupOp::ATan { scale } => format!("ATAN(scale={})", scale),
@@ -210,9 +198,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
/// Returns the scale of the output of the operation.
fn out_scale(&self, inputs_scale: Vec<crate::Scale>) -> Result<crate::Scale, CircuitError> {
let scale = match self {
_ => inputs_scale[0],
};
let scale = inputs_scale[0];
Ok(scale)
}

View File

@@ -333,18 +333,6 @@ impl<'source> FromPyObject<'source> for ContractType {
}
}
}
// not wasm
use lazy_static::lazy_static;
// if CARGO VERSION is 0.0.0 replace with "source - no compatibility guaranteed"
lazy_static! {
/// The version of the ezkl library
pub static ref VERSION: &'static str = if env!("CARGO_PKG_VERSION") == "0.0.0" {
"source - no compatibility guaranteed"
} else {
env!("CARGO_PKG_VERSION")
};
}
/// Get the styles for the CLI
pub fn get_styles() -> clap::builder::Styles {
@@ -395,7 +383,7 @@ pub fn print_completions<G: Generator>(gen: G, cmd: &mut Command) {
#[allow(missing_docs)]
#[derive(Parser, Debug, Clone)]
#[command(author, about, long_about = None)]
#[clap(version = *VERSION, styles = get_styles(), trailing_var_arg = true)]
#[clap(version = crate::version(), styles = get_styles(), trailing_var_arg = true)]
pub struct Cli {
/// If provided, outputs the completion file for given shell
#[clap(long = "generate", value_parser)]
@@ -486,9 +474,6 @@ pub enum Commands {
/// max logrows to use for calibration, 26 is the max public SRS size
#[arg(long, value_hint = clap::ValueHint::Other)]
max_logrows: Option<u32>,
// whether to only range check rebases (instead of trying both range check and lookup)
#[arg(long, default_value = DEFAULT_ONLY_RANGE_CHECK_REBASE, action = clap::ArgAction::SetTrue)]
only_range_check_rebase: Option<bool>,
},
/// Generates a dummy SRS

View File

@@ -140,7 +140,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
scales,
scale_rebase_multiplier,
max_logrows,
only_range_check_rebase,
} => calibrate(
model.unwrap_or(DEFAULT_MODEL.into()),
data.unwrap_or(DEFAULT_DATA.into()),
@@ -149,7 +148,6 @@ pub async fn run(command: Commands) -> Result<String, EZKLError> {
lookup_safety_margin,
scales,
scale_rebase_multiplier,
only_range_check_rebase.unwrap_or(DEFAULT_ONLY_RANGE_CHECK_REBASE.parse().unwrap()),
max_logrows,
)
.await
@@ -671,10 +669,10 @@ pub(crate) async fn get_srs_cmd(
let srs_uri = format!("{}{}", PUBLIC_SRS_URL, k);
let mut reader = Cursor::new(fetch_srs(&srs_uri).await?);
// check the SRS
let pb = init_spinner();
pb.set_message("Validating SRS (this may take a while) ...");
let pb = init_spinner();
pb.set_message("Validating SRS (this may take a while) ...");
let params = ParamsKZG::<Bn256>::read(&mut reader)?;
pb.finish_with_message("SRS validated.");
pb.finish_with_message("SRS validated.");
info!("Saving SRS to disk...");
let computed_srs_path = get_srs_path(k, srs_path.clone(), commitment);
@@ -682,7 +680,10 @@ pub(crate) async fn get_srs_cmd(
let mut buffer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, &mut file);
params.write(&mut buffer)?;
info!("Saved SRS to {}.", computed_srs_path.as_os_str().to_str().unwrap_or("disk"));
info!(
"Saved SRS to {}.",
computed_srs_path.as_os_str().to_str().unwrap_or("disk")
);
info!("SRS downloaded");
} else {
@@ -728,7 +729,7 @@ pub(crate) async fn gen_witness(
None
};
let mut input = circuit.load_graph_input(&data).await?;
let mut input = circuit.load_graph_input(&data).await?;
#[cfg(any(not(feature = "ezkl"), target_arch = "wasm32"))]
let mut input = circuit.load_graph_input(&data)?;
@@ -968,7 +969,6 @@ pub(crate) async fn calibrate(
lookup_safety_margin: f64,
scales: Option<Vec<crate::Scale>>,
scale_rebase_multiplier: Vec<u32>,
only_range_check_rebase: bool,
max_logrows: Option<u32>,
) -> Result<GraphSettings, EZKLError> {
use log::error;
@@ -1004,12 +1004,6 @@ pub(crate) async fn calibrate(
(11..14).collect::<Vec<crate::Scale>>()
};
let div_rebasing = if only_range_check_rebase {
vec![false]
} else {
vec![true, false]
};
let mut found_params: Vec<GraphSettings> = vec![];
// 2 x 2 grid
@@ -1047,12 +1041,6 @@ pub(crate) async fn calibrate(
.map(|(a, b)| (*a, *b))
.collect::<Vec<((crate::Scale, crate::Scale), u32)>>();
let range_grid = range_grid
.iter()
.cartesian_product(div_rebasing.iter())
.map(|(a, b)| (*a, *b))
.collect::<Vec<(((crate::Scale, crate::Scale), u32), bool)>>();
let mut forward_pass_res = HashMap::new();
let pb = init_bar(range_grid.len() as u64);
@@ -1061,30 +1049,23 @@ pub(crate) async fn calibrate(
let mut num_failed = 0;
let mut num_passed = 0;
for (((input_scale, param_scale), scale_rebase_multiplier), div_rebasing) in range_grid {
for ((input_scale, param_scale), scale_rebase_multiplier) in range_grid {
pb.set_message(format!(
"i-scale: {}, p-scale: {}, rebase-(x): {}, div-rebase: {}, fail: {}, pass: {}",
"i-scale: {}, p-scale: {}, rebase-(x): {}, fail: {}, pass: {}",
input_scale.to_string().blue(),
param_scale.to_string().blue(),
scale_rebase_multiplier.to_string().blue(),
div_rebasing.to_string().yellow(),
scale_rebase_multiplier.to_string().yellow(),
num_failed.to_string().red(),
num_passed.to_string().green()
));
let key = (
input_scale,
param_scale,
scale_rebase_multiplier,
div_rebasing,
);
let key = (input_scale, param_scale, scale_rebase_multiplier);
forward_pass_res.insert(key, vec![]);
let local_run_args = RunArgs {
input_scale,
param_scale,
scale_rebase_multiplier,
div_rebasing,
lookup_range: (IntegerRep::MIN, IntegerRep::MAX),
..settings.run_args.clone()
};
@@ -1188,7 +1169,6 @@ pub(crate) async fn calibrate(
let found_run_args = RunArgs {
input_scale: new_settings.run_args.input_scale,
param_scale: new_settings.run_args.param_scale,
div_rebasing: new_settings.run_args.div_rebasing,
lookup_range: new_settings.run_args.lookup_range,
logrows: new_settings.run_args.logrows,
scale_rebase_multiplier: new_settings.run_args.scale_rebase_multiplier,
@@ -1296,7 +1276,6 @@ pub(crate) async fn calibrate(
best_params.run_args.input_scale,
best_params.run_args.param_scale,
best_params.run_args.scale_rebase_multiplier,
best_params.run_args.div_rebasing,
))
.ok_or("no params found")?
.iter()
@@ -2022,7 +2001,7 @@ pub(crate) fn mock_aggregate(
}
}
// proof aggregation
let pb = {
let pb = {
let pb = init_spinner();
pb.set_message("Aggregating (may take a while)...");
pb
@@ -2033,7 +2012,7 @@ pub(crate) fn mock_aggregate(
let prover = halo2_proofs::dev::MockProver::run(logrows, &circuit, vec![circuit.instances()])
.map_err(|e| ExecutionError::MockProverError(e.to_string()))?;
prover.verify().map_err(ExecutionError::VerifyError)?;
pb.finish_with_message("Done.");
pb.finish_with_message("Done.");
Ok(String::new())
}
@@ -2127,7 +2106,7 @@ pub(crate) fn aggregate(
}
// proof aggregation
let pb = {
let pb = {
let pb = init_spinner();
pb.set_message("Aggregating (may take a while)...");
pb
@@ -2276,7 +2255,7 @@ pub(crate) fn aggregate(
);
snark.save(&proof_path)?;
pb.finish_with_message("Done.");
pb.finish_with_message("Done.");
Ok(snark)
}

View File

@@ -133,6 +133,8 @@ pub struct GraphWitness {
pub min_lookup_inputs: IntegerRep,
/// max range check size
pub max_range_size: IntegerRep,
/// (optional) version of ezkl used
pub version: Option<String>,
}
impl GraphWitness {
@@ -161,6 +163,7 @@ impl GraphWitness {
max_lookup_inputs: 0,
min_lookup_inputs: 0,
max_range_size: 0,
version: None,
}
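The new field is optional, so its presence is a pure addition to the serialized witness. The sketch below uses a hypothetical stand-in struct (not ezkl's GraphWitness, and whether ezkl applies a serde default here is not visible in this hunk) to show the backward-compatibility property an Option field with #[serde(default)] gives you: older JSON without a "version" key still deserializes to None.

use serde::Deserialize;

// Hypothetical stub mirroring only the shape of the change.
#[derive(Deserialize)]
struct WitnessStub {
    max_range_size: i128,
    #[serde(default)]
    version: Option<String>,
}

fn main() {
    let old: WitnessStub = serde_json::from_str(r#"{"max_range_size":127}"#).unwrap();
    assert!(old.version.is_none());
}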
}
@@ -1350,6 +1353,7 @@ impl GraphCircuit {
max_lookup_inputs: model_results.max_lookup_inputs,
min_lookup_inputs: model_results.min_lookup_inputs,
max_range_size: model_results.max_range_size,
version: Some(crate::version().to_string()),
};
witness.generate_rescaled_elements(

View File

@@ -915,20 +915,9 @@ impl Model {
if scales.contains_key(&i) {
let scale_diff = n.out_scale - scales[&i];
n.opkind = if scale_diff > 0 {
RebaseScale::rebase(
n.opkind,
scales[&i],
n.out_scale,
1,
run_args.div_rebasing,
)
RebaseScale::rebase(n.opkind, scales[&i], n.out_scale, 1)
} else {
RebaseScale::rebase_up(
n.opkind,
scales[&i],
n.out_scale,
run_args.div_rebasing,
)
RebaseScale::rebase_up(n.opkind, scales[&i], n.out_scale)
};
n.out_scale = scales[&i];
}

View File

@@ -120,7 +120,6 @@ impl RebaseScale {
global_scale: crate::Scale,
op_out_scale: crate::Scale,
scale_rebase_multiplier: u32,
div_rebasing: bool,
) -> SupportedOp {
if (op_out_scale > (global_scale * scale_rebase_multiplier as i32))
&& !inner.is_constant()
@@ -137,7 +136,6 @@ impl RebaseScale {
multiplier,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32((multiplier) as f32),
use_range_check_for_int: !div_rebasing,
},
original_scale: op.original_scale,
})
@@ -148,7 +146,6 @@ impl RebaseScale {
multiplier,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32(multiplier as f32),
use_range_check_for_int: !div_rebasing,
},
original_scale: op_out_scale,
})
@@ -163,7 +160,6 @@ impl RebaseScale {
inner: SupportedOp,
target_scale: crate::Scale,
op_out_scale: crate::Scale,
div_rebasing: bool,
) -> SupportedOp {
if (op_out_scale < (target_scale)) && !inner.is_constant() && !inner.is_input() {
let multiplier = scale_to_multiplier(op_out_scale - target_scale);
@@ -176,7 +172,6 @@ impl RebaseScale {
original_scale: op.original_scale,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32((multiplier) as f32),
use_range_check_for_int: !div_rebasing,
},
})
} else {
@@ -187,7 +182,6 @@ impl RebaseScale {
original_scale: op_out_scale,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32(multiplier as f32),
use_range_check_for_int: !div_rebasing,
},
})
}
@@ -595,13 +589,7 @@ impl Node {
let mut out_scale = opkind.out_scale(in_scales.clone())?;
// rescale the inputs if necessary to get consistent fixed points; we select the largest scale (highest precision)
let global_scale = scales.get_max();
opkind = RebaseScale::rebase(
opkind,
global_scale,
out_scale,
scales.rebase_multiplier,
run_args.div_rebasing,
);
opkind = RebaseScale::rebase(opkind, global_scale, out_scale, scales.rebase_multiplier);
out_scale = opkind.out_scale(in_scales)?;

View File

@@ -764,7 +764,7 @@ pub fn new_op_from_onnx(
.collect::<Vec<_>>();
if inputs.len() == 2 {
if const_inputs.len() > 0 {
if !const_inputs.is_empty() {
let const_idx = const_inputs[0];
let boxed_op = inputs[const_idx].opkind();
let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
@@ -842,12 +842,17 @@ pub fn new_op_from_onnx(
"Sigmoid" => SupportedOp::Nonlinear(LookupOp::Sigmoid {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Sqrt" => SupportedOp::Nonlinear(LookupOp::Sqrt {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Rsqrt" => SupportedOp::Nonlinear(LookupOp::Rsqrt {
"Sqrt" => SupportedOp::Hybrid(HybridOp::Sqrt {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Rsqrt" => {
let in_scale = input_scales[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
SupportedOp::Hybrid(HybridOp::Rsqrt {
input_scale: (scale_to_multiplier(in_scale) as f32).into(),
output_scale: (scale_to_multiplier(max_scale) as f32).into(),
})
}
"Exp" => SupportedOp::Nonlinear(LookupOp::Exp {
scale: scale_to_multiplier(input_scales[0]).into(),
}),

View File

@@ -108,6 +108,18 @@ use serde::{Deserialize, Serialize};
#[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
use tosubcommand::ToFlags;
// if CARGO VERSION is 0.0.0 replace with "source - no compatibility guaranteed"
/// The version of the ezkl library
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Get the version of the library
pub fn version() -> &'static str {
match VERSION {
"0.0.0" => "source - no compatibility guaranteed",
_ => VERSION,
}
}
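A small usage sketch (assuming the function is re-exported at the crate root, as the crate::version() call sites elsewhere in this diff suggest):

fn main() {
    // prints the crate version, or the "source - no compatibility guaranteed"
    // placeholder when built from an unversioned source tree
    println!("ezkl {}", ezkl::version());
}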
/// Bindings management
#[cfg(any(
feature = "ios-bindings",
@@ -297,8 +309,6 @@ pub struct RunArgs {
all(feature = "ezkl", not(target_arch = "wasm32")),
arg(long, default_value = "false")
)]
/// Rebase the scale using lookup table for division instead of using a range check
pub div_rebasing: bool,
/// Should constants with 0.0 fraction be rebased to scale 0
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
@@ -340,7 +350,6 @@ impl Default for RunArgs {
input_visibility: Visibility::Private,
output_visibility: Visibility::Public,
param_visibility: Visibility::Private,
div_rebasing: false,
rebase_frac_zero_constants: false,
check_mode: CheckMode::UNSAFE,
commitment: None,

View File

@@ -59,10 +59,7 @@ fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
#[allow(missing_docs)]
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), derive(ValueEnum))]
pub enum ProofType {
#[default]
Single,
@@ -134,10 +131,7 @@ impl<'source> pyo3::FromPyObject<'source> for ProofType {
#[allow(missing_docs)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), derive(ValueEnum))]
pub enum StrategyType {
Single,
Accum,
@@ -203,10 +197,7 @@ pub enum PfSysError {
#[allow(missing_docs)]
#[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd)]
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
derive(ValueEnum)
)]
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), derive(ValueEnum))]
pub enum TranscriptType {
Poseidon,
#[default]
@@ -324,6 +315,8 @@ where
pub timestamp: Option<u128>,
/// commitment
pub commitment: Option<Commitments>,
/// (optional) version of ezkl used to generate the proof
version: Option<String>,
}
#[cfg(feature = "python-bindings")]
@@ -385,6 +378,7 @@ where
.as_millis(),
),
commitment,
version: Some(crate::version().to_string()),
}
}
@@ -920,6 +914,7 @@ mod tests {
pretty_public_inputs: None,
timestamp: None,
commitment: None,
version: None,
};
snark

View File

@@ -1546,9 +1546,17 @@ pub mod nonlinearities {
pub fn ilog2(a: &Tensor<IntegerRep>, scale_input: f64) -> Tensor<IntegerRep> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let kix = (kix).log2();
let rounded = kix.round();
Ok::<_, TensorError>(rounded as IntegerRep)
let log = (kix).log2();
let floor = log.floor();
let ceil = log.ceil();
let floor_dist = ((2.0_f64).powf(floor) - kix).abs();
let ceil_dist = (kix - (2.0_f64).powf(ceil)).abs();
if floor_dist < ceil_dist {
Ok::<_, TensorError>(floor as IntegerRep)
} else {
Ok::<_, TensorError>(ceil as IntegerRep)
}
})
.unwrap()
}
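The revised rounding can be replayed on plain values (a sketch of the selection rule only; the scale handling is omitted): the exponent is chosen so that its power of two is nearest to the input, rather than by rounding log2 directly.

// pick the exponent e in {floor(log2 x), ceil(log2 x)} whose power of two is closer to x
fn nearest_pow2_exp(x: f64) -> i64 {
    let log = x.log2();
    let (floor, ceil) = (log.floor(), log.ceil());
    let floor_dist = (2f64.powf(floor) - x).abs();
    let ceil_dist = (x - 2f64.powf(ceil)).abs();
    if floor_dist < ceil_dist {
        floor as i64
    } else {
        ceil as i64
    }
}

fn main() {
    assert_eq!(nearest_pow2_exp(5.0), 2);  // 4 is closer to 5 than 8 is
    assert_eq!(nearest_pow2_exp(7.0), 3);  // 8 is closer to 7 than 4 is
    // contrast with rounding log2: log2(47) ~= 5.55 rounds to 6, yet 32 is closer to 47 than 64 is
    assert_eq!(nearest_pow2_exp(47.0), 5);
}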

Binary file not shown.

File diff suppressed because one or more lines are too long

View File

@@ -1 +1 @@
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":0,"max_range_size":127}
{"inputs":[["0200000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000","0100000000000000000000000000000000000000000000000000000000000000"]],"pretty_elements":{"rescaled_inputs":[["2","1","1"]],"inputs":[["0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000001"]],"processed_inputs":[],"processed_params":[],"processed_outputs":[],"rescaled_outputs":[["0","0","0","0"]],"outputs":[["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"]]},"outputs":[["0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000","0000000000000000000000000000000000000000000000000000000000000000"]],"processed_inputs":null,"processed_params":null,"processed_outputs":null,"max_lookup_inputs":0,"min_lookup_inputs":0,"max_range_size":127,"version":"source - no compatibility guaranteed"}

View File

@@ -539,7 +539,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "fixed", "public", 1, "accuracy", None, 0.0);
mock(path, test.to_string(), "public", "fixed", "public", 1, "accuracy", None, 0.0, false);
test_dir.close().unwrap();
}
});
@@ -556,15 +556,7 @@ mod native_tests {
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn accuracy_measurement_div_rebase_(test: &str) {
crate::native_tests::init_binary();
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6, true);
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn accuracy_measurement_public_outputs_(test: &str) {
@@ -572,7 +564,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6, false);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "accuracy", 2.6);
test_dir.close().unwrap();
}
@@ -582,7 +574,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6 , false);
accuracy_measurement(path, test.to_string(), "private", "fixed", "private", 1, "accuracy", 2.6 );
test_dir.close().unwrap();
}
@@ -592,7 +584,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6, false);
accuracy_measurement(path, test.to_string(), "public", "private", "private", 1, "accuracy", 2.6);
test_dir.close().unwrap();
}
@@ -603,7 +595,7 @@ mod native_tests {
crate::native_tests::setup_py_env();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 3.1, false);
accuracy_measurement(path, test.to_string(), "private", "private", "public", 1, "resources", 3.1);
test_dir.close().unwrap();
}
@@ -612,7 +604,17 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
#(#[test_case(TESTS[N])])*
fn mock_bounded_lookup_log(test: &str) {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, 0.0, true);
test_dir.close().unwrap();
}
@@ -623,7 +625,7 @@ mod native_tests {
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// gen random number between 0.0 and 1.0
let tolerance = rand::thread_rng().gen_range(0.0..1.0) * 100.0;
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, tolerance);
mock(path, test.to_string(), "private", "private", "public", 1, "resources", None, tolerance, false);
test_dir.close().unwrap();
}
@@ -638,7 +640,7 @@ mod native_tests {
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
let large_batch_dir = &format!("large_batches_{}", test);
crate::native_tests::mk_data_batches_(path, test, &large_batch_dir, 10);
mock(path, large_batch_dir.to_string(), "private", "private", "public", 10, "resources", None, 0.0);
mock(path, large_batch_dir.to_string(), "private", "private", "public", 10, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
}
@@ -648,7 +650,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "private", "private", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "private", "private", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -657,7 +659,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "hashed", "private", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "hashed", "private", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -666,7 +668,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "fixed", "private", "private", 1, "resources", None, 0.0);
mock(path, test.to_string(), "fixed", "private", "private", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -675,7 +677,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "private", "fixed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "private", "fixed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -684,7 +686,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "fixed", "private", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "fixed", "private", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -693,7 +695,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "hashed", "private", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "hashed", "private", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -702,7 +704,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "polycommit", "private", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "polycommit", "private", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -712,7 +714,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "hashed", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "hashed", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -722,7 +724,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "polycommit", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "polycommit", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -731,7 +733,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "private", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "private", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -741,7 +743,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "private", "polycommit", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "private", "polycommit", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -750,7 +752,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "fixed", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "fixed", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -760,7 +762,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "public", "polycommit", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "public", "polycommit", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -770,7 +772,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "polycommit", "polycommit", "polycommit", 1, "resources", None, 0.0);
mock(path, test.to_string(), "polycommit", "polycommit", "polycommit", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -780,7 +782,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "hashed", "private", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(), "hashed", "private", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -790,7 +792,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// needs an extra row for the large model
mock(path, test.to_string(),"hashed", "hashed", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(),"hashed", "hashed", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -800,7 +802,7 @@ mod native_tests {
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
// needs an extra row for the large model
mock(path, test.to_string(),"hashed", "hashed", "hashed", 1, "resources", None, 0.0);
mock(path, test.to_string(),"hashed", "hashed", "hashed", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
@@ -977,7 +979,7 @@ mod native_tests {
crate::native_tests::init_binary();
let test_dir = TempDir::new(test).unwrap();
let path = test_dir.path().to_str().unwrap(); crate::native_tests::mv_test_(path, test);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None, 0.0);
mock(path, test.to_string(), "private", "fixed", "public", 1, "resources", None, 0.0, false);
test_dir.close().unwrap();
}
});
@@ -1454,6 +1456,7 @@ mod native_tests {
cal_target: &str,
scales_to_use: Option<Vec<u32>>,
tolerance: f32,
bounded_lookup_log: bool,
) {
let mut tolerance = tolerance;
gen_circuit_settings_and_witness(
@@ -1466,10 +1469,10 @@ mod native_tests {
cal_target,
scales_to_use,
2,
false,
&mut tolerance,
Commitments::KZG,
2,
bounded_lookup_log,
);
if tolerance > 0.0 {
@@ -1607,10 +1610,10 @@ mod native_tests {
cal_target: &str,
scales_to_use: Option<Vec<u32>>,
num_inner_columns: usize,
div_rebasing: bool,
tolerance: &mut f32,
commitment: Commitments,
lookup_safety_margin: usize,
bounded_lookup_log: bool,
) {
let mut args = vec![
"gen-settings".to_string(),
@@ -1629,9 +1632,9 @@ mod native_tests {
format!("--commitment={}", commitment),
];
if div_rebasing {
args.push("--div-rebasing".to_string());
};
if bounded_lookup_log {
args.push("--bounded-log-lookup".to_string());
}
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(args)
@@ -1731,7 +1734,6 @@ mod native_tests {
batch_size: usize,
cal_target: &str,
target_perc: f32,
div_rebasing: bool,
) {
gen_circuit_settings_and_witness(
test_dir,
@@ -1743,10 +1745,10 @@ mod native_tests {
cal_target,
None,
2,
div_rebasing,
&mut 0.0,
Commitments::KZG,
2,
false,
);
println!(
@@ -2027,10 +2029,10 @@ mod native_tests {
target_str,
scales_to_use,
num_inner_columns,
false,
&mut 0.0,
commitment,
lookup_safety_margin,
false,
);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
@@ -2459,10 +2461,10 @@ mod native_tests {
// we need the accuracy
Some(vec![4]),
1,
false,
&mut 0.0,
Commitments::KZG,
2,
false,
);
let model_path = format!("{}/{}/network.compiled", test_dir, example_name);