mirror of https://github.com/zkonduit/ezkl.git
synced 2026-01-13 00:08:12 -05:00

Compare commits: ac/chebysh ... main (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 5144991b21 |  |
|  | 64acb1d9d6 |  |

Cargo.toml (16 lines changed)
@@ -186,7 +186,7 @@ harness = false

[[bench]]
name = "tanh"
name = "sigmoid"
harness = false

[[bench]]

@@ -194,12 +194,12 @@ name = "relu_lookupless"
harness = false

[[bench]]
name = "accum_matmul_tanh"
name = "accum_matmul_sigmoid"
harness = false

[[bench]]
name = "accum_matmul_tanh_overflow"
name = "accum_matmul_sigmoid_overflow"
harness = false

[[bin]]

@@ -282,11 +282,11 @@ halo2_proofs = { git = "https://github.com/zkonduit/halo2#1dd2090741f006fd031a07
[patch.crates-io]
uniffi_testing = { git = "https://github.com/ElusAegis/uniffi-rs", branch = "feat/testing-feature-build-fix" }

# [profile.release]
# # debug = true
# rustflags = ["-C", "relocation-model=pic"]
# lto = "fat"
# codegen-units = 1
[profile.release]
# debug = true
rustflags = ["-C", "relocation-model=pic"]
lto = "fat"
codegen-units = 1
# panic = "abort"
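Note: the `[[bench]]` targets above use `harness = false`, meaning each bench binary supplies its own `main` (typically via criterion) instead of the default libtest harness. A minimal sketch of that shape, assuming a `criterion` dev-dependency and a hypothetical `sigmoid_reference` stand-in for the code under test (the real ezkl benches drive full halo2 circuits):

```rust
// benches/sigmoid.rs (hypothetical) -- harness shape only, not ezkl's bench.
use criterion::{criterion_group, criterion_main, Criterion};

// Stand-in for the operation being measured.
fn sigmoid_reference(x: f64) -> f64 {
    1.0 / (1.0 + (-x).exp())
}

fn bench_sigmoid(c: &mut Criterion) {
    c.bench_function("sigmoid_reference", |b| {
        b.iter(|| sigmoid_reference(std::hint::black_box(0.5)))
    });
}

// With `harness = false` in Cargo.toml, these macros provide `main`.
criterion_group!(benches, bench_sigmoid);
criterion_main!(benches);
```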
@@ -66,7 +66,7 @@ impl Circuit<Fr> for MyCircuit {
            &a,
            BITS,
            k,
            &LookupOp::Tanh { scale: 1.0.into() },
            &LookupOp::Sigmoid { scale: 1.0.into() },
        )
        .unwrap();

@@ -95,7 +95,7 @@ impl Circuit<Fr> for MyCircuit {
            .layout(
                &mut region,
                &[&output.unwrap()],
                Box::new(LookupOp::Tanh { scale: 1.0.into() }),
                Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
            )
            .unwrap();
        Ok(())

@@ -42,7 +42,7 @@ impl Circuit<Fr> for NLCircuit {
            .map(|_| VarTensor::new_advice(cs, K, 1, LEN))
            .collect::<Vec<_>>();

        let nl = LookupOp::Tanh { scale: 1.0.into() };
        let nl = LookupOp::Sigmoid { scale: 1.0.into() };

        let mut config = Config::default();

@@ -68,7 +68,7 @@ impl Circuit<Fr> for NLCircuit {
            .layout(
                &mut region,
                &[&self.input],
                Box::new(LookupOp::Tanh { scale: 1.0.into() }),
                Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
            )
            .unwrap();
        Ok(())
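Note: these bench and test circuits swap `LookupOp::Tanh` for `LookupOp::Sigmoid`. For reference, a plain-Rust sketch of the fixed-point maps such lookup tables encode, assuming the value-times-scale integer encoding on input and output (an assumption about the convention, not ezkl's exact rounding):

```rust
// Fixed-point reference nonlinearities (illustrative, not ezkl's tables).
fn fixed_point_tanh(x: i64, scale: f64) -> i64 {
    ((x as f64 / scale).tanh() * scale).round() as i64
}

fn fixed_point_sigmoid(x: i64, scale: f64) -> i64 {
    let v = x as f64 / scale;
    ((1.0 / (1.0 + (-v).exp())) * scale).round() as i64
}

fn main() {
    let scale = 128.0;
    for x in [-256i64, -64, 0, 64, 256] {
        println!(
            "x={:4}  tanh={:4}  sigmoid={:4}",
            x,
            fixed_point_tanh(x, scale),
            fixed_point_sigmoid(x, scale)
        );
    }
}
```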
@@ -175,6 +175,9 @@ struct PyRunArgs {
    /// int: The number of legs used for decomposition
    #[pyo3(get, set)]
    pub decomp_legs: usize,
    /// bool: Should the circuit use unbounded lookups for log
    #[pyo3(get, set)]
    pub bounded_log_lookup: bool,
    /// bool: Should the circuit use range checks for inputs and outputs (set to false if the input is a felt)
    #[pyo3(get, set)]
    pub ignore_range_check_inputs_outputs: bool,

@@ -199,6 +202,7 @@ impl PyRunArgs {
impl From<PyRunArgs> for RunArgs {
    fn from(py_run_args: PyRunArgs) -> Self {
        RunArgs {
            bounded_log_lookup: py_run_args.bounded_log_lookup,
            input_scale: py_run_args.input_scale,
            param_scale: py_run_args.param_scale,
            rebase_scale: py_run_args.rebase_scale,

@@ -225,6 +229,7 @@ impl Into<PyRunArgs> for RunArgs {
    fn into(self) -> PyRunArgs {
        let eps = self.get_epsilon();
        PyRunArgs {
            bounded_log_lookup: self.bounded_log_lookup,
            input_scale: self.input_scale,
            param_scale: self.param_scale,
            rebase_scale: self.rebase_scale,
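Note: the Python bindings gain a `bounded_log_lookup` field with pyo3-generated getters and setters. A minimal sketch of that pattern with a stand-in struct (not ezkl's actual binding; assumes a recent pyo3 with the `#[pyclass]` / `#[pyo3(get, set)]` attributes):

```rust
use pyo3::prelude::*;

// Stand-in run-args struct; field names mirror the diff.
#[pyclass]
#[derive(Clone, Default)]
pub struct PyRunArgsSketch {
    /// Use the bounded lookup argument for log ops
    #[pyo3(get, set)]
    pub bounded_log_lookup: bool,
    /// Number of legs used for decomposition
    #[pyo3(get, set)]
    pub decomp_legs: usize,
}

#[pymethods]
impl PyRunArgsSketch {
    // Allow `PyRunArgsSketch()` from Python; fields are then set via the
    // generated property setters.
    #[new]
    fn new() -> Self {
        Self::default()
    }
}
```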
@@ -15,10 +15,7 @@ use serde::{Deserialize, Serialize};
pub enum HybridOp {
    Ln {
        scale: utils::F32,
        eps: utils::F32,
    },
    Sigmoid {
        scale: utils::F32,
        eps: f64,
    },
    Rsqrt {
        input_scale: utils::F32,

@@ -142,7 +139,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
            ),
            HybridOp::Sqrt { scale } => format!("SQRT(scale={})", scale),
            HybridOp::Ln { scale, eps } => format!("LN(scale={}, eps={})", scale, eps),
            HybridOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
            HybridOp::RoundHalfToEven { scale, legs } => {
                format!("ROUND_HALF_TO_EVEN(scale={}, legs={})", scale, legs)
            }

@@ -238,9 +234,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
            HybridOp::Ln { scale, eps } => {
                layouts::ln(config, region, values[..].try_into()?, *scale, *eps)?
            }
            HybridOp::Sigmoid { scale } => {
                layouts::sigmoid(config, region, values[..].try_into()?, *scale)?
            }
            HybridOp::RoundHalfToEven { scale, legs } => {
                layouts::round_half_to_even(config, region, values[..].try_into()?, *scale, *legs)?
            }

@@ -388,10 +381,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
            HybridOp::Ln {
                scale: output_scale,
                eps: _,
            } => 2 * multiplier_to_scale(output_scale.0 as f64),
            HybridOp::Sigmoid {
                scale: output_scale,
            } => 2 * multiplier_to_scale(output_scale.0 as f64),
            } => 4 * multiplier_to_scale(output_scale.0 as f64),
            _ => in_scales[0],
        };
        Ok(scale)
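Note: the `out_scale` arms above report the output at a multiple of the input's log2 multiplier. The bookkeeping behind that: in fixed-point arithmetic, multiplying two values that each carry multiplier 2^s produces a raw value at multiplier 2^(2s). A small demo with a hypothetical `multiplier_to_scale` standing in for ezkl's helper (assumed here to be log2 of the multiplier):

```rust
// Fixed-point scale bookkeeping sketch (illustrative, not ezkl's code).
fn multiplier_to_scale(multiplier: f64) -> i32 {
    multiplier.log2().round() as i32
}

fn main() {
    let scale = 7;                   // bits of fractional precision
    let m = 2f64.powi(scale);        // multiplier 2^7 = 128

    let a = 0.75_f64;
    let b = 1.5_f64;
    let qa = (a * m).round() as i64; // 96
    let qb = (b * m).round() as i64; // 192

    // The raw product represents a*b at multiplier m*m, i.e. scale 2*s.
    let q_prod = qa * qb;
    let out_scale = 2 * multiplier_to_scale(m);
    let recovered = q_prod as f64 / 2f64.powi(out_scale);

    println!("out_scale = {out_scale}, recovered = {recovered}"); // 14, 1.125
}
```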
@@ -159,14 +159,14 @@ pub fn diff_less_than<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>
    values: &[&ValTensor<F>; 2],
    constant: F,
) -> Result<(), CircuitError> {
    let diff = pairwise(config, region, values, BaseOp::Sub)?;
    let int_rep_constant = felt_to_integer_rep(constant);
    range_check(
        config,
        region,
        &[&diff],
        &(-int_rep_constant, int_rep_constant),
    )?;
    let distance = l1_distance(config, region, values)?;

    let constant = create_constant_tensor(constant, 1);
    let is_less = less(config, region, &[&distance, &constant])?;

    // assert the result is 1
    let comparison_unit = create_constant_tensor(F::ONE, is_less.len());
    enforce_equality(config, region, &[&is_less, &comparison_unit])?;

    Ok(())
}
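Note: `diff_less_than` moves from a subtraction plus symmetric range check to an `l1_distance` / `less` / equality-with-one chain. Roughly, both styles enforce that each pairwise difference stays bounded by the constant; an out-of-circuit sketch of that property:

```rust
// Out-of-circuit sketch of the property the layout enforces: every pairwise
// difference must have absolute value below `constant`. The circuit version
// in the diff builds this from l1_distance, `less`, and an equality check
// against a tensor of ones.
fn diff_less_than(a: &[i64], b: &[i64], constant: i64) -> bool {
    a.iter().zip(b).all(|(x, y)| (x - y).abs() < constant)
}

fn main() {
    assert!(diff_less_than(&[10, 20, 30], &[12, 19, 28], 3));
    assert!(!diff_less_than(&[10, 20, 30], &[15, 19, 28], 3));
    println!("ok");
}
```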
@@ -5630,98 +5630,7 @@ pub fn ceil<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    )
}

fn sixth_order_chebyshev_approximation<
    F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    values: &[&ValTensor<F>; 1],
    coeffs: &[F],
    scale: F,
) -> Result<ValTensor<F>, CircuitError> {
    let coeff_tensors: Vec<ValTensor<F>> = coeffs
        .iter()
        .map(|c| create_constant_tensor(*c, 1))
        .collect();

    let x = values[0];

    let x_squared = pairwise(config, region, &[x, x], BaseOp::Mult)?;
    let x_squared = div(config, region, &[&x_squared], scale)?;
    let x_cubed = pairwise(config, region, &[&x_squared, x], BaseOp::Mult)?;
    let x_cubed = div(config, region, &[&x_cubed], scale)?;
    let x_fourth = pairwise(config, region, &[&x_squared, &x_squared], BaseOp::Mult)?;
    let x_fourth = div(config, region, &[&x_fourth], scale)?;
    let x_fifth = pairwise(config, region, &[&x_fourth, x], BaseOp::Mult)?;
    let x_fifth = div(config, region, &[&x_fifth], scale)?;

    let one = create_constant_tensor(scale, 1);
    let term1 = pairwise(config, region, &[&coeff_tensors[0], &one], BaseOp::Mult)?;
    let term2 = pairwise(config, region, &[&coeff_tensors[1], x], BaseOp::Mult)?;
    let term3 = pairwise(
        config,
        region,
        &[&coeff_tensors[2], &x_squared],
        BaseOp::Mult,
    )?;
    let term4 = pairwise(config, region, &[&coeff_tensors[3], &x_cubed], BaseOp::Mult)?;
    let term5 = pairwise(
        config,
        region,
        &[&coeff_tensors[4], &x_fourth],
        BaseOp::Mult,
    )?;
    let term6 = pairwise(config, region, &[&coeff_tensors[5], &x_fifth], BaseOp::Mult)?;

    let temp_sum1 = pairwise(config, region, &[&term1, &term2], BaseOp::Add)?;
    let temp_sum2 = pairwise(config, region, &[&term3, &term4], BaseOp::Add)?;
    let temp_sum3 = pairwise(config, region, &[&term5, &term6], BaseOp::Add)?;
    let temp_sum4 = pairwise(config, region, &[&temp_sum2, &temp_sum3], BaseOp::Add)?;
    let temp_sum5 = pairwise(config, region, &[&temp_sum1, &temp_sum4], BaseOp::Add)?;
    let claimed_val = temp_sum5;

    Ok(claimed_val)
}

fn fourth_order_chebyshev_approximation_symmetric_func<
    F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    values: &[&ValTensor<F>; 1],
    coeffs: &[F],
    scale: F,
) -> Result<ValTensor<F>, CircuitError> {
    let coeff_tensors: Vec<ValTensor<F>> = coeffs
        .iter()
        .map(|c| create_constant_tensor(*c, 1))
        .collect();

    let x = values[0];

    let x_squared = pairwise(config, region, &[x, x], BaseOp::Mult)?;
    let x_squared = div(config, region, &[&x_squared], scale)?;
    let x_cubed = pairwise(config, region, &[&x_squared, x], BaseOp::Mult)?;
    let x_cubed = div(config, region, &[&x_cubed], scale)?;
    let x_fourth = pairwise(config, region, &[&x_squared, &x_squared], BaseOp::Mult)?;
    let x_fourth = div(config, region, &[&x_fourth], scale)?;
    let x_fifth = pairwise(config, region, &[&x_fourth, x], BaseOp::Mult)?;
    let x_fifth = div(config, region, &[&x_fifth], scale)?;

    let one = create_constant_tensor(scale, 1);

    let term1 = pairwise(config, region, &[&coeff_tensors[0], &one], BaseOp::Mult)?;
    let term2 = pairwise(config, region, &[&coeff_tensors[1], x], BaseOp::Mult)?;
    let term4 = pairwise(config, region, &[&coeff_tensors[2], &x_cubed], BaseOp::Mult)?;
    let term6 = pairwise(config, region, &[&coeff_tensors[3], &x_fifth], BaseOp::Mult)?;
    let temp_sum1 = pairwise(config, region, &[&term1, &term2], BaseOp::Add)?;
    let temp_sum2 = pairwise(config, region, &[&term4, &term6], BaseOp::Add)?;
    let temp_sum3 = pairwise(config, region, &[&temp_sum1, &temp_sum2], BaseOp::Add)?;
    let claimed_val = temp_sum3;
    Ok(claimed_val)
}

/// Integer ln approximated using 5 term chebyshev polynomials, optimized over interval [0.1, 5]
/// integer ln layout
/// # Arguments
/// * `config` - BaseConfig
/// * `region` - RegionCtx
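Note: the two Chebyshev helpers above evaluate a fixed polynomial on fixed-point tensors, dividing by the scale after every multiplication so each power of x stays at the input scale, then summing the coefficient-weighted terms. A plain-Rust sketch of that rescaling pattern (illustrative only; the circuit version does the same steps with `pairwise` and `div`):

```rust
// Fixed-point polynomial evaluation: inputs and coefficients are integers
// encoding value * scale; every product is divided by the scale so all
// intermediate powers stay at the same scale.
fn poly_eval_fixed_point(x: i128, coeffs: &[i128], scale: i128) -> i128 {
    // powers[k] ~ (x/scale)^k, encoded at the scale
    let mut powers = vec![scale];
    let mut acc = scale;
    for _ in 0..coeffs.len().saturating_sub(1) {
        acc = acc * x / scale; // rescale after each multiplication
        powers.push(acc);
    }
    coeffs
        .iter()
        .zip(&powers)
        // each term c_k * x^k, rescaled back to the input scale
        .map(|(c, p)| c * p / scale)
        .sum()
}

fn main() {
    let scale = 1 << 7; // 128
    // f(x) = 0.5 + 0.25*x, encoded at the fixed-point scale
    let coeffs = [scale / 2, scale / 4];
    let x = 2 * scale; // x = 2.0
    let y = poly_eval_fixed_point(x, &coeffs, scale);
    println!("f(2.0) ~ {}", y as f64 / scale as f64); // ~1.0
}
```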
@@ -5747,7 +5656,7 @@ fn fourth_order_chebyshev_approximation_symmetric_func<
/// &[1, 1, 2, 2],
/// ).unwrap());
///
/// let result = ln::<Fp>(&dummy_config, &mut dummy_region, &[&x], 2.0.into()).unwrap();
/// let result = ln::<Fp>(&dummy_config, &mut dummy_region, &[&x], 2.0.into(), f64::EPSILON).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[4, 0, 4, -8]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
///
@@ -5757,80 +5666,235 @@ pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    region: &mut RegionCtx<F>,
    values: &[&ValTensor<F>; 1],
    scale: utils::F32,
    eps: utils::F32,
    eps: f64,
) -> Result<ValTensor<F>, CircuitError> {
    // first generate the claimed val
    let coeffs = vec![
        integer_rep_to_felt((-2.619115 * scale.0).round() as IntegerRep), // c0
        integer_rep_to_felt((5.335041 * scale.0).round() as IntegerRep),  // c1
        integer_rep_to_felt((-3.845028 * scale.0).round() as IntegerRep), // c2
        integer_rep_to_felt((1.443970 * scale.0).round() as IntegerRep),  // c3
        integer_rep_to_felt((-0.258144 * scale.0).round() as IntegerRep), // c4
        integer_rep_to_felt((0.017459 * scale.0).round() as IntegerRep),  // c5
    ];

    let scale_f = integer_rep_to_felt::<F>(scale.0.round() as IntegerRep);
    let mut input = values[0].clone();
    let scale_as_felt = integer_rep_to_felt(scale.0.round() as IntegerRep);

    let res = sixth_order_chebyshev_approximation(config, region, values, &coeffs, scale_f)?;
    // add eps to avoid log(0)
    let eps_tensor = create_constant_tensor(
        integer_rep_to_felt::<F>((eps.0 * scale.0.powi(2)).round() as IntegerRep),
        res.len(),
    let triple_scaled_as_felt_tensor =
        create_constant_tensor(scale_as_felt * scale_as_felt * scale_as_felt, 1);

    // natural ln is log2(x) * ln(2)
    let ln2 = utils::F32::from(2.0_f32.ln());
    // now create a constant tensor for ln2 with scale
    let ln2_tensor: ValTensor<F> = create_constant_tensor(
        integer_rep_to_felt((ln2.0 * scale.0).round() as IntegerRep),
        1,
    );
    let unit = create_constant_tensor(integer_rep_to_felt(1), 1);
    let negative_one = create_constant_tensor(integer_rep_to_felt(-1), 1);

    let res = pairwise(config, region, &[&res, &eps_tensor], BaseOp::Add)?;
    // 2. assign the image
    if !input.all_prev_assigned() {
        input = region.assign(&config.custom_gates.inputs[0], &input)?;
        // don't need to increment because the claimed output is assigned to output and incremented accordingly
    }

    Ok(res)
}
    let is_assigned = !input.any_unknowns()?;

/// Sigmoid ln approximated using 5 term chebyshev polynomials, optimized over interval [-6, 6]
/// # Arguments
/// * `config` - BaseConfig
/// * `region` - RegionCtx
/// * `values` - &[&ValTensor<F>; 1]
/// * `scale` - utils::F32
/// # Returns
/// * ValTensor<F>
/// # Example
///
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::sigmoid;
/// use ezkl::tensor::val::ValTensor;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(65536, 4));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
/// Some(&[3, 2, 3, 1]),
/// &[1, 1, 2, 2],
/// ).unwrap());
///
/// let result = sigmoid::<Fp>(&dummy_config, &mut dummy_region, &[&x], 2.0.into()).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[4, 0, 4, -8]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
///
/// ```
pub fn sigmoid<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
    config: &BaseConfig<F>,
    region: &mut RegionCtx<F>,
    values: &[&ValTensor<F>; 1],
    scale: utils::F32,
) -> Result<ValTensor<F>, CircuitError> {
    // first generate the claimed val
    let coeffs = vec![
        integer_rep_to_felt((0.500000 * scale.0.round()) as IntegerRep), // c0
        integer_rep_to_felt((0.212251 * scale.0.round()) as IntegerRep), // c1
        integer_rep_to_felt((-0.007144 * scale.0.round()) as IntegerRep), // c3
        integer_rep_to_felt((0.000100 * scale.0.round()) as IntegerRep), // c5
    ];
    let mut claimed_output: ValTensor<F> = if is_assigned {
        let input_evals = input.int_evals()?;
        // returns an integer with the base 2 logarithm
        tensor::ops::nonlinearities::ilog2(&input_evals, scale.0 as f64)
            .par_iter()
            .map(|x| Value::known(integer_rep_to_felt(*x)))
            .collect::<Tensor<Value<F>>>()
            .into()
    } else {
        Tensor::new(
            Some(&vec![Value::<F>::unknown(); input.len()]),
            &[input.len()],
        )?
        .into()
    };
    claimed_output.reshape(input.dims())?;
    let claimed_output = decompose(
        config,
        region,
        &[&claimed_output],
        &region.base(),
        &region.legs(),
        true,
    )?
    .1;
    region.increment(claimed_output.len());

    let scale_f = integer_rep_to_felt::<F>(scale.0.round() as IntegerRep);
    let pow2_of_claimed_output = nonlinearity(
        config,
        region,
        &[&claimed_output],
        &LookupOp::PowersOfTwo { scale },
    )?;

    fourth_order_chebyshev_approximation_symmetric_func(config, region, values, &coeffs, scale_f)
    let num_bits = (std::mem::size_of::<IntegerRep>() * 8) as IntegerRep;

    region.update_max_min_lookup_inputs_force(-num_bits, num_bits)?;

    // now subtract 1 from the claimed output
    let claimed_output_minus_one =
        pairwise(config, region, &[&claimed_output, &unit], BaseOp::Sub)?;

    // now add 1 to the claimed output
    let claimed_output_plus_one = pairwise(config, region, &[&claimed_output, &unit], BaseOp::Add)?;

    // prior power of 2 is less than claimed output
    let prior_pow2 = nonlinearity(
        config,
        region,
        &[&claimed_output_minus_one],
        &LookupOp::PowersOfTwo { scale },
    )?;

    // next power of 2 is greater than claimed output
    let next_pow2 = nonlinearity(
        config,
        region,
        &[&claimed_output_plus_one],
        &LookupOp::PowersOfTwo { scale },
    )?;

    let distance_to_claimed = pairwise(
        config,
        region,
        &[&input, &pow2_of_claimed_output],
        BaseOp::Sub,
    )?;

    let abs_distance_to_claimed = abs(config, region, &[&distance_to_claimed])?;

    let abs_distance_to_next_pow2 = l1_distance(config, region, &[&input, &next_pow2])?;

    let abs_distance_to_prior_pow2 = l1_distance(config, region, &[&input, &prior_pow2])?;

    // because we round up this can be equal
    let is_closest_to_0: ValTensor<F> = less_equal(
        config,
        region,
        &[&abs_distance_to_claimed, &abs_distance_to_next_pow2],
    )?;

    let is_closest_to_1 = less_equal(
        config,
        region,
        &[&abs_distance_to_claimed, &abs_distance_to_prior_pow2],
    )?;

    let is_closest = and(config, region, &[&is_closest_to_0, &is_closest_to_1])?;

    let mut comparison_unit = create_constant_tensor(integer_rep_to_felt(1), is_closest.len());
    comparison_unit.reshape(is_closest.dims())?;
    let assigned_unit = region.assign(&config.custom_gates.inputs[1], &comparison_unit)?;

    enforce_equality(config, region, &[&is_closest, &assigned_unit])?;

    // get a linear interpolation now

    let sign_of_distance_to_claimed = sign(config, region, &[&distance_to_claimed], true)?;
    let sign_of_distance_to_claimed_is_negative = equals(
        config,
        region,
        &[&sign_of_distance_to_claimed, &negative_one],
    )?;

    let sign_of_distance_to_claimed_is_positive =
        not(config, region, &[&sign_of_distance_to_claimed_is_negative])?;

    let pow2_prior_to_claimed_distance = pairwise(
        config,
        region,
        &[&pow2_of_claimed_output, &prior_pow2],
        BaseOp::Sub,
    )?;

    let pow2_next_to_claimed_distance = pairwise(
        config,
        region,
        &[&next_pow2, &pow2_of_claimed_output],
        BaseOp::Sub,
    )?;

    let recip_pow2_prior_to_claimed_distance = recip(
        config,
        region,
        &[&pow2_prior_to_claimed_distance],
        scale_as_felt,
        scale_as_felt * scale_as_felt,
        eps,
    )?;

    let interpolated_distance = pairwise(
        config,
        region,
        &[&recip_pow2_prior_to_claimed_distance, &distance_to_claimed],
        BaseOp::Mult,
    )?;

    let gated_prior_interpolated_distance = pairwise(
        config,
        region,
        &[
            &interpolated_distance,
            &sign_of_distance_to_claimed_is_negative,
        ],
        BaseOp::Mult,
    )?;

    let recip_next_to_claimed_distance = recip(
        config,
        region,
        &[&pow2_next_to_claimed_distance],
        scale_as_felt,
        scale_as_felt * scale_as_felt,
        eps,
    )?;

    let interpolated_distance_next = pairwise(
        config,
        region,
        &[&recip_next_to_claimed_distance, &distance_to_claimed],
        BaseOp::Mult,
    )?;

    let gated_next_interpolated_distance = pairwise(
        config,
        region,
        &[
            &interpolated_distance_next,
            &sign_of_distance_to_claimed_is_positive,
        ],
        BaseOp::Mult,
    )?;

    let scaled_claimed_output = pairwise(
        config,
        region,
        &[&claimed_output, &triple_scaled_as_felt_tensor],
        BaseOp::Mult,
    )?;

    let claimed_output = pairwise(
        config,
        region,
        &[&scaled_claimed_output, &gated_prior_interpolated_distance],
        BaseOp::Add,
    )?;

    let claimed_output = pairwise(
        config,
        region,
        &[&claimed_output, &gated_next_interpolated_distance],
        BaseOp::Add,
    )?;

    // now multiply the claimed output by ln2
    pairwise(
        config,
        region,
        &[&claimed_output, &ln2_tensor],
        BaseOp::Mult,
    )
}

/// round layout
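Note: the reworked `ln` layout in this hunk has the prover supply a claimed rounded base-2 logarithm as a witness, checks it against the neighbouring powers of two via `PowersOfTwo` lookups, linearly interpolates between them, and finally multiplies by ln(2), relying on ln(x) = log2(x) * ln(2). A non-circuit sketch of that identity in fixed point (rounding conventions here are illustrative, not ezkl's exact ones):

```rust
// ln(x) = log2(x) * ln(2), evaluated on fixed-point integers.
fn ln_fixed_point(x: i64, scale: f64) -> i64 {
    let real = x as f64 / scale;            // decode the fixed-point input
    let log2 = real.log2();                 // what the prover would "claim"
    let ln = log2 * std::f64::consts::LN_2; // rescale by ln(2)
    (ln * scale).round() as i64             // re-encode at the output scale
}

fn main() {
    let scale = 128.0;
    for v in [0.5_f64, 1.0, 2.0, 4.0, 10.0] {
        let fp = ln_fixed_point((v * scale).round() as i64, scale);
        println!("ln({v}) ~ {:.4} (fixed-point {fp})", fp as f64 / scale);
    }
}
```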
@@ -17,6 +17,8 @@ pub enum LookupOp {
    Div { denom: utils::F32 },
    IsOdd,
    PowersOfTwo { scale: utils::F32 },
    Ln { scale: utils::F32 },
    Sigmoid { scale: utils::F32 },
    Exp { scale: utils::F32, base: utils::F32 },
    Cos { scale: utils::F32 },
    ACos { scale: utils::F32 },

@@ -47,9 +49,11 @@ impl LookupOp {
    pub fn as_path(&self) -> String {
        match self {
            LookupOp::Pow { scale, a } => format!("pow_{}_{}", scale, a),
            LookupOp::Ln { scale } => format!("ln_{}", scale),
            LookupOp::PowersOfTwo { scale } => format!("pow2_{}", scale),
            LookupOp::IsOdd => "is_odd".to_string(),
            LookupOp::Div { denom } => format!("div_{}", denom),
            LookupOp::Sigmoid { scale } => format!("sigmoid_{}", scale),
            LookupOp::Erf { scale } => format!("erf_{}", scale),
            LookupOp::Exp { scale, base } => format!("exp_{}_{}", scale, base),
            LookupOp::Cos { scale } => format!("cos_{}", scale),

@@ -76,6 +80,9 @@ impl LookupOp {
        let x = x[0].clone().map(|x| felt_to_integer_rep(x));
        let res =
            match &self {
                LookupOp::Ln { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::ln(&x, scale.into()))
                }
                LookupOp::PowersOfTwo { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::ipow2(&x, scale.0.into()))
                }

@@ -86,6 +93,9 @@ impl LookupOp {
                LookupOp::Div { denom } => Ok::<_, TensorError>(
                    tensor::ops::nonlinearities::const_div(&x, f32::from(*denom).into()),
                ),
                LookupOp::Sigmoid { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::sigmoid(&x, scale.into()))
                }
                LookupOp::Erf { scale } => {
                    Ok::<_, TensorError>(tensor::ops::nonlinearities::erffunc(&x, scale.into()))
                }

@@ -148,10 +158,12 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
    /// Returns the name of the operation
    fn as_string(&self) -> String {
        match self {
            LookupOp::Ln { scale } => format!("LN(scale={})", scale),
            LookupOp::PowersOfTwo { scale } => format!("POWERS_OF_TWO(scale={})", scale),
            LookupOp::IsOdd => "IS_ODD".to_string(),
            LookupOp::Pow { a, scale } => format!("POW(scale={}, exponent={})", scale, a),
            LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
            LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
            LookupOp::Erf { scale } => format!("ERF(scale={})", scale),
            LookupOp::Exp { scale, base } => format!("EXP(scale={}, base={})", scale, base),
            LookupOp::Tan { scale } => format!("TAN(scale={})", scale),
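Note: a `LookupOp` is evaluated elementwise off-circuit to build a table keyed by its `as_path()` name. A hedged sketch of what that looks like for the new `Ln` and `Sigmoid` variants, assuming the integer-in/integer-out convention where values encode value * scale (ezkl's exact rounding may differ):

```rust
// Stand-in lookup ops; elementwise evaluation plus a stable path name.
#[derive(Debug)]
enum LookupOpSketch {
    Ln { scale: f64 },
    Sigmoid { scale: f64 },
}

impl LookupOpSketch {
    fn eval(&self, xs: &[i64]) -> Vec<i64> {
        xs.iter()
            .map(|&x| match self {
                LookupOpSketch::Ln { scale } => {
                    ((x as f64 / scale).ln() * scale).round() as i64
                }
                LookupOpSketch::Sigmoid { scale } => {
                    let v = x as f64 / scale;
                    ((1.0 / (1.0 + (-v).exp())) * scale).round() as i64
                }
            })
            .collect()
    }

    // Mirrors the idea of `as_path`: one name per op + scale.
    fn as_path(&self) -> String {
        match self {
            LookupOpSketch::Ln { scale } => format!("ln_{scale}"),
            LookupOpSketch::Sigmoid { scale } => format!("sigmoid_{scale}"),
        }
    }
}

fn main() {
    let op = LookupOpSketch::Sigmoid { scale: 128.0 };
    println!("{} -> {:?}", op.as_path(), op.eval(&[-256, 0, 256]));
}
```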
@@ -2553,7 +2553,7 @@ mod lookup_ultra_overflow {
            .map(|_| VarTensor::new_advice(cs, 4, 1, 3))
            .collect::<Vec<_>>();

        let nl = LookupOp::Tanh { scale: 1.0.into() };
        let nl = LookupOp::Sigmoid { scale: 1.0.into() };

        let mut config = BaseConfig::default();

@@ -2586,7 +2586,7 @@ mod lookup_ultra_overflow {
            .layout(
                &mut region,
                &[&self.input],
                Box::new(LookupOp::Tanh { scale: 1.0.into() }),
                Box::new(LookupOp::Sigmoid { scale: 1.0.into() }),
            )
            .map_err(|_| Error::Synthesis)
        },

@@ -2298,6 +2298,7 @@ pub mod tests {
    "commitment": "KZG",
    "decomp_base": 128,
    "decomp_legs": 2,
    "bounded_log_lookup": false,
    "ignore_range_check_inputs_outputs": false,
    "disable_freivalds": false
},
@@ -889,6 +889,9 @@ pub fn new_op_from_onnx(
        "HardSwish" => SupportedOp::Nonlinear(LookupOp::HardSwish {
            scale: scale_to_multiplier(input_scales[0]).into(),
        }),
        "Sigmoid" => SupportedOp::Nonlinear(LookupOp::Sigmoid {
            scale: scale_to_multiplier(input_scales[0]).into(),
        }),
        "Sqrt" => SupportedOp::Hybrid(HybridOp::Sqrt {
            scale: scale_to_multiplier(input_scales[0]).into(),
        }),

@@ -908,13 +911,19 @@ pub fn new_op_from_onnx(
            scale: scale_to_multiplier(input_scales[0]).into(),
            base: E.into(),
        }),
        "Ln" => SupportedOp::Hybrid(HybridOp::Ln {
            scale: scale_to_multiplier(input_scales[0]).into(),
            eps: run_args.get_epsilon().into(),
        }),
        "Sigmoid" => SupportedOp::Hybrid(HybridOp::Sigmoid {
            scale: scale_to_multiplier(input_scales[0]).into(),
        }),
        "Ln" => {
            if run_args.bounded_log_lookup {
                SupportedOp::Hybrid(HybridOp::Ln {
                    scale: scale_to_multiplier(input_scales[0]).into(),
                    eps: run_args.get_epsilon(),
                })
            } else {
                SupportedOp::Nonlinear(LookupOp::Ln {
                    scale: scale_to_multiplier(input_scales[0]).into(),
                })
            }
        }

        "Sin" => SupportedOp::Nonlinear(LookupOp::Sin {
            scale: scale_to_multiplier(input_scales[0]).into(),
        }),
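Note: with the new `bounded_log_lookup` flag, the ONNX importer chooses between the bounded hybrid Ln argument and the plain lookup Ln. A sketch of that dispatch with stand-in types (not ezkl's); `scale_to_multiplier` is assumed here to map a base-2 scale to 2^scale:

```rust
// Stand-in dispatch mirroring the "Ln" arm introduced in the diff.
fn scale_to_multiplier(scale: i32) -> f64 {
    2f64.powi(scale)
}

#[derive(Debug)]
enum OpChoice {
    HybridLn { scale: f64, eps: f64 },
    LookupLn { scale: f64 },
}

fn pick_ln_op(input_scale: i32, bounded_log_lookup: bool, eps: f64) -> OpChoice {
    let scale = scale_to_multiplier(input_scale);
    if bounded_log_lookup {
        OpChoice::HybridLn { scale, eps }
    } else {
        OpChoice::LookupLn { scale }
    }
}

fn main() {
    println!("{:?}", pick_ln_op(7, true, f64::EPSILON));
    println!("{:?}", pick_ln_op(7, false, f64::EPSILON));
}
```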
@@ -260,6 +260,12 @@ pub struct RunArgs {
    /// Controls decomposition granularity
    #[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "2", value_hint = clap::ValueHint::Other))]
    pub decomp_legs: usize,
    /// Whether to use bounded lookup for logarithm computation
    #[cfg_attr(
        all(feature = "ezkl", not(target_arch = "wasm32")),
        arg(long, default_value = "false")
    )]
    pub bounded_log_lookup: bool,
    /// Range check inputs and outputs (turn off if the inputs are felts)
    #[cfg_attr(
        all(feature = "ezkl", not(target_arch = "wasm32")),

@@ -292,6 +298,7 @@ impl Default for RunArgs {
    /// while maintaining reasonable proving time and circuit size
    fn default() -> Self {
        Self {
            bounded_log_lookup: false,
            input_scale: 7,
            param_scale: 7,
            rebase_scale: None,
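Note: `bounded_log_lookup` is also surfaced as a CLI flag on `RunArgs`, defaulting to false. A minimal clap sketch of an equivalent flag (clap v4 derive assumed; the struct below is a stand-in, not ezkl's RunArgs):

```rust
use clap::Parser;

// Stand-in args struct; a bare boolean `--bounded-log-lookup` flag defaults
// to false and flips to true when passed.
#[derive(Parser, Debug)]
struct RunArgsSketch {
    /// Use bounded lookups for logarithm computation
    #[arg(long)]
    bounded_log_lookup: bool,

    /// Number of decomposition legs
    #[arg(long, default_value_t = 2)]
    decomp_legs: usize,
}

fn main() {
    let args = RunArgsSketch::parse();
    println!("{args:?}");
}
```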
@@ -2325,8 +2325,8 @@ pub mod nonlinearities {
    /// ```
    pub fn const_div(a: &Tensor<IntegerRep>, denom: f64) -> Tensor<IntegerRep> {
        a.par_enum_map(|_, a_i| {
            let d_inv_x = (a_i) / (denom as i128);
            Ok::<_, TensorError>(d_inv_x)
            let d_inv_x = (a_i as f64) / (denom);
            Ok::<_, TensorError>(d_inv_x.round() as IntegerRep)
        })
        .unwrap()
    }
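Note: the `const_div` change replaces truncating integer division (which also truncates the f64 denominator to an integer first) with division in f64 followed by rounding to the nearest integer. A small illustration of the difference:

```rust
// Truncating integer division vs. f64 division with rounding.
fn const_div_truncating(a: i128, denom: f64) -> i128 {
    a / (denom as i128)
}

fn const_div_rounding(a: i128, denom: f64) -> i128 {
    ((a as f64) / denom).round() as i128
}

fn main() {
    for (a, d) in [(7i128, 2.0f64), (-7, 2.0), (10, 3.0), (5, 2.5)] {
        println!(
            "a={a:3} denom={d:4}: truncating={:3} rounding={:3}",
            const_div_truncating(a, d),
            const_div_rounding(a, d)
        );
    }
}
```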