Compare commits


1 Commit

Author: dante
SHA1: 00155e585f
Message: feat: bounded lookup log argument (#864)
Date: 2024-11-07 12:16:55 +00:00
17 changed files with 492 additions and 19 deletions


@@ -592,7 +592,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
"version": "3.12.5"
},
"orig_nbformat": 4
},


@@ -648,10 +648,10 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.15"
"version": "3.12.5"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
}


@@ -271,7 +271,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.2"
"version": "3.12.7"
}
},
"nbformat": 4,

examples/onnx/log/gen.py (new file, 42 lines)

@@ -0,0 +1,42 @@
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.log(x)
        return m


circuit = MyModel()

x = torch.empty(1, 8).uniform_(0, 3)

out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = ((x).detach().numpy()).reshape([-1]).tolist()

data = dict(
    input_data=[d1],
)

# Serialize data into file:
json.dump(data, open("input.json", 'w'))


@@ -0,0 +1 @@
{"input_data": [[1.9252371788024902, 1.8418371677398682, 0.8400403261184692, 2.083845853805542, 0.9760497808456421, 0.6940176486968994, 0.015579521656036377, 2.2689192295074463]]}


(binary ONNX graph, rendered as text in the original page: a pytorch 2.2.2 export of main_graph with a single Log node mapping input to output and batch_size as the dynamic axis — not human-readable)


@@ -197,6 +197,9 @@ struct PyRunArgs {
/// int: The number of legs used for decomposition
#[pyo3(get, set)]
pub decomp_legs: usize,
/// bool: Should the circuit use bounded lookups for log
#[pyo3(get, set)]
pub bounded_log_lookup: bool,
}
/// default instantiation of PyRunArgs
@@ -212,6 +215,7 @@ impl PyRunArgs {
impl From<PyRunArgs> for RunArgs {
fn from(py_run_args: PyRunArgs) -> Self {
RunArgs {
bounded_log_lookup: py_run_args.bounded_log_lookup,
tolerance: Tolerance::from(py_run_args.tolerance),
input_scale: py_run_args.input_scale,
param_scale: py_run_args.param_scale,
@@ -236,6 +240,7 @@ impl From<PyRunArgs> for RunArgs {
impl Into<PyRunArgs> for RunArgs {
fn into(self) -> PyRunArgs {
PyRunArgs {
bounded_log_lookup: self.bounded_log_lookup,
tolerance: self.tolerance.val,
input_scale: self.input_scale,
param_scale: self.param_scale,
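For orientation, here is how the new field would be flipped from Python once these bindings are built — a minimal sketch, assuming the published ezkl package exposes PyRunArgs and a gen_settings entry point that accepts it, as in recent releases:

    import ezkl

    # PyRunArgs mirrors the Rust RunArgs struct extended in this diff
    run_args = ezkl.PyRunArgs()
    run_args.bounded_log_lookup = True  # opt in to the bounded (hybrid) log argument

    # "network.onnx" is the graph produced by examples/onnx/log/gen.py above
    ezkl.gen_settings("network.onnx", "settings.json", py_run_args=run_args)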


@@ -13,6 +13,10 @@ use serde::{Deserialize, Serialize};
/// An enum representing the operations that consist of both lookups and arithmetic operations.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum HybridOp {
Ln {
scale: utils::F32,
},
RoundHalfToEven {
scale: utils::F32,
legs: usize,
@@ -112,6 +116,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
fn as_string(&self) -> String {
match self {
HybridOp::Ln { scale } => format!("LN(scale={})", scale),
HybridOp::RoundHalfToEven { scale, legs } => {
format!("ROUND_HALF_TO_EVEN(scale={}, legs={})", scale, legs)
}
@@ -189,6 +194,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
values: &[ValTensor<F>],
) -> Result<Option<ValTensor<F>>, CircuitError> {
Ok(Some(match self {
HybridOp::Ln { scale } => layouts::ln(config, region, values[..].try_into()?, *scale)?,
HybridOp::RoundHalfToEven { scale, legs } => {
layouts::round_half_to_even(config, region, values[..].try_into()?, *scale, *legs)?
}
@@ -327,6 +333,9 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
HybridOp::Softmax { output_scale, .. } | HybridOp::Recip { output_scale, .. } => {
multiplier_to_scale(output_scale.0 as f64)
}
HybridOp::Ln {
scale: output_scale,
} => 4 * multiplier_to_scale(output_scale.0 as f64),
_ => in_scales[0],
};
Ok(scale)
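The 4x factor reflects the layout that follows: the claimed log is multiplied by a triple-scaled constant and then by an ln(2) constant carrying one more factor of the scale, so the result leaves the op at the fourth power of the input multiplier. A quick check, assuming multiplier_to_scale recovers the base-2 exponent of a fixed-point multiplier (a hypothetical Python mirror, not the crate's code):

    import math

    def multiplier_to_scale(multiplier: float) -> int:
        # assumed behaviour: scale exponent = log2 of the multiplier
        return round(math.log2(multiplier))

    # an Ln op whose scale parameter is the multiplier 2**7
    # emits its output at scale exponent 4 * 7 = 28
    print(4 * multiplier_to_scale(2.0 ** 7))  # 28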


@@ -4507,6 +4507,332 @@ pub fn ceil<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
)
}
/// integer ln layout
/// # Arguments
/// * `config` - BaseConfig
/// * `region` - RegionCtx
/// * `values` - &[ValTensor<F>; 1]
/// * `scale` - utils::F32
/// # Returns
/// * ValTensor<F>
/// # Example
///
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::circuit::ops::layouts::ln;
/// use ezkl::tensor::val::ValTensor;
/// use halo2curves::bn256::Fr as Fp;
/// use ezkl::circuit::region::RegionCtx;
/// use ezkl::circuit::region::RegionSettings;
/// use ezkl::circuit::BaseConfig;
/// let dummy_config = BaseConfig::dummy(12, 2);
/// let mut dummy_region = RegionCtx::new_dummy(0,2,RegionSettings::all_true(128,2));
/// let x = ValTensor::from_integer_rep_tensor(Tensor::<IntegerRep>::new(
/// Some(&[3, 2, 3, 1]),
/// &[1, 1, 2, 2],
/// ).unwrap());
///
/// let result = ln::<Fp>(&dummy_config, &mut dummy_region, &[x], 2.0.into()).unwrap();
/// let expected = Tensor::<IntegerRep>::new(Some(&[4, 0, 4, -8]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(result.int_evals().unwrap(), expected);
///
/// ```
pub fn ln<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
config: &BaseConfig<F>,
region: &mut RegionCtx<F>,
values: &[ValTensor<F>; 1],
scale: utils::F32,
) -> Result<ValTensor<F>, CircuitError> {
// first generate the claimed val
let mut input = values[0].clone();
println!("input {}", input.show());
let scale_as_felt = integer_rep_to_felt(scale.0.round() as IntegerRep);
let assigned_triple_scaled_as_felt_tensor = region.assign(
&config.custom_gates.inputs[1],
&create_constant_tensor(scale_as_felt * scale_as_felt * scale_as_felt, 1),
)?;
// natural ln is log2(x) * ln(2)
let ln2 = utils::F32::from(2.0_f32.ln());
// now create a constant tensor for ln2 with scale
let ln2_tensor: ValTensor<F> = create_constant_tensor(
integer_rep_to_felt((ln2.0 * scale.0).round() as IntegerRep),
1,
);
region.assign(&config.custom_gates.inputs[0], &ln2_tensor)?;
let unit = create_constant_tensor(integer_rep_to_felt(1), 1);
region.assign(&config.custom_gates.inputs[1], &unit)?;
region.increment(1);
// 2. assign the image
if !input.all_prev_assigned() {
input = region.assign(&config.custom_gates.inputs[0], &input)?;
// don't need to increment because the claimed output is assigned to output and incremented accordingly
}
let is_assigned = !input.any_unknowns()?;
let mut claimed_output: ValTensor<F> = if is_assigned {
let input_evals = input.int_evals()?;
// returns an integer with the base 2 logarithm
tensor::ops::nonlinearities::ilog2(&input_evals.clone(), scale.0 as f64)
.par_iter()
.map(|x| Value::known(integer_rep_to_felt(*x)))
.collect::<Tensor<Value<F>>>()
.into()
} else {
Tensor::new(
Some(&vec![Value::<F>::unknown(); input.len()]),
&[input.len()],
)?
.into()
};
claimed_output.reshape(input.dims())?;
region.assign(&config.custom_gates.output, &claimed_output)?;
region.increment(claimed_output.len());
let pow2_of_claimed_output = nonlinearity(
config,
region,
&[claimed_output.clone()],
&LookupOp::PowersOfTwo { scale },
)?;
let num_bits = (std::mem::size_of::<IntegerRep>() * 8) as IntegerRep;
region.update_max_min_lookup_inputs_force(-num_bits, num_bits)?;
// now subtract 1 from the claimed output
let claimed_output_minus_one = pairwise(
config,
region,
&[claimed_output.clone(), unit.clone()],
BaseOp::Sub,
)?;
// now add 1 to the claimed output
let claimed_output_plus_one = pairwise(
config,
region,
&[claimed_output.clone(), unit.clone()],
BaseOp::Add,
)?;
// prior power of 2 is less than claimed output
let prior_pow2 = nonlinearity(
config,
region,
&[claimed_output_minus_one],
&LookupOp::PowersOfTwo { scale },
)?;
// next power of 2 is greater than claimed output
let next_pow2 = nonlinearity(
config,
region,
&[claimed_output_plus_one],
&LookupOp::PowersOfTwo { scale },
)?;
// assert that the original input is closer to the claimed output than to the prior power of 2 and the next power of 2
let distance_to_prior = pairwise(
config,
region,
&[input.clone(), prior_pow2.clone()],
BaseOp::Sub,
)?;
// now take abs of the distance
let distance_to_prior_l1 = abs(config, region, &[distance_to_prior.clone()])?;
let distance_to_next = pairwise(
config,
region,
&[input.clone(), next_pow2.clone()],
BaseOp::Sub,
)?;
// now take abs of the distance
let distance_to_next_l1 = abs(config, region, &[distance_to_next.clone()])?;
let distance_to_claimed = pairwise(
config,
region,
&[input.clone(), pow2_of_claimed_output.clone()],
BaseOp::Sub,
)?;
// now take abs of the distance
let distance_to_claimed_l1 = abs(config, region, &[distance_to_claimed.clone()])?;
// can be less than or equal because we round up
let is_distance_to_prior_less = less_equal(
config,
region,
&[distance_to_claimed_l1.clone(), distance_to_prior_l1.clone()],
)?;
// should be strictly less because we round up
let is_distance_to_next_less = less(
config,
region,
&[distance_to_claimed_l1, distance_to_next_l1.clone()],
)?;
let is_distance_to_prior_less_and_distance_to_next_less = and(
config,
region,
&[
is_distance_to_prior_less.clone(),
is_distance_to_next_less.clone(),
],
)?;
let mut comparison_unit = create_constant_tensor(
integer_rep_to_felt(1),
is_distance_to_prior_less_and_distance_to_next_less.len(),
);
comparison_unit.reshape(is_distance_to_prior_less_and_distance_to_next_less.dims())?;
// assigned unit
let assigned_unit = region.assign(&config.custom_gates.inputs[1], &comparison_unit)?;
region.increment(assigned_unit.len());
// assert that the values are truthy
enforce_equality(
config,
region,
&[
is_distance_to_prior_less_and_distance_to_next_less,
assigned_unit.clone(),
],
)?;
// get a linear interpolation now
let sign_of_distance_to_claimed = sign(config, region, &[distance_to_claimed.clone()])?;
let sign_of_distance_to_claimed_is_positive = equals(
config,
region,
&[sign_of_distance_to_claimed.clone(), assigned_unit.clone()],
)?;
let sign_of_distance_to_claimed_is_negative = not(
config,
region,
&[sign_of_distance_to_claimed_is_positive.clone()],
)?;
let pow2_prior_to_claimed_distance = pairwise(
config,
region,
&[pow2_of_claimed_output.clone(), prior_pow2.clone()],
BaseOp::Sub,
)?;
let pow2_next_to_claimed_distance = pairwise(
config,
region,
&[next_pow2.clone(), pow2_of_claimed_output.clone()],
BaseOp::Sub,
)?;
let recip_pow2_prior_to_claimed_distance = recip(
config,
region,
&[pow2_prior_to_claimed_distance],
scale_as_felt,
scale_as_felt * scale_as_felt,
)?;
let interpolated_distance = pairwise(
config,
region,
&[
recip_pow2_prior_to_claimed_distance.clone(),
distance_to_claimed.clone(),
],
BaseOp::Mult,
)?;
let gated_prior_interpolated_distance = pairwise(
config,
region,
&[
interpolated_distance.clone(),
sign_of_distance_to_claimed_is_negative.clone(),
],
BaseOp::Mult,
)?;
let recip_next_to_claimed_distance = recip(
config,
region,
&[pow2_next_to_claimed_distance],
scale_as_felt,
scale_as_felt * scale_as_felt,
)?;
let interpolated_distance_next = pairwise(
config,
region,
&[
recip_next_to_claimed_distance.clone(),
distance_to_claimed.clone(),
],
BaseOp::Mult,
)?;
let gated_next_interpolated_distance = pairwise(
config,
region,
&[
interpolated_distance_next.clone(),
sign_of_distance_to_claimed_is_positive.clone(),
],
BaseOp::Mult,
)?;
let scaled_claimed_output = pairwise(
config,
region,
&[
claimed_output.clone(),
assigned_triple_scaled_as_felt_tensor,
],
BaseOp::Mult,
)?;
let claimed_output = pairwise(
config,
region,
&[
scaled_claimed_output.clone(),
gated_prior_interpolated_distance.clone(),
],
BaseOp::Add,
)?;
let claimed_output = pairwise(
config,
region,
&[
claimed_output.clone(),
gated_next_interpolated_distance.clone(),
],
BaseOp::Add,
)?;
// now multiply the claimed output by ln2
pairwise(config, region, &[claimed_output, ln2_tensor], BaseOp::Mult)
}
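Stripped of the region bookkeeping, the argument above is: commit to a claimed k such that scale * 2^k is the power of two nearest the input (ties rounded up); prove via the powers-of-two lookup that 2^k is at least as close as the prior power of two and strictly closer than the next one; refine k with a first-order interpolation toward whichever neighbour the input falls against; and convert base-2 to natural log with ln(x) = log2(x) * ln(2). A hypothetical out-of-circuit mirror in plain floating point (illustrative names only, not part of the codebase):

    import math

    def bounded_ln_reference(x: float, scale: float) -> float:
        # the prover's claim: k such that scale * 2**k is the power of two
        # nearest to x (ties round upward)
        k = math.floor(math.log2(x / scale))
        if (scale * 2.0 ** (k + 1) - x) <= (x - scale * 2.0 ** k):
            k += 1

        pow2_k = scale * 2.0 ** k           # pow2 of the claimed output
        pow2_prev = scale * 2.0 ** (k - 1)  # prior power of two
        pow2_next = scale * 2.0 ** (k + 1)  # next power of two

        # the two comparisons the circuit enforces on the claim
        assert abs(x - pow2_k) <= abs(x - pow2_prev)  # may tie: we round up
        assert abs(x - pow2_k) < abs(x - pow2_next)   # strictly less: we round up

        # first-order interpolation, gated on the sign of (x - pow2_k)
        if x >= pow2_k:
            frac = (x - pow2_k) / (pow2_next - pow2_k)
        else:
            frac = (x - pow2_k) / (pow2_k - pow2_prev)

        # base-2 log to natural log via the ln(2) constant
        return (k + frac) * math.log(2.0)

    print(bounded_ln_reference(3.0, 2.0))  # ~0.347, a coarse approximation of math.log(1.5) ~ 0.405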
/// round layout
/// # Arguments
/// * `config` - BaseConfig


@@ -16,10 +16,11 @@ use halo2curves::ff::PrimeField;
pub enum LookupOp {
Div { denom: utils::F32 },
IsOdd,
+ PowersOfTwo { scale: utils::F32 },
+ Ln { scale: utils::F32 },
Sqrt { scale: utils::F32 },
Rsqrt { scale: utils::F32 },
Sigmoid { scale: utils::F32 },
- Ln { scale: utils::F32 },
Exp { scale: utils::F32 },
Cos { scale: utils::F32 },
ACos { scale: utils::F32 },
@@ -50,6 +51,8 @@ impl LookupOp {
pub fn as_path(&self) -> String {
match self {
LookupOp::Pow { scale, a } => format!("pow_{}_{}", scale, a),
+ LookupOp::Ln { scale } => format!("ln_{}", scale),
+ LookupOp::PowersOfTwo { scale } => format!("pow2_{}", scale),
LookupOp::IsOdd => "is_odd".to_string(),
LookupOp::Div { denom } => format!("div_{}", denom),
LookupOp::Sigmoid { scale } => format!("sigmoid_{}", scale),
@@ -57,7 +60,6 @@ impl LookupOp {
LookupOp::Rsqrt { scale } => format!("rsqrt_{}", scale),
LookupOp::Erf { scale } => format!("erf_{}", scale),
LookupOp::Exp { scale } => format!("exp_{}", scale),
- LookupOp::Ln { scale } => format!("ln_{}", scale),
LookupOp::Cos { scale } => format!("cos_{}", scale),
LookupOp::ACos { scale } => format!("acos_{}", scale),
LookupOp::Cosh { scale } => format!("cosh_{}", scale),
@@ -82,6 +84,12 @@ impl LookupOp {
let x = x[0].clone().map(|x| felt_to_integer_rep(x));
let res =
match &self {
+ LookupOp::Ln { scale } => {
+     Ok::<_, TensorError>(tensor::ops::nonlinearities::ln(&x, scale.into()))
+ }
+ LookupOp::PowersOfTwo { scale } => {
+     Ok::<_, TensorError>(tensor::ops::nonlinearities::ipow2(&x, scale.0.into()))
+ }
LookupOp::IsOdd => Ok::<_, TensorError>(tensor::ops::nonlinearities::is_odd(&x)),
LookupOp::Pow { scale, a } => Ok::<_, TensorError>(
tensor::ops::nonlinearities::pow(&x, scale.0.into(), a.0.into()),
@@ -104,9 +112,6 @@ impl LookupOp {
LookupOp::Exp { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::exp(&x, scale.into()))
}
- LookupOp::Ln { scale } => {
-     Ok::<_, TensorError>(tensor::ops::nonlinearities::ln(&x, scale.into()))
- }
LookupOp::Cos { scale } => {
Ok::<_, TensorError>(tensor::ops::nonlinearities::cos(&x, scale.into()))
}
@@ -163,10 +168,11 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
/// Returns the name of the operation
fn as_string(&self) -> String {
match self {
+ LookupOp::Ln { scale } => format!("LN(scale={})", scale),
+ LookupOp::PowersOfTwo { scale } => format!("POWERS_OF_TWO(scale={})", scale),
LookupOp::IsOdd => "IS_ODD".to_string(),
LookupOp::Pow { a, scale } => format!("POW(scale={}, exponent={})", scale, a),
LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom),
- LookupOp::Ln { scale } => format!("LN(scale={})", scale),
LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale),
LookupOp::Sqrt { scale } => format!("SQRT(scale={})", scale),
LookupOp::Erf { scale } => format!("ERF(scale={})", scale),


@@ -851,9 +851,18 @@ pub fn new_op_from_onnx(
"Exp" => SupportedOp::Nonlinear(LookupOp::Exp {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Ln" => SupportedOp::Nonlinear(LookupOp::Ln {
scale: scale_to_multiplier(input_scales[0]).into(),
}),
"Ln" => {
if run_args.bounded_log_lookup {
SupportedOp::Hybrid(HybridOp::Ln {
scale: scale_to_multiplier(input_scales[0]).into(),
})
} else {
SupportedOp::Nonlinear(LookupOp::Ln {
scale: scale_to_multiplier(input_scales[0]).into(),
})
}
}
"Sin" => SupportedOp::Nonlinear(LookupOp::Sin {
scale: scale_to_multiplier(input_scales[0]).into(),
}),


@@ -317,11 +317,18 @@ pub struct RunArgs {
#[cfg_attr(all(feature = "ezkl", not(target_arch = "wasm32")), arg(long, default_value = "2", value_hint = clap::ValueHint::Other))]
/// the number of legs used for decompositions
pub decomp_legs: usize,
#[cfg_attr(
all(feature = "ezkl", not(target_arch = "wasm32")),
arg(long, default_value = "false")
)]
/// use bounded lookup for the log
pub bounded_log_lookup: bool,
}
impl Default for RunArgs {
fn default() -> Self {
Self {
bounded_log_lookup: false,
tolerance: Tolerance::default(),
input_scale: 7,
param_scale: 7,


@@ -1500,6 +1500,59 @@ pub mod nonlinearities {
.unwrap()
}
/// Powers of 2
/// # Arguments
/// * `a` - Tensor
/// * `scale` - Single value
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::tensor::ops::nonlinearities::ipow2;
/// let x = Tensor::<IntegerRep>::new(
/// Some(&[2, 15, 2, 1, 1, 0]),
/// &[2, 3],
/// ).unwrap();
/// let result = ipow2(&x, 1.0);
/// let expected = Tensor::<IntegerRep>::new(Some(&[4, 32768, 4, 2, 2, 1]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn ipow2(a: &Tensor<IntegerRep>, scale_output: f64) -> Tensor<IntegerRep> {
a.par_enum_map(|_, a_i| {
let kix = a_i as f64;
let kix = scale_output * (2.0_f64).powf(kix);
let rounded = kix.round();
Ok::<_, TensorError>(rounded as IntegerRep)
})
.unwrap()
}
/// Elementwise applies log base 2 to a tensor of integers.
/// # Arguments
/// * `a` - Tensor
/// * `scale_input` - Single value
/// ```
/// use ezkl::tensor::Tensor;
/// use ezkl::fieldutils::IntegerRep;
/// use ezkl::tensor::ops::nonlinearities::ilog2;
/// let x = Tensor::<IntegerRep>::new(
/// Some(&[2, 15, 2, 1, 1, 2]),
/// &[2, 3],
/// ).unwrap();
/// let result = ilog2(&x, 1.0);
/// let expected = Tensor::<IntegerRep>::new(Some(&[1, 4, 1, 0, 0, 1]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn ilog2(a: &Tensor<IntegerRep>, scale_input: f64) -> Tensor<IntegerRep> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let kix = (kix).log2();
let rounded = kix.round();
Ok::<_, TensorError>(rounded as IntegerRep)
})
.unwrap()
}
/// Elementwise applies sigmoid to a tensor of integers.
/// # Arguments
///
@@ -1628,12 +1681,11 @@ pub mod nonlinearities {
.unwrap()
}
- /// Elementwise applies exponential to a tensor of integers.
/// Elementwise applies ln to a tensor of integers.
/// # Arguments
///
/// * `a` - Tensor
/// * `scale_input` - Single value
/// * `scale_output` - Single value
/// # Examples
/// ```
/// use ezkl::tensor::Tensor;

Binary file not shown.

File diff suppressed because one or more lines are too long


@@ -27,7 +27,8 @@
"check_mode": "UNSAFE",
"commitment": "KZG",
"decomp_base": 128,
"decomp_legs": 2
"decomp_legs": 2,
"bounded_log_lookup": false
},
"num_rows": 46,
"total_assignments": 92,


@@ -205,7 +205,7 @@ mod native_tests {
"1l_tiny_div",
];
- const TESTS: [&str; 95] = [
+ const TESTS: [&str; 96] = [
"1l_mlp", //0
"1l_slice",
"1l_concat",
@@ -305,6 +305,7 @@ mod native_tests {
"lstm_medium", // 92
"lenet_5", // 93
"rsqrt", // 94
"log", // 95
];
const WASM_TESTS: [&str; 46] = [
@@ -543,7 +544,7 @@ mod native_tests {
}
});
- seq!(N in 0..=94 {
+ seq!(N in 0..=95 {
#(#[test_case(TESTS[N])])*
#[ignore]