Compare commits


26 Commits

Author SHA1 Message Date
dante             d21b43502d  Merge branch 'main' into ac/small-worm  2024-07-14 22:41:16 -04:00
dante             9f1495bf7f  Update rust.yml  2024-07-14 17:25:28 -04:00
dante             355aed3b1f  Update binding_tests.py  2024-07-12 23:50:21 +01:00
dante             ca7fe53687  Update Readme.md  2024-07-12 13:04:29 +01:00
dante             1b49f8774f  Create Readme.md  2024-07-12 13:02:32 +01:00
dante             ec6be275cf  lock  2024-07-12 12:48:34 +01:00
dante             51ace92b73  Update Cargo.toml  2024-07-12 12:47:51 +01:00
dante             e4332ebb14  Update model.rs  2024-07-12 12:45:16 +01:00
dante             186515448c  Merge branch 'ac/small-worm' of https://github.com/zkonduit/ezkl into ac/small-worm  2024-07-12 12:44:45 +01:00
dante             c70fd153e2  revert  2024-07-12 12:43:57 +01:00
dante             66d733d0ce  Delete examples/onnx/1l_conv_transpose/witness.json  2024-07-12 12:41:46 +01:00
dante             89c5238130  Revert "rm for now" (This reverts commit c6cba69bc3.)  2024-07-12 12:40:10 +01:00
dante             e88f362596  Merge branch 'main' into ac/small-worm  2024-07-12 12:39:26 +01:00
Alexander Camuto  069ac6ee6e  Update ops.rs  2024-05-02 17:09:20 +01:00
Alexander Camuto  c6cba69bc3  rm for now  2024-05-02 17:02:01 +01:00
Alexander Camuto  eec19d6058  accelerate dot calc  2024-05-02 16:57:09 +01:00
Alexander Camuto  685f462766  cleanup  2024-05-02 15:12:12 +01:00
Alexander Camuto  9b027fc908  smoller  2024-05-02 15:07:01 +01:00
Alexander Camuto  2e211d1314  smol  2024-05-02 15:07:01 +01:00
Alexander Camuto  feae28042e  Update rust.yml  2024-05-02 15:07:01 +01:00
Alexander Camuto  885cd880d2  Update rust.yml  2024-05-02 15:07:01 +01:00
Alexander Camuto  b88bb6ccda  Update rust.yml  2024-05-02 15:07:01 +01:00
Alexander Camuto  7b32a99856  Update rust.yml  2024-05-02 15:07:00 +01:00
Alexander Camuto  e80eae41f0  worm town  2024-05-02 15:07:00 +01:00
Alexander Camuto  f4d2fc0ccb  chore: smallworm example  2024-05-02 15:06:41 +01:00
Alexander Camuto  e7f76c5ae6  Create kmeans.ipynb  2024-05-02 15:04:43 +01:00
8 changed files with 36 additions and 73 deletions
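The common thread across all eight files: the explicit group-count parameter is removed from the convolution ops. PolyOp::Conv and PolyOp::DeConv lose their `group` field, `layouts::conv` and `layouts::deconv` drop the trailing `num_groups` argument, and `conv` instead infers the group count from the image and kernel shapes. A minimal sketch of the two variants as they look after this change (the derive, field values, and `main` are illustrative, not from the diff):

```rust
// Stripped-down stand-in for ezkl's PolyOp after this change: the Conv and
// DeConv variants carry only shape hyperparameters; no `group` field remains.
#[derive(Debug)]
enum PolyOp {
    Conv {
        padding: Vec<(usize, usize)>, // (before, after) padding per spatial dim
        stride: Vec<usize>,           // stride per spatial dim
    },
    DeConv {
        padding: Vec<(usize, usize)>,
        output_padding: Vec<usize>,
        stride: Vec<usize>,
    },
}

fn main() {
    // Illustrative values only; see the hunks below for the real call sites.
    let op = PolyOp::Conv {
        padding: vec![(1, 1); 2],
        stride: vec![2; 2],
    };
    println!("{:?}", op);
}
```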

View File

@@ -72,7 +72,6 @@ impl Circuit<Fr> for MyCircuit {
Box::new(PolyOp::Conv {
padding: vec![(0, 0)],
stride: vec![1; 2],
-group: 1,
}),
)
.unwrap();

View File

@@ -1,4 +1,4 @@
-ezkl==11.6.3
+ezkl==0.0.0
sphinx
sphinx-rtd-theme
sphinxcontrib-napoleon

View File

@@ -1,7 +1,7 @@
import ezkl
project = 'ezkl'
-release = '11.6.3'
+release = '0.0.0'
version = release

View File

@@ -205,7 +205,6 @@ where
let op = PolyOp::Conv {
padding: vec![(PADDING, PADDING); 2],
stride: vec![STRIDE; 2],
-group: 1,
};
let x = config
.layer_config

View File

@@ -3023,7 +3023,7 @@ pub fn sumpool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + IntoI
.map(|coord| {
let (b, i) = (coord[0], coord[1]);
let input = values[0].get_slice(&[b..b + 1, i..i + 1])?;
-let output = conv(config, region, &[input, kernel.clone()], padding, stride, 1)?;
+let output = conv(config, region, &[input, kernel.clone()], padding, stride)?;
res.push(output);
Ok(())
})
@@ -3159,7 +3159,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// &[1, 1, 2, 2],
/// ).unwrap());
///
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, c], &vec![(1, 1); 2], &vec![1;2], &vec![2;2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, c], &vec![(1, 1); 2], &vec![1;2], &vec![2;2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[0, 32, 0, 32, 0, 6, 0, 12, 0, 4, 0, 8, 0, 4, 0, 8, 0, 0, 0, 3, 0, 0, 0, 2]), &[1, 2, 3, 4]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3171,7 +3171,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// Some(&[3, 1, 1, 5]),
/// &[1, 1, 2, 2],
/// ).unwrap());
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(0, 0); 2], &vec![0;2], &vec![1;2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(0, 0); 2], &vec![0;2], &vec![1;2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[6, 14, 4, 2, 17, 21, 0, 1, 5]), &[1, 1, 3, 3]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3184,7 +3184,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// Some(&[3, 1, 1, 5]),
/// &[1, 1, 2, 2],
/// ).unwrap());
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(1, 1); 2], &vec![0;2], &vec![1;2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(1, 1); 2], &vec![0;2], &vec![1;2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[17]), &[1, 1, 1, 1]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3197,7 +3197,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// Some(&[3, 1, 1, 5]),
/// &[1, 1, 2, 2],
/// ).unwrap());
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(1, 1); 2], &vec![0;2], &vec![2; 2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(1, 1); 2], &vec![0;2], &vec![2; 2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[10, 4, 0, 3]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3209,7 +3209,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// Some(&[3, 1, 1, 5]),
/// &[1, 1, 2, 2],
/// ).unwrap());
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(0, 0); 2], &vec![0;2], &vec![2; 2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(0, 0); 2], &vec![0;2], &vec![2; 2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[6, 2, 12, 4, 2, 10, 4, 20, 0, 0, 3, 1, 0, 0, 1, 5]), &[1, 1, 4, 4]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3221,7 +3221,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// Some(&[3, 2]),
/// &[1, 1, 2, 1],
/// ).unwrap());
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(1, 1); 2], &vec![0;2], &vec![2; 2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(1, 1); 2], &vec![0;2], &vec![2; 2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[0, 0]), &[1, 1, 2, 1]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3233,7 +3233,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// Some(&[3, 2]),
/// &[1, 1, 2, 1],
/// ).unwrap());
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(0, 0); 2], &vec![0;2], &vec![2; 2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k], &vec![(0, 0); 2], &vec![0;2], &vec![2; 2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[6, 0, 12, 4, 0, 8, 0, 0, 3, 0, 0, 2]), &[1, 1, 4, 3]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3244,7 +3244,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// &[1, 1, 2, 2],
/// ).unwrap());
///
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, c], &vec![(1, 1); 2], &vec![0;2], &vec![2;2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, c], &vec![(1, 1); 2], &vec![0;2], &vec![2;2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[0, 32, 0, 0, 6, 0, 0, 4, 0, 0, 0, 0]), &[1, 2, 2, 3]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
/// let x = ValTensor::from_i64_tensor(Tensor::<i64>::new(
@@ -3259,7 +3259,7 @@ pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Into
/// Some(&[1]),
/// &[1],
/// ).unwrap());
-/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k, b], &vec![(1, 1); 2], &vec![0;2], &vec![1;2], 1).unwrap();
+/// let result = deconv::<Fp>(&dummy_config, &mut dummy_region, &[x, k, b], &vec![(1, 1); 2], &vec![0;2], &vec![1;2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[55, 58, 66, 69]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3279,7 +3279,6 @@ pub fn deconv<
padding: &[(usize, usize)],
output_padding: &[usize],
stride: &[usize],
-num_groups: usize,
) -> Result<ValTensor<F>, CircuitError> {
let has_bias = inputs.len() == 3;
let (image, kernel) = (&inputs[0], &inputs[1]);
@@ -3365,7 +3364,6 @@ pub fn deconv<
&conv_input,
&vec![(0, 0); conv_dim],
&vec![1; conv_dim],
-num_groups,
)?;
Ok(output)
@@ -3397,7 +3395,7 @@ pub fn deconv<
/// Some(&[0]),
/// &[1],
/// ).unwrap());
-/// let result = conv::<Fp>(&dummy_config, &mut dummy_region, &[x, k, b], &vec![(0, 0); 2], &vec![1;2], 1).unwrap();
+/// let result = conv::<Fp>(&dummy_config, &mut dummy_region, &[x, k, b], &vec![(0, 0); 2], &vec![1;2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[31, 16, 8, 26]), &[1, 1, 2, 2]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3415,7 +3413,7 @@ pub fn deconv<
/// &[2],
/// ).unwrap());
///
-/// let result = conv::<Fp>(&dummy_config, &mut dummy_region, &[x, k, b], &vec![(0, 0); 2], &vec![1;2], 2).unwrap();
+/// let result = conv::<Fp>(&dummy_config, &mut dummy_region, &[x, k, b], &vec![(0, 0); 2], &vec![1;2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[32, 17, 9, 27, 34, 20, 13, 26]), &[1, 2, 2, 2]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
///
@@ -3433,7 +3431,7 @@ pub fn deconv<
/// &[4],
/// ).unwrap());
///
-/// let result =conv(&dummy_config, &mut dummy_region, &[x, k, b], &vec![(0, 0); 2], &vec![1;2], 1).unwrap();
+/// let result =conv(&dummy_config, &mut dummy_region, &[x, k, b], &vec![(0, 0); 2], &vec![1;2]).unwrap();
/// let expected = Tensor::<i64>::new(Some(&[65, 36, 21, 52, 73, 48, 37, 48, 65, 36, 21, 52, 73, 48, 37, 48]), &[1, 4, 2, 2]).unwrap();
/// assert_eq!(result.get_int_evals().unwrap(), expected);
/// ```
@@ -3452,7 +3450,6 @@ pub fn conv<
values: &[ValTensor<F>],
padding: &[(usize, usize)],
stride: &[usize],
-num_groups: usize,
) -> Result<ValTensor<F>, CircuitError> {
let has_bias = values.len() == 3;
let (mut image, mut kernel) = (values[0].clone(), values[1].clone());
@@ -3483,11 +3480,6 @@ pub fn conv<
region.increment(*assigned_len.iter().max().unwrap());
}
-// if image is 3d add a dummy batch dimension
-if image.dims().len() == kernel.dims().len() - 1 {
-image.reshape(&[1, image.dims()[0], image.dims()[1], image.dims()[2]])?;
-}
let image_dims = image.dims();
let kernel_dims = kernel.dims();
@@ -3521,17 +3513,10 @@ pub fn conv<
log::debug!("slides: {:?}", slides);
+let num_groups = input_channels / kernel_dims[1];
let input_channels_per_group = input_channels / num_groups;
let output_channels_per_group = output_channels / num_groups;
-if output_channels_per_group == 0 || input_channels_per_group == 0 {
-return Err(TensorError::DimMismatch(format!(
-"Given groups={}, expected input channels and output channels to be divisible by groups, but got input_channels={}, output_channels={}",
-num_groups, input_channels, output_channels
-))
-.into());
-}
log::debug!(
"num_groups: {}, input_channels_per_group: {}, output_channels_per_group: {}",
num_groups,
@@ -3539,6 +3524,14 @@ pub fn conv<
output_channels_per_group
);
+if output_channels_per_group == 0 {
+return Err(TensorError::DimMismatch(format!(
+"Given groups={}, expected kernel to be at least {} at dimension 0 but got {} instead",
+num_groups, num_groups, output_channels_per_group
+))
+.into());
+}
let num_outputs =
batch_size * num_groups * output_channels_per_group * slides.iter().product::<usize>();
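The last two hunks above are the core of the change: rather than trusting a caller-supplied `num_groups`, `conv` now derives it as `input_channels / kernel_dims[1]` (an NCHW-style kernel is laid out as `[out_channels, in_channels_per_group, kh, kw]`), and the new check rejects kernels whose leading dimension is too small for the inferred group count. A self-contained sketch of that inference, using a plain `String` error as a stand-in for ezkl's `TensorError::DimMismatch`:

```rust
// Standalone sketch of the group inference introduced above. Assumes an
// NCHW-style kernel of shape [out_channels, in_channels_per_group, kh, kw]
// with non-zero channel dims; String stands in for TensorError::DimMismatch.
fn infer_num_groups(input_channels: usize, kernel_dims: &[usize]) -> Result<usize, String> {
    let output_channels = kernel_dims[0];
    let num_groups = input_channels / kernel_dims[1];
    if num_groups == 0 {
        return Err("kernel claims more input channels than the image has".to_string());
    }
    let output_channels_per_group = output_channels / num_groups;
    if output_channels_per_group == 0 {
        return Err(format!(
            "Given groups={}, expected kernel to be at least {} at dimension 0 but got {} instead",
            num_groups, num_groups, output_channels_per_group
        ));
    }
    Ok(num_groups)
}

fn main() {
    // Ungrouped conv: 3 input channels, kernel [8, 3, 3, 3] -> 1 group.
    assert_eq!(infer_num_groups(3, &[8, 3, 3, 3]), Ok(1));
    // Depthwise-style conv: 4 input channels, kernel [4, 1, 3, 3] -> 4 groups.
    assert_eq!(infer_num_groups(4, &[4, 1, 3, 3]), Ok(4));
}
```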

View File

@@ -33,7 +33,6 @@ pub enum PolyOp {
Conv {
padding: Vec<(usize, usize)>,
stride: Vec<usize>,
-group: usize,
},
Downsample {
axis: usize,
@@ -44,7 +43,6 @@ pub enum PolyOp {
padding: Vec<(usize, usize)>,
output_padding: Vec<usize>,
stride: Vec<usize>,
-group: usize,
},
Add,
Sub,
@@ -150,25 +148,17 @@ impl<
PolyOp::Sum { axes } => format!("SUM (axes={:?})", axes),
PolyOp::Prod { .. } => "PROD".into(),
PolyOp::Pow(_) => "POW".into(),
-PolyOp::Conv {
-stride,
-padding,
-group,
-} => {
-format!(
-"CONV (stride={:?}, padding={:?}, group={})",
-stride, padding, group
-)
+PolyOp::Conv { stride, padding } => {
+format!("CONV (stride={:?}, padding={:?})", stride, padding)
}
PolyOp::DeConv {
stride,
padding,
output_padding,
-group,
} => {
format!(
"DECONV (stride={:?}, padding={:?}, output_padding={:?}, group={})",
stride, padding, output_padding, group
"DECONV (stride={:?}, padding={:?}, output_padding={:?})",
stride, padding, output_padding
)
}
PolyOp::Concat { axis } => format!("CONCAT (axis={})", axis),
@@ -222,18 +212,9 @@ impl<
PolyOp::Prod { axes, .. } => {
layouts::prod_axes(config, region, values[..].try_into()?, axes)?
}
-PolyOp::Conv {
-padding,
-stride,
-group,
-} => layouts::conv(
-config,
-region,
-values[..].try_into()?,
-padding,
-stride,
-*group,
-)?,
+PolyOp::Conv { padding, stride } => {
+layouts::conv(config, region, values[..].try_into()?, padding, stride)?
+}
PolyOp::GatherElements { dim, constant_idx } => {
if let Some(idx) = constant_idx {
tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into()
@@ -280,7 +261,6 @@ impl<
padding,
output_padding,
stride,
-group,
} => layouts::deconv(
config,
region,
@@ -288,7 +268,6 @@ impl<
padding,
output_padding,
stride,
-*group,
)?,
PolyOp::Add => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Add)?,
PolyOp::Sub => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Sub)?,
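With the `group` field gone, the human-readable op strings shrink to match. A tiny runnable stand-in for the new formatting shown above (ezkl builds these strings inside a `PolyOp` method rather than a `Display` impl; the struct here is hypothetical):

```rust
use std::fmt;

// Hypothetical stand-in mirroring the post-change CONV string: the `group`
// component is no longer part of the formatted description.
struct Conv {
    padding: Vec<(usize, usize)>,
    stride: Vec<usize>,
}

impl fmt::Display for Conv {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "CONV (stride={:?}, padding={:?})", self.stride, self.padding)
    }
}

fn main() {
    let op = Conv { padding: vec![(0, 0); 2], stride: vec![1; 2] };
    assert_eq!(op.to_string(), "CONV (stride=[1, 1], padding=[(0, 0), (0, 0)])");
}
```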

View File

@@ -1050,7 +1050,6 @@ mod conv {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
-group: 1,
}),
)
.map_err(|_| Error::Synthesis)
@@ -1201,7 +1200,6 @@ mod conv_col_ultra_overflow {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
-group: 1,
}),
)
.map_err(|_| Error::Synthesis)
@@ -1347,7 +1345,6 @@ mod conv_relu_col_ultra_overflow {
Box::new(PolyOp::Conv {
padding: vec![(1, 1); 2],
stride: vec![2; 2],
-group: 1,
}),
)
.map_err(|_| Error::Synthesis);

View File

@@ -283,7 +283,10 @@ pub fn new_op_from_onnx(
.flat_map(|x| x.out_scales())
.collect::<Vec<_>>();
-let input_dims = inputs.iter().flat_map(|x| x.out_dims()).collect::<Vec<_>>();
+let input_dims = inputs
+.iter()
+.flat_map(|x| x.out_dims())
+.collect::<Vec<_>>();
let mut replace_const = |scale: crate::Scale,
index: usize,
@@ -1189,13 +1192,7 @@ pub fn new_op_from_onnx(
}
}
-let group = conv_node.group;
-SupportedOp::Linear(PolyOp::Conv {
-padding,
-stride,
-group,
-})
+SupportedOp::Linear(PolyOp::Conv { padding, stride })
}
"Not" => SupportedOp::Linear(PolyOp::Not),
"And" => SupportedOp::Linear(PolyOp::And),
@@ -1250,7 +1247,6 @@ pub fn new_op_from_onnx(
padding,
output_padding: deconv_node.adjustments.to_vec(),
stride,
-group: deconv_node.group,
})
}
"Downsample" => {