From 76ec430dc55aa0e2fafd37b80ca83de286d9617c Mon Sep 17 00:00:00 2001
From: Alexander Camuto <45801863+alexander-camuto@users.noreply.github.com>
Date: Tue, 20 Sep 2022 19:57:49 +0100
Subject: [PATCH] chore: cleanup layers and update readme (#20)

* cleanup eltwise and conv

* phrasing
---
 README.md                                     | 13 ++++++++--
 examples/conv2d_mnist/main.rs                 | 12 ++-------
 {onnx_models => examples/onnx_models}/ff.onnx | Bin
 examples/smallonnx.rs                         |  4 +--
 src/nn/cnvrl.rs                               | 17 ++++++-------
 src/tensor_ops/eltwise.rs                     | 24 +++---------------
 6 files changed, 26 insertions(+), 44 deletions(-)
 rename {onnx_models => examples/onnx_models}/ff.onnx (100%)

diff --git a/README.md b/README.md
index 852df1bf..6f801f9f 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,12 @@ This is a proof-of-concept implementation of inference for deep learning models

 We give an example of proving inference with a model that achieves 97.5% accuracy on MNIST in the examples.

+
+Note that the library requires a nightly version of the rust toolchain. You can change the default toolchain by running:
+```bash
+rustup override set nightly
+```
+
 ## Running examples

 The MNIST inference example is contained in `examples/conv2d_mnist`. To run it:
@@ -17,7 +23,10 @@ We also provide an example which runs an MLP on input data with four dimensions
 ```bash
 cargo run --release --example mlp_4d
 ```
-Note that the library requires a nightly version of the rust toolchain. You can change the default toolchain by running:
+### Running onnx example
+
+To run the example which loads parameters from an ONNX model file you need to enable the `onnx` build feature:
+
 ```bash
-rustup override set nightly
+cargo run --release --example smallonnx --features onnx
 ```

diff --git a/examples/conv2d_mnist/main.rs b/examples/conv2d_mnist/main.rs
index dda56e9e..09b99b40 100644
--- a/examples/conv2d_mnist/main.rs
+++ b/examples/conv2d_mnist/main.rs
@@ -52,7 +52,7 @@ struct Config<
 > where
     Value<F>: TensorType,
 {
-    l0: ConvConfig<F, OUT_CHANNELS, STRIDE, IN_CHANNELS, PADDING>,
+    l0: ConvConfig<F, STRIDE, IN_CHANNELS, PADDING>,
     l0q: EltwiseConfig<F, LEN, DivideBy<F, 32>>,
     l1: EltwiseConfig<F, LEN, ReLu<F>>,
     l2: Affine1dConfig<F, LEN, CLASSES>,
@@ -155,15 +155,7 @@ where
            .into();
        kernel.reshape(&[KERNEL_WIDTH, KERNEL_HEIGHT]);

-       let l0 = ConvConfig::<
-           F,
-
-           OUT_CHANNELS,
-           STRIDE,
-
-           IN_CHANNELS,
-           PADDING,
-       >::configure(
+       let l0 = ConvConfig::<F, STRIDE, IN_CHANNELS, PADDING>::configure(
            cs,
            &[VarTensor::from(kernel)],
            advices.get_slice(&[0..IMAGE_WIDTH], &[IMAGE_WIDTH, IMAGE_HEIGHT]),

diff --git a/onnx_models/ff.onnx b/examples/onnx_models/ff.onnx
similarity index 100%
rename from onnx_models/ff.onnx
rename to examples/onnx_models/ff.onnx

diff --git a/examples/smallonnx.rs b/examples/smallonnx.rs
index fef6fa66..295d64d5 100644
--- a/examples/smallonnx.rs
+++ b/examples/smallonnx.rs
@@ -38,7 +38,7 @@ mod onnx_example {
        }

        fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
-           let onnx_model = OnnxModel::new("onnx_models/ff.onnx");
+           let onnx_model = OnnxModel::new("examples/onnx_models/ff.onnx");
            let l0_kernel = onnx_model.get_tensor_by_node_name("fc1.weight", 0f32, 256f32);
            let shape = l0_kernel.dims();
            let in_dims = shape[1];
@@ -112,7 +112,7 @@ mod onnx_example {
    pub fn run() {
        let k = 15; //2^k rows

-       let onnx_model = OnnxModel::new("onnx_models/ff.onnx");
+       let onnx_model = OnnxModel::new("examples/onnx_models/ff.onnx");
        let l0_kernel = onnx_model.get_tensor_by_node_name("fc1.weight", 0f32, 256f32);
        let mut l0_bias = onnx_model.get_tensor_by_node_name("fc1.bias", 0f32, 256f32);

diff --git a/src/nn/cnvrl.rs b/src/nn/cnvrl.rs
index 896a2e67..a7fdf3b6 100644
--- a/src/nn/cnvrl.rs
+++ b/src/nn/cnvrl.rs
@@ -12,7 +12,6 @@ use crate::tensor_ops::*;
 #[derive(Debug, Clone)]
 pub struct ConvConfig<
     F: FieldExt + TensorType,
-    const OUT_CHANNELS: usize,
     const STRIDE: usize,
     const IN_CHANNELS: usize,
     const PADDING: usize,
@@ -27,11 +26,10 @@

 impl<
     F: FieldExt + TensorType,
-    const OUT_CHANNELS: usize,
     const STRIDE: usize,
     const IN_CHANNELS: usize,
     const PADDING: usize,
-> LayerConfig<F> for ConvConfig<F, OUT_CHANNELS, STRIDE, IN_CHANNELS, PADDING>
+> LayerConfig<F> for ConvConfig<F, STRIDE, IN_CHANNELS, PADDING>
 where
     Value<F>: TensorType,
 {
@@ -88,7 +86,7 @@ where
    ) -> Tensor<AssignedCell<Assigned<F>, F>> {
        assert!(params.len() == 1);
        let kernel = params[0].clone();
-       let image_height = input.dims()[2];
+       let (in_channels, image_height) = (input.dims()[0], input.dims()[2]);
        layouter
            .assign_region(
                || "assign image and kernel",
@@ -96,7 +94,7 @@ where
                |mut region| {
                    let mut offset = 0;
                    self.selector.enable(&mut region, offset)?;
-                   let outputs = (0..IN_CHANNELS)
+                   let outputs = (0..in_channels)
                        .map(|i| {
                            self.kernel
                                .assign(&mut region, offset, kernel.get_slice(&[i..i + 1]));
@@ -143,11 +141,12 @@ where
        assert_eq!(params.len(), 1);
        let kernel = params[0].clone();
        let (image_width, image_height) = (input.dims()[1], input.dims()[2]);
-       let (kernel_width, kernel_height) = (kernel.dims()[2], kernel.dims()[3]);
+       let (out_channels, kernel_width, kernel_height) =
+           (kernel.dims()[0], kernel.dims()[2], kernel.dims()[3]);
        let horz = (image_width + 2 * PADDING - kernel_width) / STRIDE + 1;
        let vert = (image_height + 2 * PADDING - kernel_height) / STRIDE + 1;

-       let t = Tensor::from((0..OUT_CHANNELS).map(|i| {
+       let t = Tensor::from((0..out_channels).map(|i| {
            self.assign(
                &mut layouter.namespace(|| format!("filter: {:?}", i)),
                input.clone(),
@@ -155,7 +154,7 @@ where
            )
        }));
        let mut t = t.flatten();
-       t.reshape(&[OUT_CHANNELS, horz, vert]);
+       t.reshape(&[out_channels, horz, vert]);
        ValTensor::from(t)
    }
 }
@@ -217,7 +216,7 @@ mod tests {
    where
        Value<F>: TensorType,
    {
-       type Config = ConvConfig<F, OUT_CHANNELS, STRIDE, IN_CHANNELS, PADDING>;
+       type Config = ConvConfig<F, STRIDE, IN_CHANNELS, PADDING>;
        type FloorPlanner = SimpleFloorPlanner;

        fn without_witnesses(&self) -> Self {

diff --git a/src/tensor_ops/eltwise.rs b/src/tensor_ops/eltwise.rs
index 5647bfd4..be32fbac 100644
--- a/src/tensor_ops/eltwise.rs
+++ b/src/tensor_ops/eltwise.rs
@@ -13,29 +13,11 @@ pub trait Nonlinearity<F: FieldExt> {
 }

 #[derive(Clone, Debug)]
-pub struct Nonlin1d<F: FieldExt + TensorType, const LEN: usize, NL: Nonlinearity<F>> {
+pub struct Nonlin1d<F: FieldExt + TensorType, NL: Nonlinearity<F>> {
     pub input: ValTensor<F>,
     pub output: ValTensor<F>,
     pub _marker: PhantomData<(F, NL)>,
 }
-impl<F: FieldExt + TensorType, const LEN: usize, NL: Nonlinearity<F>> Nonlin1d<F, LEN, NL> {
-    pub fn fill<Func>(mut f: Func) -> Self
-    where
-        Func: FnMut(Tensor<usize>) -> ValTensor<F>,
-    {
-        Nonlin1d {
-            input: f(Tensor::from(0..LEN)),
-            output: f(Tensor::from(0..LEN)),
-            _marker: PhantomData,
-        }
-    }
-    pub fn without_witnesses() -> Nonlin1d<F, LEN, NL> {
-        Nonlin1d::<F, LEN, NL>::fill(|x| {
-            let t: Tensor<Value<F>> = x.map(|_| Value::default());
-            ValTensor::from(t)
-        })
-    }
-}

 // Table that should be reused across all lookups (so no Clone)
 #[derive(Clone, Debug)]
@@ -254,7 +236,7 @@ impl<F: FieldExt + TensorType, const LEN: usize, NL: Nonlinearity<F>>

 #[derive(Clone)]
 struct NLCircuit<F: FieldExt + TensorType, const LEN: usize, NL: Nonlinearity<F>> {
-    assigned: Nonlin1d<F, LEN, NL>,
+    assigned: Nonlin1d<F, NL>,
     _marker: PhantomData<F>,
     // nonlinearity: Box<dyn Fn(F) -> F>,
 }
@@ -346,7 +328,7 @@ mod tests {
        let k = 9; //2^k rows
        let output = Tensor::<i32>::new(Some(&[1, 2, 3, 4]), &[4]).unwrap();
        let relu_v: Tensor<Value<F>> = output.into();
-       let assigned: Nonlin1d<F, 4, ReLu<F>> = Nonlin1d {
+       let assigned: Nonlin1d<F, ReLu<F>> = Nonlin1d {
            input: ValTensor::from(relu_v.clone()),
            output: ValTensor::from(relu_v),
            _marker: PhantomData,