chore: cleanup layers and update readme (#20)

* cleanup eltwise and conv

* phrasing
This commit is contained in:
Alexander Camuto
2022-09-20 19:57:49 +01:00
committed by GitHub
parent 55df4c63e6
commit 76ec430dc5
6 changed files with 26 additions and 44 deletions

View File

@@ -6,6 +6,12 @@ This is a proof-of-concept implementation of inference for deep learning models
In the examples, we demonstrate proving inference for a model that achieves 97.5% accuracy on MNIST.
Note that the library requires a nightly version of the rust toolchain. You can change the default toolchain by running:
```bash
rustup override set nightly
```
## Running examples
The MNIST inference example is contained in `examples/conv2d_mnist`. To run it:
@@ -17,7 +23,10 @@ We also provide an example which runs an MLP on input data with four dimensions.
cargo run --release --example mlp_4d
```
Note that the library requires a nightly version of the rust toolchain. You can change the default toolchain by running:
### Running onnx example
To run the example which loads parameters from an ONNX model file you need to enable the `onnx` build feature:
```bash
rustup override set nightly
cargo run --release --example smallonnx --features onnx
```

View File

@@ -52,7 +52,7 @@ struct Config<
> where
Value<F>: TensorType,
{
l0: ConvConfig<F, OUT_CHANNELS, STRIDE, IN_CHANNELS, PADDING>,
l0: ConvConfig<F, STRIDE, IN_CHANNELS, PADDING>,
l0q: EltwiseConfig<F, BITS, DivideBy<F, 32>>,
l1: EltwiseConfig<F, BITS, ReLu<F>>,
l2: Affine1dConfig<F>,
@@ -155,15 +155,7 @@ where
.into();
kernel.reshape(&[KERNEL_WIDTH, KERNEL_HEIGHT]);
let l0 = ConvConfig::<
F,
OUT_CHANNELS,
STRIDE,
IN_CHANNELS,
PADDING,
>::configure(
let l0 = ConvConfig::<F, STRIDE, IN_CHANNELS, PADDING>::configure(
cs,
&[VarTensor::from(kernel)],
advices.get_slice(&[0..IMAGE_WIDTH], &[IMAGE_WIDTH, IMAGE_HEIGHT]),

View File

@@ -38,7 +38,7 @@ mod onnx_example {
}
fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
let onnx_model = OnnxModel::new("onnx_models/ff.onnx");
let onnx_model = OnnxModel::new("examples/onnx_models/ff.onnx");
let l0_kernel = onnx_model.get_tensor_by_node_name("fc1.weight", 0f32, 256f32);
let shape = l0_kernel.dims();
let in_dims = shape[1];
@@ -112,7 +112,7 @@ mod onnx_example {
pub fn run() {
let k = 15; //2^k rows
let onnx_model = OnnxModel::new("onnx_models/ff.onnx");
let onnx_model = OnnxModel::new("examples/onnx_models/ff.onnx");
let l0_kernel = onnx_model.get_tensor_by_node_name("fc1.weight", 0f32, 256f32);
let mut l0_bias = onnx_model.get_tensor_by_node_name("fc1.bias", 0f32, 256f32);

View File

@@ -12,7 +12,6 @@ use crate::tensor_ops::*;
#[derive(Debug, Clone)]
pub struct ConvConfig<
F: FieldExt + TensorType,
const OUT_CHANNELS: usize,
const STRIDE: usize,
const IN_CHANNELS: usize,
const PADDING: usize,
@@ -27,11 +26,10 @@ pub struct ConvConfig<
impl<
F: FieldExt + TensorType,
const OUT_CHANNELS: usize,
const STRIDE: usize,
const IN_CHANNELS: usize,
const PADDING: usize,
> LayerConfig<F> for ConvConfig<F, OUT_CHANNELS, STRIDE, IN_CHANNELS, PADDING>
> LayerConfig<F> for ConvConfig<F, STRIDE, IN_CHANNELS, PADDING>
where
Value<F>: TensorType,
{
@@ -88,7 +86,7 @@ where
) -> Tensor<AssignedCell<Assigned<F>, F>> {
assert!(params.len() == 1);
let kernel = params[0].clone();
let image_height = input.dims()[2];
let (in_channels, image_height) = (input.dims()[0], input.dims()[2]);
layouter
.assign_region(
|| "assign image and kernel",
@@ -96,7 +94,7 @@ where
let mut offset = 0;
self.selector.enable(&mut region, offset)?;
let outputs = (0..IN_CHANNELS)
let outputs = (0..in_channels)
.map(|i| {
self.kernel
.assign(&mut region, offset, kernel.get_slice(&[i..i + 1]));
@@ -143,11 +141,12 @@ where
assert_eq!(params.len(), 1);
let kernel = params[0].clone();
let (image_width, image_height) = (input.dims()[1], input.dims()[2]);
let (kernel_width, kernel_height) = (kernel.dims()[2], kernel.dims()[3]);
let (out_channels, kernel_width, kernel_height) =
(kernel.dims()[0], kernel.dims()[2], kernel.dims()[3]);
let horz = (image_width + 2 * PADDING - kernel_width) / STRIDE + 1;
let vert = (image_height + 2 * PADDING - kernel_height) / STRIDE + 1;
let t = Tensor::from((0..OUT_CHANNELS).map(|i| {
let t = Tensor::from((0..out_channels).map(|i| {
self.assign(
&mut layouter.namespace(|| format!("filter: {:?}", i)),
input.clone(),
@@ -155,7 +154,7 @@ where
)
}));
let mut t = t.flatten();
t.reshape(&[OUT_CHANNELS, horz, vert]);
t.reshape(&[out_channels, horz, vert]);
ValTensor::from(t)
}
}
@@ -217,7 +216,7 @@ mod tests {
where
Value<F>: TensorType,
{
type Config = ConvConfig<F, OUT_CHANNELS, STRIDE, IN_CHANNELS, PADDING>;
type Config = ConvConfig<F, STRIDE, IN_CHANNELS, PADDING>;
type FloorPlanner = SimpleFloorPlanner;
fn without_witnesses(&self) -> Self {

View File

@@ -13,29 +13,11 @@ pub trait Nonlinearity<F: FieldExt> {
}
#[derive(Clone, Debug)]
pub struct Nonlin1d<F: FieldExt + TensorType, const LEN: usize, NL: Nonlinearity<F>> {
pub struct Nonlin1d<F: FieldExt + TensorType, NL: Nonlinearity<F>> {
pub input: ValTensor<F>,
pub output: ValTensor<F>,
pub _marker: PhantomData<(F, NL)>,
}
impl<F: FieldExt + TensorType, const LEN: usize, NL: Nonlinearity<F>> Nonlin1d<F, LEN, NL> {
pub fn fill<Func>(mut f: Func) -> Self
where
Func: FnMut(Tensor<usize>) -> ValTensor<F>,
{
Nonlin1d {
input: f(Tensor::from(0..LEN)),
output: f(Tensor::from(0..LEN)),
_marker: PhantomData,
}
}
pub fn without_witnesses() -> Nonlin1d<F, LEN, NL> {
Nonlin1d::<F, LEN, NL>::fill(|x| {
let t: Tensor<Value<F>> = x.map(|_| Value::default());
ValTensor::from(t)
})
}
}
// Table that should be reused across all lookups (so no Clone)
#[derive(Clone, Debug)]
@@ -254,7 +236,7 @@ impl<F: FieldExt + TensorType, const BITS: usize, NL: 'static + Nonlinearity<F>>
#[derive(Clone)]
struct NLCircuit<F: FieldExt + TensorType, const LEN: usize, const BITS: usize, NL: Nonlinearity<F>>
{
assigned: Nonlin1d<F, LEN, NL>,
assigned: Nonlin1d<F, NL>,
_marker: PhantomData<NL>, // nonlinearity: Box<dyn Fn(F) -> F>,
}
@@ -346,7 +328,7 @@ mod tests {
let k = 9; //2^k rows
let output = Tensor::<i32>::new(Some(&[1, 2, 3, 4]), &[4]).unwrap();
let relu_v: Tensor<Value<F>> = output.into();
let assigned: Nonlin1d<F, 4, ReLu<F>> = Nonlin1d {
let assigned: Nonlin1d<F, ReLu<F>> = Nonlin1d {
input: ValTensor::from(relu_v.clone()),
output: ValTensor::from(relu_v),
_marker: PhantomData,