chore: documentation (#28)

* val and var tensor docstrings

* doc strings for tensor ops

* layer doc strings

* consistent interfaces for io and eltwise

* io docstrings

* cleanup

* onnx docs

* rm IOConfig and replace with VarTensor methods

* phrasing
Alexander Camuto
2022-09-28 18:46:02 +01:00
committed by GitHub
parent 7a51bee68c
commit 815a5cd324
13 changed files with 370 additions and 329 deletions

View File

@@ -49,3 +49,7 @@ criterion_group! {
targets = runrelu
}
```
## Docs
Use `cargo doc --open --features onnx` to compile and open the docs in your default browser.

View File

@@ -177,7 +177,6 @@ pub fn runmlp() {
.iter()
.map(|x| i32_to_felt::<F>(*x).into())
.collect()],
// vec![vec![(4).into(), (1).into(), (35).into(), (22).into()]],
)
.unwrap();
prover.assert_satisfied();

View File

@@ -1,3 +1,4 @@
/// Utilities for converting from Halo2 Field types to integers (and vice-versa).
use halo2_proofs::arithmetic::FieldExt;
pub fn i32_to_felt<F: FieldExt>(x: i32) -> F {
if x >= 0 {
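The hunk above cuts off the body of `i32_to_felt`. For reference, a minimal sketch of the likely implementation, assuming negative integers are encoded as additive inverses in the field (the inverse of what `felt_to_i32` would recover):

```rust
use halo2_proofs::arithmetic::FieldExt;

// Sketch only: non-negative values map directly into the field,
// negative values map to the field's additive inverse of |x|.
pub fn i32_to_felt<F: FieldExt>(x: i32) -> F {
    if x >= 0 {
        F::from(x as u64)
    } else {
        -F::from(x.unsigned_abs() as u64)
    }
}
```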

View File

@@ -1,8 +1,14 @@
#![feature(slice_flatten)]
pub mod nn;
/// Utilities for converting from Halo2 Field types to integers (and vice-versa).
pub mod fieldutils;
pub mod tensor_ops;
pub mod tensor;
/// Methods for configuring neural network layers and assigning values to them in a Halo2 circuit.
pub mod nn;
/// Methods for loading onnx format models and automatically laying them out in
/// a Halo2 circuit.
#[cfg(feature = "onnx")]
pub mod onnx;
/// An implementation of multi-dimensional tensors.
pub mod tensor;
/// Implementations of common operations on tensors.
pub mod tensor_ops;

View File

@@ -1,26 +1,27 @@
use super::*;
use crate::nn::io::*;
use crate::tensor::{Tensor, TensorType};
use halo2_proofs::{
arithmetic::FieldExt,
circuit::{AssignedCell, Layouter, Value},
circuit::{Layouter, Value},
plonk::{Assigned, ConstraintSystem, Constraints, Expression, Selector},
};
use std::marker::PhantomData;
/// Configuration for an affine layer, which multiplies an input by a weight kernel (a matrix
/// multiplication) and adds a bias vector to the result.
#[derive(Clone)]
pub struct Affine1dConfig<F: FieldExt + TensorType> {
// kernel is weights and biases concatenated
pub kernel: IOConfig<F>,
pub bias: IOConfig<F>,
pub input: IOConfig<F>,
pub output: IOConfig<F>,
pub kernel: VarTensor,
pub bias: VarTensor,
pub input: VarTensor,
pub output: VarTensor,
pub selector: Selector,
_marker: PhantomData<F>,
}
impl<F: FieldExt + TensorType> LayerConfig<F> for Affine1dConfig<F> {
// Takes the layer's input tensor as an argument, and completes the advice by generating new for the rest
/// Configures and creates an affine gate within a circuit.
/// Also constrains the output of the gate.
fn configure(
meta: &mut ConstraintSystem<F>,
params: &[VarTensor],
@@ -39,11 +40,10 @@ impl<F: FieldExt + TensorType> LayerConfig<F> for Affine1dConfig<F> {
let config = Self {
selector: meta.selector(),
kernel: IOConfig::configure(meta, kernel),
bias: IOConfig::configure(meta, bias),
// add 1 to incorporate bias !
input: IOConfig::configure(meta, input),
output: IOConfig::configure(meta, output),
kernel,
bias,
input,
output,
_marker: PhantomData,
};
@@ -69,16 +69,17 @@ impl<F: FieldExt + TensorType> LayerConfig<F> for Affine1dConfig<F> {
config
}
fn assign(
/// Assigns values to the affine gate variables created when calling `configure`.
fn layout(
&self,
layouter: &mut impl Layouter<F>,
input: ValTensor<F>,
params: &[ValTensor<F>],
) -> Tensor<AssignedCell<Assigned<F>, F>> {
) -> ValTensor<F> {
assert_eq!(params.len(), 2);
let (kernel, bias) = (params[0].clone(), params[1].clone());
layouter
let t = layouter
.assign_region(
|| "assign image and kernel",
|mut region| {
@@ -104,15 +105,8 @@ impl<F: FieldExt + TensorType> LayerConfig<F> for Affine1dConfig<F> {
.assign(&mut region, offset, ValTensor::from(output)))
},
)
.unwrap()
}
fn layout(
&self,
layouter: &mut impl Layouter<F>,
input: ValTensor<F>,
params: &[ValTensor<F>],
) -> ValTensor<F> {
assert!(params.len() == 2);
ValTensor::from(self.assign(layouter, input, params))
.unwrap();
ValTensor::from(t)
}
}

View File

@@ -1,23 +1,25 @@
use crate::tensor::{Tensor, TensorType};
use crate::tensor::TensorType;
use halo2_proofs::{
arithmetic::FieldExt,
circuit::{AssignedCell, Layouter, Value},
plonk::{Assigned, ConstraintSystem, Constraints, Selector},
circuit::{Layouter, Value},
plonk::{ConstraintSystem, Constraints, Selector},
};
use super::*;
use crate::nn::io::*;
use crate::tensor_ops::*;
use std::marker::PhantomData;
/// Configuration for a convolutional layer which convolves a kernel with an input (image).
#[derive(Debug, Clone)]
pub struct ConvConfig<F: FieldExt + TensorType, const STRIDE: usize, const PADDING: usize>
where
Value<F>: TensorType,
{
selector: Selector,
kernel: IOConfig<F>,
image: IOConfig<F>,
pub output: IOConfig<F>,
kernel: VarTensor,
input: VarTensor,
pub output: VarTensor,
_marker: PhantomData<F>,
}
impl<F: FieldExt + TensorType, const STRIDE: usize, const PADDING: usize> LayerConfig<F>
@@ -25,6 +27,8 @@ impl<F: FieldExt + TensorType, const STRIDE: usize, const PADDING: usize> LayerC
where
Value<F>: TensorType,
{
/// Configures and creates a convolution gate within a circuit.
/// Also constrains the output of the gate.
fn configure(
meta: &mut ConstraintSystem<F>,
params: &[VarTensor],
@@ -45,16 +49,17 @@ where
let config = Self {
selector: meta.selector(),
kernel: IOConfig::configure(meta, kernel),
image: IOConfig::configure(meta, input),
output: IOConfig::configure(meta, output),
kernel,
input,
output,
_marker: PhantomData,
};
meta.create_gate("convolution", |meta| {
let selector = meta.query_selector(config.selector);
// Get output expressions for each input channel
let image = config.image.query(meta, 0);
let image = config.input.query(meta, 0);
let kernel = config.kernel.query(meta, 0);
let expected_output = convolution::<_, PADDING, STRIDE>(kernel, image);
@@ -69,23 +74,30 @@ where
config
}
fn assign(
/// Assigns values to the convolution gate variables created when calling `configure`.
fn layout(
&self,
layouter: &mut impl Layouter<F>,
input: ValTensor<F>,
params: &[ValTensor<F>],
) -> Tensor<AssignedCell<Assigned<F>, F>> {
) -> ValTensor<F> {
assert_eq!(params.len(), 1);
let kernel = params[0].clone();
let image_width = input.dims()[2];
layouter
let (image_height, image_width) = (input.dims()[1], input.dims()[2]);
let (out_channels, kernel_height, kernel_width) =
(kernel.dims()[0], kernel.dims()[2], kernel.dims()[3]);
let horz = (image_height + 2 * PADDING - kernel_height) / STRIDE + 1;
let vert = (image_width + 2 * PADDING - kernel_width) / STRIDE + 1;
let mut t = layouter
.assign_region(
|| "assign image and kernel",
|mut region| {
self.selector.enable(&mut region, 0)?;
self.kernel.assign(&mut region, 0, kernel.clone());
self.image.assign(&mut region, 0, input.clone());
self.input.assign(&mut region, 0, input.clone());
let output = match input.clone() {
ValTensor::Value {
@@ -105,29 +117,7 @@ where
.assign(&mut region, image_width, ValTensor::from(output)))
},
)
.unwrap()
}
fn layout(
&self,
layouter: &mut impl Layouter<F>,
input: ValTensor<F>,
params: &[ValTensor<F>],
) -> ValTensor<F> {
assert_eq!(params.len(), 1);
let kernel = params[0].clone();
let (image_height, image_width) = (input.dims()[1], input.dims()[2]);
let (out_channels, kernel_height, kernel_width) =
(kernel.dims()[0], kernel.dims()[2], kernel.dims()[3]);
let horz = (image_height + 2 * PADDING - kernel_height) / STRIDE + 1;
let vert = (image_width + 2 * PADDING - kernel_width) / STRIDE + 1;
let mut t = self.assign(
&mut layouter.namespace(|| format!("filter")),
input.clone(),
params,
);
.unwrap();
t.reshape(&[out_channels, horz, vert]);
ValTensor::from(t)
}
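To make the shape arithmetic above concrete, here is a worked example with hypothetical dimensions (the numbers are illustrative, not from this diff):

```rust
// A 1 x 28 x 28 image convolved with a 4 x 1 x 3 x 3 kernel,
// with no padding and unit stride.
const PADDING: usize = 0;
const STRIDE: usize = 1;

fn main() {
    let (image_height, image_width) = (28, 28);
    let (out_channels, kernel_height, kernel_width) = (4, 3, 3);
    // Same formulas as in `layout` above:
    let horz = (image_height + 2 * PADDING - kernel_height) / STRIDE + 1;
    let vert = (image_width + 2 * PADDING - kernel_width) / STRIDE + 1;
    // The output tensor is reshaped to [out_channels, horz, vert].
    assert_eq!((out_channels, horz, vert), (4, 26, 26));
}
```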

View File

@@ -1,152 +0,0 @@
use super::*;
use crate::tensor::{ValTensor, VarTensor};
use halo2_proofs::{
arithmetic::FieldExt,
circuit::{AssignedCell, Layouter, Region, Value},
plonk::{Assigned, ConstraintSystem, Expression, Selector, VirtualCells},
poly::Rotation,
};
use std::marker::PhantomData;
#[derive(Debug, Clone)]
pub struct IOConfig<F: FieldExt + TensorType> {
pub values: VarTensor,
selector: Selector,
marker: PhantomData<F>,
}
impl<F: FieldExt + TensorType> IOConfig<F> {
pub fn configure(meta: &mut ConstraintSystem<F>, values: VarTensor) -> Self {
Self {
values,
selector: meta.selector(),
marker: PhantomData,
}
}
pub fn query(&self, meta: &mut VirtualCells<'_, F>, offset: usize) -> Tensor<Expression<F>> {
let mut t = match &self.values {
// when fixed we have 1 col per param
VarTensor::Fixed { inner: f, dims: _ } => {
f.map(|c| meta.query_fixed(c, Rotation(offset as i32)))
}
// when advice we have 1 col per row
VarTensor::Advice { inner: a, dims: d } => a
.map(|column| {
Tensor::from(
(0..*d.last().unwrap())
.map(|i| meta.query_advice(column, Rotation(offset as i32 + i as i32))),
)
})
.combine(),
};
t.reshape(self.values.dims());
t
}
pub fn query_idx(
&self,
meta: &mut VirtualCells<'_, F>,
idx: usize,
offset: usize,
) -> Expression<F> {
match &self.values {
VarTensor::Fixed { inner: f, dims: _ } => {
meta.query_fixed(f[idx], Rotation(offset as i32))
}
VarTensor::Advice { inner: a, dims: _ } => {
meta.query_advice(a[idx], Rotation(offset as i32))
}
}
}
pub fn assign(
&self,
region: &mut Region<'_, F>,
offset: usize,
kernel: ValTensor<F>,
) -> Tensor<AssignedCell<Assigned<F>, F>> {
match kernel {
ValTensor::Value { inner: v, dims: _ } => {
v.mc_enum_map(|coord, k| match &self.values {
VarTensor::Fixed { inner: f, dims: _ } => region
.assign_fixed(|| "k", f.get(&coord), offset, || k.into())
.unwrap(),
VarTensor::Advice { inner: a, dims: _ } => {
let coord = format_advice_coord(coord);
let last = coord.len() - 1;
// 1D advice doesn't match to 1D iterates
region
.assign_advice(
|| "k",
a.get(&coord[0..last]),
offset + coord[last],
|| k.into(),
)
.unwrap()
}
})
}
ValTensor::PrevAssigned { inner: v, dims: _ } => {
v.mc_enum_map(|coord, x| match &self.values {
VarTensor::Fixed { inner: _, dims: _ } => panic!("not implemented"),
VarTensor::Advice { inner: a, dims: _ } => {
let coord = format_advice_coord(coord);
let last = coord.len() - 1;
x.copy_advice(|| "k", region, a.get(&coord[0..last]), offset + coord[last])
.unwrap()
}
})
}
ValTensor::AssignedValue { inner: v, dims: _ } => {
v.mc_enum_map(|coord, k| match &self.values {
VarTensor::Fixed { inner: f, dims: _ } => region
.assign_fixed(|| "k", f.get(&coord), offset, || k)
.unwrap(),
VarTensor::Advice { inner: a, dims: _ } => {
let coord = format_advice_coord(coord);
let last = coord.len() - 1;
region
.assign_advice(
|| "k",
a.get(&coord[0..last]),
offset + coord[last],
|| k.into(),
)
.unwrap()
}
})
}
}
}
pub fn layout(
&self,
layouter: &mut impl Layouter<F>,
raw_input: Tensor<i32>,
) -> Result<Tensor<AssignedCell<Assigned<F>, F>>, halo2_proofs::plonk::Error> {
layouter.assign_region(
|| "Input",
|mut region| {
let offset = 0;
self.selector.enable(&mut region, offset)?;
Ok(self.assign(
&mut region,
offset,
ValTensor::from(<Tensor<i32> as Into<Tensor<Value<F>>>>::into(
raw_input.clone(),
)),
))
},
)
}
}
fn format_advice_coord(coord: &[usize]) -> Vec<usize> {
let last = coord.len() - 1;
let mut v = coord.to_vec();
if last == 0 {
v.insert(0, 0);
}
v
}

View File

@@ -1,31 +1,27 @@
use crate::tensor::*;
use halo2_proofs::{
arithmetic::FieldExt,
circuit::{AssignedCell, Layouter},
plonk::{Assigned, ConstraintSystem},
};
use halo2_proofs::{arithmetic::FieldExt, circuit::Layouter, plonk::ConstraintSystem};
/// Structs and methods for configuring and assigning to an affine gate within a Halo2 circuit.
pub mod affine;
/// Structs and methods for configuring and assigning to a convolutional gate within a Halo2 circuit.
pub mod cnvrl;
pub mod io;
/// Trait for configuring neural network layers in a Halo2 circuit.
pub trait LayerConfig<F: FieldExt + TensorType> {
/// Takes in a VarTensor input and params, and creates a series of operations (gates, in Halo2
/// nomenclature) over them to produce an output to which we can add equality constraints (for proving).
/// Returns a layer config whose variables we can then assign to by calling `layout()`.
fn configure(
_meta: &mut ConstraintSystem<F>,
params: &[VarTensor],
input: VarTensor,
output: VarTensor,
) -> Self;
/// Takes in ValTensor inputs and params and assigns them to the variables created when calling `configure()`.
fn layout(
&self,
layouter: &mut impl Layouter<F>,
input: ValTensor<F>,
params: &[ValTensor<F>],
) -> ValTensor<F>;
fn assign(
&self,
layouter: &mut impl Layouter<F>,
input: ValTensor<F>,
params: &[ValTensor<F>],
) -> Tensor<AssignedCell<Assigned<F>, F>>;
}
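A hypothetical caller's view of the two-step flow this trait describes, using the affine layer from the earlier hunk (the helper functions are illustrative, not part of the crate; module paths follow `lib.rs` above):

```rust
use halo2_proofs::{arithmetic::FieldExt, circuit::Layouter, plonk::ConstraintSystem};
use halo2deeplearning::nn::{affine::Affine1dConfig, LayerConfig};
use halo2deeplearning::tensor::{TensorType, ValTensor, VarTensor};

// Step 1, inside `Circuit::configure`: build the gate from `VarTensor` variables.
fn configure_affine<F: FieldExt + TensorType>(
    meta: &mut ConstraintSystem<F>,
    kernel: VarTensor,
    bias: VarTensor,
    input: VarTensor,
    output: VarTensor,
) -> Affine1dConfig<F> {
    Affine1dConfig::configure(meta, &[kernel, bias], input, output)
}

// Step 2, inside `Circuit::synthesize`: assign `ValTensor` values to those variables.
fn layout_affine<F: FieldExt + TensorType>(
    config: &Affine1dConfig<F>,
    layouter: &mut impl Layouter<F>,
    x: ValTensor<F>,
    k: ValTensor<F>,
    b: ValTensor<F>,
) -> ValTensor<F> {
    config.layout(layouter, x, &[k, b])
}
```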

View File

@@ -1,4 +1,6 @@
/// A wrapper around a tensor of Halo2 Value types.
pub mod val;
/// A wrapper around a tensor of circuit variables / advices.
pub mod var;
pub use val::*;
@@ -8,8 +10,9 @@ use crate::fieldutils::{felt_to_i32, i32_to_felt};
use halo2_proofs::{
arithmetic::FieldExt,
circuit::{AssignedCell, Value},
plonk::{Advice, Assigned, Column, ConstraintSystem, Expression, Fixed},
circuit::{AssignedCell, Value, Region},
plonk::{Advice, Assigned, Column, ConstraintSystem, Expression, Fixed, VirtualCells},
poly::Rotation,
};
use itertools::Itertools;
use std::fmt::Debug;
@@ -18,6 +21,7 @@ use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
/// The (inner) type of tensor elements.
pub trait TensorType: Clone + Debug + 'static {
/// Returns the zero value.
fn zero() -> Option<Self> {
@@ -79,6 +83,9 @@ impl TensorType for halo2curves::pasta::Fp {
#[derive(Debug)]
pub struct TensorError(String);
/// A generic multi-dimensional array.
/// The `inner` attribute contains a flat vector of values, while `dims` holds the dimensions of the array
/// and as such determines how we index, query for values, or slice the tensor.
#[derive(Clone, Debug, Eq)]
pub struct Tensor<T: TensorType> {
inner: Vec<T>,
@@ -172,7 +179,6 @@ impl<F: FieldExt + TensorType + Clone> From<Tensor<i32>> for Tensor<Value<F>> {
impl<T: Clone + TensorType> Tensor<T> {
/// Creates a new tensor from an optional slice of values and a list of dimensions.
pub fn new(values: Option<&[T]>, dims: &[usize]) -> Result<Self, TensorError> {
let total_dims: usize = dims.iter().product();
match values {
@@ -194,10 +200,11 @@ impl<T: Clone + TensorType> Tensor<T> {
}
}
/// Returns the number of elements in the tensor.
pub fn len(&mut self) -> usize {
self.dims().iter().product::<usize>()
}
/// Checks if the number of elements in tensor is 0.
pub fn is_empty(&mut self) -> bool {
self.dims().iter().product::<usize>() == 0
}
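A short usage sketch for the struct documented above, in the same style as the doctests added to `tensor_ops` in this commit:

```rust
use halo2deeplearning::tensor::Tensor;

fn main() {
    // `inner` holds 6 values; `dims` of [2, 3] determines how they are indexed.
    let mut t = Tensor::<i32>::new(Some(&[1, 2, 3, 4, 5, 6]), &[2, 3]).unwrap();
    assert_eq!(t.len(), 6);
    // Reinterpret the same 6 values as a 3 x 2 array.
    t.reshape(&[3, 2]);
    assert_eq!(t.dims(), &[3, 2]);
}
```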

View File

@@ -1,5 +1,9 @@
use super::*;
/// A wrapper around a tensor where the inner type is one of
/// Halo2's `Value<F>`, `Value<Assigned<F>>`, `AssignedCell<Assigned<F>, F>`.
/// This enum is generally used to assign values to variables / advices already configured in a Halo2 circuit (usually represented as a `VarTensor`).
/// For instance, a `ValTensor` can represent pre-trained neural network weights or a known input to a network.
#[derive(Debug, Clone)]
pub enum ValTensor<F: FieldExt + TensorType> {
Value {
@@ -44,6 +48,7 @@ impl<F: FieldExt + TensorType> From<Tensor<AssignedCell<Assigned<F>, F>>> for Va
}
impl<F: FieldExt + TensorType> ValTensor<F> {
/// Calls `get_slice` on the inner tensor.
pub fn get_slice(&self, indices: &[Range<usize>]) -> ValTensor<F> {
match self {
ValTensor::Value { inner: v, dims: _ } => {
@@ -70,6 +75,7 @@ impl<F: FieldExt + TensorType> ValTensor<F> {
}
}
/// Sets the `ValTensor`'s shape.
pub fn reshape(&mut self, new_dims: &[usize]) {
match self {
ValTensor::Value { inner: _, dims: d } => {
@@ -87,6 +93,7 @@ impl<F: FieldExt + TensorType> ValTensor<F> {
}
}
/// Calls `flatten` on the inner tensor.
pub fn flatten(&mut self) {
match self {
ValTensor::Value { inner: v, dims: d } => {
@@ -104,6 +111,7 @@ impl<F: FieldExt + TensorType> ValTensor<F> {
}
}
/// Returns the `dims` attribute of the `ValTensor`.
pub fn dims(&self) -> &[usize] {
match self {
ValTensor::Value { inner: _, dims: d } => d,
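A minimal sketch of how a `ValTensor` is typically produced, using the `From` conversions that appear in this diff (assumes the pasta `Fp` field, which has the `TensorType` impl shown in `tensor/mod.rs`):

```rust
use halo2_proofs::circuit::Value;
use halo2curves::pasta::Fp;
use halo2deeplearning::tensor::{Tensor, ValTensor};

fn main() {
    let raw = Tensor::<i32>::new(Some(&[1, -2, 3, 4]), &[2, 2]).unwrap();
    // Tensor<i32> -> Tensor<Value<Fp>>, then wrap as ValTensor::Value.
    let vals: Tensor<Value<Fp>> = raw.into();
    let mut vt = ValTensor::from(vals);
    assert_eq!(vt.dims(), &[2, 2]);
    // `flatten` collapses the dims to a single axis.
    vt.flatten();
    assert_eq!(vt.dims(), &[4]);
}
```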

View File

@@ -1,5 +1,13 @@
use super::*;
/// A wrapper around a tensor where the inner type is one of Halo2's `Column<Fixed>` or `Column<Advice>`.
/// The wrapper allows a `VarTensor`'s dimensions to differ from those of the inner (wrapped) tensor.
/// The inner tensor might, for instance, contain 3 advice columns, each of which in turn
/// represents 3 elements laid out in the circuit. So although the inner tensor is only of
/// dimension `[3]`, we can set the `VarTensor`'s dimensions to `[3,3]` to capture information
/// about the column layout. This enum is generally used to configure and lay out circuit variables / advices.
/// It can, for instance, represent neural network parameters within a circuit, to which we later
/// assign values by calling `assign` with a `ValTensor`.
#[derive(Clone, Debug)]
pub enum VarTensor {
Advice {
@@ -31,6 +39,7 @@ impl From<Tensor<Column<Fixed>>> for VarTensor {
}
impl VarTensor {
/// Calls `get_slice` on the inner tensor.
pub fn get_slice(&self, indices: &[Range<usize>], new_dims: &[usize]) -> VarTensor {
match self {
VarTensor::Advice { inner: v, dims: _ } => {
@@ -50,6 +59,7 @@ impl VarTensor {
}
}
/// Sets the `VarTensor`'s shape.
pub fn reshape(&mut self, new_dims: &[usize]) {
match self {
VarTensor::Advice { inner: _, dims: d } => {
@@ -63,6 +73,7 @@ impl VarTensor {
}
}
/// Enables equality on Advice type `VarTensor`.
pub fn enable_equality<F: FieldExt>(&self, meta: &mut ConstraintSystem<F>) {
match self {
VarTensor::Advice {
@@ -77,6 +88,7 @@ impl VarTensor {
}
}
/// Returns the `dims` attribute of the `VarTensor`.
pub fn dims(&self) -> &[usize] {
match self {
VarTensor::Advice { inner: _, dims: d } => d,
@@ -84,3 +96,115 @@ impl VarTensor {
}
}
}
impl VarTensor {
/// Retrieves the values represented within the columns of the `VarTensor` (recall that a `VarTensor`
/// is a tensor of Halo2 columns).
pub fn query<F: FieldExt>(
&self,
meta: &mut VirtualCells<'_, F>,
offset: usize,
) -> Tensor<Expression<F>> {
let mut t = match &self {
// when fixed we have 1 col per param
VarTensor::Fixed { inner: f, dims: _ } => {
f.map(|c| meta.query_fixed(c, Rotation(offset as i32)))
}
// when advice we have 1 col per row
VarTensor::Advice { inner: a, dims: d } => a
.map(|column| {
Tensor::from(
(0..*d.last().unwrap())
.map(|i| meta.query_advice(column, Rotation(offset as i32 + i as i32))),
)
})
.combine(),
};
t.reshape(self.dims());
t
}
/// Retrieves the value represented at a specific index within the columns of the inner tensor.
pub fn query_idx<F: FieldExt>(
&self,
meta: &mut VirtualCells<'_, F>,
idx: usize,
offset: usize,
) -> Expression<F> {
match &self {
VarTensor::Fixed { inner: f, dims: _ } => {
meta.query_fixed(f[idx], Rotation(offset as i32))
}
VarTensor::Advice { inner: a, dims: _ } => {
meta.query_advice(a[idx], Rotation(offset as i32))
}
}
}
/// Assigns specific values (`ValTensor`) to the columns of the inner tensor.
pub fn assign<F: FieldExt + TensorType>(
&self,
region: &mut Region<'_, F>,
offset: usize,
values: ValTensor<F>,
) -> Tensor<AssignedCell<Assigned<F>, F>> {
match values {
ValTensor::Value { inner: v, dims: _ } => v.mc_enum_map(|coord, k| match &self {
VarTensor::Fixed { inner: f, dims: _ } => region
.assign_fixed(|| "k", f.get(&coord), offset, || k.into())
.unwrap(),
VarTensor::Advice { inner: a, dims: _ } => {
let coord = format_advice_coord(coord);
let last = coord.len() - 1;
region
.assign_advice(
|| "k",
a.get(&coord[0..last]),
offset + coord[last],
|| k.into(),
)
.unwrap()
}
}),
ValTensor::PrevAssigned { inner: v, dims: _ } => {
v.mc_enum_map(|coord, x| match &self {
VarTensor::Fixed { inner: _, dims: _ } => panic!("not implemented"),
VarTensor::Advice { inner: a, dims: _ } => {
let coord = format_advice_coord(coord);
let last = coord.len() - 1;
x.copy_advice(|| "k", region, a.get(&coord[0..last]), offset + coord[last])
.unwrap()
}
})
}
ValTensor::AssignedValue { inner: v, dims: _ } => {
v.mc_enum_map(|coord, k| match &self {
VarTensor::Fixed { inner: f, dims: _ } => region
.assign_fixed(|| "k", f.get(&coord), offset, || k)
.unwrap(),
VarTensor::Advice { inner: a, dims: _ } => {
let coord = format_advice_coord(coord);
let last = coord.len() - 1;
region
.assign_advice(
|| "k",
a.get(&coord[0..last]),
offset + coord[last],
|| k.into(),
)
.unwrap()
}
})
}
}
}
}
fn format_advice_coord(coord: &[usize]) -> Vec<usize> {
let last = coord.len() - 1;
let mut v = coord.to_vec();
if last == 0 {
v.insert(0, 0);
}
v
}
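A sketch of the `[3]`-inner / `[3,3]`-logical layout described in the `VarTensor` docstring (a `From<Tensor<Column<Advice>>>` impl analogous to the `Column<Fixed>` one shown above is assumed):

```rust
use halo2_proofs::{arithmetic::FieldExt, plonk::ConstraintSystem};
use halo2deeplearning::tensor::{Tensor, VarTensor};

fn three_by_three<F: FieldExt>(meta: &mut ConstraintSystem<F>) -> VarTensor {
    // Inner tensor: 3 advice columns, i.e. dims [3].
    let cols = Tensor::from((0..3).map(|_| meta.advice_column()));
    let mut v = VarTensor::from(cols);
    // Logical dims [3, 3]: each column carries 3 circuit rows, so `query`
    // yields a [3, 3] tensor of expressions at a given rotation offset.
    v.reshape(&[3, 3]);
    v
}
```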

View File

@@ -2,8 +2,8 @@ use crate::fieldutils::{self, felt_to_i32, i32_to_felt};
use crate::tensor::*;
use halo2_proofs::{
arithmetic::FieldExt,
circuit::{AssignedCell, Layouter, Value},
plonk::{Assigned, ConstraintSystem, Selector, TableColumn},
circuit::{Layouter, Value},
plonk::{ConstraintSystem, Selector, TableColumn},
poly::Rotation,
};
use std::{cell::RefCell, marker::PhantomData, rc::Rc};
@@ -19,6 +19,7 @@ pub struct Nonlin1d<F: FieldExt + TensorType, NL: Nonlinearity<F>> {
pub _marker: PhantomData<(F, NL)>,
}
/// Halo2 lookup table for element-wise non-linearities.
// Table that should be reused across all lookups (so no Clone)
#[derive(Clone, Debug)]
pub struct EltwiseTable<F: FieldExt, const BITS: usize, NL: Nonlinearity<F>> {
@@ -73,6 +74,7 @@ impl<F: FieldExt, const BITS: usize, NL: Nonlinearity<F>> EltwiseTable<F, BITS,
}
}
/// Configuration for element-wise non-linearities.
#[derive(Clone, Debug)]
pub struct EltwiseConfig<F: FieldExt + TensorType, const BITS: usize, NL: Nonlinearity<F>> {
pub input: VarTensor,
@@ -84,6 +86,7 @@ pub struct EltwiseConfig<F: FieldExt + TensorType, const BITS: usize, NL: Nonlin
impl<F: FieldExt + TensorType, const BITS: usize, NL: 'static + Nonlinearity<F>>
EltwiseConfig<F, BITS, NL>
{
/// Configures multiple element-wise non-linearities at once.
pub fn configure_multiple<const NUM: usize>(
cs: &mut ConstraintSystem<F>,
input: VarTensor,
@@ -147,87 +150,83 @@ impl<F: FieldExt + TensorType, const BITS: usize, NL: 'static + Nonlinearity<F>>
}
}
fn assign(
&self,
layouter: &mut impl Layouter<F>,
input: ValTensor<F>,
) -> Tensor<AssignedCell<Assigned<F>, F>> {
layouter
.assign_region(
|| "Elementwise", // the name of the region
|mut region| {
let offset = 0;
self.qlookup.enable(&mut region, offset)?;
let w = match &input {
ValTensor::AssignedValue { inner: v, dims: _ } => match &self.input {
VarTensor::Advice {
inner: advice,
dims: _,
} => v.enum_map(|i, x| {
// assign the advice
region
.assign_advice(|| "input", advice[i], offset, || x)
.unwrap()
}),
_ => panic!("not yet implemented"),
},
ValTensor::PrevAssigned { inner: v, dims: _ } => match &self.input {
VarTensor::Advice {
inner: advice,
dims: _,
} =>
//copy the advice
{
v.enum_map(|i, x| {
x.copy_advice(|| "input", &mut region, advice[i], offset)
.unwrap()
})
}
_ => panic!("not yet implemented"),
},
ValTensor::Value { inner: v, dims: _ } => match &self.input {
VarTensor::Advice {
inner: advice,
dims: _,
} => v.enum_map(|i, x| {
// assign the advice
region
.assign_advice(|| "input", advice[i], offset, || x.into())
.unwrap()
}),
_ => panic!("not yet implemented"),
},
};
let output = Tensor::from(w.iter().map(|acaf| acaf.value_field()).map(|vaf| {
vaf.map(|f| {
<NL as Nonlinearity<F>>::nonlinearity(felt_to_i32(f.evaluate())).into()
})
}));
match &self.input {
VarTensor::Advice {
inner: advice,
dims: _,
} => Ok(output.enum_map(|i, o| {
region
.assign_advice(|| format!("nl_{i}"), advice[i], 1, || o)
.unwrap()
})),
_ => panic!("not yet implemented"),
}
},
)
.unwrap()
}
pub fn layout(&self, layouter: &mut impl Layouter<F>, input: ValTensor<F>) -> ValTensor<F> {
if !self.table.borrow().is_assigned {
self.table.borrow_mut().layout(layouter)
}
let mut t = ValTensor::from(self.assign(layouter, input.clone()));
let mut t = ValTensor::from(
layouter
.assign_region(
|| "Elementwise", // the name of the region
|mut region| {
let offset = 0;
self.qlookup.enable(&mut region, offset)?;
let w = match &input {
ValTensor::AssignedValue { inner: v, dims: _ } => match &self.input {
VarTensor::Advice {
inner: advice,
dims: _,
} => v.enum_map(|i, x| {
// assign the advice
region
.assign_advice(|| "input", advice[i], offset, || x)
.unwrap()
}),
_ => panic!("not yet implemented"),
},
ValTensor::PrevAssigned { inner: v, dims: _ } => match &self.input {
VarTensor::Advice {
inner: advice,
dims: _,
} =>
//copy the advice
{
v.enum_map(|i, x| {
x.copy_advice(|| "input", &mut region, advice[i], offset)
.unwrap()
})
}
_ => panic!("not yet implemented"),
},
ValTensor::Value { inner: v, dims: _ } => match &self.input {
VarTensor::Advice {
inner: advice,
dims: _,
} => v.enum_map(|i, x| {
// assign the advice
region
.assign_advice(|| "input", advice[i], offset, || x.into())
.unwrap()
}),
_ => panic!("not yet implemented"),
},
};
let output =
Tensor::from(w.iter().map(|acaf| acaf.value_field()).map(|vaf| {
vaf.map(|f| {
<NL as Nonlinearity<F>>::nonlinearity(felt_to_i32(f.evaluate()))
.into()
})
}));
match &self.input {
VarTensor::Advice {
inner: advice,
dims: _,
} => Ok(output.enum_map(|i, o| {
region
.assign_advice(|| format!("nl_{i}"), advice[i], 1, || o)
.unwrap()
})),
_ => panic!("not yet implemented"),
}
},
)
.unwrap(),
);
t.reshape(input.dims());
t
}
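The region above maps each assigned input through `nonlinearity(felt_to_i32(...))`. A hypothetical implementor, assuming the `Nonlinearity` trait exposes `fn nonlinearity(x: i32) -> F` and lives in this module (the ReLU here is illustrative):

```rust
use halo2_proofs::arithmetic::FieldExt;
use halo2deeplearning::fieldutils::i32_to_felt;
use halo2deeplearning::nn::eltwise::Nonlinearity; // trait path assumed
use std::marker::PhantomData;

pub struct ReLu<F> {
    _marker: PhantomData<F>,
}

impl<F: FieldExt> Nonlinearity<F> for ReLu<F> {
    // Clamp negative inputs to zero, pass non-negative ones through.
    fn nonlinearity(x: i32) -> F {
        if x < 0 {
            F::zero()
        } else {
            i32_to_felt(x)
        }
    }
}
```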

View File

@@ -1,7 +1,25 @@
/// Element-wise operations.
pub mod eltwise;
use crate::tensor::{Tensor, TensorType};
pub use std::ops::{Add, Mul};
/// Performs matrix multiplication on two 2D tensors.
/// ```
/// use halo2deeplearning::tensor::Tensor;
/// use halo2deeplearning::tensor_ops::matmul;
///
/// let x = Tensor::<i32>::new(
/// Some(&[5, 2, 3, 0, 4, -1, 3, 1, 6]),
/// &[3, 3],
/// ).unwrap();
/// let k = Tensor::<i32>::new(
/// Some(&[2, 1, 2, 1, 1, 1]),
/// &[2, 3],
/// ).unwrap();
/// let result = matmul(k, x);
/// let expected = Tensor::<i32>::new(Some(&[18, 2, 19, 10, 3, 10]), &[2, 3]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn matmul<T: TensorType + Mul<Output = T> + Add<Output = T>>(
kernel: Tensor<T>,
input: Tensor<T>,
@@ -9,20 +27,41 @@ pub fn matmul<T: TensorType + Mul<Output = T> + Add<Output = T>>(
let input_dims = input.dims();
let kernel_dims = kernel.dims();
assert!(input_dims[1] == kernel_dims[1]);
assert!(input_dims[0] == kernel_dims[1]);
// calculate value of output
let mut output: Tensor<T> = Tensor::new(None, &[1, kernel_dims[0]]).unwrap();
let mut output: Tensor<T> = Tensor::new(None, &[kernel_dims[0], input_dims[1]]).unwrap();
for i in 0..kernel_dims[0] {
output.set(
&[0, i],
dot_product(kernel.get_slice(&[i..i + 1]), input.get_slice(&[0..1])),
);
for j in 0..input_dims[1] {
output.set(
&[i, j],
dot_product(kernel.get_slice(&[i..i + 1]), input.get_slice(&[j..j + 1])),
);
}
}
output
}
/// Applies a 2D convolution to a 3D tensor of shape `C x H x W`, given a kernel of shape `O x C x KH x KW`.
/// ```
/// use halo2deeplearning::tensor::Tensor;
/// use halo2deeplearning::tensor_ops::convolution;
///
/// let x = Tensor::<i32>::new(
/// Some(&[5, 2, 3, 0, 4, -1, 3, 1, 6]),
/// &[1, 3, 3],
/// ).unwrap();
/// let k = Tensor::<i32>::new(
/// Some(&[5, 1, 1, 1]),
/// &[1, 1, 2, 2],
/// ).unwrap();
/// const PADDING: usize = 0;
/// const STRIDE: usize = 1;
/// let result = convolution::<i32, PADDING, STRIDE>(k, x);
/// let expected = Tensor::<i32>::new(Some(&[31, 16, 8, 26]), &[1, 2, 2]).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn convolution<
T: TensorType + Mul<Output = T> + Add<Output = T>,
const PADDING: usize,
@@ -77,6 +116,21 @@ pub fn convolution<
output
}
/// Dot product of two tensors.
/// ```
/// use halo2deeplearning::tensor::Tensor;
/// use halo2deeplearning::tensor_ops::dot_product;
///
/// let x = Tensor::<i32>::new(
/// Some(&[5, 2, 3, 0, 4, -1, 3, 1, 6]),
/// &[1, 3, 3],
/// ).unwrap();
/// let y = Tensor::<i32>::new(
/// Some(&[5, 5, 10, -4, 2, -1, 2, 0, 1]),
/// &[1, 3, 3],
/// ).unwrap();
/// assert_eq!(dot_product(x, y), 86);
/// ```
pub fn dot_product<T: TensorType + Mul<Output = T> + Add<Output = T>>(
w: Tensor<T>,
x: Tensor<T>,
@@ -86,7 +140,24 @@ pub fn dot_product<T: TensorType + Mul<Output = T> + Add<Output = T>>(
.fold(T::zero().unwrap(), |acc, (k, i)| acc + k.clone() * i)
}
fn pad<T: TensorType, const PADDING: usize>(image: Tensor<T>) -> Tensor<T> {
/// Pads a 3D tensor of shape `C x H x W` to a tensor of shape `C x (H + 2*PADDING) x (W + 2*PADDING)` using 0 values.
/// ```
/// use halo2deeplearning::tensor::Tensor;
/// use halo2deeplearning::tensor_ops::pad;
///
/// let x = Tensor::<i32>::new(
/// Some(&[5, 2, 3, 0, 4, -1, 3, 1, 6]),
/// &[1, 3, 3],
/// ).unwrap();
/// const PADDING: usize = 1;
/// let result = pad::<i32, PADDING>(x);
/// let expected = Tensor::<i32>::new(
/// Some(&[0, 0, 0, 0, 0, 0, 5, 2, 3, 0, 0, 0, 4, -1, 0, 0, 3, 1, 6, 0, 0, 0, 0, 0, 0]),
/// &[1, 5, 5],
/// ).unwrap();
/// assert_eq!(result, expected);
/// ```
pub fn pad<T: TensorType, const PADDING: usize>(image: Tensor<T>) -> Tensor<T> {
assert_eq!(image.dims().len(), 3);
let (channels, height, width) = (image.dims()[0], image.dims()[1], image.dims()[2]);
let padded_height = height + 2 * PADDING;
@@ -108,9 +179,3 @@ fn pad<T: TensorType, const PADDING: usize>(image: Tensor<T>) -> Tensor<T> {
output.reshape(&[channels, padded_height, padded_width]);
output
}
pub fn op<T: TensorType>(images: Vec<Tensor<T>>, f: impl Fn(T, T) -> T + Clone) -> Tensor<T> {
images.iter().skip(1).fold(images[0].clone(), |acc, image| {
acc.enum_map(|i, e| f(e, image[i].clone()))
})
}