Mirror of https://github.com/zkonduit/ezkl.git (synced 2026-01-10 06:48:01 -05:00)
feat: make max_rotations configurable (#82)
@@ -33,10 +33,10 @@ impl<F: FieldExt + TensorType> Circuit<F> for MyCircuit<F> {
     fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
         let len = unsafe { LEN };

-        let input = VarTensor::new_advice(cs, K, len, vec![len], true);
-        let kernel = VarTensor::new_advice(cs, K, len * len, vec![len, len], true);
-        let bias = VarTensor::new_advice(cs, K, len, vec![len], true);
-        let output = VarTensor::new_advice(cs, K, len, vec![len], true);
+        let input = VarTensor::new_advice(cs, K, len, vec![len], true, 512);
+        let kernel = VarTensor::new_advice(cs, K, len * len, vec![len, len], true, 512);
+        let bias = VarTensor::new_advice(cs, K, len, vec![len], true, 512);
+        let output = VarTensor::new_advice(cs, K, len, vec![len], true, 512);
         // tells the config layer to add an affine op to a circuit gate
         let affine_node = FusedNode {
             op: FusedOp::Affine,
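The hunks that follow apply the same mechanical change at every example and test call site: the per-column row cap, previously hard-coded inside VarTensor, becomes a trailing argument, and passing 512 preserves the old layout. A minimal before/after fragment (not standalone code; it assumes the cs, K, and len bindings of the configure function above):

    // Before this commit: the 512-row cap was baked into new_advice itself.
    // let input = VarTensor::new_advice(cs, K, len, vec![len], true);

    // After: the cap is explicit; 512 reproduces the previous behaviour, while
    // a larger value packs more rows of the tensor into each advice column.
    let input = VarTensor::new_advice(cs, K, len, vec![len], true, 512);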
@@ -53,6 +53,7 @@ where
             IN_CHANNELS * IMAGE_HEIGHT * IMAGE_WIDTH,
             vec![IN_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH],
             true,
+            512,
         );
         let kernel = VarTensor::new_advice(
             cs,
@@ -60,15 +61,17 @@ where
             OUT_CHANNELS * IN_CHANNELS * KERNEL_HEIGHT * KERNEL_WIDTH,
             vec![OUT_CHANNELS, IN_CHANNELS, KERNEL_HEIGHT, KERNEL_WIDTH],
             true,
+            512
         );

-        let bias = VarTensor::new_advice(cs, K, OUT_CHANNELS, vec![OUT_CHANNELS], true);
+        let bias = VarTensor::new_advice(cs, K, OUT_CHANNELS, vec![OUT_CHANNELS], true, 512);
         let output = VarTensor::new_advice(
             cs,
             K,
             OUT_CHANNELS * output_height * output_width,
             vec![OUT_CHANNELS, output_height, output_width],
             true,
+            512,
         );

         // tells the config layer to add a conv op to a circuit gate
@@ -33,7 +33,7 @@ impl<F: FieldExt + TensorType> Circuit<F> for MyCircuit<F> {
     fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
         let len = unsafe { LEN };
         let advices = (0..2)
-            .map(|_| VarTensor::new_advice(cs, K, len, vec![len], true))
+            .map(|_| VarTensor::new_advice(cs, K, len, vec![len], true, 512))
             .collect_vec();

         RangeCheckConfig::configure(cs, &advices[0], &advices[1], RANGE)
@@ -34,7 +34,7 @@ impl<F: FieldExt + TensorType, NL: 'static + Nonlinearity<F> + Clone> Circuit<F>
     fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
         unsafe {
             let advices = (0..2)
-                .map(|_| VarTensor::new_advice(cs, K, LEN, vec![LEN], true))
+                .map(|_| VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512))
                 .collect::<Vec<_>>();

             Self::Config::configure(cs, &advices[0], &advices[1], Some(&[BITS, 128]))
@@ -149,6 +149,7 @@ where
             max(IN_CHANNELS * IMAGE_HEIGHT * IMAGE_WIDTH, LEN),
             vec![IN_CHANNELS, IMAGE_HEIGHT, IMAGE_WIDTH],
             true,
+            512,
         );
         let kernel = VarTensor::new_advice(
             cs,
@@ -159,16 +160,18 @@ where
             ),
             vec![OUT_CHANNELS, IN_CHANNELS, KERNEL_HEIGHT, KERNEL_WIDTH],
             true,
+            512,
         );

         let bias =
-            VarTensor::new_advice(cs, K, max(OUT_CHANNELS, CLASSES), vec![OUT_CHANNELS], true);
+            VarTensor::new_advice(cs, K, max(OUT_CHANNELS, CLASSES), vec![OUT_CHANNELS], true, 512);
         let output = VarTensor::new_advice(
             cs,
             K,
             max(OUT_CHANNELS * output_height * output_width, LEN),
             vec![OUT_CHANNELS, output_height, output_width],
             true,
+            512,
         );

         // tells the config layer to add a conv op to a circuit gate
@@ -50,10 +50,10 @@ impl<F: FieldExt + TensorType, const LEN: usize, const BITS: usize> Circuit<F>
     // Here we wire together the layers by using the output advice in each layer as input advice in the next (not with copying / equality).
     // This can be automated but we will sometimes want skip connections, etc. so we need the flexibility.
     fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
-        let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
-        let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true);
-        let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
-        let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
+        let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
+        let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true, 512);
+        let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
+        let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
         // tells the config layer to add an affine op to the circuit gate
         let affine_node = FusedNode {
             op: FusedOp::Affine,
@@ -336,7 +336,7 @@ mod tests {

     fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
         let advices = (0..2)
-            .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true))
+            .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true, 512))
             .collect::<Vec<_>>();

         Self::Config::configure(cs, &advices[0], &advices[1], Some(&[2, 1]))
@@ -335,10 +335,10 @@ mod tests {
     }

     fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
-        let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
-        let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true);
-        let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
-        let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true);
+        let input = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
+        let kernel = VarTensor::new_advice(cs, K, LEN * LEN, vec![LEN, LEN], true, 512);
+        let bias = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
+        let output = VarTensor::new_advice(cs, K, LEN, vec![LEN], true, 512);
         // tells the config layer to add an affine op to a circuit gate
         let affine_node = FusedNode {
             op: FusedOp::Affine,
@@ -156,7 +156,7 @@ mod tests {

     fn configure(cs: &mut ConstraintSystem<F>) -> Self::Config {
         let advices = (0..2)
-            .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true))
+            .map(|_| VarTensor::new_advice(cs, 4, 1, vec![1], true, 512))
             .collect_vec();
         let input = &advices[0];
         let expected = &advices[1];
@@ -30,6 +30,9 @@ pub struct Cli {
     /// Flags whether params are public
     #[arg(long, default_value = "false")]
     pub public_params: bool,
+    /// Flags to set maximum rotations
+    #[arg(short = 'M', long, default_value = "512")]
+    pub max_rotations: usize,
 }

 #[derive(ValueEnum, Copy, Clone, Debug, PartialEq, Eq)]
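To make the flag itself concrete, here is a small self-contained sketch of just this field, assuming clap v4's derive API (which the #[arg(...)] attribute indicates); the real Cli struct carries many more options, and parse_from is used only to demonstrate the accepted spellings and the default:

    use clap::Parser;

    /// Reduced stand-in for ezkl's Cli, keeping only the new flag.
    #[derive(Parser, Debug)]
    struct Cli {
        /// Flags to set maximum rotations
        #[arg(short = 'M', long, default_value = "512")]
        max_rotations: usize,
    }

    fn main() {
        // No flag given: the default of 512 matches the previously hard-coded cap.
        assert_eq!(Cli::parse_from(["ezkl"]).max_rotations, 512);

        // Short and long forms both set the cap used when laying out VarTensors.
        assert_eq!(Cli::parse_from(["ezkl", "-M", "1024"]).max_rotations, 1024);
        assert_eq!(Cli::parse_from(["ezkl", "--max-rotations", "1024"]).max_rotations, 1024);
    }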
@@ -122,6 +122,8 @@ impl<F: FieldExt + TensorType> ModelVars<F> {
         fixed_dims: (usize, usize),
         instance_dims: (usize, Vec<Vec<usize>>),
     ) -> Self {
+        let tensor_max = Cli::parse().max_rotations;
+
         let advices = (0..advice_dims.0)
             .map(|_| {
                 VarTensor::new_advice(
@@ -130,12 +132,20 @@ impl<F: FieldExt + TensorType> ModelVars<F> {
                     advice_dims.1,
                     vec![advice_dims.1],
                     true,
+                    tensor_max,
                 )
             })
             .collect_vec();
         let fixed = (0..fixed_dims.0)
             .map(|_| {
-                VarTensor::new_fixed(cs, logrows as usize, fixed_dims.1, vec![fixed_dims.1], true)
+                VarTensor::new_fixed(
+                    cs,
+                    logrows as usize,
+                    fixed_dims.1,
+                    vec![fixed_dims.1],
+                    true,
+                    tensor_max,
+                )
             })
             .collect_vec();
         let instances = (0..instance_dims.0)
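ModelVars::new now reads the cap once via Cli::parse() and hands the same tensor_max to both the advice and fixed allocations. Its practical effect is on how many columns each tensor is split across, following the capacity / max_rows + 1 rule visible in the VarTensor hunk below; a hedged, self-contained sketch with purely illustrative numbers:

    // Column count implied by a capacity and a row cap, mirroring the
    // `(capacity / max_rows) + 1` computation in VarTensor::new_advice/new_fixed.
    fn columns_needed(capacity: usize, tensor_max: usize) -> usize {
        capacity / tensor_max + 1
    }

    fn main() {
        // With the default cap, a 2048-element advice tensor spans 5 columns;
        // doubling the cap (e.g. `-M 1024`) brings that down to 3.
        assert_eq!(columns_needed(2048, 512), 5);
        assert_eq!(columns_needed(2048, 1024), 3);
    }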
@@ -33,9 +33,10 @@ impl VarTensor {
         capacity: usize,
         dims: Vec<usize>,
         equality: bool,
+        v1: usize,
     ) -> Self {
         let base = 2u32;
-        let max_rows = min(512, base.pow(k as u32) as usize - cs.blinding_factors() - 1);
+        let max_rows = min(v1, base.pow(k as u32) as usize - cs.blinding_factors() - 1);
         let modulo = (capacity / max_rows) + 1;
         let mut advices = vec![];
         for _ in 0..modulo {
@@ -60,9 +61,10 @@ impl VarTensor {
         capacity: usize,
         dims: Vec<usize>,
         equality: bool,
+        v1: usize,
     ) -> Self {
         let base = 2u32;
-        let max_rows = min(512, base.pow(k as u32) as usize - cs.blinding_factors() - 1);
+        let max_rows = min(v1, base.pow(k as u32) as usize - cs.blinding_factors() - 1);
         let modulo = (capacity / max_rows) + 1;
         let mut fixed = vec![];
         for _ in 0..modulo {
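Inside new_advice and new_fixed the configurable cap is still clamped by the rows actually usable at a given k, so raising --max-rotations only changes the layout once the circuit is tall enough. A minimal sketch of that clamping, with the blinding-factor count assumed as a constant (the real value comes from halo2's ConstraintSystem::blinding_factors()):

    use std::cmp::min;

    // Usable rows per column: the configurable cap `v1`, clamped by circuit height.
    fn max_rows(k: u32, v1: usize, blinding_factors: usize) -> usize {
        let base = 2u32;
        min(v1, base.pow(k) as usize - blinding_factors - 1)
    }

    fn main() {
        let blinding = 6; // assumed; halo2 reports the exact count at configure time

        // For small k the circuit height is the binding constraint...
        assert_eq!(max_rows(8, 512, blinding), 249); // 2^8 - 6 - 1
        // ...for larger k the cap takes over, and only then does -M matter.
        assert_eq!(max_rows(17, 512, blinding), 512);
        assert_eq!(max_rows(17, 4096, blinding), 4096);
    }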