mirror of https://github.com/zkonduit/ezkl.git
synced 2026-01-14 08:48:01 -05:00

Compare commits (1 commit): 9cb958ca8d

Cargo.lock (generated): 16 lines changed
@@ -2543,12 +2543,6 @@ dependencies = [
  "allocator-api2",
 ]
 
-[[package]]
-name = "hashbrown"
-version = "0.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
-
 [[package]]
 name = "heck"
 version = "0.4.1"
@@ -2817,12 +2811,12 @@ dependencies = [
 
 [[package]]
 name = "indexmap"
-version = "2.6.0"
+version = "2.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
+checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4"
 dependencies = [
  "equivalent",
- "hashbrown 0.15.0",
+ "hashbrown 0.14.3",
 ]
 
 [[package]]
@@ -5634,9 +5628,9 @@ checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
 
 [[package]]
 name = "tower-service"
-version = "0.3.3"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
+checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
 
 [[package]]
 name = "tracing"
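Note: these three lockfile hunks form one coherent downgrade. indexmap is pinned back from 2.6.0 to 2.2.5, its hashbrown dependency moves from 0.15.0 to 0.14.3 (visible in the dependencies list above), and the now-unreferenced hashbrown 0.15.0 package entry is dropped; tower-service rolls back from 0.3.3 to 0.3.2 in the same pass.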
@@ -56,7 +56,6 @@ alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5
  "rpc-types-eth",
  "signer-wallet",
  "node-bindings",
-
 ], optional = true }
 foundry-compilers = { version = "0.4.1", features = ["svm-solc"], optional = true }
 ethabi = { version = "18", optional = true }
@@ -1,4 +1,4 @@
-ezkl==0.0.0
+ezkl==14.2.0
 sphinx
 sphinx-rtd-theme
 sphinxcontrib-napoleon
@@ -1,7 +1,7 @@
 import ezkl
 
 project = 'ezkl'
-release = '0.0.0'
+release = '14.2.0'
 version = release
 
 
@@ -177,7 +177,7 @@ impl<'source> FromPyObject<'source> for Tolerance {
 #[derive(Clone, Debug, Default)]
 pub struct DynamicLookups {
     /// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
-    pub lookup_selectors: BTreeMap<(usize, (usize, usize)), Selector>,
+    pub lookup_selectors: BTreeMap<(usize, usize), Selector>,
     /// Selectors for the dynamic lookup tables
     pub table_selectors: Vec<Selector>,
     /// Inputs:
@@ -209,7 +209,7 @@ impl DynamicLookups {
 #[derive(Clone, Debug, Default)]
 pub struct Shuffles {
     /// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
-    pub input_selectors: BTreeMap<(usize, (usize, usize)), Selector>,
+    pub input_selectors: BTreeMap<(usize, usize), Selector>,
     /// Selectors for the dynamic lookup tables
     pub reference_selectors: Vec<Selector>,
     /// Inputs:
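The key change in both structs drops the leading table-block index q: with dynamic tables now restricted to a single block (see the configuration hunks below), a selector is identified by the (block, inner-column) coordinate of the input alone. A minimal sketch of the two keying schemes, using a plain usize as a hypothetical stand-in for halo2's Selector:

use std::collections::BTreeMap;

// Hypothetical stand-in for halo2's Selector, for illustration only.
type SelectorId = usize;

fn main() {
    // Old keying: (table block q, (input block x, inner column y)).
    let mut old: BTreeMap<(usize, (usize, usize)), SelectorId> = BTreeMap::new();
    old.insert((0, (1, 2)), 7);

    // New keying: tables are limited to one block, so q is gone and
    // (x, y) alone identifies the lookup/input selector.
    let mut new: BTreeMap<(usize, usize), SelectorId> = BTreeMap::new();
    new.insert((1, 2), 7);

    assert_eq!(old.get(&(0, (1, 2))), new.get(&(1, 2)));
}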
@@ -646,73 +646,57 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
         }
 
         for t in tables.iter() {
-            if !t.is_advice() || t.num_inner_cols() > 1 {
+            if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
                 return Err(CircuitError::WrongDynamicColumnType(t.name().to_string()));
             }
         }
 
-        // assert all tables have the same number of inner columns
-        if tables
-            .iter()
-            .map(|t| t.num_blocks())
-            .collect::<Vec<_>>()
-            .windows(2)
-            .any(|w| w[0] != w[1])
-        {
-            return Err(CircuitError::WrongDynamicColumnType(
-                "tables inner cols".to_string(),
-            ));
-        }
-
         let one = Expression::Constant(F::ONE);
 
-        for q in 0..tables[0].num_blocks() {
-            let s_ltable = cs.complex_selector();
-
-            for x in 0..lookups[0].num_blocks() {
-                for y in 0..lookups[0].num_inner_cols() {
-                    let s_lookup = cs.complex_selector();
-
-                    cs.lookup_any("lookup", |cs| {
-                        let s_lookupq = cs.query_selector(s_lookup);
-                        let mut expression = vec![];
-                        let s_ltableq = cs.query_selector(s_ltable);
-                        let mut lookup_queries = vec![one.clone()];
-
-                        for lookup in lookups {
-                            lookup_queries.push(match lookup {
-                                VarTensor::Advice { inner: advices, .. } => {
-                                    cs.query_advice(advices[x][y], Rotation(0))
-                                }
-                                _ => unreachable!(),
-                            });
-                        }
-
-                        let mut table_queries = vec![one.clone()];
-                        for table in tables {
-                            table_queries.push(match table {
-                                VarTensor::Advice { inner: advices, .. } => {
-                                    cs.query_advice(advices[q][0], Rotation(0))
-                                }
-                                _ => unreachable!(),
-                            });
-                        }
-
-                        let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone());
-                        let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone());
-                        expression.extend(lhs.zip(rhs));
-
-                        expression
-                    });
-                    self.dynamic_lookups
-                        .lookup_selectors
-                        .entry((q, (x, y)))
-                        .or_insert(s_lookup);
-                }
-            }
-            self.dynamic_lookups.table_selectors.push(s_ltable);
-        }
+        let s_ltable = cs.complex_selector();
+
+        for x in 0..lookups[0].num_blocks() {
+            for y in 0..lookups[0].num_inner_cols() {
+                let s_lookup = cs.complex_selector();
+
+                cs.lookup_any("lookup", |cs| {
+                    let s_lookupq = cs.query_selector(s_lookup);
+                    let mut expression = vec![];
+                    let s_ltableq = cs.query_selector(s_ltable);
+                    let mut lookup_queries = vec![one.clone()];
+
+                    for lookup in lookups {
+                        lookup_queries.push(match lookup {
+                            VarTensor::Advice { inner: advices, .. } => {
+                                cs.query_advice(advices[x][y], Rotation(0))
+                            }
+                            _ => unreachable!(),
+                        });
+                    }
+
+                    let mut table_queries = vec![one.clone()];
+                    for table in tables {
+                        table_queries.push(match table {
+                            VarTensor::Advice { inner: advices, .. } => {
+                                cs.query_advice(advices[0][0], Rotation(0))
+                            }
+                            _ => unreachable!(),
+                        });
+                    }
+
+                    let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone());
+                    let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone());
+                    expression.extend(lhs.zip(rhs));
+
+                    expression
+                });
+                self.dynamic_lookups
+                    .lookup_selectors
+                    .entry((x, y))
+                    .or_insert(s_lookup);
+            }
+        }
+        self.dynamic_lookups.table_selectors.push(s_ltable);
 
         // if we haven't previously initialized the input/output, do so now
         if self.dynamic_lookups.tables.is_empty() {
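In both versions of this block the lookup argument is assembled the same way: the input tuple (1, input_0, ...) scaled by s_lookup must appear among the table tuples (1, table_0, ...) scaled by s_ltable; only the iteration over table blocks changes. The leading constant one is what makes the selector gating sound: rows where both selectors are off collapse to all-zero tuples that match each other vacuously, while an enabled input row can never match a disabled table row. A plain-Rust simulation of that argument, with i64 values standing in for field elements (an illustration, not halo2 itself):

// Simulates the selector-gated membership check assembled above:
// s_lookup * (1, inputs...) must appear among { s_ltable * (1, table...) }.
fn gated(row: &[i64], sel: i64) -> Vec<i64> {
    let mut v = vec![sel]; // the leading Expression::Constant(F::ONE), gated
    v.extend(row.iter().map(|x| x * sel));
    v
}

fn main() {
    let table = [[3, 4], [5, 6], [0, 0]];
    let table_sels = [1, 1, 0]; // last row disabled (e.g. padding/blinding)
    let gated_table: Vec<Vec<i64>> = table
        .iter()
        .zip(table_sels)
        .map(|(row, sel)| gated(row, sel))
        .collect();

    // An enabled input row must literally be one of the enabled table rows.
    assert!(gated_table.contains(&gated(&[5, 6], 1)));
    assert!(!gated_table.contains(&gated(&[9, 9], 1)));
    // A disabled input row collapses to zeros and matches the disabled row.
    assert!(gated_table.contains(&gated(&[9, 9], 0)));
}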
@@ -745,72 +729,57 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
         }
 
         for t in references.iter() {
-            if !t.is_advice() || t.num_inner_cols() > 1 {
+            if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
                 return Err(CircuitError::WrongDynamicColumnType(t.name().to_string()));
             }
         }
 
-        // assert all tables have the same number of blocks
-        if references
-            .iter()
-            .map(|t| t.num_blocks())
-            .collect::<Vec<_>>()
-            .windows(2)
-            .any(|w| w[0] != w[1])
-        {
-            return Err(CircuitError::WrongDynamicColumnType(
-                "references inner cols".to_string(),
-            ));
-        }
-
         let one = Expression::Constant(F::ONE);
 
-        for q in 0..references[0].num_blocks() {
-            let s_reference = cs.complex_selector();
-
-            for x in 0..inputs[0].num_blocks() {
-                for y in 0..inputs[0].num_inner_cols() {
-                    let s_input = cs.complex_selector();
-
-                    cs.lookup_any("lookup", |cs| {
-                        let s_inputq = cs.query_selector(s_input);
-                        let mut expression = vec![];
-                        let s_referenceq = cs.query_selector(s_reference);
-                        let mut input_queries = vec![one.clone()];
-
-                        for input in inputs {
-                            input_queries.push(match input {
-                                VarTensor::Advice { inner: advices, .. } => {
-                                    cs.query_advice(advices[x][y], Rotation(0))
-                                }
-                                _ => unreachable!(),
-                            });
-                        }
-
-                        let mut ref_queries = vec![one.clone()];
-                        for reference in references {
-                            ref_queries.push(match reference {
-                                VarTensor::Advice { inner: advices, .. } => {
-                                    cs.query_advice(advices[q][0], Rotation(0))
-                                }
-                                _ => unreachable!(),
-                            });
-                        }
-
-                        let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone());
-                        let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone());
-                        expression.extend(lhs.zip(rhs));
-
-                        expression
-                    });
-                    self.shuffles
-                        .input_selectors
-                        .entry((q, (x, y)))
-                        .or_insert(s_input);
-                }
-            }
-            self.shuffles.reference_selectors.push(s_reference);
-        }
+        let s_reference = cs.complex_selector();
+
+        for x in 0..inputs[0].num_blocks() {
+            for y in 0..inputs[0].num_inner_cols() {
+                let s_input = cs.complex_selector();
+
+                cs.lookup_any("lookup", |cs| {
+                    let s_inputq = cs.query_selector(s_input);
+                    let mut expression = vec![];
+                    let s_referenceq = cs.query_selector(s_reference);
+                    let mut input_queries = vec![one.clone()];
+
+                    for input in inputs {
+                        input_queries.push(match input {
+                            VarTensor::Advice { inner: advices, .. } => {
+                                cs.query_advice(advices[x][y], Rotation(0))
+                            }
+                            _ => unreachable!(),
+                        });
+                    }
+
+                    let mut ref_queries = vec![one.clone()];
+                    for reference in references {
+                        ref_queries.push(match reference {
+                            VarTensor::Advice { inner: advices, .. } => {
+                                cs.query_advice(advices[0][0], Rotation(0))
+                            }
+                            _ => unreachable!(),
+                        });
+                    }
+
+                    let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone());
+                    let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone());
+                    expression.extend(lhs.zip(rhs));
+
+                    expression
+                });
+                self.shuffles
+                    .input_selectors
+                    .entry((x, y))
+                    .or_insert(s_input);
+            }
+        }
+        self.shuffles.reference_selectors.push(s_reference);
 
         // if we haven't previously initialized the input/output, do so now
         if self.shuffles.references.is_empty() {
@@ -45,8 +45,6 @@ pub enum HybridOp {
     ReduceArgMin {
         dim: usize,
     },
-    Max,
-    Min,
     Softmax {
         input_scale: utils::F32,
         output_scale: utils::F32,
@@ -81,8 +79,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
             | HybridOp::Less { .. }
             | HybridOp::Equals { .. }
             | HybridOp::GreaterEqual { .. }
-            | HybridOp::Max
-            | HybridOp::Min
             | HybridOp::LessEqual { .. } => {
                 vec![0, 1]
             }
@@ -97,8 +93,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
 
     fn as_string(&self) -> String {
         match self {
-            HybridOp::Max => format!("MAX"),
-            HybridOp::Min => format!("MIN"),
             HybridOp::Recip {
                 input_scale,
                 output_scale,
@@ -168,8 +162,6 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Hybrid
         values: &[ValTensor<F>],
     ) -> Result<Option<ValTensor<F>>, CircuitError> {
         Ok(Some(match self {
-            HybridOp::Max => layouts::max_comp(config, region, values[..].try_into()?)?,
-            HybridOp::Min => layouts::min_comp(config, region, values[..].try_into()?)?,
             HybridOp::SumPool {
                 padding,
                 stride,
@@ -979,16 +979,8 @@ pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd + std::hash
     let (lookup_0, lookup_1) = (lookups[0].clone(), lookups[1].clone());
     let (table_0, table_1) = (tables[0].clone(), tables[1].clone());
 
-    let (table_0, flush_len_0) =
-        region.assign_dynamic_lookup(&config.dynamic_lookups.tables[0], &table_0)?;
-    let (_table_1, flush_len_1) =
-        region.assign_dynamic_lookup(&config.dynamic_lookups.tables[1], &table_1)?;
-    if flush_len_0 != flush_len_1 {
-        return Err(CircuitError::MismatchedLookupTableLength(
-            flush_len_0,
-            flush_len_1,
-        ));
-    }
+    let table_0 = region.assign_dynamic_lookup(&config.dynamic_lookups.tables[0], &table_0)?;
+    let _table_1 = region.assign_dynamic_lookup(&config.dynamic_lookups.tables[1], &table_1)?;
     let table_len = table_0.len();
 
     trace!("assigning tables took: {:?}", start.elapsed());
@@ -1013,21 +1005,13 @@ pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd + std::hash
     trace!("assigning lookup index took: {:?}", start.elapsed());
 
-    let mut lookup_block = 0;
-
     if !region.is_dummy() {
         (0..table_len)
             .map(|i| {
-                let (x, _, z) = config.dynamic_lookups.tables[0]
-                    .cartesian_coord(region.combined_dynamic_shuffle_coord() + i + flush_len_0);
-
-                if lookup_block != x {
-                    lookup_block = x;
-                }
-
-                let table_selector = config.dynamic_lookups.table_selectors[lookup_block];
+                let table_selector = config.dynamic_lookups.table_selectors[0];
+                let (_, _, z) = config.dynamic_lookups.tables[0]
+                    .cartesian_coord(region.combined_dynamic_shuffle_coord() + i);
                 region.enable(Some(&table_selector), z)?;
 
                 Ok(())
             })
             .collect::<Result<Vec<_>, CircuitError>>()?;
@@ -1039,23 +1023,20 @@ pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd + std::hash
             .map(|i| {
                 let (x, y, z) =
                     config.dynamic_lookups.inputs[0].cartesian_coord(region.linear_coord() + i);
 
                 let lookup_selector = config
                     .dynamic_lookups
                     .lookup_selectors
-                    .get(&(lookup_block, (x, y)))
+                    .get(&(x, y))
                     .ok_or(CircuitError::MissingSelectors(format!("{:?}", (x, y))))?;
 
                 region.enable(Some(lookup_selector), z)?;
 
-                // region.enable(Some(lookup_selector), z)?;
-
                 Ok(())
             })
             .collect::<Result<Vec<_>, CircuitError>>()?;
     }
 
-    region.increment_dynamic_lookup_col_coord(table_len + flush_len_0);
+    region.increment_dynamic_lookup_col_coord(table_len);
     region.increment_dynamic_lookup_index(1);
     region.increment(lookup_len);
@@ -1083,33 +1064,22 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
         ));
     }
 
-    let (reference, flush_len_ref) =
-        region.assign_shuffle(&config.shuffles.references[0], &reference)?;
+    let reference = region.assign_shuffle(&config.shuffles.references[0], &reference)?;
     let reference_len = reference.len();
 
     // now create a vartensor of constants for the shuffle index
     let index = create_constant_tensor(F::from(shuffle_index as u64), reference_len);
-    let (index, flush_len_index) = region.assign_shuffle(&config.shuffles.references[1], &index)?;
-
-    if flush_len_index != flush_len_ref {
-        return Err(CircuitError::MismatchedShuffleLength(
-            flush_len_index,
-            flush_len_ref,
-        ));
-    }
+    let index = region.assign_shuffle(&config.shuffles.references[1], &index)?;
 
     let input = region.assign(&config.shuffles.inputs[0], &input)?;
     region.assign(&config.shuffles.inputs[1], &index)?;
 
-    let mut shuffle_block = 0;
-
     if !region.is_dummy() {
         (0..reference_len)
             .map(|i| {
-                let (x, _, z) = config.shuffles.references[0]
-                    .cartesian_coord(region.combined_dynamic_shuffle_coord() + i + flush_len_ref);
-                shuffle_block = x;
-                let ref_selector = config.shuffles.reference_selectors[shuffle_block];
+                let ref_selector = config.shuffles.reference_selectors[0];
+                let (_, _, z) = config.shuffles.references[0]
+                    .cartesian_coord(region.combined_dynamic_shuffle_coord() + i);
                 region.enable(Some(&ref_selector), z)?;
                 Ok(())
             })
@@ -1125,7 +1095,7 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
                 let input_selector = config
                     .shuffles
                     .input_selectors
-                    .get(&(shuffle_block, (x, y)))
+                    .get(&(x, y))
                     .ok_or(CircuitError::MissingSelectors(format!("{:?}", (x, y))))?;
 
                 region.enable(Some(input_selector), z)?;
@@ -1135,7 +1105,7 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
             .collect::<Result<Vec<_>, CircuitError>>()?;
     }
 
-    region.increment_shuffle_col_coord(reference_len + flush_len_ref);
+    region.increment_shuffle_col_coord(reference_len);
     region.increment_shuffle_index(1);
     region.increment(reference_len);
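As the assignments above show, each shuffle instance tags both the reference column and the input column with the same constant index, so an input row can only ever match reference rows of its own instance. A rough model of what the underlying lookup provides, under the assumption that it enforces containment of tagged pairs (length is handled separately, and this sketch deliberately does not enforce multiplicities):

// Models the (value, index) tagging used above as a containment check.
fn is_contained(input: &[i64], reference: &[i64], instance: i64) -> bool {
    let tagged_ref: Vec<(i64, i64)> =
        reference.iter().map(|&r| (r, instance)).collect();
    input.len() == reference.len()
        && input.iter().all(|&x| tagged_ref.contains(&(x, instance)))
}

fn main() {
    assert!(is_contained(&[3, 1, 2], &[1, 2, 3], 0)); // a permutation passes
    assert!(!is_contained(&[3, 1, 4], &[1, 2, 3], 0)); // 4 is not in the reference
}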
@@ -4155,48 +4125,6 @@ pub(crate) fn argmin<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
     Ok(assigned_argmin)
 }
 
-/// max layout
-pub(crate) fn max_comp<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
-    config: &BaseConfig<F>,
-    region: &mut RegionCtx<F>,
-    values: &[ValTensor<F>; 2],
-) -> Result<ValTensor<F>, CircuitError> {
-    let is_greater = greater(config, region, values)?;
-    let is_less = not(config, region, &[is_greater.clone()])?;
-
-    let max_val_p1 = pairwise(
-        config,
-        region,
-        &[values[0].clone(), is_greater],
-        BaseOp::Mult,
-    )?;
-
-    let max_val_p2 = pairwise(config, region, &[values[1].clone(), is_less], BaseOp::Mult)?;
-
-    pairwise(config, region, &[max_val_p1, max_val_p2], BaseOp::Add)
-}
-
-/// min comp layout
-pub(crate) fn min_comp<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
-    config: &BaseConfig<F>,
-    region: &mut RegionCtx<F>,
-    values: &[ValTensor<F>; 2],
-) -> Result<ValTensor<F>, CircuitError> {
-    let is_greater = greater(config, region, values)?;
-    let is_less = not(config, region, &[is_greater.clone()])?;
-
-    let min_val_p1 = pairwise(config, region, &[values[0].clone(), is_less], BaseOp::Mult)?;
-
-    let min_val_p2 = pairwise(
-        config,
-        region,
-        &[values[1].clone(), is_greater],
-        BaseOp::Mult,
-    )?;
-
-    pairwise(config, region, &[min_val_p1, min_val_p2], BaseOp::Add)
-}
-
 /// max layout
 pub(crate) fn max<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
     config: &BaseConfig<F>,
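For reference, the deleted layouts computed a two-tensor max/min from a comparison indicator: with g = (a > b) taken as 0 or 1, max(a, b) = a*g + b*(1 - g), and min swaps the roles. The same identity over plain integers, with comments mapping each step to the circuit ops used above:

// Models the removed max_comp layout: max(a, b) = a*g + b*(1 - g).
fn max_comp(a: i64, b: i64) -> i64 {
    let is_greater = (a > b) as i64; // circuit: greater()
    let is_less = 1 - is_greater;    // circuit: not()
    a * is_greater + b * is_less     // circuit: two Mult and one Add pairwise ops
}

fn min_comp(a: i64, b: i64) -> i64 {
    let is_greater = (a > b) as i64;
    let is_less = 1 - is_greater;
    a * is_less + b * is_greater
}

fn main() {
    assert_eq!(max_comp(3, 7), 7);
    assert_eq!(min_comp(3, 7), 3);
    assert_eq!(max_comp(5, 5), 5); // ties: g = 0, so b is selected
}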
@@ -21,6 +21,14 @@ pub enum LookupOp {
     Cast {
         scale: utils::F32,
     },
+    Max {
+        scale: utils::F32,
+        a: utils::F32,
+    },
+    Min {
+        scale: utils::F32,
+        a: utils::F32,
+    },
     Ceil {
         scale: utils::F32,
     },
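Max and Min now take the lookup path instead of the hybrid comparison path: the constant operand a and the input scale are baked into the table, so each element is compared against a single threshold rather than against a second tensor. A sketch of the assumed element-wise semantics, reading the table as clamping a fixed-point input x (at scale s) against a*s; that reading matches the ONNX rewrite later in this diff, where Max(x, c) with constant c becomes LookupOp::Max:

// Assumed element-wise semantics of the new LookupOp::Max / LookupOp::Min:
// inputs are fixed-point integers at `scale`, `a` is a real constant.
fn max_lookup(x: i64, scale: f32, a: f32) -> i64 {
    let threshold = (a * scale).round() as i64;
    x.max(threshold)
}

fn min_lookup(x: i64, scale: f32, a: f32) -> i64 {
    let threshold = (a * scale).round() as i64;
    x.min(threshold)
}

fn main() {
    // scale 128: the real value 0.5 is represented as 64
    assert_eq!(max_lookup(-10, 128.0, 0.5), 64); // clamped up to 0.5
    assert_eq!(max_lookup(100, 128.0, 0.5), 100);
    assert_eq!(min_lookup(100, 128.0, 0.5), 64); // clamped down
}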
@@ -121,6 +129,8 @@ impl LookupOp {
             LookupOp::RoundHalfToEven { scale } => format!("round_half_to_even_{}", scale),
             LookupOp::Pow { scale, a } => format!("pow_{}_{}", scale, a),
             LookupOp::KroneckerDelta => "kronecker_delta".into(),
+            LookupOp::Max { scale, a } => format!("max_{}_{}", scale, a),
+            LookupOp::Min { scale, a } => format!("min_{}_{}", scale, a),
             LookupOp::Div { denom } => format!("div_{}", denom),
             LookupOp::Cast { scale } => format!("cast_{}", scale),
             LookupOp::Recip {
@@ -176,6 +186,12 @@ impl LookupOp {
             LookupOp::KroneckerDelta => {
                 Ok::<_, TensorError>(tensor::ops::nonlinearities::kronecker_delta(&x))
             }
+            LookupOp::Max { scale, a } => Ok::<_, TensorError>(
+                tensor::ops::nonlinearities::max(&x, scale.0.into(), a.0.into()),
+            ),
+            LookupOp::Min { scale, a } => Ok::<_, TensorError>(
+                tensor::ops::nonlinearities::min(&x, scale.0.into(), a.0.into()),
+            ),
             LookupOp::Div { denom } => Ok::<_, TensorError>(
                 tensor::ops::nonlinearities::const_div(&x, f32::from(*denom).into()),
             ),
@@ -273,6 +289,8 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Lookup
             LookupOp::RoundHalfToEven { scale } => format!("ROUND_HALF_TO_EVEN(scale={})", scale),
             LookupOp::Pow { a, scale } => format!("POW(scale={}, exponent={})", scale, a),
             LookupOp::KroneckerDelta => "K_DELTA".into(),
+            LookupOp::Max { scale, a } => format!("MAX(scale={}, a={})", scale, a),
+            LookupOp::Min { scale, a } => format!("MIN(scale={}, a={})", scale, a),
             LookupOp::Recip {
                 input_scale,
                 output_scale,
@@ -255,7 +255,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
         self.raw_values = Tensor::new(None, &[0]).unwrap();
     }
 
-    /// Pre-assign a value
+    ///
     pub fn pre_assign(&mut self, val: ValTensor<F>) {
         self.pre_assigned_val = Some(val)
     }
@@ -180,7 +180,6 @@ pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
     statistics: RegionStatistics,
     settings: RegionSettings,
     assigned_constants: ConstantsMap<F>,
-    max_dynamic_input_len: usize,
 }
 
 impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a, F> {
@@ -194,16 +193,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
         self.settings.legs
     }
 
-    /// get the max dynamic input len
-    pub fn max_dynamic_input_len(&self) -> usize {
-        self.max_dynamic_input_len
-    }
-
     #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
     ///
     pub fn debug_report(&self) {
         log::debug!(
-            "(rows={}, coord={}, constants={}, max_lookup_inputs={}, min_lookup_inputs={}, max_range_size={}, dynamic_lookup_col_coord={}, shuffle_col_coord={}, max_dynamic_input_len={})",
+            "(rows={}, coord={}, constants={}, max_lookup_inputs={}, min_lookup_inputs={}, max_range_size={}, dynamic_lookup_col_coord={}, shuffle_col_coord={})",
             self.row().to_string().blue(),
             self.linear_coord().to_string().yellow(),
             self.total_constants().to_string().red(),
@@ -211,9 +205,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
             self.min_lookup_inputs().to_string().green(),
             self.max_range_size().to_string().green(),
             self.dynamic_lookup_col_coord().to_string().green(),
-            self.shuffle_col_coord().to_string().green(),
-            self.max_dynamic_input_len().to_string().green()
-        );
+            self.shuffle_col_coord().to_string().green());
     }
 
     ///
@@ -231,11 +223,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
         self.dynamic_lookup_index.index += n;
     }
 
-    /// increment the max dynamic input len
-    pub fn update_max_dynamic_input_len(&mut self, n: usize) {
-        self.max_dynamic_input_len = self.max_dynamic_input_len.max(n);
-    }
-
     ///
     pub fn increment_dynamic_lookup_col_coord(&mut self, n: usize) {
         self.dynamic_lookup_index.col_coord += n;
@@ -287,7 +274,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
             statistics: RegionStatistics::default(),
             settings: RegionSettings::all_true(decomp_base, decomp_legs),
             assigned_constants: HashMap::new(),
-            max_dynamic_input_len: 0,
         }
     }
@@ -324,7 +310,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
             statistics: RegionStatistics::default(),
             settings,
             assigned_constants: HashMap::new(),
-            max_dynamic_input_len: 0,
         }
     }
@@ -346,7 +331,6 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
             statistics: RegionStatistics::default(),
             settings,
             assigned_constants: HashMap::new(),
-            max_dynamic_input_len: 0,
         }
     }
@@ -599,12 +583,9 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
         &mut self,
         var: &VarTensor,
         values: &ValTensor<F>,
-    ) -> Result<(ValTensor<F>, usize), CircuitError> {
-        self.update_max_dynamic_input_len(values.len());
-
+    ) -> Result<ValTensor<F>, CircuitError> {
         if let Some(region) = &self.region {
-            Ok(var.assign_exact_column(
+            Ok(var.assign(
                 &mut region.borrow_mut(),
                 self.combined_dynamic_shuffle_coord(),
                 values,
@@ -615,11 +596,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
                 let values_map = values.create_constants_map_iterator();
                 self.assigned_constants.par_extend(values_map);
             }
 
-            let flush_len = var.get_column_flush(self.combined_dynamic_shuffle_coord(), values)?;
-
-            // get the diff between the current column and the next row
-            Ok((values.clone(), flush_len))
+            Ok(values.clone())
         }
     }
@@ -628,7 +605,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a
         &mut self,
         var: &VarTensor,
         values: &ValTensor<F>,
-    ) -> Result<(ValTensor<F>, usize), CircuitError> {
+    ) -> Result<ValTensor<F>, CircuitError> {
         self.assign_dynamic_lookup(var, values)
     }
@@ -1516,7 +1516,7 @@ mod add_w_shape_casting {
             // parameters
             let a = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1))));
 
-            let b = Tensor::from((0..1).map(|i| Value::known(F::from(i + 1))));
+            let b = Tensor::from((0..1).map(|i| Value::known(F::from(i as u64 + 1))));
 
             let circuit = MyCircuit::<F> {
                 inputs: [ValTensor::from(a), ValTensor::from(b)],
@@ -1203,7 +1203,6 @@ pub(crate) async fn calibrate(
             num_rows: new_settings.num_rows,
             total_assignments: new_settings.total_assignments,
             total_const_size: new_settings.total_const_size,
             total_dynamic_col_size: new_settings.total_dynamic_col_size,
-            max_dynamic_input_len: new_settings.max_dynamic_input_len,
             ..settings.clone()
         };
@@ -1321,9 +1320,7 @@ pub(crate) async fn calibrate(
     let lookup_log_rows = best_params.lookup_log_rows_with_blinding();
     let module_log_row = best_params.module_constraint_logrows_with_blinding();
     let instance_logrows = best_params.log2_total_instances_with_blinding();
-    let dynamic_lookup_logrows =
-        best_params.min_dynamic_lookup_and_shuffle_logrows_with_blinding();
-
+    let dynamic_lookup_logrows = best_params.dynamic_lookup_and_shuffle_logrows_with_blinding();
     let range_check_logrows = best_params.range_check_log_rows_with_blinding();
 
     let mut reduction = std::cmp::max(lookup_log_rows, module_log_row);
@@ -408,8 +408,6 @@ pub struct GraphSettings {
     pub total_const_size: usize,
     /// total dynamic column size
     pub total_dynamic_col_size: usize,
-    /// max dynamic column input length
-    pub max_dynamic_input_len: usize,
     /// number of dynamic lookups
     pub num_dynamic_lookups: usize,
     /// number of shuffles
@@ -487,13 +485,6 @@ impl GraphSettings {
             .ceil() as u32
     }
 
-    /// calculate the number of rows required for the dynamic lookup and shuffle
-    pub fn min_dynamic_lookup_and_shuffle_logrows_with_blinding(&self) -> u32 {
-        (self.max_dynamic_input_len as f64 + RESERVED_BLINDING_ROWS as f64)
-            .log2()
-            .ceil() as u32
-    }
-
     fn dynamic_lookup_and_shuffle_col_size(&self) -> usize {
         self.total_dynamic_col_size + self.total_shuffle_col_size
     }
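The deleted helper sized the dynamic-lookup/shuffle region from the longest single assignment rather than the total column size: logrows = ceil(log2(max_dynamic_input_len + RESERVED_BLINDING_ROWS)). With the per-assignment bound gone, calibration falls back to dynamic_lookup_and_shuffle_logrows_with_blinding (see the calibrate hunk above). The removed formula, extracted into a standalone function:

// The removed sizing rule: the smallest k such that 2^k rows fit the
// longest dynamic input plus blinding rows. The constant's exact value
// in the source is not shown here; 6 is an assumption for illustration.
const RESERVED_BLINDING_ROWS: usize = 6;

fn min_dynamic_logrows(max_dynamic_input_len: usize) -> u32 {
    ((max_dynamic_input_len as f64 + RESERVED_BLINDING_ROWS as f64)
        .log2()
        .ceil()) as u32
}

fn main() {
    assert_eq!(min_dynamic_logrows(1000), 10); // 1006 rows fit in 2^10
    assert_eq!(min_dynamic_logrows(1020), 11); // 1026 rows need 2^11
}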
@@ -103,8 +103,6 @@ pub struct DummyPassRes {
     pub num_rows: usize,
     /// num dynamic lookups
     pub num_dynamic_lookups: usize,
-    /// max dynamic lookup input len
-    pub max_dynamic_input_len: usize,
     /// dynamic lookup col size
     pub dynamic_lookup_col_coord: usize,
     /// num shuffles
@@ -362,14 +360,6 @@ impl NodeType {
             NodeType::SubGraph { .. } => SupportedOp::Unknown(Unknown),
         }
     }
 
-    /// check if it is a softmax
-    pub fn is_softmax(&self) -> bool {
-        match self {
-            NodeType::Node(n) => n.is_softmax(),
-            NodeType::SubGraph { .. } => false,
-        }
-    }
 }
 
 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
@@ -572,7 +562,6 @@ impl Model {
             num_rows: res.num_rows,
             total_assignments: res.linear_coord,
             required_lookups: res.lookup_ops.into_iter().collect(),
-            max_dynamic_input_len: res.max_dynamic_input_len,
             required_range_checks: res.range_checks.into_iter().collect(),
             model_output_scales: self.graph.get_output_scales()?,
             model_input_scales: self.graph.get_input_scales(),
@@ -1476,7 +1465,6 @@ impl Model {
         let res = DummyPassRes {
             num_rows: region.row(),
             linear_coord: region.linear_coord(),
-            max_dynamic_input_len: region.max_dynamic_input_len(),
             total_const_size: region.total_constants(),
             lookup_ops: region.used_lookups(),
             range_checks: region.used_range_checks(),
@@ -623,15 +623,6 @@ impl Node {
             num_uses,
         })
     }
 
-    /// check if it is a softmax node
-    pub fn is_softmax(&self) -> bool {
-        if let SupportedOp::Hybrid(HybridOp::Softmax { .. }) = self.opkind {
-            true
-        } else {
-            false
-        }
-    }
 }
 
 #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
@@ -763,38 +763,81 @@ pub fn new_op_from_onnx(
                     .map(|(i, _)| i)
                     .collect::<Vec<_>>();
 
-                if inputs.len() == 2 {
-                    if const_inputs.len() > 0 {
-                        let const_idx = const_inputs[0];
-                        let boxed_op = inputs[const_idx].opkind();
-                        let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
-                            if c.len() == 1 {
-                                c[0]
-                            } else {
-                                return Err(GraphError::InvalidDims(idx, "max".to_string()));
-                            }
-                        } else {
-                            return Err(GraphError::OpMismatch(idx, "Max".to_string()));
-                        };
-                        if unit == 0. {
-                            if let Some(node) = inputs.get_mut(const_idx) {
-                                node.decrement_use();
-                                deleted_indices.push(const_idx);
-                            }
-                            SupportedOp::Linear(PolyOp::ReLU)
-                        } else {
-                            SupportedOp::Hybrid(HybridOp::Max)
-                        }
-                    } else {
-                        SupportedOp::Hybrid(HybridOp::Max)
-                    }
-                } else {
-                    return Err(GraphError::InvalidDims(idx, "max".to_string()));
-                }
+                if const_inputs.len() != 1 {
+                    return Err(GraphError::OpMismatch(idx, "Max".to_string()));
+                }
+
+                let const_idx = const_inputs[0];
+                let boxed_op = inputs[const_idx].opkind();
+                let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
+                    if c.len() == 1 {
+                        c[0]
+                    } else {
+                        return Err(GraphError::InvalidDims(idx, "max".to_string()));
+                    }
+                } else {
+                    return Err(GraphError::OpMismatch(idx, "Max".to_string()));
+                };
+
+                if inputs.len() == 2 {
+                    if let Some(node) = inputs.get_mut(const_idx) {
+                        node.decrement_use();
+                        deleted_indices.push(const_idx);
+                    }
+                    if unit == 0. {
+                        SupportedOp::Linear(PolyOp::ReLU)
+                    } else {
+                        // get the non-constant index
+                        let non_const_idx = if const_idx == 0 { 1 } else { 0 };
+                        SupportedOp::Nonlinear(LookupOp::Max {
+                            scale: scale_to_multiplier(inputs[non_const_idx].out_scales()[0]).into(),
+                            a: crate::circuit::utils::F32(unit),
+                        })
+                    }
+                } else {
+                    return Err(GraphError::InvalidDims(idx, "max".to_string()));
+                }
             }
             "Min" => {
                 // Extract the min value
                 // first find the input that is a constant
                 // and then extract the value
                 let const_inputs = inputs
                     .iter()
                     .enumerate()
                     .filter(|(_, n)| n.is_constant())
                     .map(|(i, _)| i)
                     .collect::<Vec<_>>();
 
+                if const_inputs.len() != 1 {
+                    return Err(GraphError::OpMismatch(idx, "Min".to_string()));
+                }
+
+                let const_idx = const_inputs[0];
+                let boxed_op = inputs[const_idx].opkind();
+                let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
+                    if c.len() == 1 {
+                        c[0]
+                    } else {
+                        return Err(GraphError::InvalidDims(idx, "min".to_string()));
+                    }
+                } else {
+                    return Err(GraphError::OpMismatch(idx, "Min".to_string()));
+                };
+
                 if inputs.len() == 2 {
-                    SupportedOp::Hybrid(HybridOp::Min)
+                    if let Some(node) = inputs.get_mut(const_idx) {
+                        node.decrement_use();
+                        deleted_indices.push(const_idx);
+                    }
+
+                    // get the non-constant index
+                    let non_const_idx = if const_idx == 0 { 1 } else { 0 };
+
+                    SupportedOp::Nonlinear(LookupOp::Min {
+                        scale: scale_to_multiplier(inputs[non_const_idx].out_scales()[0]).into(),
+                        a: crate::circuit::utils::F32(unit),
+                    })
                 } else {
                     return Err(GraphError::InvalidDims(idx, "min".to_string()));
                 }
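Net effect of the rewrite: a two-input ONNX Max or Min must now have exactly one constant operand; the constant node is consumed (decrement_use plus deleted_indices), and the op lowers either to ReLU (Max against 0) or to the new scaled lookup. A compressed sketch of the Max decision tree, with hypothetical local types standing in for SupportedOp and GraphError:

// Hypothetical mini-model of the new Max lowering (not ezkl's actual types):
// one constant operand required, Max(x, 0) -> ReLU, otherwise a lookup
// parameterized by the constant and the non-constant input's scale.
#[derive(Debug, PartialEq)]
enum Lowered {
    Relu,
    MaxLookup { scale: f32, a: f32 },
}

fn lower_max(num_inputs: usize, constant: Option<f32>, input_scale: f32) -> Result<Lowered, String> {
    let unit = constant.ok_or("Max requires exactly one constant input")?;
    if num_inputs != 2 {
        return Err("Max supports exactly two inputs".into());
    }
    if unit == 0.0 {
        Ok(Lowered::Relu)
    } else {
        Ok(Lowered::MaxLookup { scale: input_scale, a: unit })
    }
}

fn main() {
    assert_eq!(lower_max(2, Some(0.0), 128.0), Ok(Lowered::Relu));
    assert_eq!(
        lower_max(2, Some(1.5), 128.0),
        Ok(Lowered::MaxLookup { scale: 128.0, a: 1.5 })
    );
    assert!(lower_max(2, None, 128.0).is_err());
}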
@@ -443,7 +443,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
             let dynamic_lookup =
                 VarTensor::new_advice(cs, logrows, 1, dynamic_lookup_and_shuffle_size);
             if dynamic_lookup.num_blocks() > 1 {
-                warn!("dynamic lookup has {} blocks", dynamic_lookup.num_blocks());
+                panic!("dynamic lookup or shuffle should only have one block");
             };
             advices.push(dynamic_lookup);
         }
@@ -541,7 +541,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
             let mut is_empty = true;
             x.map(|_| is_empty = false);
             if is_empty {
-                Ok::<_, TensorError>(vec![Value::<F>::unknown(); n + 1])
+                return Ok::<_, TensorError>(vec![Value::<F>::unknown(); n + 1]);
             } else {
                 let mut res = vec![Value::unknown(); n + 1];
                 let mut int_rep = 0;
@@ -396,53 +396,6 @@ impl VarTensor {
         Ok(res)
     }
 
-    /// Helper function to get the remaining size of the column
-    pub fn get_column_flush<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
-        &self,
-        offset: usize,
-        values: &ValTensor<F>,
-    ) -> Result<usize, halo2_proofs::plonk::Error> {
-        if values.len() > self.col_size() {
-            error!("Values are too large for the column");
-            return Err(halo2_proofs::plonk::Error::Synthesis);
-        }
-
-        // this can only be called on columns that have a single inner column
-        if self.num_inner_cols() != 1 {
-            error!("This function can only be called on columns with a single inner column");
-            return Err(halo2_proofs::plonk::Error::Synthesis);
-        }
-
-        // check if the values fit in the remaining space of the column
-        let current_cartesian = self.cartesian_coord(offset);
-        let final_cartesian = self.cartesian_coord(offset + values.len());
-
-        let mut flush_len = 0;
-        if current_cartesian.0 != final_cartesian.0 {
-            debug!("Values overflow the column, flushing to next column");
-            // diff is the number of values that overflow the column
-            flush_len += self.col_size() - current_cartesian.2;
-        }
-
-        Ok(flush_len)
-    }
-
-    /// Assigns [ValTensor] to the columns of the inner tensor. Whereby the values are assigned to a single column, without overflowing.
-    /// So for instance if we are assigning 10 values and we are at index 18 of the column, and the columns are of length 20, we skip the last 2 values of current column and start from the beginning of the next column.
-    pub fn assign_exact_column<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
-        &self,
-        region: &mut Region<F>,
-        offset: usize,
-        values: &ValTensor<F>,
-        constants: &mut ConstantsMap<F>,
-    ) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
-        let flush_len = self.get_column_flush(offset, values)?;
-
-        let assigned_vals = self.assign(region, offset + flush_len, values, constants)?;
-
-        Ok((assigned_vals, flush_len))
-    }
-
     /// Assigns specific values (`ValTensor`) to the columns of the inner tensor but allows for column wrapping for accumulated operations.
     /// Duplication occurs by copying the last cell of the column to the first cell next column and creating a copy constraint between the two.
     pub fn dummy_assign_with_duplication<
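The removed helper computed how many cells to skip so an assignment never straddles a column boundary: if offset + len crosses into the next column, the remainder of the current column is "flushed" and assignment restarts at the top of the next column. The arithmetic, extracted into a self-contained sketch (the original additionally errors when the tensor has more than one inner column):

// Standalone model of the deleted get_column_flush arithmetic.
// `col_size` is the usable height of one column; the division mirrors
// cartesian_coord's (column, row-within-column) split.
fn column_flush(col_size: usize, offset: usize, len: usize) -> Option<usize> {
    if len > col_size {
        return None; // values can never fit in a single column
    }
    let current_col = offset / col_size;
    let final_col = (offset + len) / col_size;
    if current_col != final_col {
        // skip the tail of the current column and restart at the next one
        Some(col_size - offset % col_size)
    } else {
        Some(0)
    }
}

fn main() {
    // 10 values starting at row 18 of a height-20 column: skip 2 cells.
    assert_eq!(column_flush(20, 18, 10), Some(2));
    // fits in place: no flush needed
    assert_eq!(column_flush(20, 3, 10), Some(0));
}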
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -33,7 +33,6 @@
   "total_assignments": 92,
   "total_const_size": 3,
   "total_dynamic_col_size": 0,
-  "max_dynamic_input_len": 0,
   "num_dynamic_lookups": 0,
   "num_shuffles": 0,
   "total_shuffle_col_size": 0,
@@ -124,40 +124,41 @@ mod py_tests {
     }
 
     const TESTS: [&str; 34] = [
-        "ezkl_demo_batch.ipynb",                   // 0
-        "proof_splitting.ipynb",                   // 1
-        "variance.ipynb",                          // 2
-        "mnist_gan.ipynb",                         // 3
-        "keras_simple_demo.ipynb",                 // 4
-        "mnist_gan_proof_splitting.ipynb",         // 5
-        "hashed_vis.ipynb",                        // 6
-        "simple_demo_all_public.ipynb",            // 7
-        "data_attest.ipynb",                       // 8
-        "little_transformer.ipynb",                // 9
-        "simple_demo_aggregated_proofs.ipynb",     // 10
-        "ezkl_demo.ipynb",                         // 11
-        "lstm.ipynb",                              // 12
-        "set_membership.ipynb",                    // 13
-        "decision_tree.ipynb",                     // 14
-        "random_forest.ipynb",                     // 15
-        "gradient_boosted_trees.ipynb",            // 16
-        "xgboost.ipynb",                           // 17
-        "lightgbm.ipynb",                          // 18
-        "svm.ipynb",                               // 19
-        "simple_demo_public_input_output.ipynb",   // 20
-        "simple_demo_public_network_output.ipynb", // 21
-        "gcn.ipynb",                               // 22
-        "linear_regression.ipynb",                 // 23
-        "stacked_regression.ipynb",                // 24
-        "data_attest_hashed.ipynb",                // 25
-        "kzg_vis.ipynb",                           // 26
-        "kmeans.ipynb",                            // 27
-        "solvency.ipynb",                          // 28
-        "sklearn_mlp.ipynb",                       // 29
-        "generalized_inverse.ipynb",               // 30
-        "mnist_classifier.ipynb",                  // 31
-        "world_rotation.ipynb",                    // 32
-        "logistic_regression.ipynb",               // 33
+        "ezkl_demo_batch.ipynb",
+        "proof_splitting.ipynb", // 0
+        "variance.ipynb",
+        "mnist_gan.ipynb",
+        // "mnist_vae.ipynb",
+        "keras_simple_demo.ipynb",
+        "mnist_gan_proof_splitting.ipynb", // 4
+        "hashed_vis.ipynb",                // 5
+        "simple_demo_all_public.ipynb",
+        "data_attest.ipynb",
+        "little_transformer.ipynb",
+        "simple_demo_aggregated_proofs.ipynb",
+        "ezkl_demo.ipynb", // 10
+        "lstm.ipynb",
+        "set_membership.ipynb", // 12
+        "decision_tree.ipynb",
+        "random_forest.ipynb",
+        "gradient_boosted_trees.ipynb", // 15
+        "xgboost.ipynb",
+        "lightgbm.ipynb",
+        "svm.ipynb",
+        "simple_demo_public_input_output.ipynb",
+        "simple_demo_public_network_output.ipynb", // 20
+        "gcn.ipynb",
+        "linear_regression.ipynb",
+        "stacked_regression.ipynb",
+        "data_attest_hashed.ipynb",
+        "kzg_vis.ipynb", // 25
+        "kmeans.ipynb",
+        "solvency.ipynb",
+        "sklearn_mlp.ipynb",
+        "generalized_inverse.ipynb",
+        "mnist_classifier.ipynb", // 30
+        "world_rotation.ipynb",
+        "logistic_regression.ipynb",
     ];
 
     macro_rules! test_func {