Compare commits

...

1 Commit

Author  SHA1        Message                               Date
dante   d51cba589a  feat: dynamic lookup overflow (#853)  2024-10-23 23:12:00 -04:00
17 changed files with 279 additions and 107 deletions

Cargo.lock (generated)

@@ -2543,6 +2543,12 @@ dependencies = [
  "allocator-api2",
 ]

+[[package]]
+name = "hashbrown"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb"
+
 [[package]]
 name = "heck"
 version = "0.4.1"
@@ -2811,12 +2817,12 @@ dependencies = [

 [[package]]
 name = "indexmap"
-version = "2.2.5"
+version = "2.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4"
+checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
 dependencies = [
  "equivalent",
- "hashbrown 0.14.3",
+ "hashbrown 0.15.0",
 ]

 [[package]]
@@ -5628,9 +5634,9 @@ checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"

 [[package]]
 name = "tower-service"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
+checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"

 [[package]]
 name = "tracing"


@@ -56,6 +56,7 @@ alloy = { git = "https://github.com/alloy-rs/alloy", version = "0.1.0", rev = "5
     "rpc-types-eth",
     "signer-wallet",
+    "node-bindings",
 ], optional = true }
 foundry-compilers = { version = "0.4.1", features = ["svm-solc"], optional = true }
 ethabi = { version = "18", optional = true }
 


@@ -177,7 +177,7 @@ impl<'source> FromPyObject<'source> for Tolerance {
 #[derive(Clone, Debug, Default)]
 pub struct DynamicLookups {
     /// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
-    pub lookup_selectors: BTreeMap<(usize, usize), Selector>,
+    pub lookup_selectors: BTreeMap<(usize, (usize, usize)), Selector>,
     /// Selectors for the dynamic lookup tables
     pub table_selectors: Vec<Selector>,
     /// Inputs:
@@ -209,7 +209,7 @@ impl DynamicLookups {
 #[derive(Clone, Debug, Default)]
 pub struct Shuffles {
     /// [Selector]s generated when configuring the layer. We use a [BTreeMap] as we expect to configure many dynamic lookup ops.
-    pub input_selectors: BTreeMap<(usize, usize), Selector>,
+    pub input_selectors: BTreeMap<(usize, (usize, usize)), Selector>,
     /// Selectors for the dynamic lookup tables
     pub reference_selectors: Vec<Selector>,
     /// Inputs:
@@ -646,57 +646,73 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
         }

         for t in tables.iter() {
-            if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
+            if !t.is_advice() || t.num_inner_cols() > 1 {
                 return Err(CircuitError::WrongDynamicColumnType(t.name().to_string()));
             }
         }

+        // assert all tables have the same number of inner columns
+        if tables
+            .iter()
+            .map(|t| t.num_blocks())
+            .collect::<Vec<_>>()
+            .windows(2)
+            .any(|w| w[0] != w[1])
+        {
+            return Err(CircuitError::WrongDynamicColumnType(
+                "tables inner cols".to_string(),
+            ));
+        }
+
         let one = Expression::Constant(F::ONE);

-        let s_ltable = cs.complex_selector();
+        for q in 0..tables[0].num_blocks() {
+            let s_ltable = cs.complex_selector();

-        for x in 0..lookups[0].num_blocks() {
-            for y in 0..lookups[0].num_inner_cols() {
-                let s_lookup = cs.complex_selector();
-
-                cs.lookup_any("lookup", |cs| {
-                    let s_lookupq = cs.query_selector(s_lookup);
-                    let mut expression = vec![];
-                    let s_ltableq = cs.query_selector(s_ltable);
-                    let mut lookup_queries = vec![one.clone()];
-
-                    for lookup in lookups {
-                        lookup_queries.push(match lookup {
-                            VarTensor::Advice { inner: advices, .. } => {
-                                cs.query_advice(advices[x][y], Rotation(0))
-                            }
-                            _ => unreachable!(),
-                        });
-                    }
-
-                    let mut table_queries = vec![one.clone()];
-                    for table in tables {
-                        table_queries.push(match table {
-                            VarTensor::Advice { inner: advices, .. } => {
-                                cs.query_advice(advices[0][0], Rotation(0))
-                            }
-                            _ => unreachable!(),
-                        });
-                    }
-
-                    let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone());
-                    let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone());
-                    expression.extend(lhs.zip(rhs));
-
-                    expression
-                });
-                self.dynamic_lookups
-                    .lookup_selectors
-                    .entry((x, y))
-                    .or_insert(s_lookup);
+            for x in 0..lookups[0].num_blocks() {
+                for y in 0..lookups[0].num_inner_cols() {
+                    let s_lookup = cs.complex_selector();
+
+                    cs.lookup_any("lookup", |cs| {
+                        let s_lookupq = cs.query_selector(s_lookup);
+                        let mut expression = vec![];
+                        let s_ltableq = cs.query_selector(s_ltable);
+                        let mut lookup_queries = vec![one.clone()];
+
+                        for lookup in lookups {
+                            lookup_queries.push(match lookup {
+                                VarTensor::Advice { inner: advices, .. } => {
+                                    cs.query_advice(advices[x][y], Rotation(0))
+                                }
+                                _ => unreachable!(),
+                            });
+                        }
+
+                        let mut table_queries = vec![one.clone()];
+                        for table in tables {
+                            table_queries.push(match table {
+                                VarTensor::Advice { inner: advices, .. } => {
+                                    cs.query_advice(advices[q][0], Rotation(0))
+                                }
+                                _ => unreachable!(),
+                            });
+                        }
+
+                        let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone());
+                        let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone());
+                        expression.extend(lhs.zip(rhs));
+
+                        expression
+                    });
+                    self.dynamic_lookups
+                        .lookup_selectors
+                        .entry((q, (x, y)))
+                        .or_insert(s_lookup);
+                }
             }
-        }

-        self.dynamic_lookups.table_selectors.push(s_ltable);
+            self.dynamic_lookups.table_selectors.push(s_ltable);
+        }

         // if we haven't previously initialized the input/output, do so now
         if self.dynamic_lookups.tables.is_empty() {
@@ -729,57 +745,72 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> {
         }

         for t in references.iter() {
-            if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 {
+            if !t.is_advice() || t.num_inner_cols() > 1 {
                 return Err(CircuitError::WrongDynamicColumnType(t.name().to_string()));
             }
         }

+        // assert all tables have the same number of blocks
+        if references
+            .iter()
+            .map(|t| t.num_blocks())
+            .collect::<Vec<_>>()
+            .windows(2)
+            .any(|w| w[0] != w[1])
+        {
+            return Err(CircuitError::WrongDynamicColumnType(
+                "references inner cols".to_string(),
+            ));
+        }
+
         let one = Expression::Constant(F::ONE);

-        let s_reference = cs.complex_selector();
+        for q in 0..references[0].num_blocks() {
+            let s_reference = cs.complex_selector();

-        for x in 0..inputs[0].num_blocks() {
-            for y in 0..inputs[0].num_inner_cols() {
-                let s_input = cs.complex_selector();
-
-                cs.lookup_any("lookup", |cs| {
-                    let s_inputq = cs.query_selector(s_input);
-                    let mut expression = vec![];
-                    let s_referenceq = cs.query_selector(s_reference);
-                    let mut input_queries = vec![one.clone()];
-
-                    for input in inputs {
-                        input_queries.push(match input {
-                            VarTensor::Advice { inner: advices, .. } => {
-                                cs.query_advice(advices[x][y], Rotation(0))
-                            }
-                            _ => unreachable!(),
-                        });
-                    }
-
-                    let mut ref_queries = vec![one.clone()];
-                    for reference in references {
-                        ref_queries.push(match reference {
-                            VarTensor::Advice { inner: advices, .. } => {
-                                cs.query_advice(advices[0][0], Rotation(0))
-                            }
-                            _ => unreachable!(),
-                        });
-                    }
-
-                    let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone());
-                    let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone());
-                    expression.extend(lhs.zip(rhs));
-
-                    expression
-                });
-                self.shuffles
-                    .input_selectors
-                    .entry((x, y))
-                    .or_insert(s_input);
+            for x in 0..inputs[0].num_blocks() {
+                for y in 0..inputs[0].num_inner_cols() {
+                    let s_input = cs.complex_selector();
+
+                    cs.lookup_any("lookup", |cs| {
+                        let s_inputq = cs.query_selector(s_input);
+                        let mut expression = vec![];
+                        let s_referenceq = cs.query_selector(s_reference);
+                        let mut input_queries = vec![one.clone()];
+
+                        for input in inputs {
+                            input_queries.push(match input {
+                                VarTensor::Advice { inner: advices, .. } => {
+                                    cs.query_advice(advices[x][y], Rotation(0))
+                                }
+                                _ => unreachable!(),
+                            });
+                        }
+
+                        let mut ref_queries = vec![one.clone()];
+                        for reference in references {
+                            ref_queries.push(match reference {
+                                VarTensor::Advice { inner: advices, .. } => {
+                                    cs.query_advice(advices[q][0], Rotation(0))
+                                }
+                                _ => unreachable!(),
+                            });
+                        }
+
+                        let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone());
+                        let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone());
+                        expression.extend(lhs.zip(rhs));
+
+                        expression
+                    });
+                    self.shuffles
+                        .input_selectors
+                        .entry((q, (x, y)))
+                        .or_insert(s_input);
+                }
             }
-        }

-        self.shuffles.reference_selectors.push(s_reference);
+            self.shuffles.reference_selectors.push(s_reference);
+        }

         // if we haven't previously initialized the input/output, do so now
         if self.shuffles.references.is_empty() {

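Note: the two hunks above change how dynamic-lookup and shuffle selectors are keyed. Tables and references may now span several advice blocks; one table/reference selector is allocated per block `q`, and each lookup/input selector is stored under `(q, (x, y))` instead of `(x, y)`. A minimal standalone sketch of the new keying scheme (toy `Selector` type, not the halo2 API):

    use std::collections::BTreeMap;

    type Selector = u32; // toy stand-in for halo2's Selector

    fn configure(
        table_blocks: usize,
        input_blocks: usize,
        inner_cols: usize,
    ) -> (Vec<Selector>, BTreeMap<(usize, (usize, usize)), Selector>) {
        let mut next = 0u32;
        let mut table_selectors = Vec::new();
        let mut lookup_selectors = BTreeMap::new();
        for q in 0..table_blocks {
            // one table selector per block of the dynamic table
            next += 1;
            let s_ltable = next;
            for x in 0..input_blocks {
                for y in 0..inner_cols {
                    // one lookup selector per (table block, input coordinate)
                    next += 1;
                    lookup_selectors.entry((q, (x, y))).or_insert(next);
                }
            }
            table_selectors.push(s_ltable);
        }
        (table_selectors, lookup_selectors)
    }

    fn main() {
        let (tables, lookups) = configure(2, 1, 1);
        assert_eq!(tables.len(), 2);  // one s_ltable per table block
        assert_eq!(lookups.len(), 2); // keys (0, (0, 0)) and (1, (0, 0))
    }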

@@ -979,8 +979,16 @@ pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd + std::hash
     let (lookup_0, lookup_1) = (lookups[0].clone(), lookups[1].clone());
     let (table_0, table_1) = (tables[0].clone(), tables[1].clone());

-    let table_0 = region.assign_dynamic_lookup(&config.dynamic_lookups.tables[0], &table_0)?;
-    let _table_1 = region.assign_dynamic_lookup(&config.dynamic_lookups.tables[1], &table_1)?;
+    let (table_0, flush_len_0) =
+        region.assign_dynamic_lookup(&config.dynamic_lookups.tables[0], &table_0)?;
+    let (_table_1, flush_len_1) =
+        region.assign_dynamic_lookup(&config.dynamic_lookups.tables[1], &table_1)?;
+    if flush_len_0 != flush_len_1 {
+        return Err(CircuitError::MismatchedLookupTableLength(
+            flush_len_0,
+            flush_len_1,
+        ));
+    }
     let table_len = table_0.len();

     trace!("assigning tables took: {:?}", start.elapsed());
@@ -1005,13 +1013,21 @@ pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd + std::hash
     trace!("assigning lookup index took: {:?}", start.elapsed());

+    let mut lookup_block = 0;
+
     if !region.is_dummy() {
         (0..table_len)
             .map(|i| {
-                let table_selector = config.dynamic_lookups.table_selectors[0];
-                let (_, _, z) = config.dynamic_lookups.tables[0]
-                    .cartesian_coord(region.combined_dynamic_shuffle_coord() + i);
+                let (x, _, z) = config.dynamic_lookups.tables[0]
+                    .cartesian_coord(region.combined_dynamic_shuffle_coord() + i + flush_len_0);
+
+                if lookup_block != x {
+                    lookup_block = x;
+                }
+
+                let table_selector = config.dynamic_lookups.table_selectors[lookup_block];
                 region.enable(Some(&table_selector), z)?;

                 Ok(())
             })
             .collect::<Result<Vec<_>, CircuitError>>()?;
@@ -1023,20 +1039,23 @@ pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd + std::hash
             .map(|i| {
                 let (x, y, z) =
                     config.dynamic_lookups.inputs[0].cartesian_coord(region.linear_coord() + i);
+
                 let lookup_selector = config
                     .dynamic_lookups
                     .lookup_selectors
-                    .get(&(x, y))
+                    .get(&(lookup_block, (x, y)))
                     .ok_or(CircuitError::MissingSelectors(format!("{:?}", (x, y))))?;
                 region.enable(Some(lookup_selector), z)?;
+                // region.enable(Some(lookup_selector), z)?;

                 Ok(())
             })
             .collect::<Result<Vec<_>, CircuitError>>()?;
     }

-    region.increment_dynamic_lookup_col_coord(table_len);
+    region.increment_dynamic_lookup_col_coord(table_len + flush_len_0);
     region.increment_dynamic_lookup_index(1);

     region.increment(lookup_len);
@@ -1064,22 +1083,33 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
         ));
     }

-    let reference = region.assign_shuffle(&config.shuffles.references[0], &reference)?;
+    let (reference, flush_len_ref) =
+        region.assign_shuffle(&config.shuffles.references[0], &reference)?;
     let reference_len = reference.len();

     // now create a vartensor of constants for the shuffle index
     let index = create_constant_tensor(F::from(shuffle_index as u64), reference_len);
-    let index = region.assign_shuffle(&config.shuffles.references[1], &index)?;
+    let (index, flush_len_index) = region.assign_shuffle(&config.shuffles.references[1], &index)?;
+
+    if flush_len_index != flush_len_ref {
+        return Err(CircuitError::MismatchedShuffleLength(
+            flush_len_index,
+            flush_len_ref,
+        ));
+    }

     let input = region.assign(&config.shuffles.inputs[0], &input)?;
     region.assign(&config.shuffles.inputs[1], &index)?;

+    let mut shuffle_block = 0;
+
     if !region.is_dummy() {
         (0..reference_len)
             .map(|i| {
-                let ref_selector = config.shuffles.reference_selectors[0];
-                let (_, _, z) = config.shuffles.references[0]
-                    .cartesian_coord(region.combined_dynamic_shuffle_coord() + i);
+                let (x, _, z) = config.shuffles.references[0]
+                    .cartesian_coord(region.combined_dynamic_shuffle_coord() + i + flush_len_ref);
+
+                shuffle_block = x;
+
+                let ref_selector = config.shuffles.reference_selectors[shuffle_block];
                 region.enable(Some(&ref_selector), z)?;

                 Ok(())
             })
@@ -1095,7 +1125,7 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
                 let input_selector = config
                     .shuffles
                     .input_selectors
-                    .get(&(x, y))
+                    .get(&(shuffle_block, (x, y)))
                     .ok_or(CircuitError::MissingSelectors(format!("{:?}", (x, y))))?;
                 region.enable(Some(input_selector), z)?;
@@ -1105,7 +1135,7 @@ pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash
             .collect::<Result<Vec<_>, CircuitError>>()?;
     }

-    region.increment_shuffle_col_coord(reference_len);
+    region.increment_shuffle_col_coord(reference_len + flush_len_ref);
     region.increment_shuffle_index(1);

     region.increment(reference_len);

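Note: both enable loops above now derive the block index from the assignment coordinate, offset by the flush length. As a hedged sketch of the coordinate arithmetic these loops rely on (the real `VarTensor::cartesian_coord` lives in the tensor module and may order its components differently):

    // toy (block, inner column, row) decomposition of a linear offset
    fn cartesian_coord(num_inner_cols: usize, col_size: usize, linear: usize) -> (usize, usize, usize) {
        let block_size = num_inner_cols * col_size;
        let block = linear / block_size;              // x: which advice block
        let inner = (linear % block_size) / col_size; // y: which inner column
        let row = linear % col_size;                  // z: row within the column
        (block, inner, row)
    }

    fn main() {
        // with one inner column of 20 rows, offset 25 falls in block 1, row 5,
        // which is why `lookup_block` / `shuffle_block` are re-derived from `x`
        assert_eq!(cartesian_coord(1, 20, 25), (1, 0, 5));
    }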

@@ -255,7 +255,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> {
         self.raw_values = Tensor::new(None, &[0]).unwrap();
     }

-    ///
+    /// Pre-assign a value
     pub fn pre_assign(&mut self, val: ValTensor<F>) {
         self.pre_assigned_val = Some(val)
     }


@@ -180,6 +180,7 @@ pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
     statistics: RegionStatistics,
     settings: RegionSettings,
     assigned_constants: ConstantsMap<F>,
+    max_dynamic_input_len: usize,
 }

 impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a, F> {
@@ -193,11 +194,16 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
         self.settings.legs
     }

+    /// get the max dynamic input len
+    pub fn max_dynamic_input_len(&self) -> usize {
+        self.max_dynamic_input_len
+    }
+
     #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]
     ///
     pub fn debug_report(&self) {
         log::debug!(
-            "(rows={}, coord={}, constants={}, max_lookup_inputs={}, min_lookup_inputs={}, max_range_size={}, dynamic_lookup_col_coord={}, shuffle_col_coord={})",
+            "(rows={}, coord={}, constants={}, max_lookup_inputs={}, min_lookup_inputs={}, max_range_size={}, dynamic_lookup_col_coord={}, shuffle_col_coord={}, max_dynamic_input_len={})",
             self.row().to_string().blue(),
             self.linear_coord().to_string().yellow(),
             self.total_constants().to_string().red(),
@@ -205,7 +211,9 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
             self.min_lookup_inputs().to_string().green(),
             self.max_range_size().to_string().green(),
             self.dynamic_lookup_col_coord().to_string().green(),
-            self.shuffle_col_coord().to_string().green());
+            self.shuffle_col_coord().to_string().green(),
+            self.max_dynamic_input_len().to_string().green()
+        );
     }

     ///
@@ -223,6 +231,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
         self.dynamic_lookup_index.index += n;
     }

+    /// increment the max dynamic input len
+    pub fn update_max_dynamic_input_len(&mut self, n: usize) {
+        self.max_dynamic_input_len = self.max_dynamic_input_len.max(n);
+    }
+
     ///
     pub fn increment_dynamic_lookup_col_coord(&mut self, n: usize) {
         self.dynamic_lookup_index.col_coord += n;
@@ -274,6 +287,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
             statistics: RegionStatistics::default(),
             settings: RegionSettings::all_true(decomp_base, decomp_legs),
             assigned_constants: HashMap::new(),
+            max_dynamic_input_len: 0,
         }
     }
@@ -310,6 +324,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
             statistics: RegionStatistics::default(),
             settings,
             assigned_constants: HashMap::new(),
+            max_dynamic_input_len: 0,
         }
     }
@@ -331,6 +346,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
             statistics: RegionStatistics::default(),
             settings,
             assigned_constants: HashMap::new(),
+            max_dynamic_input_len: 0,
         }
     }
@@ -583,9 +599,12 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
         &mut self,
         var: &VarTensor,
         values: &ValTensor<F>,
-    ) -> Result<ValTensor<F>, CircuitError> {
+    ) -> Result<(ValTensor<F>, usize), CircuitError> {
+        self.update_max_dynamic_input_len(values.len());
+
         if let Some(region) = &self.region {
-            Ok(var.assign(
+            Ok(var.assign_exact_column(
                 &mut region.borrow_mut(),
                 self.combined_dynamic_shuffle_coord(),
                 values,
@@ -596,7 +615,11 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
                 let values_map = values.create_constants_map_iterator();
                 self.assigned_constants.par_extend(values_map);
             }
-            Ok(values.clone())
+
+            let flush_len = var.get_column_flush(self.combined_dynamic_shuffle_coord(), values)?;
+
+            // get the diff between the current column and the next row
+            Ok((values.clone(), flush_len))
         }
     }
@@ -605,7 +628,7 @@ impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Ha
         &mut self,
         var: &VarTensor,
         values: &ValTensor<F>,
-    ) -> Result<ValTensor<F>, CircuitError> {
+    ) -> Result<(ValTensor<F>, usize), CircuitError> {
        self.assign_dynamic_lookup(var, values)
     }

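Note: the region context now tracks the largest dynamic assignment it has seen and reports the flush length alongside each assigned tensor. A self-contained sketch of that bookkeeping (simplified single-column model, not the real `RegionCtx`):

    struct ToyRegion {
        max_dynamic_input_len: usize,
        col_coord: usize,
        col_size: usize,
    }

    impl ToyRegion {
        /// record the length, flush past the column boundary if needed, and
        /// return (assigned length, flush length) like assign_dynamic_lookup
        fn assign_dynamic(&mut self, len: usize) -> (usize, usize) {
            self.max_dynamic_input_len = self.max_dynamic_input_len.max(len);
            let row = self.col_coord % self.col_size;
            let flush = if row + len > self.col_size { self.col_size - row } else { 0 };
            self.col_coord += flush + len;
            (len, flush)
        }
    }

    fn main() {
        let mut r = ToyRegion { max_dynamic_input_len: 0, col_coord: 18, col_size: 20 };
        assert_eq!(r.assign_dynamic(10), (10, 2)); // flushes 2 cells, then assigns
        assert_eq!(r.max_dynamic_input_len, 10);
    }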

@@ -1516,7 +1516,7 @@ mod add_w_shape_casting {
             // parameters
             let a = Tensor::from((0..LEN).map(|i| Value::known(F::from(i as u64 + 1))));
-            let b = Tensor::from((0..1).map(|i| Value::known(F::from(i as u64 + 1))));
+            let b = Tensor::from((0..1).map(|i| Value::known(F::from(i + 1))));

             let circuit = MyCircuit::<F> {
                 inputs: [ValTensor::from(a), ValTensor::from(b)],


@@ -1203,6 +1203,7 @@ pub(crate) async fn calibrate(
                     num_rows: new_settings.num_rows,
                     total_assignments: new_settings.total_assignments,
                     total_const_size: new_settings.total_const_size,
+                    total_dynamic_col_size: new_settings.total_dynamic_col_size,
                     ..settings.clone()
                 };
@@ -1320,7 +1321,9 @@ pub(crate) async fn calibrate(
     let lookup_log_rows = best_params.lookup_log_rows_with_blinding();
     let module_log_row = best_params.module_constraint_logrows_with_blinding();
     let instance_logrows = best_params.log2_total_instances_with_blinding();
-    let dynamic_lookup_logrows = best_params.dynamic_lookup_and_shuffle_logrows_with_blinding();
+    let dynamic_lookup_logrows =
+        best_params.min_dynamic_lookup_and_shuffle_logrows_with_blinding();
     let range_check_logrows = best_params.range_check_log_rows_with_blinding();

     let mut reduction = std::cmp::max(lookup_log_rows, module_log_row);


@@ -408,6 +408,8 @@ pub struct GraphSettings {
     pub total_const_size: usize,
     /// total dynamic column size
     pub total_dynamic_col_size: usize,
+    /// max dynamic column input length
+    pub max_dynamic_input_len: usize,
     /// number of dynamic lookups
     pub num_dynamic_lookups: usize,
     /// number of shuffles
@@ -485,6 +487,13 @@ impl GraphSettings {
             .ceil() as u32
     }

+    /// calculate the number of rows required for the dynamic lookup and shuffle
+    pub fn min_dynamic_lookup_and_shuffle_logrows_with_blinding(&self) -> u32 {
+        (self.max_dynamic_input_len as f64 + RESERVED_BLINDING_ROWS as f64)
+            .log2()
+            .ceil() as u32
+    }
+
     fn dynamic_lookup_and_shuffle_col_size(&self) -> usize {
         self.total_dynamic_col_size + self.total_shuffle_col_size
     }

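Note: the new `min_dynamic_lookup_and_shuffle_logrows_with_blinding` sizes the dynamic region from the largest single input rather than the total column footprint, which is what lets calibration pick smaller logrows. A worked sketch of the arithmetic (the value of `RESERVED_BLINDING_ROWS` here is an assumption for illustration; the real constant lives elsewhere in the crate):

    const RESERVED_BLINDING_ROWS: usize = 20; // assumed value, for illustration only

    fn min_dynamic_lookup_and_shuffle_logrows(max_dynamic_input_len: usize) -> u32 {
        (max_dynamic_input_len as f64 + RESERVED_BLINDING_ROWS as f64)
            .log2()
            .ceil() as u32
    }

    fn main() {
        // e.g. a 1000-row dynamic input needs ceil(log2(1020)) = 10 logrows
        assert_eq!(min_dynamic_lookup_and_shuffle_logrows(1000), 10);
    }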

@@ -103,6 +103,8 @@ pub struct DummyPassRes {
     pub num_rows: usize,
     /// num dynamic lookups
     pub num_dynamic_lookups: usize,
+    /// max dynamic lookup input len
+    pub max_dynamic_input_len: usize,
     /// dynamic lookup col size
     pub dynamic_lookup_col_coord: usize,
     /// num shuffles
@@ -360,6 +362,14 @@ impl NodeType {
             NodeType::SubGraph { .. } => SupportedOp::Unknown(Unknown),
         }
     }
+
+    /// check if it is a softmax
+    pub fn is_softmax(&self) -> bool {
+        match self {
+            NodeType::Node(n) => n.is_softmax(),
+            NodeType::SubGraph { .. } => false,
+        }
+    }
 }

 #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
@@ -562,6 +572,7 @@ impl Model {
             num_rows: res.num_rows,
             total_assignments: res.linear_coord,
             required_lookups: res.lookup_ops.into_iter().collect(),
+            max_dynamic_input_len: res.max_dynamic_input_len,
             required_range_checks: res.range_checks.into_iter().collect(),
             model_output_scales: self.graph.get_output_scales()?,
             model_input_scales: self.graph.get_input_scales(),
@@ -1465,6 +1476,7 @@ impl Model {
         let res = DummyPassRes {
             num_rows: region.row(),
             linear_coord: region.linear_coord(),
+            max_dynamic_input_len: region.max_dynamic_input_len(),
             total_const_size: region.total_constants(),
             lookup_ops: region.used_lookups(),
             range_checks: region.used_range_checks(),


@@ -623,6 +623,15 @@ impl Node {
             num_uses,
         })
     }
+
+    /// check if it is a softmax node
+    pub fn is_softmax(&self) -> bool {
+        if let SupportedOp::Hybrid(HybridOp::Softmax { .. }) = self.opkind {
+            true
+        } else {
+            false
+        }
+    }
 }

 #[cfg(all(feature = "ezkl", not(target_arch = "wasm32")))]

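Aside: the same predicate could equally be written with the `matches!` macro. A standalone sketch on toy enums (the real `SupportedOp`/`HybridOp` carry more variants and fields):

    enum HybridOp { Softmax { axes: Vec<usize> }, Other }
    enum SupportedOp { Hybrid(HybridOp), Linear }

    // matches! expands to the same if-let-else shape as the method above
    fn is_softmax(op: &SupportedOp) -> bool {
        matches!(op, SupportedOp::Hybrid(HybridOp::Softmax { .. }))
    }

    fn main() {
        assert!(is_softmax(&SupportedOp::Hybrid(HybridOp::Softmax { axes: vec![0] })));
        assert!(!is_softmax(&SupportedOp::Linear));
    }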

@@ -443,7 +443,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
             let dynamic_lookup =
                 VarTensor::new_advice(cs, logrows, 1, dynamic_lookup_and_shuffle_size);
             if dynamic_lookup.num_blocks() > 1 {
-                panic!("dynamic lookup or shuffle should only have one block");
+                warn!("dynamic lookup has {} blocks", dynamic_lookup.num_blocks());
             };
             advices.push(dynamic_lookup);
         }


@@ -541,7 +541,7 @@ impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
                 let mut is_empty = true;
                 x.map(|_| is_empty = false);
                 if is_empty {
-                    return Ok::<_, TensorError>(vec![Value::<F>::unknown(); n + 1]);
+                    Ok::<_, TensorError>(vec![Value::<F>::unknown(); n + 1])
                 } else {
                     let mut res = vec![Value::unknown(); n + 1];
                     let mut int_rep = 0;


@@ -396,6 +396,53 @@ impl VarTensor {
         Ok(res)
     }

+    /// Helper function to get the remaining size of the column
+    pub fn get_column_flush<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
+        &self,
+        offset: usize,
+        values: &ValTensor<F>,
+    ) -> Result<usize, halo2_proofs::plonk::Error> {
+        if values.len() > self.col_size() {
+            error!("Values are too large for the column");
+            return Err(halo2_proofs::plonk::Error::Synthesis);
+        }
+
+        // this can only be called on columns that have a single inner column
+        if self.num_inner_cols() != 1 {
+            error!("This function can only be called on columns with a single inner column");
+            return Err(halo2_proofs::plonk::Error::Synthesis);
+        }
+
+        // check if the values fit in the remaining space of the column
+        let current_cartesian = self.cartesian_coord(offset);
+        let final_cartesian = self.cartesian_coord(offset + values.len());
+
+        let mut flush_len = 0;
+        if current_cartesian.0 != final_cartesian.0 {
+            debug!("Values overflow the column, flushing to next column");
+            // diff is the number of values that overflow the column
+            flush_len += self.col_size() - current_cartesian.2;
+        }
+
+        Ok(flush_len)
+    }
+
+    /// Assigns [ValTensor] to the columns of the inner tensor. Whereby the values are assigned to a single column, without overflowing.
+    /// So for instance if we are assigning 10 values and we are at index 18 of the column, and the columns are of length 20, we skip the last 2 values of current column and start from the beginning of the next column.
+    pub fn assign_exact_column<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
+        &self,
+        region: &mut Region<F>,
+        offset: usize,
+        values: &ValTensor<F>,
+        constants: &mut ConstantsMap<F>,
+    ) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
+        let flush_len = self.get_column_flush(offset, values)?;
+        let assigned_vals = self.assign(region, offset + flush_len, values, constants)?;
+        Ok((assigned_vals, flush_len))
+    }
+
     /// Assigns specific values (`ValTensor`) to the columns of the inner tensor but allows for column wrapping for accumulated operations.
     /// Duplication occurs by copying the last cell of the column to the first cell next column and creating a copy constraint between the two.
     pub fn dummy_assign_with_duplication<

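Note: the doc comment above gives the intended behaviour: assigning 10 values at row 18 of a 20-row column skips 2 cells and restarts at the next column. A standalone sketch of the flush computation under that single-inner-column assumption (a simplified version; the real method compares the block indices of the start and end coordinates):

    // flush length for a single-inner-column tensor of `col_size` rows
    fn get_column_flush(col_size: usize, offset: usize, len: usize) -> usize {
        let row = offset % col_size; // current row within the column
        if row + len > col_size {
            col_size - row // skip the remainder of this column
        } else {
            0
        }
    }

    fn main() {
        assert_eq!(get_column_flush(20, 18, 10), 2); // the doc-comment example
        assert_eq!(get_column_flush(20, 0, 20), 0);  // exact fit: no flush in this simplified version
    }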
Binary file not shown.

File diff suppressed because one or more lines are too long


@@ -33,6 +33,7 @@
   "total_assignments": 92,
   "total_const_size": 3,
   "total_dynamic_col_size": 0,
+  "max_dynamic_input_len": 0,
   "num_dynamic_lookups": 0,
   "num_shuffles": 0,
   "total_shuffle_col_size": 0,