chore: update for clippy v0.1.66

Author: Mayeul@Zama
Date: 2022-10-26 11:45:24 +02:00
Committed by: mayeul-zama
Parent: 6d751b2e4d
Commit: f5e805af23
13 changed files with 44 additions and 52 deletions
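
Most of the changes below fall into two clippy patterns: numeric casts to a type the value already has, and format arguments that can be inlined into the format string. As a rough illustration only (not part of the commit; the lint names clippy::unnecessary_cast and clippy::uninlined_format_args are my reading of the changes, not stated in the diff):

    // Illustrative sketch of the two patterns this kind of cleanup targets.
    fn main() {
        let dim: u64 = 1 << 10;

        // The value is already a u64, so the cast is redundant
        // (clippy::unnecessary_cast).
        let _old = dim as u64 - 1;
        let _new = dim - 1;

        // A plain identifier can be inlined into the format string
        // (clippy::uninlined_format_args).
        println!("dim = {}", dim);
        println!("dim = {dim}");
    }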

View File

@@ -11,12 +11,11 @@ pub const _4_SIGMA: f64 = 1.0 - 0.999_936_657_516;
 const MIN_LOG_POLY_SIZE: u64 = DEFAUT_DOMAINS
     .glwe_pbs_constrained
     .log2_polynomial_size
-    .start as u64;
-const MAX_LOG_POLY_SIZE: u64 =
-    DEFAUT_DOMAINS.glwe_pbs_constrained.log2_polynomial_size.end as u64 - 1;
+    .start;
+const MAX_LOG_POLY_SIZE: u64 = DEFAUT_DOMAINS.glwe_pbs_constrained.log2_polynomial_size.end - 1;
 pub const MAX_GLWE_DIM: u64 = DEFAUT_DOMAINS.glwe_pbs_constrained.glwe_dimension.end - 1;
-pub const MIN_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.start as u64;
-pub const MAX_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.end as u64 - 1;
+pub const MIN_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.start;
+pub const MAX_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.end - 1;
 
 fn main() -> Result<(), Box<dyn std::error::Error>> {
     let sum_size = 4096;

View File

@@ -11,12 +11,11 @@ pub const _4_SIGMA: f64 = 1.0 - 0.999_936_657_516;
 const MIN_LOG_POLY_SIZE: u64 = DEFAUT_DOMAINS
     .glwe_pbs_constrained
     .log2_polynomial_size
-    .start as u64;
-const MAX_LOG_POLY_SIZE: u64 =
-    DEFAUT_DOMAINS.glwe_pbs_constrained.log2_polynomial_size.end as u64 - 1;
+    .start;
+const MAX_LOG_POLY_SIZE: u64 = DEFAUT_DOMAINS.glwe_pbs_constrained.log2_polynomial_size.end - 1;
 pub const MAX_GLWE_DIM: u64 = DEFAUT_DOMAINS.glwe_pbs_constrained.glwe_dimension.end - 1;
-pub const MIN_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.start as u64;
-pub const MAX_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.end as u64 - 1;
+pub const MIN_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.start;
+pub const MAX_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.end - 1;
 
 fn main() -> Result<(), Box<dyn std::error::Error>> {
     let processing_unit = config::ProcessingUnit::Cpu;

View File

@@ -48,7 +48,7 @@ impl Shape {
     pub fn duplicated(out_dim_size: u64, other: &Self) -> Self {
         let mut dimensions_size = Vec::with_capacity(other.rank() + 1);
-        dimensions_size.push(out_dim_size as u64);
+        dimensions_size.push(out_dim_size);
         dimensions_size.extend_from_slice(&other.dimensions_size);
         Self { dimensions_size }
     }

View File

@@ -227,10 +227,7 @@ pub fn domains_to_ranges(
     let parameter_ranges = ParameterRanges {
         glwe,
-        br_decomposition: vec![
-            domains.br_decomposition;
-            parameters_count.br_decomposition as usize
-        ],
+        br_decomposition: vec![domains.br_decomposition; parameters_count.br_decomposition],
         ks_decomposition: vec![domains.ks_decomposition; parameters_count.ks_decomposition],
     };
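
For the hunk above: the repeat count of vec![value; count] must already be a usize, so dropping the as usize suggests parameters_count.br_decomposition is a usize field. A standalone sketch under that assumption (the names are stand-ins, not the crate's):

    fn main() {
        // `vec![value; count]` requires `count: usize`; when the count is
        // already a usize, an `as usize` cast on it is flagged as unnecessary.
        let count: usize = 4;
        let br_decomposition = vec![0u64..8u64; count];
        assert_eq!(br_decomposition.len(), 4);
    }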

View File

@@ -57,7 +57,7 @@ pub fn safe_variance_bound_1bit_1padbit(
     // This is hardcoded and only valid for 16bit wop pbs
     // Precision is 1
     let noise_bits = 58;
-    let fatal_noise_limit = 2_f64.powi(noise_bits as i32);
+    let fatal_noise_limit = 2_f64.powi(noise_bits);
     safe_variance_bound_from_p_error(
         fatal_noise_limit,
         ciphertext_modulus_log,
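
The removed cast above is redundant because f64::powi takes an i32 exponent, so noise_bits is simply inferred as i32 once the explicit as i32 is gone. A minimal sketch (variable names borrowed from the hunk, the rest is mine):

    fn main() {
        // With no annotation and no later cast, `noise_bits` is inferred as i32
        // from its use as the exponent of `f64::powi(self, n: i32)`.
        let noise_bits = 58;
        let fatal_noise_limit = 2_f64.powi(noise_bits);
        assert_eq!(fatal_noise_limit, (1u64 << 58) as f64);
    }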

View File

@@ -79,7 +79,7 @@ pub fn fft_noise(
     let l = br_decomposition_parameter.level as f64;
     let big_n = glwe_params.polynomial_size() as f64;
     let k = glwe_params.glwe_dimension;
-    assert!(k > 0, "k = {}", k);
+    assert!(k > 0, "k = {k}");
     assert!(
         DEFAUT_DOMAINS.glwe_pbs_constrained.glwe_dimension.end > k,
         "k={} and bound={}",
View File

@@ -20,12 +20,12 @@ pub fn estimate_packing_private_keyswitch<T>(
     let var_s_w = 1. / 4.;
     let mean_s_w = 1. / 2.;
     // println!("n = {}", n);
-    let res_1 = (l * (n + 1.) * var_ggsw.get_modular_variance(ciphertext_modulus_log)) as f64
+    let res_1 = (l * (n + 1.) * var_ggsw.get_modular_variance(ciphertext_modulus_log))
         * (square(b) + 2.)
         / 12.;
     #[allow(clippy::cast_possible_wrap)]
-    let res_3 = ((square(f64::powi(2., ciphertext_modulus_log as i32)) as f64 - b2l) / (12. * b2l)
+    let res_3 = ((square(f64::powi(2., ciphertext_modulus_log as i32)) - b2l) / (12. * b2l)
         * (1.
             + n * (K::variance_key_coefficient(ciphertext_modulus_log)
                 .get_modular_variance(ciphertext_modulus_log)

View File

@@ -887,7 +887,7 @@ mod tests {
         let mut graph = unparametrized::OperationDag::new();
         let max_precision: Precision = 5;
         for i in 1..=max_precision {
-            let _ = graph.add_input(i as u8, Shape::number());
+            let _ = graph.add_input(i, Shape::number());
         }
         let analysis = analyze(&graph);
         assert!(analysis.constraints_by_precisions.len() == max_precision as usize);
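
In the test above, the loop variable already has the type of max_precision, so the as u8 cast is redundant when Precision is u8. A reduced sketch with hypothetical stand-ins for the crate's types:

    // Stand-ins only: the real OperationDag::add_input takes more arguments.
    type Precision = u8;

    fn add_input(precision: Precision) -> Precision {
        precision
    }

    fn main() {
        let max_precision: Precision = 5;
        for i in 1..=max_precision {
            // `i` is already a Precision (u8), so `i as u8` changes nothing.
            let _ = add_input(i);
        }
    }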

View File

@@ -569,11 +569,11 @@ mod tests {
     fn no_lut_vs_lut(precision: Precision, cache: &PersistDecompCache) {
         let mut dag_lut = unparametrized::OperationDag::new();
-        let input1 = dag_lut.add_input(precision as u8, Shape::number());
+        let input1 = dag_lut.add_input(precision, Shape::number());
         let _lut1 = dag_lut.add_lut(input1, FunctionTable::UNKWOWN, precision);
         let mut dag_no_lut = unparametrized::OperationDag::new();
-        let _input2 = dag_no_lut.add_input(precision as u8, Shape::number());
+        let _input2 = dag_no_lut.add_input(precision, Shape::number());
         let state_no_lut = optimize(&dag_no_lut, cache);
         let state_lut = optimize(&dag_lut, cache);
@@ -608,7 +608,7 @@ mod tests {
         let mut dag_1 = unparametrized::OperationDag::new();
         {
-            let input1 = dag_1.add_input(precision as u8, Shape::number());
+            let input1 = dag_1.add_input(precision, Shape::number());
             let scaled_input1 = dag_1.add_dot([input1], weight);
             let lut1 = dag_1.add_lut(scaled_input1, FunctionTable::UNKWOWN, precision);
             let _lut2 = dag_1.add_lut(lut1, FunctionTable::UNKWOWN, precision);
@@ -616,7 +616,7 @@ mod tests {
         let mut dag_2 = unparametrized::OperationDag::new();
         {
-            let input1 = dag_2.add_input(precision as u8, Shape::number());
+            let input1 = dag_2.add_input(precision, Shape::number());
             let lut1 = dag_2.add_lut(input1, FunctionTable::UNKWOWN, precision);
             let scaled_lut1 = dag_2.add_dot([lut1], weight);
             let _lut2 = dag_2.add_lut(scaled_lut1, FunctionTable::UNKWOWN, precision);
@@ -651,14 +651,14 @@ mod tests {
     fn lut_1_layer_has_better_complexity(precision: Precision, cache: &PersistDecompCache) {
         let dag_1_layer = {
             let mut dag = unparametrized::OperationDag::new();
-            let input1 = dag.add_input(precision as u8, Shape::number());
+            let input1 = dag.add_input(precision, Shape::number());
             let _lut1 = dag.add_lut(input1, FunctionTable::UNKWOWN, precision);
             let _lut2 = dag.add_lut(input1, FunctionTable::UNKWOWN, precision);
             dag
         };
         let dag_2_layer = {
             let mut dag = unparametrized::OperationDag::new();
-            let input1 = dag.add_input(precision as u8, Shape::number());
+            let input1 = dag.add_input(precision, Shape::number());
             let lut1 = dag.add_lut(input1, FunctionTable::UNKWOWN, precision);
             let _lut2 = dag.add_lut(lut1, FunctionTable::UNKWOWN, precision);
             dag
@@ -783,7 +783,7 @@ mod tests {
         let shape = Shape::vector(dim);
         let weights = Weights::number(weight);
         let mut dag = unparametrized::OperationDag::new();
-        let input1 = dag.add_input(precision as u8, shape);
+        let input1 = dag.add_input(precision, shape);
         let _dot1 = dag.add_dot([input1], weights); // this is just several multiply
         let state = optimize(&dag, cache);
         let sol = state.best_solution.unwrap();
@@ -814,7 +814,7 @@ mod tests {
         let shape = Shape::number();
         let weights = Weights::number(weight);
         let mut dag = unparametrized::OperationDag::new();
-        let mut last_val = dag.add_input(precision as u8, shape);
+        let mut last_val = dag.add_input(precision, shape);
         for _i in 0..depth {
             let dot = dag.add_dot([last_val], &weights);
             last_val = dag.add_lut(dot, FunctionTable::UNKWOWN, precision);
@@ -842,8 +842,8 @@ mod tests {
         let mut dag = unparametrized::OperationDag::new();
         let weights_low = Weights::number(weight_low);
         let weights_high = Weights::number(weight_high);
-        let mut last_val_low = dag.add_input(precision_low as u8, &shape);
-        let mut last_val_high = dag.add_input(precision_high as u8, &shape);
+        let mut last_val_low = dag.add_input(precision_low, &shape);
+        let mut last_val_high = dag.add_input(precision_high, &shape);
         for _i in 0..depth {
             let dot_low = dag.add_dot([last_val_low], &weights_low);
             last_val_low = dag.add_lut(dot_low, FunctionTable::UNKWOWN, precision_low);
View File

@@ -197,7 +197,7 @@ fn update_state_with_best_decompositions(
     let variance_ggsw = base_variance_private_packing_ks + shared_br_decomp.noise / 2.;
     let variance_coeff_1_cmux_tree =
-        2_f64.powf(2. * log_norm as f64) // variance_coeff for the multisum
+        2_f64.powf(2. * log_norm) // variance_coeff for the multisum
         * (precisions_sum // for hybrid packing
             << (2 * (max_precision - 1))) as f64 // for left shift
     ;
@@ -238,7 +238,7 @@ fn update_state_with_best_decompositions(
     } else {
         0.0
     };
-    let complexity_cmux_tree = cmux_group_count as f64 * complexity_1_cmux_hp;
+    let complexity_cmux_tree = cmux_group_count * complexity_1_cmux_hp;
     let complexity_one_ggsw_to_fft = cb_decomp.complexity_one_ggsw_to_fft;

View File

@@ -11,7 +11,7 @@ pub fn minimal_variance(
     let equiv_lwe_dimension = glwe_params.glwe_dimension * glwe_params.polynomial_size();
     let security_weights = SECURITY_WEIGHTS_TABLE
         .get(&security_level)
-        .unwrap_or_else(|| panic!("{} bits of security is not supported", security_level));
+        .unwrap_or_else(|| panic!("{security_level} bits of security is not supported"));
     let secure_log2_std =
         security_weights.secure_log2_std(equiv_lwe_dimension, ciphertext_modulus_log as f64);
     let log2_var = 2.0 * secure_log2_std;

View File

@@ -155,10 +155,8 @@ where
         match std::fs::create_dir_all(std::path::Path::new(&self.path).parent().unwrap()) {
             Ok(()) => (),
             Err(err) => {
-                println!(
-                    "PersistentCache::sync_to_disk: Cannot create directory {}, {}",
-                    self.path, err
-                );
+                let path = &self.path;
+                println!("PersistentCache::sync_to_disk: Cannot create directory {path}, {err}",);
                 return;
             }
         };
@@ -209,7 +207,7 @@ where
         match disk_version {
             Ok(disk_version) => {
                 if disk_version != *version {
-                    println!("PersistentCache:: Invalid version {}: cleaning", path);
+                    println!("PersistentCache:: Invalid version {path}: cleaning");
                     Self::clear_file(path);
                     return None;
                 }
@@ -246,15 +244,15 @@ where
         }
         if let Err(err) = filelock.file.rewind() {
             println!(
-                "PersistentCache::write: cannot rewind file: {}, {}",
-                self.path, err
+                "PersistentCache::write: cannot rewind file: {}, {err}",
+                self.path
             );
             return;
         }
         if let Err(err) = filelock.file.set_len(0) {
             println!(
-                "PersistentCache::write: cannot truncate file: {}, {}",
-                self.path, err
+                "PersistentCache::write: cannot truncate file: {}, {err}",
+                self.path
             );
         }
         let file = &mut filelock.file;
@@ -273,7 +271,7 @@ where
         let filelock = match FileLock::lock(path, is_blocking, options) {
             Ok(lock) => lock,
             Err(_err) => {
-                println!("PersistentCache::clear: Cannot lock cache file {}", path);
+                println!("PersistentCache::clear: Cannot lock cache file {path}");
                 return;
             }
         };
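
A detail visible throughout this file: only bare identifiers can be captured inline in a format string, not field accesses like self.path, which is why one call site binds let path = &self.path; first while the others keep self.path as a positional argument and inline only err. A minimal sketch (the struct is a stand-in, not the crate's type):

    struct Cache {
        path: String,
    }

    impl Cache {
        fn report(&self, err: &str) {
            // Inline capture works for plain identifiers only, so either bind
            // the field to a local first...
            let path = &self.path;
            println!("cannot create directory {path}, {err}");

            // ...or keep the field as a positional argument and inline the rest.
            println!("cannot rewind file: {}, {err}", self.path);
        }
    }

    fn main() {
        Cache { path: "/tmp/cache".into() }.report("permission denied");
    }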

View File

@@ -28,12 +28,11 @@ pub const _4_SIGMA: f64 = 1.0 - 0.999_936_657_516;
 const MIN_LOG_POLY_SIZE: u64 = DEFAUT_DOMAINS
     .glwe_pbs_constrained
     .log2_polynomial_size
-    .start as u64;
-const MAX_LOG_POLY_SIZE: u64 =
-    DEFAUT_DOMAINS.glwe_pbs_constrained.log2_polynomial_size.end as u64 - 1;
+    .start;
+const MAX_LOG_POLY_SIZE: u64 = DEFAUT_DOMAINS.glwe_pbs_constrained.log2_polynomial_size.end - 1;
 pub const MAX_GLWE_DIM: u64 = DEFAUT_DOMAINS.glwe_pbs_constrained.glwe_dimension.end - 1;
-pub const MIN_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.start as u64;
-pub const MAX_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.end as u64 - 1;
+pub const MIN_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.start;
+pub const MAX_LWE_DIM: u64 = DEFAUT_DOMAINS.free_glwe.glwe_dimension.end - 1;
 
 /// Find parameters for classical PBS and new WoP-PBS
 #[derive(Parser, Debug)]
@@ -168,8 +167,8 @@ pub fn compute_print_results(mut writer: impl Write, args: &Args) -> Result<(),
     let precisions = args.min_precision..=args.max_precision;
     let manps: Vec<_> = (0..=31).collect();
-    writeln!(writer, "security level: {}", security_level)?;
-    writeln!(writer, "target p_error: {:1.1e}", p_error)?;
+    writeln!(writer, "security level: {security_level}")?;
+    writeln!(writer, "target p_error: {p_error:1.1e}")?;
     writeln!(writer, "per precision and log norm2:")?;
     for (precision_i, precision) in precisions.enumerate() {
@@ -258,7 +257,7 @@ mod tests {
         const CMP_LINES: &str = "\n";
         const EXACT_EQUALITY: i32 = 0;
         for &security_level in security_levels {
-            let ref_file: &str = &format!("ref/v0_last_{}", security_level);
+            let ref_file: &str = &format!("ref/v0_last_{security_level}");
             let args: Args = Args {
                 min_precision: 1,
                 max_precision: 9,
@@ -298,7 +297,7 @@ mod tests {
         const CMP_LINES: &str = "\n";
         const EXACT_EQUALITY: i32 = 0;
         for &security_level in security_levels {
-            let ref_file: &str = &format!("ref/wop_pbs_last_{}", security_level);
+            let ref_file: &str = &format!("ref/wop_pbs_last_{security_level}");
             let args = Args {
                 min_precision: 1,