feat(optimizer): add brute-force optimizer

Co-authored-by: Samuel Tap <samuel.tap@zama.ai>
This commit is contained in:
Mayeul@Zama
2023-03-22 09:16:47 +01:00
committed by mayeul-zama
parent e9e097d941
commit 74feda147c
17 changed files with 2519 additions and 2 deletions

View File

@@ -5,6 +5,7 @@ members = [
"v0-parameters",
"concrete-optimizer-cpp",
"charts",
"brute-force-optimizer",
]

View File

@@ -0,0 +1,13 @@
[package]
name = "brute-force-optimizer"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
rayon = "1.6"
concrete-cpu-noise-model = { path = "../../../backends/concrete-cpu/noise-model" }
concrete-optimizer = { path = "../concrete-optimizer" }
concrete-security-curves = { path = "../../../tools/parameter-curves/concrete-security-curves-rust" }

View File

@@ -0,0 +1,364 @@
use crate::generic::{Problem, SequentialProblem};
use crate::{ExplicitRange, MyRange, Solution, STEP};
use concrete_cpu_noise_model::gaussian_noise::noise::blind_rotate::variance_blind_rotate;
use concrete_cpu_noise_model::gaussian_noise::noise::keyswitch::variance_keyswitch;
use concrete_cpu_noise_model::gaussian_noise::noise::modulus_switching::estimate_modulus_switching_noise_with_binary_key;
use concrete_optimizer::computing_cost::complexity_model::ComplexityModel;
use concrete_optimizer::noise_estimator::error;
use concrete_optimizer::parameters::{
AtomicPatternParameters, BrDecompositionParameters, GlweParameters, KsDecompositionParameters,
LweDimension,
};
use concrete_security_curves::gaussian::security::{minimal_variance_glwe, minimal_variance_lwe};
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
use std::io::Write;
// use rayon_cond::CondIterator;
/// Candidate parameter set for the CGGI atomic pattern (keyswitch + PBS).
#[derive(Debug, Clone, Copy)]
pub struct CGGIParams {
    /// log2 of the keyswitch decomposition base.
    base_log_ks: u64,
    /// Number of keyswitch decomposition levels.
    level_ks: u64,
    /// log2 of the blind-rotation (PBS) decomposition base.
    base_log_pbs: u64,
    /// Number of PBS decomposition levels.
    level_pbs: u64,
    /// GLWE dimension k.
    glwe_dim: u64,
    /// log2 of the polynomial size N.
    log_poly_size: u64,
    /// Small (internal) LWE dimension n.
    small_lwe_dim: u64,
}
impl CGGIParams {
    /// Dimension of the large LWE ciphertext: `k * N` with `N = 2^log_poly_size`.
    fn big_lwe_dim(&self) -> u64 {
        self.glwe_dim * (1u64 << self.log_poly_size)
    }
}
/// Noise/cost constraint data for the CGGI brute-force search.
struct CGGIConstraint {
    /// Upper bound on the output noise variance.
    variance_constraint: f64,
    /// log2 of the 2-norm of the multisum weights.
    log_norm2: u64,
    /// Target security level in bits (e.g. 128).
    security_level: u64,
    /// Number of terms in the levelled multisum.
    sum_size: u64,
}
impl Problem for CGGIConstraint {
    type Param = CGGIParams;

    /// Returns `true` when the CGGI noise bound holds for `param`:
    /// `(v_pbs + v_ks) * nu^2 + v_ms < variance_constraint`,
    /// with `nu^2 = 2^(2*log_norm2)` the squared 2-norm of the multisum.
    fn verify(&self, param: Self::Param) -> bool {
        let poly_size = 1 << param.log_poly_size;
        // Minimal secure variance for an LWE key of dimension n (64-bit modulus).
        let variance_ksk = minimal_variance_lwe(param.small_lwe_dim, 64, self.security_level);
        // Noise added by keyswitching from the big to the small LWE dimension.
        let v_ks = variance_keyswitch(
            param.big_lwe_dim(),
            param.base_log_ks,
            param.level_ks,
            64,
            variance_ksk,
        );
        // Minimal secure variance for the GLWE bootstrapping key.
        let variance_bsk =
            minimal_variance_glwe(param.glwe_dim, poly_size, 64, self.security_level);
        // Fresh noise at the output of the blind rotation (PBS).
        let v_pbs = variance_blind_rotate(
            param.small_lwe_dim,
            param.glwe_dim,
            poly_size,
            param.base_log_pbs,
            param.level_pbs,
            64,
            variance_bsk,
        );
        // Noise added by switching the modulus down before the blind rotation.
        let v_ms = estimate_modulus_switching_noise_with_binary_key(
            param.small_lwe_dim,
            param.log_poly_size,
            64,
        );
        // CGGI order: both PBS and KS output noise go through the multisum, so
        // both are amplified by the squared 2-norm before the modulus switch.
        (v_pbs + v_ks) * (1 << (2 * self.log_norm2)) as f64 + v_ms < self.variance_constraint
    }

    /// Cost of one atomic pattern for `param` (multisum + KS + PBS),
    /// delegated to [`cggi_complexity`].
    fn cost(&self, param: Self::Param) -> f64 {
        cggi_complexity(
            self.sum_size,
            AtomicPatternParameters {
                input_lwe_dimension: LweDimension(param.big_lwe_dim()),
                ks_decomposition_parameter: KsDecompositionParameters {
                    level: param.level_ks,
                    log2_base: param.base_log_ks,
                },
                internal_lwe_dimension: LweDimension(param.small_lwe_dim),
                br_decomposition_parameter: BrDecompositionParameters {
                    level: param.level_pbs,
                    log2_base: param.base_log_pbs,
                },
                output_glwe_params: GlweParameters {
                    log2_polynomial_size: param.log_poly_size,
                    glwe_dimension: param.glwe_dim,
                },
            },
            64,
        )
    }
}
/// Total cost of one CGGI atomic pattern: levelled multisum + keyswitch + PBS,
/// using the shared CPU complexity model.
#[allow(dead_code)]
pub fn cggi_complexity(
    sum_size: u64,
    params: AtomicPatternParameters,
    ciphertext_modulus_log: u32,
) -> f64 {
    let model = concrete_optimizer::computing_cost::cpu::CpuComplexity::default();
    // Levelled (dot-product) part, evaluated on the input LWE dimension.
    let multisum = model.levelled_complexity(
        sum_size,
        params.input_lwe_dimension,
        ciphertext_modulus_log,
    );
    let ks = model.ks_complexity(params.ks_parameters(), ciphertext_modulus_log);
    let pbs = model.pbs_complexity(params.pbs_parameters(), ciphertext_modulus_log);
    multisum + ks + pbs
}
/// Raw (untightened) CGGI search space: one inclusive-exclusive range per
/// free parameter. The `_pbs` ranges are currently unused (see `to_tighten`).
struct CGGISearchSpace {
    range_base_log_ks: MyRange,
    range_level_ks: MyRange,
    _range_base_log_pbs: MyRange,
    _range_level_pbs: MyRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
}
impl CGGISearchSpace {
    /// Prunes the (base_log, level) grids down to Pareto-optimal pairs.
    ///
    /// For every (N, k, n) triple and every level, only the base_log giving the
    /// lowest noise is kept, and a pair is retained only if raising the level
    /// actually reduced the best achievable noise. The union over all triples
    /// (sorted + deduped) is returned as explicit lists.
    fn to_tighten(&self, security_level: u64) -> CGGISearchSpaceTighten {
        // Keyswitch
        let mut ks_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for n in self.range_small_lwe_dim.to_std_range() {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_ks.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_ks.to_std_range() {
                            let variance_ksk = minimal_variance_lwe(n, 64, security_level);
                            let v_ks = variance_keyswitch(
                                (1 << log_N) * k,
                                baselog,
                                level,
                                64,
                                variance_ksk,
                            );
                            // `<=` keeps the largest base_log among ties.
                            if v_ks <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_ks;
                                current_pair = (baselog, level)
                            }
                        }
                        // Keep the pair only if this level improved on all
                        // smaller levels (Pareto front on level vs noise).
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            ks_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        // PBS
        // NOTE(review): this section iterates `range_level_ks` and
        // `range_base_log_ks` rather than `_range_level_pbs` /
        // `_range_base_log_pbs`. Both ranges happen to be identical (1..53)
        // for CGGI so the result is the same, but confirm intent — the GBA
        // variant scans the dedicated PBS ranges.
        let mut pbs_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for n in self.range_small_lwe_dim.to_std_range() {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_ks.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_ks.to_std_range() {
                            let variance_bsk =
                                minimal_variance_glwe(k, 1 << log_N, 64, security_level);
                            let v_pbs = variance_blind_rotate(
                                n,
                                k,
                                1 << log_N,
                                baselog,
                                level,
                                64,
                                variance_bsk,
                            );
                            if v_pbs <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_pbs;
                                current_pair = (baselog, level)
                            }
                        }
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            pbs_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        ks_decomp.sort();
        ks_decomp.dedup();
        pbs_decomp.sort();
        pbs_decomp.dedup();
        println!("Only {} couples left for keyswitch", ks_decomp.len());
        println!("Only {} couples left for bootstrap", pbs_decomp.len());
        CGGISearchSpaceTighten {
            range_base_log_level_ks: ExplicitRange(ks_decomp.clone()),
            range_base_log_level_pbs: ExplicitRange(pbs_decomp.clone()),
            range_glwe_dim: self.range_glwe_dim,
            range_log_poly_size: self.range_log_poly_size,
            range_small_lwe_dim: self.range_small_lwe_dim,
        }
    }
}
/// CGGI search space after tightening: the decomposition (base_log, level)
/// grids are replaced by explicit lists of Pareto-optimal pairs.
#[derive(Clone)]
struct CGGISearchSpaceTighten {
    range_base_log_level_ks: ExplicitRange,
    range_base_log_level_pbs: ExplicitRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
}
impl CGGISearchSpaceTighten {
    /// Dummy parallel iterator: yields one all-zero `CGGIParams` per glwe_dim
    /// candidate. NOTE(review): this is a placeholder stub (hence
    /// `#[allow(unused)]`); it does not enumerate the search space and must
    /// not be used for a real search.
    #[allow(unused)]
    #[rustfmt::skip]
    fn par_iter(self) -> impl rayon::iter::ParallelIterator<Item=CGGIParams> {
        self.range_glwe_dim
            .to_std_range()
            .into_par_iter().map(|_k| CGGIParams {
                base_log_ks: 0,
                level_ks: 0,
                base_log_pbs: 0,
                level_pbs: 0,
                glwe_dim: 0,
                log_poly_size: 0,
                small_lwe_dim: 0,
            })
    }

    /// Sequential cartesian product of the tightened search space.
    /// `_precision` is unused here; the CJP/GBA variants use it to skip
    /// polynomial sizes too small for the message.
    fn iter(self, _precision: u64) -> impl Iterator<Item = CGGIParams> {
        self.range_base_log_level_ks
            .into_iter()
            .flat_map(move |(base_log_ks, level_ks)| {
                self.range_base_log_level_pbs.clone().into_iter().flat_map(
                    move |(base_log_pbs, level_pbs)| {
                        self.range_glwe_dim
                            .to_std_range()
                            .flat_map(move |glwe_dim| {
                                self.range_log_poly_size.to_std_range().flat_map(
                                    move |log_poly_size| {
                                        // n is stepped by STEP to thin the grid.
                                        self.range_small_lwe_dim.to_std_range().step_by(STEP).map(
                                            move |small_lwe_dim| CGGIParams {
                                                base_log_ks,
                                                level_ks,
                                                base_log_pbs,
                                                level_pbs,
                                                glwe_dim,
                                                log_poly_size,
                                                small_lwe_dim,
                                            },
                                        )
                                    },
                                )
                            })
                    },
                )
            })
    }
}
/// Runs the brute-force CGGI parameter search for every (precision, log-norm)
/// pair and writes the resulting solutions to `writer` as a CSV-like table.
///
/// `p_fail` is the target failure probability used to derive the per-PBS
/// variance bound. Panics if writing to `writer` fails.
pub fn solve_all_cggi(p_fail: f64, writer: impl Write) {
    // Message precisions (in bits) to optimize for.
    let precisions = 1..9;
    // log2 of the 2-norm of the multisum weights.
    let log_norms = vec![4, 6, 8, 10];
    // Full search space; tightened below by keeping only Pareto-optimal
    // (base_log, level) decomposition pairs at 128-bit security.
    let a = CGGISearchSpace {
        range_base_log_ks: MyRange(1, 53),
        range_level_ks: MyRange(1, 53),
        _range_base_log_pbs: MyRange(1, 53),
        _range_level_pbs: MyRange(1, 53),
        range_glwe_dim: MyRange(1, 7),
        range_log_poly_size: MyRange(8, 18),
        range_small_lwe_dim: MyRange(500, 1500),
    };
    let a_tighten = a.to_tighten(128);
    // One independent optimization per (precision, log_norm), run in parallel.
    let res: Vec<Solution<CGGIParams>> = precisions
        .into_par_iter()
        .flat_map(|precision| {
            log_norms
                .clone()
                .into_par_iter()
                .map(|log_norm| {
                    let config = CGGIConstraint {
                        // Variance bound assuming 2 bits of padding.
                        variance_constraint: error::safe_variance_bound_2padbits(
                            precision, 64, p_fail,
                        ),
                        log_norm2: log_norm,
                        security_level: 128,
                        sum_size: 4096,
                    };
                    let intem = config.brute_force(a_tighten.clone().iter(precision));
                    Solution {
                        precision,
                        log_norm,
                        intem,
                    }
                })
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();
    write_to_file(writer, &res).unwrap();
}
/// Writes `res` as an aligned CSV-like table to `writer`.
///
/// Solutions with no feasible parameter set (`intem == None`) are skipped.
///
/// # Errors
/// Propagates any I/O error from `writer`.
pub fn write_to_file(
    mut writer: impl Write,
    res: &[Solution<CGGIParams>],
) -> Result<(), std::io::Error> {
    writeln!(
        writer,
        " p,log(nu), k, N, n, br_l,br_b, ks_l,ks_b, cost"
    )?;
    for Solution {
        precision,
        log_norm,
        intem,
    } in res.iter()
    {
        // `if let` replaces the original `match … None => {}` (clippy: single_match).
        if let Some((solution, cost)) = intem {
            writeln!(
                writer,
                " {:2}, {:2}, {:2}, {:2}, {:4}, {:2}, {:2}, {:2}, {:2}, {:6}",
                precision,
                log_norm,
                solution.glwe_dim,
                solution.log_poly_size,
                solution.small_lwe_dim,
                solution.level_pbs,
                solution.base_log_pbs,
                solution.level_ks,
                solution.base_log_ks,
                cost
            )?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,384 @@
use crate::generic::{Problem, SequentialProblem};
use crate::{minimal_added_noise_by_modulus_switching, ExplicitRange, MyRange, Solution, STEP};
use concrete_cpu_noise_model::gaussian_noise::noise::blind_rotate::variance_blind_rotate;
use concrete_cpu_noise_model::gaussian_noise::noise::keyswitch::variance_keyswitch;
use concrete_cpu_noise_model::gaussian_noise::noise::modulus_switching::estimate_modulus_switching_noise_with_binary_key;
use concrete_optimizer::computing_cost::complexity_model::ComplexityModel;
use concrete_optimizer::noise_estimator::error;
use concrete_optimizer::parameters::{
AtomicPatternParameters, BrDecompositionParameters, GlweParameters, KsDecompositionParameters,
LweDimension,
};
use concrete_security_curves::gaussian::security::{minimal_variance_glwe, minimal_variance_lwe};
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
use std::io::Write;
use std::time::Instant;
/// Candidate parameter set for the CJP atomic pattern (multisum on the big
/// LWE, then keyswitch, then PBS).
#[derive(Debug, Clone, Copy)]
pub struct CJPParams {
    /// log2 of the keyswitch decomposition base.
    base_log_ks: u64,
    /// Number of keyswitch decomposition levels.
    level_ks: u64,
    /// log2 of the blind-rotation (PBS) decomposition base.
    base_log_pbs: u64,
    /// Number of PBS decomposition levels.
    level_pbs: u64,
    /// GLWE dimension k.
    glwe_dim: u64,
    /// log2 of the polynomial size N.
    log_poly_size: u64,
    /// Small (internal) LWE dimension n.
    small_lwe_dim: u64,
}
impl CJPParams {
    /// Dimension of the large LWE ciphertext: `k * N` with `N = 2^log_poly_size`.
    fn big_lwe_dim(&self) -> u64 {
        self.glwe_dim * (1u64 << self.log_poly_size)
    }
}
/// Noise/cost constraint data for the CJP brute-force search.
struct CJPConstraint {
    /// Upper bound on the output noise variance.
    variance_constraint: f64,
    /// log2 of the 2-norm of the multisum weights.
    log_norm2: u64,
    /// Target security level in bits (e.g. 128).
    security_level: u64,
    /// Number of terms in the levelled multisum.
    sum_size: u64,
}
impl Problem for CJPConstraint {
    type Param = CJPParams;

    /// Returns `true` when the CJP noise bound holds for `param`:
    /// `v_pbs * nu^2 + v_ks + v_ms < variance_constraint`.
    /// Unlike the CGGI variant, only the PBS output noise goes through the
    /// multisum (KS happens after it), so only `v_pbs` is norm-amplified.
    fn verify(&self, param: Self::Param) -> bool {
        let poly_size = 1 << param.log_poly_size;
        // Minimal secure variance for an LWE key of dimension n (64-bit modulus).
        let variance_ksk = minimal_variance_lwe(param.small_lwe_dim, 64, self.security_level);
        // Noise added by keyswitching from the big to the small LWE dimension.
        let v_ks = variance_keyswitch(
            param.big_lwe_dim(),
            param.base_log_ks,
            param.level_ks,
            64,
            variance_ksk,
        );
        // Minimal secure variance for the GLWE bootstrapping key.
        let variance_bsk =
            minimal_variance_glwe(param.glwe_dim, poly_size, 64, self.security_level);
        // Fresh noise at the output of the blind rotation (PBS).
        let v_pbs = variance_blind_rotate(
            param.small_lwe_dim,
            param.glwe_dim,
            poly_size,
            param.base_log_pbs,
            param.level_pbs,
            64,
            variance_bsk,
        );
        // Noise added by switching the modulus down before the blind rotation.
        let v_ms = estimate_modulus_switching_noise_with_binary_key(
            param.small_lwe_dim,
            param.log_poly_size,
            64,
        );
        v_pbs * (1 << (2 * self.log_norm2)) as f64 + v_ks + v_ms < self.variance_constraint
    }

    /// Cost of one atomic pattern for `param` (multisum + KS + PBS),
    /// delegated to [`cjp_complexity`].
    fn cost(&self, param: Self::Param) -> f64 {
        cjp_complexity(
            self.sum_size,
            AtomicPatternParameters {
                input_lwe_dimension: LweDimension(param.big_lwe_dim()),
                ks_decomposition_parameter: KsDecompositionParameters {
                    level: param.level_ks,
                    log2_base: param.base_log_ks,
                },
                internal_lwe_dimension: LweDimension(param.small_lwe_dim),
                br_decomposition_parameter: BrDecompositionParameters {
                    level: param.level_pbs,
                    log2_base: param.base_log_pbs,
                },
                output_glwe_params: GlweParameters {
                    log2_polynomial_size: param.log_poly_size,
                    glwe_dimension: param.glwe_dim,
                },
            },
            64,
        )
    }
}
/// Total cost of one CJP atomic pattern: levelled multisum + keyswitch + PBS,
/// using the shared CPU complexity model.
#[allow(dead_code)]
pub fn cjp_complexity(
    sum_size: u64,
    params: AtomicPatternParameters,
    ciphertext_modulus_log: u32,
) -> f64 {
    let model = concrete_optimizer::computing_cost::cpu::CpuComplexity::default();
    // Levelled (dot-product) part, evaluated on the input LWE dimension.
    let multisum = model.levelled_complexity(
        sum_size,
        params.input_lwe_dimension,
        ciphertext_modulus_log,
    );
    let ks = model.ks_complexity(params.ks_parameters(), ciphertext_modulus_log);
    let pbs = model.pbs_complexity(params.pbs_parameters(), ciphertext_modulus_log);
    multisum + ks + pbs
}
/// Raw (untightened) CJP search space: one range per free parameter.
/// The `_pbs` ranges are currently unused (see NOTE in `to_tighten`).
struct CJPSearchSpace {
    range_base_log_ks: MyRange,
    range_level_ks: MyRange,
    _range_base_log_pbs: MyRange,
    _range_level_pbs: MyRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
}
impl CJPSearchSpace {
    /// Prunes the (base_log, level) grids down to Pareto-optimal pairs.
    ///
    /// For every (N, k, n) triple and every level, only the base_log giving the
    /// lowest noise is kept, and a pair is retained only if raising the level
    /// actually reduced the best achievable noise. The union over all triples
    /// (sorted + deduped) is returned as explicit lists.
    fn to_tighten(&self, security_level: u64) -> CJPSearchSpaceTighten {
        // Keyswitch
        let mut ks_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for n in self.range_small_lwe_dim.to_std_range() {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_ks.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_ks.to_std_range() {
                            let variance_ksk = minimal_variance_lwe(n, 64, security_level);
                            let v_ks = variance_keyswitch(
                                (1 << log_N) * k,
                                baselog,
                                level,
                                64,
                                variance_ksk,
                            );
                            // `<=` keeps the largest base_log among ties.
                            if v_ks <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_ks;
                                current_pair = (baselog, level)
                            }
                        }
                        // Keep the pair only if this level improved on all
                        // smaller levels (Pareto front on level vs noise).
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            ks_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        // PBS
        // NOTE(review): this section iterates `range_level_ks` (1..40) and
        // `range_base_log_ks` (1..40) instead of `_range_level_pbs` (1..53) /
        // `_range_base_log_pbs` (1..40). This looks like a copy-paste from the
        // KS section and silently narrows the PBS level scan — confirm intent;
        // the GBA variant scans its dedicated PBS ranges.
        let mut pbs_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for n in self.range_small_lwe_dim.to_std_range() {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_ks.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_ks.to_std_range() {
                            let variance_bsk =
                                minimal_variance_glwe(k, 1 << log_N, 64, security_level);
                            let v_pbs = variance_blind_rotate(
                                n,
                                k,
                                1 << log_N,
                                baselog,
                                level,
                                64,
                                variance_bsk,
                            );
                            if v_pbs <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_pbs;
                                current_pair = (baselog, level)
                            }
                        }
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            pbs_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        ks_decomp.sort();
        ks_decomp.dedup();
        pbs_decomp.sort();
        pbs_decomp.dedup();
        println!("Only {} couples left for keyswitch", ks_decomp.len());
        println!("Only {} couples left for bootstrap", pbs_decomp.len());
        CJPSearchSpaceTighten {
            range_base_log_level_ks: ExplicitRange(ks_decomp.clone()),
            range_base_log_level_pbs: ExplicitRange(pbs_decomp.clone()),
            range_glwe_dim: self.range_glwe_dim,
            range_log_poly_size: self.range_log_poly_size,
            range_small_lwe_dim: self.range_small_lwe_dim,
        }
    }
}
/// CJP search space after tightening: the decomposition (base_log, level)
/// grids are replaced by explicit lists of Pareto-optimal pairs.
#[derive(Clone)]
struct CJPSearchSpaceTighten {
    range_base_log_level_ks: ExplicitRange,
    range_base_log_level_pbs: ExplicitRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
}
impl CJPSearchSpaceTighten {
    /// Dummy parallel iterator: yields one all-zero `CJPParams` per glwe_dim
    /// candidate. NOTE(review): placeholder stub (hence `#[allow(unused)]`);
    /// it does not enumerate the search space and must not be used for a
    /// real search.
    #[allow(unused)]
    #[rustfmt::skip]
    fn par_iter(self) -> impl rayon::iter::ParallelIterator<Item=CJPParams> {
        self.range_glwe_dim
            .to_std_range()
            .into_par_iter().map(|_k| CJPParams {
                base_log_ks: 0,
                level_ks: 0,
                base_log_pbs: 0,
                level_pbs: 0,
                glwe_dim: 0,
                log_poly_size: 0,
                small_lwe_dim: 0,
            })
    }

    /// Sequential cartesian product of the tightened search space.
    /// Polynomial sizes too small to carry `precision` message bits plus the
    /// minimal modulus-switching noise are skipped via
    /// `to_std_range_poly_size`.
    fn iter(self, precision: u64, minimal_ms_value: u64) -> impl Iterator<Item = CJPParams> {
        self.range_base_log_level_ks
            .into_iter()
            .flat_map(move |(base_log_ks, level_ks)| {
                self.range_base_log_level_pbs.clone().into_iter().flat_map(
                    move |(base_log_pbs, level_pbs)| {
                        self.range_glwe_dim
                            .to_std_range()
                            .flat_map(move |glwe_dim| {
                                self.range_log_poly_size
                                    .to_std_range_poly_size(precision + minimal_ms_value)
                                    .flat_map(move |log_poly_size| {
                                        // n is stepped by STEP to thin the grid.
                                        self.range_small_lwe_dim.to_std_range().step_by(STEP).map(
                                            move |small_lwe_dim| CJPParams {
                                                base_log_ks,
                                                level_ks,
                                                base_log_pbs,
                                                level_pbs,
                                                glwe_dim,
                                                log_poly_size,
                                                small_lwe_dim,
                                            },
                                        )
                                    })
                            })
                    },
                )
            })
    }
}
/// Runs the brute-force CJP parameter search for every (precision, log-norm)
/// pair, writes the solutions to `writer` and prints the wall-clock time.
///
/// `p_fail` is the target failure probability used to derive the per-PBS
/// variance bound. Panics if writing to `writer` fails.
pub fn solve_all_cjp(p_fail: f64, writer: impl Write) {
    let start = Instant::now();
    // Message precisions (in bits) to optimize for.
    let precisions = 1..24;
    // log2 of the 2-norm of the multisum weights.
    let log_norms = vec![4, 6, 8, 10];
    // Full search space; tightened below to Pareto-optimal decompositions at
    // 128-bit security.
    let a = CJPSearchSpace {
        range_base_log_ks: MyRange(1, 40),
        range_level_ks: MyRange(1, 40),
        _range_base_log_pbs: MyRange(1, 40),
        _range_level_pbs: MyRange(1, 53),
        range_glwe_dim: MyRange(1, 7),
        range_log_poly_size: MyRange(8, 19),
        range_small_lwe_dim: MyRange(500, 1500),
    };
    // Lower bound (std-dev, ceiled) on the noise added by modulus switching
    // for the smallest big-LWE dimension considered; used by `iter` to skip
    // polynomial sizes that cannot carry the message.
    let minimal_ms_value = minimal_added_noise_by_modulus_switching(
        (1 << a.range_log_poly_size.0) * a.range_glwe_dim.0,
    )
    .sqrt()
    .ceil() as u64;
    let a_tighten = a.to_tighten(128);
    // One independent optimization per (precision, log_norm), run in parallel.
    let res: Vec<Solution<CJPParams>> = precisions
        .into_par_iter()
        .flat_map(|precision| {
            log_norms
                .clone()
                .into_par_iter()
                .map(|log_norm| {
                    let config = CJPConstraint {
                        // Variance bound assuming 2 bits of padding.
                        variance_constraint: error::safe_variance_bound_2padbits(
                            precision, 64, p_fail,
                        ),
                        log_norm2: log_norm,
                        security_level: 128,
                        sum_size: 4096,
                    };
                    let intem =
                        config.brute_force(a_tighten.clone().iter(precision, minimal_ms_value));
                    Solution {
                        precision,
                        log_norm,
                        intem,
                    }
                })
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();
    let duration = start.elapsed();
    println!(
        "Optimization took: {:?} min",
        duration.as_secs() as f64 / 60.
    );
    write_to_file(writer, &res).unwrap();
}
/// Writes `res` as an aligned CSV-like table to `writer`.
///
/// Solutions with no feasible parameter set (`intem == None`) are skipped.
///
/// # Errors
/// Propagates any I/O error from `writer`.
pub fn write_to_file(
    mut writer: impl Write,
    res: &[Solution<CJPParams>],
) -> Result<(), std::io::Error> {
    writeln!(
        writer,
        " p,log(nu), k, N, n, br_l,br_b, ks_l,ks_b, cost"
    )?;
    for Solution {
        precision,
        log_norm,
        intem,
    } in res.iter()
    {
        // `if let` replaces the original `match … None => {}` (clippy: single_match).
        if let Some((solution, cost)) = intem {
            writeln!(
                writer,
                " {:2}, {:2}, {:2}, {:2}, {:4}, {:2}, {:2}, {:2}, {:2}, {:6}",
                precision,
                log_norm,
                solution.glwe_dim,
                solution.log_poly_size,
                solution.small_lwe_dim,
                solution.level_pbs,
                solution.base_log_pbs,
                solution.level_ks,
                solution.base_log_ks,
                cost
            )?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,501 @@
use crate::generic::{Problem, SequentialProblem};
use crate::{
minimal_added_noise_by_modulus_switching, pbs_p_fail_from_global_p_fail, ExplicitRange,
MyRange, Solution, STEP,
};
use concrete_cpu_noise_model::gaussian_noise::noise::blind_rotate::variance_blind_rotate;
use concrete_cpu_noise_model::gaussian_noise::noise::keyswitch::variance_keyswitch;
use concrete_cpu_noise_model::gaussian_noise::noise::modulus_switching::estimate_modulus_switching_noise_with_binary_key;
use concrete_optimizer::computing_cost::complexity_model::ComplexityModel;
use concrete_optimizer::noise_estimator::error;
use concrete_optimizer::parameters::{
BrDecompositionParameters, GlweParameters, KeyswitchParameters, KsDecompositionParameters,
LweDimension, PbsParameters,
};
use concrete_security_curves::gaussian::security::{minimal_variance_glwe, minimal_variance_lwe};
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
use std::io::Write;
use std::time::Instant;
// use rayon_cond::CondIterator;
/// Candidate parameter set for the GBA tree-PBS, extending the usual atomic
/// pattern with a private functional packing keyswitch (FPKS).
#[derive(Debug, Clone, Copy)]
pub struct GBAParams {
    /// log2 of the keyswitch decomposition base.
    base_log_ks: u64,
    /// Number of keyswitch decomposition levels.
    level_ks: u64,
    /// log2 of the blind-rotation (PBS) decomposition base.
    base_log_pbs: u64,
    /// Number of PBS decomposition levels.
    level_pbs: u64,
    /// GLWE dimension k.
    glwe_dim: u64,
    /// log2 of the polynomial size N.
    log_poly_size: u64,
    /// Small (internal) LWE dimension n.
    small_lwe_dim: u64,
    /// log2 of the packing-keyswitch decomposition base.
    base_log_fpks: u64,
    /// Number of packing-keyswitch decomposition levels.
    level_fpks: u64,
}
impl GBAParams {
    /// Dimension of the large LWE ciphertext: `k * N` with `N = 2^log_poly_size`.
    fn big_lwe_dim(&self) -> u64 {
        self.glwe_dim * (1u64 << self.log_poly_size)
    }
}
/// Noise/cost constraint data for the GBA (tree-PBS) brute-force search.
struct GBAConstraint {
    /// Upper bound on the output noise variance.
    variance_constraint: f64,
    /// log2 of the 2-norm of the multisum weights.
    log_norm2: u64,
    /// Target security level in bits (e.g. 128).
    security_level: u64,
    /// Number of terms in each levelled multisum.
    sum_size: u64,
    /// Per-input message precision in bits.
    precision: u64,
    /// Number of inputs combined by the tree-PBS.
    nb_inputs: u64,
}
impl Problem for GBAConstraint {
    type Param = GBAParams;

    /// Noise feasibility of the tree-PBS over `nb_inputs` inputs of
    /// `precision` bits each:
    /// `v_tree * nu^2 + v_ks + v_ms < variance_constraint`.
    fn verify(&self, param: Self::Param) -> bool {
        let poly_size = 1 << param.log_poly_size;
        // Minimal secure variance for an LWE key of dimension n (64-bit modulus).
        let variance_ksk = minimal_variance_lwe(param.small_lwe_dim, 64, self.security_level);
        // Noise added by keyswitching from the big to the small LWE dimension.
        let v_ks = variance_keyswitch(
            param.big_lwe_dim(),
            param.base_log_ks,
            param.level_ks,
            64,
            variance_ksk,
        );
        // Minimal secure variance for the GLWE bootstrapping key.
        let variance_bsk =
            minimal_variance_glwe(param.glwe_dim, poly_size, 64, self.security_level);
        // Fresh noise at the output of one blind rotation (PBS).
        let v_pbs = variance_blind_rotate(
            param.small_lwe_dim,
            param.glwe_dim,
            poly_size,
            param.base_log_pbs,
            param.level_pbs,
            64,
            variance_bsk,
        );
        let square = |x| x * x;
        // Noise after combining the 2^p per-function PBS outputs, each scaled
        // by a weight of magnitude up to 2^p - 1 (variance scales with the
        // squared weights).
        let v_after_functions =
            v_pbs * (1 << self.precision) as f64 * square((1 << self.precision) as f64 - 1.);
        // Private functional packing keyswitch: per-coefficient KS noise,
        // scaled by the polynomial size.
        let v_pp_keyswitch = poly_size as f64
            * variance_keyswitch(
                param.big_lwe_dim(),
                param.base_log_fpks,
                param.level_fpks,
                64,
                variance_bsk,
            );
        let v_tree_pbs =
            // cim noise
            v_after_functions
            // noise other layers
            + (self.nb_inputs - 1) as f64 * (v_pp_keyswitch + v_pbs);
        // Noise added by switching the modulus down before the blind rotation.
        let v_ms = estimate_modulus_switching_noise_with_binary_key(
            param.small_lwe_dim,
            param.log_poly_size,
            64,
        );
        v_tree_pbs * (1 << (2 * self.log_norm2)) as f64 + v_ks + v_ms < self.variance_constraint
    }

    /// Full cost of the tree-PBS for `param`: per-input multisums and
    /// keyswitches, the cim stage (PBS + FFT + element-wise products + iFFT),
    /// and all packing-keyswitch + PBS layers of the tree.
    fn cost(&self, param: Self::Param) -> f64 {
        let complexity_model = concrete_optimizer::computing_cost::cpu::CpuComplexity::default();
        // One levelled multisum per input, on the big LWE dimension.
        let multisum_complexity = self.nb_inputs as f64
            * complexity_model.levelled_complexity(
                self.sum_size,
                LweDimension(param.big_lwe_dim()),
                64,
            );
        let ks_decomposition_parameter = KsDecompositionParameters {
            level: param.level_ks,
            log2_base: param.base_log_ks,
        };
        let ks_parameter = KeyswitchParameters {
            input_lwe_dimension: LweDimension(param.big_lwe_dim()),
            output_lwe_dimension: LweDimension(param.small_lwe_dim),
            ks_decomposition_parameter,
        };
        let ks_complexity = complexity_model.ks_complexity(ks_parameter, 64);
        let pbs_decomposition_parameter = BrDecompositionParameters {
            level: param.level_pbs,
            log2_base: param.base_log_pbs,
        };
        let pbs_parameter = PbsParameters {
            internal_lwe_dimension: LweDimension(param.small_lwe_dim),
            br_decomposition_parameter: pbs_decomposition_parameter,
            output_glwe_params: GlweParameters {
                log2_polynomial_size: param.log_poly_size,
                glwe_dimension: param.glwe_dim,
            },
        };
        let pbs_complexity = complexity_model.pbs_complexity(pbs_parameter, 64);
        // Cost of one size-N FFT/iFFT: N * log2(N).
        let fft_cost = (1 << param.log_poly_size) as f64 * param.log_poly_size as f64;
        let k = pbs_parameter.output_glwe_params.glwe_dimension as f64;
        let message_modulus = (1 << self.precision) as f64;
        // cim stage: multisum + KS + PBS, plus (k+1) forward FFTs, the
        // element-wise products over all m^(nb_inputs-1) selectors, and the
        // matching inverse FFTs.
        let complexity_cim_pbs = multisum_complexity
            + ks_complexity
            + pbs_complexity
            // FFT
            + (k + 1.) * fft_cost +
            // element wise multplication
            (k + 1.) * self.nb_inputs as f64 * f64::powi(message_modulus, self.nb_inputs as i32 - 1) * (1 << param.log_poly_size) as f64 +
            // iFFT
            (k + 1.) * self.nb_inputs as f64 * f64::powi(message_modulus, self.nb_inputs as i32 - 1) * fft_cost;
        let pp_keyswitch_decomposition_parameter = KsDecompositionParameters {
            level: param.level_fpks,
            log2_base: param.base_log_fpks,
        };
        let pp_keyswitch_parameter = KeyswitchParameters {
            input_lwe_dimension: LweDimension(param.big_lwe_dim()),
            output_lwe_dimension: LweDimension(param.big_lwe_dim()),
            ks_decomposition_parameter: pp_keyswitch_decomposition_parameter,
        };
        // One packing KS per message value, plus the (k+1)*N additions to
        // accumulate the packed GLWE.
        let complexity_packing_ks = message_modulus
            * (complexity_model.ks_complexity(pp_keyswitch_parameter, 64)
            + (1 << param.log_poly_size) as f64 * (k + 1.));
        // Number of tree nodes below the cim layer is the geometric series
        // (m^(nb_inputs-1) - 1)/(m - 1), repeated for each input.
        let complexity_all_ppks = self.nb_inputs as f64
            * complexity_packing_ks
            * ((f64::powi(message_modulus, self.nb_inputs as i32 - 1) - 1.)
            / (message_modulus - 1.));
        let complexity_all_pbs = self.nb_inputs as f64
            * pbs_complexity
            * ((f64::powi(message_modulus, self.nb_inputs as i32 - 1) - 1.)
            / (message_modulus - 1.));
        let complexity_all_ppks_pbs = complexity_all_pbs + complexity_all_ppks;
        complexity_cim_pbs + complexity_all_ppks_pbs + (self.nb_inputs - 1) as f64 * ks_complexity
    }
}
/// Raw (untightened) GBA search space: one range per free parameter,
/// including the packing-keyswitch (FPKS) decomposition ranges.
struct GBASearchSpace {
    range_base_log_ks: MyRange,
    range_level_ks: MyRange,
    range_base_log_pbs: MyRange,
    range_level_pbs: MyRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
    range_base_log_fpks: MyRange,
    range_level_fpks: MyRange,
}
impl GBASearchSpace {
    /// Prunes the KS, PBS and FPKS (base_log, level) grids down to
    /// Pareto-optimal pairs: for every (N, k, n) triple and every level, only
    /// the lowest-noise base_log is kept, and a pair is retained only if
    /// raising the level actually reduced the best achievable noise. Unlike
    /// the CGGI/CJP variants, the PBS section scans its dedicated PBS ranges.
    fn to_tighten(&self, security_level: u64) -> GBASearchSpaceTighten {
        // Keyswitch
        let mut ks_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for n in self.range_small_lwe_dim.to_std_range() {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_ks.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_ks.to_std_range() {
                            let variance_ksk = minimal_variance_lwe(n, 64, security_level);
                            let v_ks = variance_keyswitch(
                                (1 << log_N) * k,
                                baselog,
                                level,
                                64,
                                variance_ksk,
                            );
                            // `<=` keeps the largest base_log among ties.
                            if v_ks <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_ks;
                                current_pair = (baselog, level)
                            }
                        }
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            ks_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        // PBS
        let mut pbs_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for n in self.range_small_lwe_dim.to_std_range() {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_pbs.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_pbs.to_std_range() {
                            let variance_bsk =
                                minimal_variance_glwe(k, 1 << log_N, 64, security_level);
                            let v_pbs = variance_blind_rotate(
                                n,
                                k,
                                1 << log_N,
                                baselog,
                                level,
                                64,
                                variance_bsk,
                            );
                            if v_pbs <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_pbs;
                                current_pair = (baselog, level)
                            }
                        }
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            pbs_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        // FPKS
        // `_n` is unused: the packing-keyswitch noise does not depend on the
        // small LWE dimension.
        let mut fpks_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for _n in self.range_small_lwe_dim.to_std_range() {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_fpks.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_fpks.to_std_range() {
                            let variance_bsk =
                                minimal_variance_glwe(k, 1 << log_N, 64, security_level);
                            // Per-coefficient KS noise scaled by N.
                            let v_pp_keyswitch = (1 << log_N) as f64
                                * variance_keyswitch(
                                    (1 << log_N) * k,
                                    baselog,
                                    level,
                                    64,
                                    variance_bsk,
                                );
                            if v_pp_keyswitch <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_pp_keyswitch;
                                current_pair = (baselog, level)
                            }
                        }
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            fpks_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        ks_decomp.sort();
        ks_decomp.dedup();
        pbs_decomp.sort();
        pbs_decomp.dedup();
        fpks_decomp.sort();
        fpks_decomp.dedup();
        println!("Only {} couples left for keyswitch", ks_decomp.len());
        println!("Only {} couples left for bootstrap", pbs_decomp.len());
        println!("Only {} couples left for fpks", fpks_decomp.len());
        GBASearchSpaceTighten {
            range_base_log_level_ks: ExplicitRange(ks_decomp.clone()),
            range_base_log_level_pbs: ExplicitRange(pbs_decomp.clone()),
            range_base_log_level_fpks: ExplicitRange(fpks_decomp.clone()),
            range_glwe_dim: self.range_glwe_dim,
            range_log_poly_size: self.range_log_poly_size,
            range_small_lwe_dim: self.range_small_lwe_dim,
        }
    }
}
/// GBA search space after tightening: the KS, PBS and FPKS decomposition
/// grids are replaced by explicit lists of Pareto-optimal pairs.
#[derive(Clone)]
struct GBASearchSpaceTighten {
    range_base_log_level_ks: ExplicitRange,
    range_base_log_level_pbs: ExplicitRange,
    range_base_log_level_fpks: ExplicitRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
}
impl GBASearchSpaceTighten {
    /// Dummy parallel iterator: yields one all-zero `GBAParams` per glwe_dim
    /// candidate. NOTE(review): placeholder stub (hence `#[allow(unused)]`);
    /// it does not enumerate the search space and must not be used for a
    /// real search.
    #[allow(unused)]
    #[rustfmt::skip]
    fn par_iter(self) -> impl rayon::iter::ParallelIterator<Item=GBAParams> {
        self.range_glwe_dim
            .to_std_range()
            .into_par_iter().map(|_k| GBAParams {
                base_log_ks: 0,
                level_ks: 0,
                base_log_pbs: 0,
                level_pbs: 0,
                glwe_dim: 0,
                log_poly_size: 0,
                small_lwe_dim: 0,
                base_log_fpks: 0,
                level_fpks: 0,
            })
    }

    /// Sequential cartesian product of the tightened search space
    /// (FPKS x KS x PBS x k x N x n). Polynomial sizes too small to carry
    /// `precision` message bits plus the minimal modulus-switching noise are
    /// skipped via `to_std_range_poly_size`.
    fn iter(self, precision: u64, minimal_ms_value: u64) -> impl Iterator<Item = GBAParams> {
        self.range_base_log_level_fpks.clone().into_iter().flat_map(
            move |(base_log_fpks, level_fpks)| {
                // Cloned so the inner closures can each own a copy.
                let interm_range_base_log_level_pbs = self.range_base_log_level_pbs.clone();
                self.range_base_log_level_ks.clone().into_iter().flat_map(
                    move |(base_log_ks, level_ks)| {
                        interm_range_base_log_level_pbs
                            .clone()
                            .into_iter()
                            .flat_map(move |(base_log_pbs, level_pbs)| {
                                self.range_glwe_dim
                                    .to_std_range()
                                    .flat_map(move |glwe_dim| {
                                        self.range_log_poly_size
                                            .to_std_range_poly_size(precision + minimal_ms_value)
                                            .flat_map(move |log_poly_size| {
                                                // n is stepped by STEP to thin the grid.
                                                self.range_small_lwe_dim
                                                    .to_std_range()
                                                    .step_by(STEP)
                                                    .map(move |small_lwe_dim| GBAParams {
                                                        base_log_ks,
                                                        level_ks,
                                                        base_log_pbs,
                                                        level_pbs,
                                                        glwe_dim,
                                                        log_poly_size,
                                                        small_lwe_dim,
                                                        base_log_fpks,
                                                        level_fpks,
                                                    })
                                            })
                                    })
                            })
                    },
                )
            },
        )
    }
}
/// Runs the brute-force GBA (tree-PBS) parameter search for every
/// (precision, log-norm) pair, writes the solutions to `writer` and prints
/// the wall-clock time. `p_fail` is the global failure probability, split
/// across all PBS of the tree by `gba_p_fail_per_pbs`.
pub fn solve_all_gba(p_fail: f64, writer: impl Write) {
    // Number of inputs combined by the tree-PBS (fixed to 2 here).
    let nb_inputs = 2;
    let start = Instant::now();
    // Per-input message precisions (in bits) to optimize for.
    let precisions = 1..9;
    // log2 of the 2-norm of the multisum weights.
    let log_norms = vec![4, 6, 8, 10];
    // find the minimal added noise by the modulus switching
    // for KS
    let a = GBASearchSpace {
        range_base_log_ks: MyRange(1, 40),
        range_level_ks: MyRange(1, 25),
        range_base_log_pbs: MyRange(1, 40),
        range_level_pbs: MyRange(1, 25),
        range_glwe_dim: MyRange(1, 7),
        range_log_poly_size: MyRange(8, 16),
        range_small_lwe_dim: MyRange(500, 1000),
        range_base_log_fpks: MyRange(1, 40),
        range_level_fpks: MyRange(1, 25),
    };
    // Lower bound (std-dev, ceiled) on the modulus-switching noise for the
    // smallest big-LWE dimension considered; used to skip undersized N.
    let minimal_ms_value = minimal_added_noise_by_modulus_switching(
        (1 << a.range_log_poly_size.0) * a.range_glwe_dim.0,
    )
    .sqrt()
    .ceil() as u64;
    let a_tighten = a.to_tighten(128);
    // One independent optimization per (precision, log_norm), run in parallel.
    let res: Vec<Solution<GBAParams>> = precisions
        .into_par_iter()
        .flat_map(|precision| {
            log_norms
                .clone()
                .into_par_iter()
                .map(|log_norm| {
                    let config = GBAConstraint {
                        // Variance bound (2 padding bits), with the failure
                        // probability shared across all PBS of the tree.
                        variance_constraint: error::safe_variance_bound_2padbits(
                            precision,
                            64,
                            gba_p_fail_per_pbs(precision, nb_inputs, p_fail),
                        ),
                        log_norm2: log_norm,
                        security_level: 128,
                        sum_size: 4096,
                        precision,
                        nb_inputs,
                    };
                    let intem =
                        config.brute_force(a_tighten.clone().iter(precision, minimal_ms_value));
                    Solution {
                        precision,
                        log_norm,
                        intem,
                    }
                })
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();
    let duration = start.elapsed();
    println!(
        "Optimization took: {:?} min",
        duration.as_secs() as f64 / 60.
    );
    write_to_file(writer, &res, nb_inputs).unwrap();
}
/// Splits a global failure probability across every PBS of the GBA tree.
///
/// With `m = 2^precision`, the tree evaluates
/// `nb_input * (m^(nb_input-1) - 1) / (m - 1) + 1` PBS in total; each one is
/// allotted an equal share of `p_fail`.
pub fn gba_p_fail_per_pbs(precision: u64, nb_input: u64, p_fail: f64) -> f64 {
    let message_modulus = (1u64 << precision) as f64;
    // Geometric series counting the PBS below the first (cim) layer.
    let lower_layers = f64::powi(message_modulus, nb_input as i32 - 1) - 1.;
    let nb_total_pbs = nb_input as f64 * (lower_layers / (message_modulus - 1.)) + 1.;
    pbs_p_fail_from_global_p_fail(nb_total_pbs as u64, p_fail)
}
/// Writes `res` as an aligned CSV-like table to `writer`.
///
/// The first column reports the combined precision `nb_inputs * precision`.
/// Solutions with no feasible parameter set (`intem == None`) are skipped.
///
/// # Errors
/// Propagates any I/O error from `writer`.
pub fn write_to_file(
    mut writer: impl Write,
    res: &[Solution<GBAParams>],
    nb_inputs: u64,
) -> Result<(), std::io::Error> {
    writeln!(
        writer,
        " p,log(nu), k, N, n, br_l,br_b, ks_l,ks_b,fpks_l,fpks_b, cost"
    )?;
    for Solution {
        precision,
        log_norm,
        intem,
    } in res.iter()
    {
        // `if let` replaces the original `match … None => {}` (clippy: single_match).
        if let Some((solution, cost)) = intem {
            writeln!(
                writer,
                " {:2}, {:2}, {:2}, {:2}, {:4}, {:2}, {:2}, {:2}, {:2}, {:2}, {:2}, {:6}",
                nb_inputs * precision,
                log_norm,
                solution.glwe_dim,
                solution.log_poly_size,
                solution.small_lwe_dim,
                solution.level_pbs,
                solution.base_log_pbs,
                solution.level_ks,
                solution.base_log_ks,
                solution.level_fpks,
                solution.base_log_fpks,
                cost
            )?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,66 @@
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
/// A parameterized feasibility/cost problem solved by brute-force search.
pub trait Problem {
    /// Candidate parameter set explored by the search.
    type Param;
    /// Returns `true` when `param` satisfies the problem's constraints
    /// (e.g. a noise/variance bound).
    fn verify(&self, param: Self::Param) -> bool;
    /// Returns the cost of `param`; lower is better.
    fn cost(&self, param: Self::Param) -> f64;
}
/// Brute-force search over a rayon parallel iterator of candidates.
/// Blanket-implemented for every `Problem` whose params are `Send + Copy`.
pub trait ParallelBruteForcableProblem: Problem + Sync
where
    Self::Param: Send + Copy,
{
    /// Returns the feasible candidate with minimal cost, or `None` when no
    /// candidate passes `verify`.
    fn brute_force_parallel(
        &self,
        params: impl rayon::iter::ParallelIterator<Item = Self::Param>,
    ) -> Option<(Self::Param, f64)>;
}
impl<T> ParallelBruteForcableProblem for T
where
    T: Problem + Sync,
    T::Param: Send + Copy,
{
    /// Keeps only candidates passing `verify` and returns the cheapest one.
    fn brute_force_parallel(
        &self,
        params: impl rayon::iter::ParallelIterator<Item = Self::Param>,
    ) -> Option<(Self::Param, f64)> {
        // `params` is already a `ParallelIterator`, so the former
        // `.into_par_iter()` call was a no-op and has been dropped.
        params
            .filter_map(|param| {
                if self.verify(param) {
                    Some((param, self.cost(param)))
                } else {
                    None
                }
            })
            // `total_cmp` is a total order on f64 and cannot panic, unlike
            // the previous `partial_cmp(..).unwrap()` which dies on NaN costs.
            .min_by(|(_, cost1), (_, cost2)| cost1.total_cmp(cost2))
    }
}
/// Brute-force search over a sequential iterator of candidates.
/// Blanket-implemented for every `Problem` whose params are `Send + Copy`.
pub trait SequentialProblem: Problem + Sync
where
    Self::Param: Send + Copy,
{
    /// Returns the feasible candidate with minimal cost, or `None` when no
    /// candidate passes `verify`.
    fn brute_force(&self, params: impl Iterator<Item = Self::Param>) -> Option<(Self::Param, f64)>;
}
impl<T> SequentialProblem for T
where
    T: Problem + Sync,
    T::Param: Send + Copy,
{
    /// Keeps only candidates passing `verify` and returns the cheapest one.
    fn brute_force(&self, params: impl Iterator<Item = Self::Param>) -> Option<(Self::Param, f64)> {
        // `params` is already an `Iterator`; the former `.into_iter()` call
        // was a no-op and has been dropped.
        params
            .filter_map(|param| {
                if self.verify(param) {
                    Some((param, self.cost(param)))
                } else {
                    None
                }
            })
            // `total_cmp` is a total order on f64 and cannot panic, unlike
            // the previous `partial_cmp(..).unwrap()` which dies on NaN costs.
            .min_by(|(_, cost1), (_, cost2)| cost1.total_cmp(cost2))
    }
}

View File

@@ -0,0 +1,205 @@
use crate::generic::{Problem, SequentialProblem};
use crate::{MyRange, Solution};
use concrete_cpu_noise_model::gaussian_noise::noise::blind_rotate::variance_blind_rotate;
use concrete_cpu_noise_model::gaussian_noise::noise::modulus_switching::estimate_modulus_switching_noise_with_binary_key;
use concrete_optimizer::computing_cost::complexity_model::ComplexityModel;
use concrete_optimizer::noise_estimator::error;
use concrete_optimizer::parameters::{
BrDecompositionParameters, GlweParameters, LweDimension, PbsParameters,
};
use concrete_security_curves::gaussian::security::minimal_variance_glwe;
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
use std::io::Write;
// use rayon_cond::CondIterator;
/// Candidate parameter set for the keyswitch-free atomic pattern
/// (a single PBS, no preceding keyswitch).
#[derive(Debug, Clone, Copy)]
pub struct KSFreeParams {
    base_log_pbs: u64,  // log2 of the PBS decomposition base
    level_pbs: u64,     // number of PBS decomposition levels
    glwe_dim: u64,      // GLWE dimension k
    log_poly_size: u64, // log2 of the polynomial size N
}
impl KSFreeParams {
    /// Dimension of the flattened GLWE secret viewed as LWE: `k * N`.
    fn big_lwe_dim(&self) -> u64 {
        // Same value as `glwe_dim * 2^log_poly_size`, in one expression.
        self.glwe_dim * (1 << self.log_poly_size)
    }
}
/// Constraint set for the KS-free search.
struct KSFreeConstraint {
    variance_constraint: f64, // maximal tolerated output variance
    log_norm2: u64,           // log2 of the 2-norm of the multisum weights
    security_level: u64,      // e.g. 128 bits
    sum_size: u64,            // number of terms in the levelled multisum
}
impl Problem for KSFreeConstraint {
    type Param = KSFreeParams;

    /// Feasibility: PBS noise amplified by the squared 2-norm
    /// (`2^(2*log_norm2)`) plus modulus-switching noise must stay below the
    /// variance bound. There is no keyswitch term in this atomic pattern.
    fn verify(&self, param: Self::Param) -> bool {
        let poly_size = 1 << param.log_poly_size;
        // Minimal secure noise for the bootstrapping key at this security level.
        let variance_bsk =
            minimal_variance_glwe(param.glwe_dim, poly_size, 64, self.security_level);
        let v_pbs = variance_blind_rotate(
            param.big_lwe_dim(),
            param.glwe_dim,
            poly_size,
            param.base_log_pbs,
            param.level_pbs,
            64,
            variance_bsk,
        );
        let v_ms = estimate_modulus_switching_noise_with_binary_key(
            param.big_lwe_dim(),
            param.log_poly_size,
            64,
        );
        v_pbs * (1 << (2 * self.log_norm2)) as f64 + v_ms < self.variance_constraint
    }

    /// Cost: one levelled multisum plus one PBS, per the CPU cost model.
    fn cost(&self, param: Self::Param) -> f64 {
        let complexity_model = concrete_optimizer::computing_cost::cpu::CpuComplexity::default();
        let multisum_complexity = complexity_model.levelled_complexity(
            self.sum_size,
            LweDimension(param.big_lwe_dim()),
            64,
        );
        let pbs_parameter = PbsParameters {
            internal_lwe_dimension: LweDimension(param.big_lwe_dim()),
            br_decomposition_parameter: BrDecompositionParameters {
                level: param.level_pbs,
                log2_base: param.base_log_pbs,
            },
            output_glwe_params: GlweParameters {
                log2_polynomial_size: param.log_poly_size,
                glwe_dimension: param.glwe_dim,
            },
        };
        let pbs_complexity = complexity_model.pbs_complexity(pbs_parameter, 64);
        multisum_complexity + pbs_complexity
    }
}
/// Ranges explored for each KS-free parameter.
struct KSFreeSearchSpace {
    range_base_log_pbs: MyRange,
    range_level_pbs: MyRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
}
impl KSFreeSearchSpace {
    /// Placeholder parallel iterator: yields one all-zero parameter set per
    /// GLWE dimension. NOTE(review): not a real search — confirm this stub
    /// is intentional before using it.
    #[allow(unused)]
    #[rustfmt::skip]
    fn par_iter(self) -> impl rayon::iter::ParallelIterator<Item=KSFreeParams> {
        self.range_glwe_dim
            .to_std_range()
            .into_par_iter().map(|_k| KSFreeParams {
                base_log_pbs: 0,
                level_pbs: 0,
                glwe_dim: 0,
                log_poly_size: 0,
            })
    }

    /// Cartesian product of all PBS decomposition and GLWE shape candidates.
    /// `_precision` is currently unused: the tightened level range is
    /// commented out below.
    fn iter(self, _precision: u64) -> impl Iterator<Item = KSFreeParams> {
        self.range_base_log_pbs
            .to_std_range()
            .flat_map(move |base_log_pbs| {
                self.range_level_pbs
                    // .to_std_range_tight(base_log_pbs, precision)
                    .to_std_range()
                    .flat_map(move |level_pbs| {
                        self.range_glwe_dim
                            .to_std_range()
                            .flat_map(move |glwe_dim| {
                                self.range_log_poly_size
                                    .to_std_range()
                                    .map(move |log_poly_size| KSFreeParams {
                                        base_log_pbs,
                                        level_pbs,
                                        glwe_dim,
                                        log_poly_size,
                                    })
                            })
                    })
            })
    }
}
/// Brute-forces KS-free parameters for precisions 1..=8 and a fixed set of
/// 2-norm exponents, writing the best solution per pair to `writer`.
pub fn solve_all_ksfree(p_fail: f64, writer: impl Write) {
    let precisions = 1..9;
    let log_norms = vec![4, 6, 8, 10];
    let res: Vec<Solution<KSFreeParams>> = precisions
        .into_par_iter()
        .flat_map(|precision| {
            log_norms
                .clone()
                .into_par_iter()
                .map(|log_norm| {
                    // Search space rebuilt per (precision, log_norm) pair
                    // because `iter` consumes it.
                    let a = KSFreeSearchSpace {
                        range_base_log_pbs: MyRange(1, 53),
                        range_level_pbs: MyRange(1, 53),
                        range_glwe_dim: MyRange(1, 7),
                        range_log_poly_size: MyRange(7, 18),
                    };
                    // NOTE(review): unlike LMP/GBA, `p_fail` is used directly
                    // and not split per PBS — confirm this is intended.
                    let config = KSFreeConstraint {
                        variance_constraint: error::safe_variance_bound_2padbits(
                            precision, 64, p_fail,
                        ),
                        log_norm2: log_norm,
                        security_level: 128,
                        sum_size: 4096,
                    };
                    let intem = config.brute_force(a.iter(precision));
                    Solution {
                        precision,
                        log_norm,
                        intem,
                    }
                })
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();
    write_to_file(writer, &res).unwrap();
}
/// Writes KS-free solutions as a CSV-like table; `(precision, log_norm)`
/// pairs without a feasible solution are skipped.
pub fn write_to_file(
    mut writer: impl Write,
    res: &[Solution<KSFreeParams>],
) -> Result<(), std::io::Error> {
    writeln!(writer, " p,log(nu), k, N, br_l,br_b, cost")?;
    for Solution {
        precision,
        log_norm,
        intem,
    } in res.iter()
    {
        // `if let` replaces the former `match` with an empty `None => {}` arm.
        if let Some((solution, cost)) = intem {
            writeln!(
                writer,
                " {:2}, {:2}, {:2}, {:4}, {:2}, {:2}, {:6}",
                precision,
                log_norm,
                solution.glwe_dim,
                solution.log_poly_size,
                solution.level_pbs,
                solution.base_log_pbs,
                cost
            )?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,104 @@
#![allow(non_snake_case)]
use std::ops::Range;
use std::vec::IntoIter;
// Useful because Range<u64> is not Copy
/// Half-open integer range `[start, end)`; `Copy`, unlike `std::ops::Range`.
#[derive(Clone, Copy)]
struct MyRange(u64, u64);
impl MyRange {
    /// Converts to a standard half-open `Range<u64>`.
    pub fn to_std_range(self) -> Range<u64> {
        Range {
            start: self.0,
            end: self.1,
        }
    }

    /// Range for decomposition levels, tightened for a given base log:
    /// `level * baselog` must not exceed the 53 usable mantissa bits, and
    /// should cover at least `precision` bits.
    /// NOTE(review): the original comment says the lower bound should be
    /// `(precision + min(noise MS)) / baselog`, but the code only uses
    /// `precision / baselog` — confirm which is intended.
    pub fn to_std_range_tight(self, baselog: u64, precision: u64) -> Range<u64> {
        Range {
            start: (precision / baselog).max(self.0),
            end: (53 / baselog).min(self.1),
        }
    }

    /// Range for log2(polynomial size): requires `log2 N >= precision + 1`
    /// (one bit of padding).
    pub fn to_std_range_poly_size(self, precision: u64) -> Range<u64> {
        Range {
            start: (precision + 1).max(self.0),
            end: self.1,
        }
    }

    /// Range for the small LWE dimension, capped at the big LWE dimension
    /// `k * N` so the keyswitch never grows the dimension.
    #[allow(unused)]
    pub fn to_std_range_lwe_dim(self, log_poly_size: u64, glwe_dimension: u64) -> Range<u64> {
        Range {
            start: self.0,
            end: self.1.min(glwe_dimension * (1 << log_poly_size)),
        }
    }

    /// Range for the number of "kt zeros": 0 up to
    /// `min(N - 1, N*k - n + 1, 512)`, or 0 when `N*k < n`.
    /// NOTE(review): the original comment said "min(N/-1, N-n)" (garbled),
    /// which does not match the code — confirm the intended upper bound.
    #[allow(unused)]
    pub fn to_std_range_kt_zeros(
        self,
        log_poly_size: u64,
        glwe_dimension: u64,
        small_lwe_dim: u64,
    ) -> Range<u64> {
        let poly_size = 1 << (log_poly_size);
        let tmp = if poly_size * glwe_dimension < small_lwe_dim {
            0
        } else {
            poly_size * glwe_dimension - small_lwe_dim + 1
        };
        Range {
            start: 0,
            end: (((1 << (log_poly_size)) - 1).min(tmp)).min(512),
        }
    }
}
/// Minimal variance added by the modulus switch before a blind rotation,
/// as a function of the LWE dimension (binary secret key assumed upstream).
pub fn minimal_added_noise_by_modulus_switching(lwe_dim: u64) -> f64 {
    let n = lwe_dim as f64;
    // Rounding term of the modulus switch.
    let base = 1. / 12. + n / 24.;
    // Second-order correction, negligible for a 2^64 modulus.
    let correction = (n / 48. - 1. / 12.) * 4. / (f64::exp2(2. * 64.));
    base + correction
}
/// Per-PBS failure probability such that `nb_pbs` independent PBS together
/// fail with probability `global_p_fail`:
/// `p = 1 - (1 - p_global)^(1/nb_pbs)`.
pub fn pbs_p_fail_from_global_p_fail(nb_pbs: u64, global_p_fail: f64) -> f64 {
    let global_success = 1. - global_p_fail;
    let per_pbs_success = f64::powf(global_success, 1. / (nb_pbs as f64));
    1. - per_pbs_success
}
/// An explicit list of `(base_log, level)` pairs kept after pruning a
/// decomposition search space.
#[derive(Clone)]
struct ExplicitRange(Vec<(u64, u64)>);
impl ExplicitRange {
pub fn into_iter(self) -> IntoIter<(u64, u64)> {
self.0.into_iter()
}
}
/// Stride used when scanning the small-LWE-dimension range (coarse search).
const STEP: usize = 4;
pub mod cggi;
pub mod cjp;
pub mod gba;
pub mod generic;
pub mod ks_free;
pub mod lmp;
pub mod multi_bit_cjp;
/// Result of one brute-force run for a `(precision, log_norm)` pair.
pub struct Solution<T> {
    pub precision: u64,
    pub log_norm: u64,
    // Best (parameters, cost) found, or `None` when the pair is infeasible.
    pub intem: Option<(T, f64)>,
}

View File

@@ -0,0 +1,320 @@
use crate::generic::{Problem, SequentialProblem};
use crate::{pbs_p_fail_from_global_p_fail, MyRange, Solution, STEP};
use concrete_cpu_noise_model::gaussian_noise::noise::blind_rotate::variance_blind_rotate;
use concrete_cpu_noise_model::gaussian_noise::noise::keyswitch::variance_keyswitch;
use concrete_cpu_noise_model::gaussian_noise::noise::modulus_switching::estimate_modulus_switching_noise_with_binary_key;
use concrete_optimizer::computing_cost::complexity_model::ComplexityModel;
use concrete_optimizer::noise_estimator::error;
use concrete_optimizer::parameters::{
AtomicPatternParameters, BrDecompositionParameters, GlweParameters, KsDecompositionParameters,
LweDimension,
};
use concrete_security_curves::gaussian::security::{minimal_variance_glwe, minimal_variance_lwe};
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
use std::io::Write;
use std::time::Instant;
// use rayon_cond::CondIterator;
// When set, the amplified PBS variance gets one extra factor 2
// (2^(2*log_norm2 + 1) instead of 2^(2*log_norm2)) in the LMP constraint.
pub const UNDERESTIMATION: bool = true;
/// Candidate parameter set for the LMP atomic pattern.
#[derive(Debug, Clone, Copy)]
pub struct LMPParams {
    base_log_ks: u64,   // log2 of the keyswitch decomposition base
    level_ks: u64,      // keyswitch decomposition levels
    base_log_pbs: u64,  // log2 of the PBS decomposition base
    level_pbs: u64,     // PBS decomposition levels
    glwe_dim: u64,      // GLWE dimension k
    log_poly_size: u64, // log2 of the polynomial size N
    small_lwe_dim: u64, // LWE dimension after the keyswitch
}
impl LMPParams {
    /// Dimension of the flattened GLWE secret viewed as LWE: `k * N`.
    fn big_lwe_dim(&self) -> u64 {
        // Same value as `glwe_dim * 2^log_poly_size`, in one expression.
        self.glwe_dim * (1 << self.log_poly_size)
    }
}
/// Constraint set for the LMP search.
struct LMPConstraint {
    variance_constraint: f64, // maximal tolerated output variance
    log_norm2: u64,           // log2 of the 2-norm of the multisum weights
    security_level: u64,      // e.g. 128 bits
    sum_size: u64,            // number of terms in the levelled multisum
}
impl Problem for LMPConstraint {
    type Param = LMPParams;

    /// Feasibility: keyswitch + blind-rotate + modulus-switch noise, with the
    /// PBS term amplified by the squared 2-norm `2^(2*log_norm2)`, must stay
    /// below the variance bound.
    fn verify(&self, param: Self::Param) -> bool {
        let poly_size = 1 << param.log_poly_size;
        // Minimal secure noise for the keyswitching key.
        let variance_ksk = minimal_variance_lwe(param.small_lwe_dim, 64, self.security_level);
        let v_ks = variance_keyswitch(
            param.big_lwe_dim(),
            param.base_log_ks,
            param.level_ks,
            64,
            variance_ksk,
        );
        // Minimal secure noise for the bootstrapping key.
        let variance_bsk =
            minimal_variance_glwe(param.glwe_dim, poly_size, 64, self.security_level);
        let v_pbs = variance_blind_rotate(
            param.small_lwe_dim,
            param.glwe_dim,
            poly_size,
            param.base_log_pbs,
            param.level_pbs,
            64,
            variance_bsk,
        );
        let v_ms = estimate_modulus_switching_noise_with_binary_key(
            param.small_lwe_dim,
            param.log_poly_size,
            64,
        );
        if UNDERESTIMATION {
            // One extra factor 2 on the amplified PBS variance.
            v_pbs * (1 << ((2 * self.log_norm2) + 1)) as f64 + v_ks + v_ms
                < self.variance_constraint
        } else {
            v_pbs * (1 << (2 * self.log_norm2)) as f64 + v_ks + v_ms < self.variance_constraint
        }
    }

    /// Cost of one LMP atomic pattern (see `lmp_complexity`).
    fn cost(&self, param: Self::Param) -> f64 {
        lmp_complexity(
            self.sum_size,
            AtomicPatternParameters {
                input_lwe_dimension: LweDimension(param.big_lwe_dim()),
                ks_decomposition_parameter: KsDecompositionParameters {
                    level: param.level_ks,
                    log2_base: param.base_log_ks,
                },
                internal_lwe_dimension: LweDimension(param.small_lwe_dim),
                br_decomposition_parameter: BrDecompositionParameters {
                    level: param.level_pbs,
                    log2_base: param.base_log_pbs,
                },
                output_glwe_params: GlweParameters {
                    log2_polynomial_size: param.log_poly_size,
                    glwe_dimension: param.glwe_dim,
                },
            },
            64,
        )
    }
}
/// Cost of the LMP atomic pattern under the CPU cost model: one levelled
/// multisum plus two keyswitch/PBS pairs (hence the factor 2).
#[allow(dead_code)]
pub fn lmp_complexity(
    sum_size: u64,
    params: AtomicPatternParameters,
    ciphertext_modulus_log: u32,
) -> f64 {
    let complexity_model = concrete_optimizer::computing_cost::cpu::CpuComplexity::default();
    let multisum_complexity = complexity_model.levelled_complexity(
        sum_size,
        params.input_lwe_dimension,
        ciphertext_modulus_log,
    );
    let ks_complexity =
        complexity_model.ks_complexity(params.ks_parameters(), ciphertext_modulus_log);
    let pbs_complexity =
        complexity_model.pbs_complexity(params.pbs_parameters(), ciphertext_modulus_log);
    // LMP performs the KS -> PBS sequence twice per multisum.
    multisum_complexity + 2. * (ks_complexity + pbs_complexity)
}
/// Ranges explored for each LMP parameter.
struct LMPSearchSpace {
    range_base_log_ks: MyRange,
    range_level_ks: MyRange,
    range_base_log_pbs: MyRange,
    range_level_pbs: MyRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
}
impl LMPSearchSpace {
    /// Full Cartesian product as a rayon parallel iterator.
    /// NOTE(review): unlike `iter`, this uses the raw ranges (no level
    /// tightening and no `STEP` stride on the small LWE dimension), so it
    /// explores a strictly larger space — confirm that is intended.
    #[allow(unused)]
    #[rustfmt::skip]
    fn par_iter(self) -> impl rayon::iter::ParallelIterator<Item=LMPParams> {
        self.range_base_log_ks
            .to_std_range()
            .into_par_iter()
            .flat_map(move |base_log_ks| {
                self.range_level_ks
                    .to_std_range()
                    .into_par_iter()
                    .flat_map(move |level_ks| {
                        self.range_base_log_pbs
                            .to_std_range()
                            .into_par_iter()
                            .flat_map(move |base_log_pbs| {
                                self.range_level_pbs
                                    .to_std_range()
                                    .into_par_iter()
                                    .flat_map(move |level_pbs| {
                                        self.range_glwe_dim
                                            .to_std_range()
                                            .into_par_iter()
                                            .flat_map(move |glwe_dim| {
                                                self.range_log_poly_size
                                                    .to_std_range()
                                                    .into_par_iter()
                                                    .flat_map(move |log_poly_size| {
                                                        self.range_small_lwe_dim
                                                            .to_std_range()
                                                            .into_par_iter()
                                                            .map(move |small_lwe_dim| {
                                                                LMPParams {
                                                                    base_log_ks,
                                                                    level_ks,
                                                                    base_log_pbs,
                                                                    level_pbs,
                                                                    glwe_dim,
                                                                    log_poly_size,
                                                                    small_lwe_dim,
                                                                }
                                                            })
                                                    })
                                            })
                                    })
                            })
                    })
            })
    }

    /// Sequential Cartesian product with tightened decomposition-level
    /// ranges and a `STEP` stride on the small LWE dimension.
    fn iter(self, precision: u64) -> impl Iterator<Item = LMPParams> {
        self.range_base_log_ks
            .to_std_range()
            .flat_map(move |base_log_ks| {
                self.range_level_ks
                    .to_std_range_tight(base_log_ks, precision)
                    .flat_map(move |level_ks| {
                        self.range_base_log_pbs
                            .to_std_range()
                            .flat_map(move |base_log_pbs| {
                                self.range_level_pbs
                                    .to_std_range_tight(base_log_pbs, precision)
                                    .flat_map(move |level_pbs| {
                                        self.range_glwe_dim.to_std_range().flat_map(
                                            move |glwe_dim| {
                                                self.range_log_poly_size.to_std_range().flat_map(
                                                    move |log_poly_size| {
                                                        self.range_small_lwe_dim
                                                            .to_std_range()
                                                            .step_by(STEP)
                                                            .map(move |small_lwe_dim| LMPParams {
                                                                base_log_ks,
                                                                level_ks,
                                                                base_log_pbs,
                                                                level_pbs,
                                                                glwe_dim,
                                                                log_poly_size,
                                                                small_lwe_dim,
                                                            })
                                                    },
                                                )
                                            },
                                        )
                                    })
                            })
                    })
            })
    }
}
/// Brute-forces LMP parameters for precisions 1..=23 and a fixed set of
/// 2-norm exponents, writing the best solution per pair to `writer`.
///
/// The global `p_fail` is split over the 2 PBS of the pattern.
pub fn solve_all_lmp(p_fail: f64, writer: impl Write) {
    let start = Instant::now();
    let precisions = 1..24;
    let log_norms = vec![4, 6, 8, 10];
    let p_fail_per_pbs = pbs_p_fail_from_global_p_fail(2, p_fail);
    let res: Vec<Solution<LMPParams>> = precisions
        .into_par_iter()
        .flat_map(|precision| {
            log_norms
                .clone()
                .into_par_iter()
                .map(|log_norm| {
                    // Search space rebuilt per (precision, log_norm) pair
                    // because `iter` consumes it.
                    let a = LMPSearchSpace {
                        range_base_log_ks: MyRange(1, 40),
                        range_level_ks: MyRange(1, 25),
                        range_base_log_pbs: MyRange(1, 40),
                        range_level_pbs: MyRange(1, 25),
                        range_glwe_dim: MyRange(1, 7),
                        range_log_poly_size: MyRange(8, 18),
                        range_small_lwe_dim: MyRange(500, 1500),
                    };
                    // precision - 1 because no need for padding bit
                    // + 1 because of the random MSB
                    let config = LMPConstraint {
                        variance_constraint: error::safe_variance_bound_2padbits(
                            precision,
                            64,
                            p_fail_per_pbs,
                        ),
                        log_norm2: log_norm,
                        security_level: 128,
                        sum_size: 4096,
                    };
                    let intem = config.brute_force(a.iter(precision));
                    Solution {
                        precision,
                        log_norm,
                        intem,
                    }
                })
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();
    let duration = start.elapsed();
    println!(
        "Optimization took: {:?} min",
        duration.as_secs() as f64 / 60.
    );
    write_to_file(writer, &res).unwrap();
}
/// Writes LMP solutions as a CSV-like table; `(precision, log_norm)` pairs
/// without a feasible solution are skipped.
pub fn write_to_file(
    mut writer: impl Write,
    res: &[Solution<LMPParams>],
) -> Result<(), std::io::Error> {
    writeln!(
        writer,
        " p,log(nu), k, N, n, br_l,br_b, ks_l,ks_b, cost"
    )?;
    for Solution {
        precision,
        log_norm,
        intem,
    } in res.iter()
    {
        // `if let` replaces the former `match` with an empty `None => {}` arm.
        if let Some((solution, cost)) = intem {
            writeln!(
                writer,
                " {:2}, {:2}, {:2}, {:2}, {:4}, {:2}, {:2}, {:2}, {:2}, {:6}",
                precision,
                log_norm,
                solution.glwe_dim,
                solution.log_poly_size,
                solution.small_lwe_dim,
                solution.level_pbs,
                solution.base_log_pbs,
                solution.level_ks,
                solution.base_log_ks,
                cost
            )?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,431 @@
use crate::generic::{Problem, SequentialProblem};
use crate::{minimal_added_noise_by_modulus_switching, ExplicitRange, MyRange, Solution};
use concrete_cpu_noise_model::gaussian_noise::noise::keyswitch::variance_keyswitch;
use concrete_cpu_noise_model::gaussian_noise::noise::modulus_switching::estimate_modulus_switching_noise_with_binary_key;
use concrete_cpu_noise_model::gaussian_noise::noise::multi_bit_blind_rotate::variance_multi_bit_blind_rotate;
use concrete_optimizer::computing_cost::complexity_model::ComplexityModel;
use concrete_optimizer::noise_estimator::error;
use concrete_optimizer::parameters::{
AtomicPatternParameters, BrDecompositionParameters, GlweParameters, KsDecompositionParameters,
LweDimension,
};
use concrete_security_curves::gaussian::security::{minimal_variance_glwe, minimal_variance_lwe};
use rayon::prelude::{IntoParallelIterator, ParallelIterator};
use std::io::Write;
use std::time::Instant;
// Number of secret-key bits bundled per multi-bit PBS group.
const GROUPING_FACTOR: u32 = 3;
// Whether GGSW Fourier transforms are computed just-in-time (adds FFT cost).
const JIT_FFT: bool = true;
/// Candidate parameter set for the multi-bit CJP atomic pattern.
#[derive(Debug, Clone, Copy)]
pub struct MultiBitCJPParams {
    base_log_ks: u64,   // log2 of the keyswitch decomposition base
    level_ks: u64,      // keyswitch decomposition levels
    base_log_pbs: u64,  // log2 of the PBS decomposition base
    level_pbs: u64,     // PBS decomposition levels
    glwe_dim: u64,      // GLWE dimension k
    log_poly_size: u64, // log2 of the polynomial size N
    small_lwe_dim: u64, // LWE dimension after the keyswitch
}
impl MultiBitCJPParams {
    /// Dimension of the flattened GLWE secret viewed as LWE: `k * N`.
    fn big_lwe_dim(&self) -> u64 {
        // Same value as `glwe_dim * 2^log_poly_size`, in one expression.
        self.glwe_dim * (1 << self.log_poly_size)
    }
}
/// Constraint set for the multi-bit CJP search.
struct MultiBitCJPConstraint {
    variance_constraint: f64, // maximal tolerated output variance
    log_norm2: u64,           // log2 of the 2-norm of the multisum weights
    security_level: u64,      // e.g. 128 bits
    sum_size: u64,            // number of terms in the levelled multisum
    grouping_factor: u32,     // secret-key bits bundled per PBS group
    jit_fft: bool,            // just-in-time FFT of the GGSW ciphertexts
}
impl Problem for MultiBitCJPConstraint {
    type Param = MultiBitCJPParams;

    /// Feasibility: keyswitch + multi-bit blind-rotate + modulus-switch
    /// noise, with the PBS term amplified by `2^(2*log_norm2)`, must stay
    /// below the variance bound.
    // TODO CHANGE NOISE FORMULAE (kept from the original author).
    fn verify(&self, param: Self::Param) -> bool {
        let poly_size = 1 << param.log_poly_size;
        // Minimal secure noise for the keyswitching key.
        let variance_ksk = minimal_variance_lwe(param.small_lwe_dim, 64, self.security_level);
        let v_ks = variance_keyswitch(
            param.big_lwe_dim(),
            param.base_log_ks,
            param.level_ks,
            64,
            variance_ksk,
        );
        // Minimal secure noise for the bootstrapping key.
        let variance_bsk =
            minimal_variance_glwe(param.glwe_dim, poly_size, 64, self.security_level);
        // Multi-bit variant of the blind-rotate noise formula.
        let v_pbs = variance_multi_bit_blind_rotate(
            param.small_lwe_dim,
            param.glwe_dim,
            poly_size,
            param.base_log_pbs,
            param.level_pbs,
            64,
            variance_bsk,
            self.grouping_factor,
            self.jit_fft,
        );
        let v_ms = estimate_modulus_switching_noise_with_binary_key(
            param.small_lwe_dim,
            param.log_poly_size,
            64,
        );
        v_pbs * (1 << (2 * self.log_norm2)) as f64 + v_ks + v_ms < self.variance_constraint
    }

    /// Cost of one multi-bit CJP atomic pattern (see `multi_cjp_complexity`).
    fn cost(&self, param: Self::Param) -> f64 {
        multi_cjp_complexity(
            self.sum_size,
            AtomicPatternParameters {
                input_lwe_dimension: LweDimension(param.big_lwe_dim()),
                ks_decomposition_parameter: KsDecompositionParameters {
                    level: param.level_ks,
                    log2_base: param.base_log_ks,
                },
                internal_lwe_dimension: LweDimension(param.small_lwe_dim),
                br_decomposition_parameter: BrDecompositionParameters {
                    level: param.level_pbs,
                    log2_base: param.base_log_pbs,
                },
                output_glwe_params: GlweParameters {
                    log2_polynomial_size: param.log_poly_size,
                    glwe_dimension: param.glwe_dim,
                },
            },
            64,
            self.grouping_factor,
            self.jit_fft,
        )
    }
}
/// Cost of the multi-bit CJP atomic pattern under the CPU cost model:
/// one levelled multisum, one keyswitch, one multi-bit PBS.
#[allow(dead_code)]
pub fn multi_cjp_complexity(
    sum_size: u64,
    params: AtomicPatternParameters,
    ciphertext_modulus_log: u32,
    grouping_factor: u32,
    jit_fft: bool,
) -> f64 {
    let complexity_model = concrete_optimizer::computing_cost::cpu::CpuComplexity::default();
    let multisum_complexity = complexity_model.levelled_complexity(
        sum_size,
        params.input_lwe_dimension,
        ciphertext_modulus_log,
    );
    let ks_complexity =
        complexity_model.ks_complexity(params.ks_parameters(), ciphertext_modulus_log);
    let pbs_complexity = complexity_model.multi_bit_pbs_complexity(
        params.pbs_parameters(),
        ciphertext_modulus_log,
        grouping_factor,
        jit_fft,
    );
    multisum_complexity + ks_complexity + pbs_complexity
}
/// Raw ranges explored for each multi-bit CJP parameter, before tightening.
/// NOTE(review): the `_range_*_pbs` fields are unused — `to_tighten` reuses
/// the KS ranges for the PBS decomposition; confirm this is intended.
struct CJPSearchSpace {
    range_base_log_ks: MyRange,
    range_level_ks: MyRange,
    _range_base_log_pbs: MyRange,
    _range_level_pbs: MyRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
}
impl CJPSearchSpace {
    /// Prunes the `(base_log, level)` decomposition grids: for each ciphertext
    /// shape, only the Pareto-optimal pairs (strictly decreasing noise as the
    /// level grows) are kept, then the pairs are deduplicated across shapes.
    /// This shrinks the Cartesian product the brute force must walk.
    fn to_tighten(
        &self,
        security_level: u64,
        grouping_factor: u32,
        jit_fft: bool,
    ) -> CJPSearchSpaceTighten {
        // Keyswitch: keep, per (N, k, n), the best base log for each level,
        // and only levels that strictly improve the noise.
        let mut ks_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for n in self.range_small_lwe_dim.to_std_range() {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_ks.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_ks.to_std_range() {
                            let variance_ksk = minimal_variance_lwe(n, 64, security_level);
                            let v_ks = variance_keyswitch(
                                (1 << log_N) * k,
                                baselog,
                                level,
                                64,
                                variance_ksk,
                            );
                            if v_ks <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_ks;
                                current_pair = (baselog, level)
                            }
                        }
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            ks_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        // PBS: same pruning with the multi-bit blind-rotate noise.
        // NOTE(review): this loop reuses `range_level_ks`/`range_base_log_ks`
        // for the PBS decomposition instead of the (unused) `_range_*_pbs`
        // fields — looks like a copy-paste; confirm the intent.
        let mut pbs_decomp = vec![];
        for log_N in self.range_log_poly_size.to_std_range() {
            for k in self.range_glwe_dim.to_std_range() {
                for n in self
                    .range_small_lwe_dim
                    .to_std_range()
                    .step_by(GROUPING_FACTOR as usize)
                {
                    let mut current_minimal_noise = f64::INFINITY;
                    for level in self.range_level_ks.to_std_range() {
                        let mut current_minimal_noise_for_a_given_level = current_minimal_noise;
                        let mut current_pair = (0, 0);
                        for baselog in self.range_base_log_ks.to_std_range() {
                            let variance_bsk =
                                minimal_variance_glwe(k, 1 << log_N, 64, security_level);
                            let v_pbs = variance_multi_bit_blind_rotate(
                                n,
                                k,
                                1 << log_N,
                                baselog,
                                level,
                                64,
                                variance_bsk,
                                grouping_factor,
                                jit_fft,
                            );
                            if v_pbs <= current_minimal_noise_for_a_given_level {
                                current_minimal_noise_for_a_given_level = v_pbs;
                                current_pair = (baselog, level)
                            }
                        }
                        if current_minimal_noise_for_a_given_level < current_minimal_noise {
                            pbs_decomp.push(current_pair);
                            current_minimal_noise = current_minimal_noise_for_a_given_level;
                        }
                    }
                }
            }
        }
        // Deduplicate pairs shared by several ciphertext shapes.
        ks_decomp.sort();
        ks_decomp.dedup();
        pbs_decomp.sort();
        pbs_decomp.dedup();
        println!("Only {} couples left for keyswitch", ks_decomp.len());
        println!("Only {} couples left for bootstrap", pbs_decomp.len());
        CJPSearchSpaceTighten {
            range_base_log_level_ks: ExplicitRange(ks_decomp.clone()),
            range_base_log_level_pbs: ExplicitRange(pbs_decomp.clone()),
            range_glwe_dim: self.range_glwe_dim,
            range_log_poly_size: self.range_log_poly_size,
            range_small_lwe_dim: self.range_small_lwe_dim,
        }
    }
}
/// Search space after decomposition pruning: explicit `(base_log, level)`
/// lists for keyswitch and PBS, plus the remaining shape ranges.
#[derive(Clone)]
struct CJPSearchSpaceTighten {
    range_base_log_level_ks: ExplicitRange,
    range_base_log_level_pbs: ExplicitRange,
    range_glwe_dim: MyRange,
    range_log_poly_size: MyRange,
    range_small_lwe_dim: MyRange,
}
impl CJPSearchSpaceTighten {
    /// Placeholder parallel iterator: yields one all-zero parameter set per
    /// GLWE dimension. NOTE(review): not a real search — confirm this stub
    /// is intentional before using it.
    #[allow(unused)]
    #[rustfmt::skip]
    fn par_iter(self) -> impl rayon::iter::ParallelIterator<Item=MultiBitCJPParams> {
        self.range_glwe_dim
            .to_std_range()
            .into_par_iter().map(|_k| MultiBitCJPParams {
                base_log_ks: 0,
                level_ks: 0,
                base_log_pbs: 0,
                level_pbs: 0,
                glwe_dim: 0,
                log_poly_size: 0,
                small_lwe_dim: 0,
            })
    }

    /// Cartesian product of the pruned decomposition pairs and the shape
    /// ranges. The polynomial-size range is lower-bounded by
    /// `precision + minimal_ms_value`, and the small LWE dimension is walked
    /// with a `GROUPING_FACTOR` stride (multi-bit groups).
    fn iter(
        self,
        precision: u64,
        minimal_ms_value: u64,
    ) -> impl Iterator<Item = MultiBitCJPParams> {
        self.range_base_log_level_ks
            .into_iter()
            .flat_map(move |(base_log_ks, level_ks)| {
                self.range_base_log_level_pbs.clone().into_iter().flat_map(
                    move |(base_log_pbs, level_pbs)| {
                        self.range_glwe_dim
                            .to_std_range()
                            .flat_map(move |glwe_dim| {
                                self.range_log_poly_size
                                    .to_std_range_poly_size(precision + minimal_ms_value)
                                    .flat_map(move |log_poly_size| {
                                        self.range_small_lwe_dim
                                            .to_std_range()
                                            .step_by(GROUPING_FACTOR as usize)
                                            .map(move |small_lwe_dim| MultiBitCJPParams {
                                                base_log_ks,
                                                level_ks,
                                                base_log_pbs,
                                                level_pbs,
                                                glwe_dim,
                                                log_poly_size,
                                                small_lwe_dim,
                                            })
                                    })
                            })
                    },
                )
            })
    }
}
/// Brute-forces multi-bit CJP parameters for precisions 1..=23 and a fixed
/// set of 2-norm exponents, writing the best solution per pair to `writer`.
pub fn solve_all_multi_bit_cjp(p_fail: f64, writer: impl Write) {
    let start = Instant::now();
    let precisions = 1..24;
    let log_norms = vec![4, 6, 8, 10];
    // find the minimal added noise by the modulus switching
    // for KS
    let a = CJPSearchSpace {
        range_base_log_ks: MyRange(1, 40),
        range_level_ks: MyRange(1, 40),
        _range_base_log_pbs: MyRange(1, 40),
        _range_level_pbs: MyRange(1, 53),
        range_glwe_dim: MyRange(1, 7),
        range_log_poly_size: MyRange(8, 19),
        // Start of the small-LWE range rounded to a multiple of
        // GROUPING_FACTOR so the stride stays aligned with the groups.
        range_small_lwe_dim: MyRange(
            GROUPING_FACTOR as u64 * (500. / GROUPING_FACTOR as f64).round() as u64,
            1500,
        ),
    };
    // Lower bound on the MS noise (in bits), evaluated at the smallest
    // ciphertext shape of the search space.
    let minimal_ms_value = minimal_added_noise_by_modulus_switching(
        (1 << a.range_log_poly_size.0) * a.range_glwe_dim.0,
    )
    .sqrt()
    .ceil() as u64;
    let a_tighten = a.to_tighten(128, GROUPING_FACTOR, JIT_FFT);
    let res: Vec<Solution<MultiBitCJPParams>> = precisions
        .into_par_iter()
        .flat_map(|precision| {
            log_norms
                .clone()
                .into_par_iter()
                .map(|log_norm| {
                    // NOTE(review): unlike LMP/GBA, `p_fail` is used directly
                    // and not split per PBS — confirm this is intended.
                    let config = MultiBitCJPConstraint {
                        variance_constraint: error::safe_variance_bound_2padbits(
                            precision, 64, p_fail,
                        ),
                        log_norm2: log_norm,
                        security_level: 128,
                        sum_size: 4096,
                        grouping_factor: GROUPING_FACTOR,
                        jit_fft: JIT_FFT,
                    };
                    let intem =
                        config.brute_force(a_tighten.clone().iter(precision, minimal_ms_value));
                    Solution {
                        precision,
                        log_norm,
                        intem,
                    }
                })
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();
    let duration = start.elapsed();
    println!(
        "Optimization took: {:?} min",
        duration.as_secs() as f64 / 60.
    );
    write_to_file(writer, &res).unwrap();
}
/// Writes multi-bit CJP solutions as a CSV-like table; `(precision,
/// log_norm)` pairs without a feasible solution are skipped.
pub fn write_to_file(
    mut writer: impl Write,
    res: &[Solution<MultiBitCJPParams>],
) -> Result<(), std::io::Error> {
    writeln!(
        writer,
        " p,log(nu), k, N, n, br_l,br_b, ks_l,ks_b, cost"
    )?;
    for Solution {
        precision,
        log_norm,
        intem,
    } in res.iter()
    {
        // `if let` replaces the former `match` with an empty `None => {}` arm.
        if let Some((solution, cost)) = intem {
            writeln!(
                writer,
                " {:2}, {:2}, {:2}, {:2}, {:4}, {:2}, {:2}, {:2}, {:2}, {:6}",
                precision,
                log_norm,
                solution.glwe_dim,
                solution.log_poly_size,
                solution.small_lwe_dim,
                solution.level_pbs,
                solution.base_log_pbs,
                solution.level_ks,
                solution.base_log_ks,
                cost
            )?;
        }
    }
    Ok(())
}

View File

@@ -13,4 +13,11 @@ pub trait ComplexityModel: Send + Sync {
lwe_dimension: LweDimension,
ciphertext_modulus_log: u32,
) -> Complexity;
fn multi_bit_pbs_complexity(
&self,
params: PbsParameters,
ciphertext_modulus_log: u32,
grouping_factor: u32,
jit_fft: bool,
) -> Complexity;
}

View File

@@ -1,19 +1,31 @@
use super::complexity::Complexity;
use super::complexity_model::ComplexityModel;
use super::operators::keyswitch_lwe::KsComplexity;
use super::operators::{keyswitch_lwe, pbs};
use super::operators::{keyswitch_lwe, multi_bit_pbs, pbs};
use crate::computing_cost::operators::multi_bit_pbs::MultiBitPbsComplexity;
use crate::parameters::{CmuxParameters, KeyswitchParameters, LweDimension, PbsParameters};
/// CPU cost model: per-operator complexity estimators used by the optimizer.
#[derive(Clone)]
pub struct CpuComplexity {
    pub ks_lwe: keyswitch_lwe::KsComplexity,
    pub pbs: pbs::PbsComplexity,
    pub multi_bit_pbs: MultiBitPbsComplexity,
}
impl ComplexityModel for CpuComplexity {
    /// Cost of a standard (single-bit) PBS; delegates to `PbsComplexity`.
    fn pbs_complexity(&self, params: PbsParameters, ciphertext_modulus_log: u32) -> Complexity {
        self.pbs.complexity(params, ciphertext_modulus_log)
    }
    /// Cost of a multi-bit PBS; delegates to `MultiBitPbsComplexity`.
    fn multi_bit_pbs_complexity(
        &self,
        params: PbsParameters,
        ciphertext_modulus_log: u32,
        grouping_factor: u32,
        jit_fft: bool,
    ) -> Complexity {
        self.multi_bit_pbs
            .complexity(params, ciphertext_modulus_log, grouping_factor, jit_fft)
    }
fn cmux_complexity(&self, params: CmuxParameters, ciphertext_modulus_log: u32) -> Complexity {
self.pbs.cmux.complexity(params, ciphertext_modulus_log)
@@ -48,6 +60,7 @@ impl Default for CpuComplexity {
Self {
ks_lwe: KsComplexity,
pbs: pbs::PbsComplexity::default(),
multi_bit_pbs: multi_bit_pbs::MultiBitPbsComplexity::default(),
}
}
}

View File

@@ -72,6 +72,16 @@ impl ComplexityModel for GpuComplexity {
) -> Complexity {
0.
}
    /// GPU multi-bit PBS cost is not modelled yet.
    ///
    /// # Panics
    /// Always panics via `todo!()`.
    fn multi_bit_pbs_complexity(
        &self,
        _params: PbsParameters,
        _ciphertext_modulus_log: u32,
        _grouping_factor: u32,
        _jit_fft: bool,
    ) -> Complexity {
        todo!()
    }
}
#[allow(non_snake_case)]

View File

@@ -1,3 +1,4 @@
pub mod cmux;
pub(super) mod keyswitch_lwe;
pub mod multi_bit_pbs;
pub(super) mod pbs;

View File

@@ -0,0 +1,41 @@
use super::super::complexity::Complexity;
use super::cmux;
use crate::parameters::PbsParameters;
use crate::utils::square;
/// Complexity estimator for the multi-bit PBS, built on the cmux cost model.
#[derive(Default, Clone)]
pub struct MultiBitPbsComplexity {
    pub cmux: cmux::SimpleWithFactors,
}
impl MultiBitPbsComplexity {
    /// Estimates the cost of a multi-bit PBS.
    ///
    /// `grouping_factor` is the number of secret-key bits bundled per group,
    /// so the blind rotation runs `n / grouping_factor` cmuxes; each group
    /// also pays a term proportional to `2^grouping_factor - 1` GGSW sizes,
    /// plus (when `jit_fft` is set) a just-in-time Fourier transform of the
    /// GGSW. Reference formula:
    /// https://github.com/zama-ai/concrete-optimizer/blob/prototype/python/optimizer/noise_formulas/bootstrap.py#L163
    pub fn complexity(
        &self,
        params: PbsParameters,
        ciphertext_modulus_log: u32,
        grouping_factor: u32,
        jit_fft: bool,
    ) -> Complexity {
        let square_glwe_size = square(params.output_glwe_params.glwe_dimension as f64 + 1.);
        let cmux_cost = self
            .cmux
            .complexity(params.cmux_parameters(), ciphertext_modulus_log);
        // Number of coefficients in one GGSW ciphertext: l * (k+1)^2 * N.
        let ggsw_size = params.br_decomposition_parameter.level as f64
            * square_glwe_size
            * params.output_glwe_params.polynomial_size() as f64;
        // JIT fourier transform for the GGSW (~ size * log2 N).
        let jit_fft_complexity = if jit_fft {
            ggsw_size * params.output_glwe_params.log2_polynomial_size as f64
        } else {
            0.
        };
        (params.internal_lwe_dimension.0 as f64) / (grouping_factor as f64) * cmux_cost
            + 2. * (f64::exp2(grouping_factor as f64) - 1.) * ggsw_size
            + jit_fft_complexity
    }
}

View File

@@ -9,7 +9,8 @@ expensive_tests = []
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
concrete-optimizer = { path ="../concrete-optimizer" }
concrete-optimizer = { path = "../concrete-optimizer" }
brute-force-optimizer = { path = "../brute-force-optimizer" }
clap = { version = "4.0.17", features = ["derive"] }
rayon-cond = "0.2" # to avoid rayon code coloring
# pprof = { version = "0.4", features = ["flamegraph"] }
@@ -34,6 +35,10 @@ bench = false
name = "v0-parameters-by-level"
bench = false
[[bin]]
name = "brute-force"
bench = false
[[bench]]
name = "benchmark"
harness = false

View File

@@ -0,0 +1,51 @@
use brute_force_optimizer::cggi::solve_all_cggi;
use brute_force_optimizer::cjp::solve_all_cjp;
use brute_force_optimizer::ks_free::solve_all_ksfree;
use brute_force_optimizer::gba::solve_all_gba;
use brute_force_optimizer::lmp::solve_all_lmp;
use brute_force_optimizer::multi_bit_cjp::solve_all_multi_bit_cjp;
use clap::Parser;
use std::fs::File;
use v0_parameters::_4_SIGMA;
/// Find parameters for a variety of atomic patterns using a brute force algorithm
// NOTE: only `//` comments here — `///` doc comments on clap-derive fields
// would change the generated --help output.
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
#[allow(clippy::struct_excessive_bools)]
pub struct BruteForceArgs {
    // Global failure probability target (defaults to the 4-sigma bound).
    #[clap(long, default_value_t = _4_SIGMA)]
    pub p_fail: f64,
    // Selects which solver runs; matched in `main`.
    #[clap(
        long,
        help = "Supported atomic patterns: CJP, KSfree, CGGI, GBA and LMP and MBCJP"
    )]
    pub atomic_pattern: String,
}
/// Entry point: dispatches the brute-force search for the requested atomic
/// pattern and writes the solutions under `exp/`.
fn main() {
    let args = BruteForceArgs::parse();
    let filename = format!(
        "exp/{}-pfail-{}.txt",
        args.atomic_pattern,
        args.p_fail.log2().round()
    );
    // Contextful failure instead of a bare `unwrap()`: the most likely cause
    // is a missing `exp/` directory.
    let file = File::create(&filename)
        .unwrap_or_else(|e| panic!("cannot create output file {filename}: {e}"));
    match args.atomic_pattern.as_str() {
        "CJP" => solve_all_cjp(args.p_fail, file),
        "CGGI" => solve_all_cggi(args.p_fail, file),
        "KSfree" => solve_all_ksfree(args.p_fail, file),
        "LMP" => solve_all_lmp(args.p_fail, file),
        "GBA" => solve_all_gba(args.p_fail, file),
        "MBCJP" => solve_all_multi_bit_cjp(args.p_fail, file),
        _ => {
            // Fixed typo: "resquested" -> "requested".
            panic!(
                "The requested AP is not supported ({})",
                args.atomic_pattern
            )
        }
    };
}