mirror of
https://github.com/zama-ai/tfhe-rs.git
synced 2026-01-11 15:48:20 -05:00
Compare commits
25 Commits
al/debug_m
...
go/chore/g
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
470e173f1c | ||
|
|
354c792a5d | ||
|
|
ddeda441be | ||
|
|
55eeafa0f1 | ||
|
|
52c2c80aa2 | ||
|
|
1631593605 | ||
|
|
ee0a30ae2b | ||
|
|
21b523eec8 | ||
|
|
c9bd010478 | ||
|
|
7da1711ee5 | ||
|
|
2bf005d1be | ||
|
|
148cb07fdf | ||
|
|
623cb15421 | ||
|
|
10b07597e8 | ||
|
|
514b4740b1 | ||
|
|
cfed6d3868 | ||
|
|
2f738c0e52 | ||
|
|
7be5de73f0 | ||
|
|
a7f9807e3c | ||
|
|
71cb52b753 | ||
|
|
ecd31e3554 | ||
|
|
f3a4951edb | ||
|
|
768145bc5b | ||
|
|
eb74612cc9 | ||
|
|
7023bace32 |
4
.gitignore
vendored
4
.gitignore
vendored
@@ -34,3 +34,7 @@ package-lock.json
|
||||
|
||||
# Dir used for backward compatibility test data
|
||||
tfhe/tfhe-backward-compat-data/
|
||||
|
||||
# Sampling tool stuff
|
||||
/venv/
|
||||
**/*.algo_sample_acquistion
|
||||
|
||||
@@ -8,6 +8,9 @@ members = [
|
||||
"apps/trivium",
|
||||
"concrete-csprng",
|
||||
"backends/tfhe-cuda-backend",
|
||||
"concrete-cpu-noise-model",
|
||||
"tfhe-rs-cost-model",
|
||||
"concrete-security-curves-rust",
|
||||
"utils/tfhe-versionable",
|
||||
"utils/tfhe-versionable-derive",
|
||||
]
|
||||
|
||||
28
Makefile
28
Makefile
@@ -420,7 +420,8 @@ clippy_versionable: install_rs_check_toolchain
|
||||
.PHONY: clippy_all # Run all clippy targets
|
||||
clippy_all: clippy_rustdoc clippy clippy_boolean clippy_shortint clippy_integer clippy_all_targets \
|
||||
clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core clippy_concrete_csprng clippy_zk_pok clippy_trivium \
|
||||
clippy_versionable
|
||||
clippy_versionable \
|
||||
clippy_noise_measurement
|
||||
|
||||
.PHONY: clippy_fast # Run main clippy targets
|
||||
clippy_fast: clippy_rustdoc clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks \
|
||||
@@ -1289,6 +1290,31 @@ sha256_bool: install_rs_check_toolchain
|
||||
--example sha256_bool \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean
|
||||
|
||||
.PHONY: external_product_noise_measurement # Run scripts to run noise measurement for external_product
|
||||
external_product_noise_measurement: setup_venv install_rs_check_toolchain
|
||||
source venv/bin/activate && \
|
||||
cd tfhe-rs-cost-model/src/ && \
|
||||
python3 external_product_correction.py \
|
||||
--rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--chunks "$$(nproc)" -- \
|
||||
--algorithm ext-prod
|
||||
|
||||
.PHONY: clippy_noise_measurement # Run clippy lints on noise measurement tool
|
||||
clippy_noise_measurement: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
|
||||
-p tfhe-rs-cost-model -- --no-deps -D warnings
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
|
||||
-p concrete-cpu-noise-model -- --no-deps -D warnings
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
|
||||
-p concrete-security-curves -- --no-deps -D warnings
|
||||
|
||||
.PHONY: setup_venv
|
||||
setup_venv:
|
||||
python3 -m venv venv
|
||||
source venv/bin/activate && \
|
||||
pip install -U pip wheel setuptools && \
|
||||
pip install -r tfhe-rs-cost-model/src/requirements.txt
|
||||
|
||||
.PHONY: pcc # pcc stands for pre commit checks (except GPU)
|
||||
pcc: no_tfhe_typo no_dbg_log check_fmt check_typos lint_doc check_md_docs_are_tested check_intra_md_links \
|
||||
clippy_all tfhe_lints check_compile_tests
|
||||
|
||||
19
concrete-cpu-noise-model/Cargo.toml
Normal file
19
concrete-cpu-noise-model/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
[package]
|
||||
# see https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
name = "concrete-cpu-noise-model"
|
||||
version = "0.1.0"
|
||||
authors = [""]
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
concrete-security-curves = { path = "../concrete-security-curves-rust" }
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
approx = "0.5"
|
||||
|
||||
[build-dependencies]
|
||||
cbindgen = "0.24"
|
||||
|
||||
[lib]
|
||||
crate-type = ["lib", "staticlib"]
|
||||
14
concrete-cpu-noise-model/build.rs
Normal file
14
concrete-cpu-noise-model/build.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
extern crate cbindgen;
|
||||
|
||||
use std::env;
|
||||
|
||||
fn main() {
|
||||
let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
|
||||
let package_name = env::var("CARGO_PKG_NAME").unwrap();
|
||||
let output_file = format!("include/{package_name}.h");
|
||||
println!("cargo:rerun-if-changed={output_file}");
|
||||
|
||||
cbindgen::generate(crate_dir)
|
||||
.unwrap()
|
||||
.write_to_file(output_file);
|
||||
}
|
||||
129
concrete-cpu-noise-model/cbindgen.toml
Normal file
129
concrete-cpu-noise-model/cbindgen.toml
Normal file
@@ -0,0 +1,129 @@
|
||||
# This is a template cbindgen.toml file with all of the default values.
|
||||
# Some values are commented out because their absence is the real default.
|
||||
#
|
||||
# See https://github.com/eqrion/cbindgen/blob/master/docs.md#cbindgentoml
|
||||
# for detailed documentation of every option here.
|
||||
|
||||
|
||||
language = "C"
|
||||
|
||||
|
||||
############## Options for Wrapping the Contents of the Header #################
|
||||
|
||||
header = "// Copyright © 2022 ZAMA.\n// All rights reserved."
|
||||
# trailer = "/* Text to put at the end of the generated file */"
|
||||
include_guard = "CONCRETE_CPU_NOISE_MODEL_FFI_H"
|
||||
# pragma_once = true
|
||||
autogen_warning = "// Warning, this file is autogenerated by cbindgen. Do not modify this manually."
|
||||
include_version = false
|
||||
#namespace = "concrete_cpu_ffi"
|
||||
namespaces = []
|
||||
using_namespaces = []
|
||||
sys_includes = []
|
||||
includes = []
|
||||
no_includes = false
|
||||
cpp_compat = true
|
||||
after_includes = ""
|
||||
|
||||
|
||||
############################ Code Style Options ################################
|
||||
|
||||
braces = "SameLine"
|
||||
line_length = 100
|
||||
tab_width = 2
|
||||
documentation = false
|
||||
documentation_style = "auto"
|
||||
line_endings = "LF" # also "CR", "CRLF", "Native"
|
||||
|
||||
|
||||
############################# Codegen Options ##################################
|
||||
|
||||
style = "both"
|
||||
sort_by = "Name" # default for `fn.sort_by` and `const.sort_by`
|
||||
usize_is_size_t = true
|
||||
|
||||
|
||||
[defines]
|
||||
# "target_os = freebsd" = "DEFINE_FREEBSD"
|
||||
# "feature = serde" = "DEFINE_SERDE"
|
||||
|
||||
|
||||
[export]
|
||||
include = []
|
||||
exclude = []
|
||||
#prefix = "CAPI_"
|
||||
item_types = []
|
||||
renaming_overrides_prefixing = false
|
||||
|
||||
|
||||
[export.rename]
|
||||
|
||||
|
||||
[export.body]
|
||||
|
||||
|
||||
[export.mangle]
|
||||
|
||||
|
||||
[fn]
|
||||
rename_args = "None"
|
||||
# must_use = "MUST_USE_FUNC"
|
||||
# no_return = "NO_RETURN"
|
||||
# prefix = "START_FUNC"
|
||||
# postfix = "END_FUNC"
|
||||
args = "auto"
|
||||
sort_by = "Name"
|
||||
|
||||
|
||||
[struct]
|
||||
rename_fields = "None"
|
||||
# must_use = "MUST_USE_STRUCT"
|
||||
derive_constructor = false
|
||||
derive_eq = false
|
||||
derive_neq = false
|
||||
derive_lt = false
|
||||
derive_lte = false
|
||||
derive_gt = false
|
||||
derive_gte = false
|
||||
|
||||
|
||||
[enum]
|
||||
rename_variants = "None"
|
||||
# must_use = "MUST_USE_ENUM"
|
||||
add_sentinel = false
|
||||
prefix_with_name = false
|
||||
derive_helper_methods = false
|
||||
derive_const_casts = false
|
||||
derive_mut_casts = false
|
||||
# cast_assert_name = "ASSERT"
|
||||
derive_tagged_enum_destructor = false
|
||||
derive_tagged_enum_copy_constructor = false
|
||||
enum_class = true
|
||||
private_default_tagged_enum_constructor = false
|
||||
|
||||
|
||||
[const]
|
||||
allow_static_const = true
|
||||
allow_constexpr = false
|
||||
sort_by = "Name"
|
||||
|
||||
|
||||
[macro_expansion]
|
||||
bitflags = false
|
||||
|
||||
|
||||
############## Options for How Your Rust library Should Be Parsed ##############
|
||||
|
||||
[parse]
|
||||
parse_deps = true
|
||||
include = ["concrete-cpu"]
|
||||
exclude = []
|
||||
clean = false
|
||||
extra_bindings = []
|
||||
|
||||
|
||||
[parse.expand]
|
||||
crates = []
|
||||
all_features = false
|
||||
default_features = true
|
||||
features = []
|
||||
45
concrete-cpu-noise-model/include/concrete-cpu-noise-model.h
Normal file
45
concrete-cpu-noise-model/include/concrete-cpu-noise-model.h
Normal file
@@ -0,0 +1,45 @@
|
||||
// Copyright © 2022 ZAMA.
|
||||
// All rights reserved.
|
||||
|
||||
#ifndef CONCRETE_CPU_NOISE_MODEL_FFI_H
|
||||
#define CONCRETE_CPU_NOISE_MODEL_FFI_H
|
||||
|
||||
// Warning, this file is autogenerated by cbindgen. Do not modify this manually.
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdbool.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
|
||||
#define FFT_SCALING_WEIGHT -2.57722494
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif // __cplusplus
|
||||
|
||||
double concrete_cpu_estimate_modulus_switching_noise_with_binary_key(uint64_t internal_ks_output_lwe_dimension,
|
||||
uint64_t glwe_log2_polynomial_size,
|
||||
uint32_t ciphertext_modulus_log);
|
||||
|
||||
double concrete_cpu_variance_blind_rotate(uint64_t in_lwe_dimension,
|
||||
uint64_t out_glwe_dimension,
|
||||
uint64_t out_polynomial_size,
|
||||
uint64_t log2_base,
|
||||
uint64_t level,
|
||||
uint32_t ciphertext_modulus_log,
|
||||
uint32_t fft_precision,
|
||||
double variance_bsk);
|
||||
|
||||
double concrete_cpu_variance_keyswitch(uint64_t input_lwe_dimension,
|
||||
uint64_t log2_base,
|
||||
uint64_t level,
|
||||
uint32_t ciphertext_modulus_log,
|
||||
double variance_ksk);
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif /* CONCRETE_CPU_NOISE_MODEL_FFI_H */
|
||||
3
concrete-cpu-noise-model/src/c_api.rs
Normal file
3
concrete-cpu-noise-model/src/c_api.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod blind_rotate;
|
||||
pub mod keyswitch;
|
||||
pub mod modulus_switching;
|
||||
24
concrete-cpu-noise-model/src/c_api/blind_rotate.rs
Normal file
24
concrete-cpu-noise-model/src/c_api/blind_rotate.rs
Normal file
@@ -0,0 +1,24 @@
|
||||
use crate::gaussian_noise::noise::blind_rotate::variance_blind_rotate;
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn concrete_cpu_variance_blind_rotate(
|
||||
in_lwe_dimension: u64,
|
||||
out_glwe_dimension: u64,
|
||||
out_polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
fft_precision: u32,
|
||||
variance_bsk: f64,
|
||||
) -> f64 {
|
||||
variance_blind_rotate(
|
||||
in_lwe_dimension,
|
||||
out_glwe_dimension,
|
||||
out_polynomial_size,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
fft_precision,
|
||||
variance_bsk,
|
||||
)
|
||||
}
|
||||
18
concrete-cpu-noise-model/src/c_api/keyswitch.rs
Normal file
18
concrete-cpu-noise-model/src/c_api/keyswitch.rs
Normal file
@@ -0,0 +1,18 @@
|
||||
use crate::gaussian_noise::noise::keyswitch::variance_keyswitch;
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn concrete_cpu_variance_keyswitch(
|
||||
input_lwe_dimension: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
variance_ksk: f64,
|
||||
) -> f64 {
|
||||
variance_keyswitch(
|
||||
input_lwe_dimension,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
variance_ksk,
|
||||
)
|
||||
}
|
||||
14
concrete-cpu-noise-model/src/c_api/modulus_switching.rs
Normal file
14
concrete-cpu-noise-model/src/c_api/modulus_switching.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
use crate::gaussian_noise::noise::modulus_switching::estimate_modulus_switching_noise_with_binary_key;
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn concrete_cpu_estimate_modulus_switching_noise_with_binary_key(
|
||||
internal_ks_output_lwe_dimension: u64,
|
||||
glwe_log2_polynomial_size: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
) -> f64 {
|
||||
estimate_modulus_switching_noise_with_binary_key(
|
||||
internal_ks_output_lwe_dimension,
|
||||
glwe_log2_polynomial_size,
|
||||
ciphertext_modulus_log,
|
||||
)
|
||||
}
|
||||
2
concrete-cpu-noise-model/src/gaussian_noise.rs
Normal file
2
concrete-cpu-noise-model/src/gaussian_noise.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
pub mod conversion;
|
||||
pub mod noise;
|
||||
15
concrete-cpu-noise-model/src/gaussian_noise/conversion.rs
Normal file
15
concrete-cpu-noise-model/src/gaussian_noise/conversion.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
fn modular_variance_variance_ratio(ciphertext_modulus_log: u32) -> f64 {
|
||||
2_f64.powi(2 * ciphertext_modulus_log as i32)
|
||||
}
|
||||
|
||||
pub fn modular_variance_to_variance(modular_variance: f64, ciphertext_modulus_log: u32) -> f64 {
|
||||
modular_variance / modular_variance_variance_ratio(ciphertext_modulus_log)
|
||||
}
|
||||
|
||||
pub fn variance_to_modular_variance(variance: f64, ciphertext_modulus_log: u32) -> f64 {
|
||||
variance * modular_variance_variance_ratio(ciphertext_modulus_log)
|
||||
}
|
||||
|
||||
pub fn variance_to_std_dev(variance: f64) -> f64 {
|
||||
variance.sqrt()
|
||||
}
|
||||
9
concrete-cpu-noise-model/src/gaussian_noise/noise.rs
Normal file
9
concrete-cpu-noise-model/src/gaussian_noise/noise.rs
Normal file
@@ -0,0 +1,9 @@
|
||||
pub mod blind_rotate;
|
||||
pub mod cmux;
|
||||
pub mod external_product_glwe;
|
||||
pub mod keyswitch;
|
||||
pub mod keyswitch_one_bit;
|
||||
pub mod modulus_switching;
|
||||
pub mod multi_bit_blind_rotate;
|
||||
pub mod multi_bit_external_product_glwe;
|
||||
pub mod private_packing_keyswitch;
|
||||
@@ -0,0 +1,110 @@
|
||||
use super::cmux::variance_cmux;
|
||||
|
||||
pub const FFT_SCALING_WEIGHT: f64 = -2.577_224_94;
|
||||
|
||||
/// Final reduced noise generated by the final bootstrap step.
|
||||
/// Note that it does not depends from input noise, assuming the bootstrap is successful
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn variance_blind_rotate(
|
||||
in_lwe_dimension: u64,
|
||||
out_glwe_dimension: u64,
|
||||
out_polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
fft_precision: u32,
|
||||
variance_bsk: f64,
|
||||
) -> f64 {
|
||||
in_lwe_dimension as f64
|
||||
* variance_cmux(
|
||||
out_glwe_dimension,
|
||||
out_polynomial_size,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
fft_precision,
|
||||
variance_bsk,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use concrete_security_curves::gaussian::security::minimal_variance_glwe;
|
||||
|
||||
use crate::gaussian_noise::conversion::variance_to_modular_variance;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn security_variance_bootstrap_1() {
|
||||
let ref_modular_variance = 4.078_296_369_990_673e31;
|
||||
|
||||
let polynomial_size = 1 << 12;
|
||||
let glwe_dimension = 2;
|
||||
|
||||
let ciphertext_modulus_log = 64;
|
||||
let security = 128;
|
||||
let variance_bsk = minimal_variance_glwe(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
ciphertext_modulus_log,
|
||||
security,
|
||||
);
|
||||
|
||||
let fft_precision = 53;
|
||||
|
||||
let actual = variance_blind_rotate(
|
||||
2048,
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
24,
|
||||
2,
|
||||
ciphertext_modulus_log,
|
||||
fft_precision,
|
||||
variance_bsk,
|
||||
);
|
||||
|
||||
approx::assert_relative_eq!(
|
||||
variance_to_modular_variance(actual, ciphertext_modulus_log),
|
||||
ref_modular_variance,
|
||||
max_relative = 1e-8
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn golden_python_prototype_security_variance_bootstrap_2() {
|
||||
// golden value include fft correction
|
||||
let golden_modular_variance = 3.269_722_907_894_341e55;
|
||||
|
||||
let polynomial_size = 1 << 12;
|
||||
let glwe_dimension = 4;
|
||||
|
||||
let ciphertext_modulus_log = 128;
|
||||
let security = 128;
|
||||
let variance_bsk = minimal_variance_glwe(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
ciphertext_modulus_log,
|
||||
security,
|
||||
);
|
||||
|
||||
let fft_precision = 53;
|
||||
|
||||
let actual = variance_blind_rotate(
|
||||
1024,
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
5,
|
||||
9,
|
||||
ciphertext_modulus_log,
|
||||
fft_precision,
|
||||
variance_bsk,
|
||||
);
|
||||
|
||||
approx::assert_relative_eq!(
|
||||
variance_to_modular_variance(actual, ciphertext_modulus_log),
|
||||
golden_modular_variance,
|
||||
max_relative = 1e-8
|
||||
);
|
||||
}
|
||||
}
|
||||
22
concrete-cpu-noise-model/src/gaussian_noise/noise/cmux.rs
Normal file
22
concrete-cpu-noise-model/src/gaussian_noise/noise/cmux.rs
Normal file
@@ -0,0 +1,22 @@
|
||||
use super::external_product_glwe::variance_external_product_glwe;
|
||||
|
||||
// only valid in the blind rotate case
|
||||
pub fn variance_cmux(
|
||||
glwe_dimension: u64,
|
||||
polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
fft_precision: u32,
|
||||
variance_ggsw: f64,
|
||||
) -> f64 {
|
||||
variance_external_product_glwe(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
fft_precision,
|
||||
variance_ggsw,
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,89 @@
|
||||
use crate::gaussian_noise::conversion::modular_variance_to_variance;
|
||||
use crate::utils::square;
|
||||
|
||||
pub fn variance_external_product_glwe(
|
||||
glwe_dimension: u64,
|
||||
polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
fft_precision: u32,
|
||||
variance_ggsw: f64,
|
||||
) -> f64 {
|
||||
theoretical_variance_external_product_glwe(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
variance_ggsw,
|
||||
) + fft_noise_variance_external_product_glwe(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
fft_precision,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn theoretical_variance_external_product_glwe(
|
||||
glwe_dimension: u64,
|
||||
polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
variance_ggsw: f64,
|
||||
) -> f64 {
|
||||
let variance_key_coefficient_binary: f64 =
|
||||
modular_variance_to_variance(1. / 4., ciphertext_modulus_log);
|
||||
|
||||
let square_expectation_key_coefficient_binary: f64 =
|
||||
modular_variance_to_variance(square(1. / 2.), ciphertext_modulus_log);
|
||||
|
||||
let k = glwe_dimension as f64;
|
||||
let b = 2_f64.powi(log2_base as i32);
|
||||
let b2l = 2_f64.powi((log2_base * 2 * level) as i32);
|
||||
let l = level as f64;
|
||||
let big_n = polynomial_size as f64;
|
||||
let q_square = 2_f64.powi(2 * ciphertext_modulus_log as i32);
|
||||
|
||||
let res_1 = l * (k + 1.) * big_n * (square(b) + 2.) / 12. * variance_ggsw;
|
||||
let res_2 = (q_square - b2l) / (24. * b2l)
|
||||
* (modular_variance_to_variance(1., ciphertext_modulus_log)
|
||||
+ k * big_n
|
||||
* (variance_key_coefficient_binary + square_expectation_key_coefficient_binary))
|
||||
+ k * big_n / 8. * variance_key_coefficient_binary
|
||||
+ 1. / 16. * square(1. - k * big_n) * square_expectation_key_coefficient_binary;
|
||||
|
||||
res_1 + res_2
|
||||
}
|
||||
|
||||
const FFT_SCALING_WEIGHT: f64 = -2.577_224_94;
|
||||
|
||||
/// Additional noise generated by fft computation
|
||||
|
||||
fn fft_noise_variance_external_product_glwe(
|
||||
glwe_dimension: u64,
|
||||
polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
fft_precision: u32,
|
||||
) -> f64 {
|
||||
// https://github.com/zama-ai/concrete-optimizer/blob/prototype/python/optimizer/noise_formulas/bootstrap.py#L25
|
||||
let b = 2_f64.powi(log2_base as i32);
|
||||
let l = level as f64;
|
||||
let big_n = polynomial_size as f64;
|
||||
let k = glwe_dimension;
|
||||
assert!(k > 0, "k = {k}");
|
||||
assert!(k < 7, "k = {k}");
|
||||
|
||||
let lost_bits = ciphertext_modulus_log as i32 - fft_precision as i32;
|
||||
|
||||
let scale_margin = 2_f64.powi(2 * lost_bits);
|
||||
|
||||
let res =
|
||||
f64::exp2(FFT_SCALING_WEIGHT) * scale_margin * l * b * b * big_n.powi(2) * (k as f64 + 1.);
|
||||
modular_variance_to_variance(res, ciphertext_modulus_log)
|
||||
}
|
||||
@@ -0,0 +1,77 @@
|
||||
use super::keyswitch_one_bit::variance_keyswitch_one_bit;
|
||||
|
||||
/// Additional noise generated by the keyswitch step.
|
||||
pub fn variance_keyswitch(
|
||||
input_lwe_dimension: u64, //n_big
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
variance_ksk: f64,
|
||||
) -> f64 {
|
||||
input_lwe_dimension as f64
|
||||
* variance_keyswitch_one_bit(log2_base, level, ciphertext_modulus_log, variance_ksk)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use concrete_security_curves::gaussian::security::minimal_variance_lwe;
|
||||
|
||||
use crate::gaussian_noise::conversion::variance_to_modular_variance;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn golden_python_prototype_security_variance_keyswitch_1() {
|
||||
let golden_modular_variance = 5.997_880_135_602_194e68;
|
||||
let internal_ks_output_lwe_dimension = 1024;
|
||||
let ciphertext_modulus_log = 128;
|
||||
let security = 128;
|
||||
|
||||
let actual = variance_keyswitch(
|
||||
4096,
|
||||
5,
|
||||
9,
|
||||
ciphertext_modulus_log,
|
||||
minimal_variance_lwe(
|
||||
internal_ks_output_lwe_dimension,
|
||||
ciphertext_modulus_log,
|
||||
security,
|
||||
),
|
||||
);
|
||||
|
||||
approx::assert_relative_eq!(
|
||||
variance_to_modular_variance(actual, ciphertext_modulus_log),
|
||||
golden_modular_variance,
|
||||
max_relative = 1e-8
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn golden_python_prototype_security_variance_keyswitch_2() {
|
||||
// let golden_modular_variance = 8.580795457940938e+66;
|
||||
// the full npe implements a part of the full estimation
|
||||
let golden_modular_variance = 7.407_691_550_271_225e48; // full estimation
|
||||
let internal_ks_output_lwe_dimension = 512;
|
||||
let ciphertext_modulus_log = 64;
|
||||
let security = 128;
|
||||
|
||||
let actual = variance_keyswitch(
|
||||
2048,
|
||||
24,
|
||||
2,
|
||||
ciphertext_modulus_log,
|
||||
minimal_variance_lwe(
|
||||
internal_ks_output_lwe_dimension,
|
||||
ciphertext_modulus_log,
|
||||
security,
|
||||
),
|
||||
);
|
||||
|
||||
approx::assert_relative_eq!(
|
||||
variance_to_modular_variance(actual, ciphertext_modulus_log),
|
||||
golden_modular_variance,
|
||||
max_relative = 1e-8
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
use crate::gaussian_noise::conversion::modular_variance_to_variance;
|
||||
use crate::utils::square;
|
||||
|
||||
/// Additional noise generated by the bit multiplication
|
||||
pub fn variance_keyswitch_one_bit(
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
variance_ksk: f64,
|
||||
) -> f64 {
|
||||
let variance_key_coefficient_binary: f64 =
|
||||
modular_variance_to_variance(1. / 4., ciphertext_modulus_log);
|
||||
|
||||
let square_expectation_key_coefficient_binary: f64 =
|
||||
modular_variance_to_variance(square(1. / 2.), ciphertext_modulus_log);
|
||||
|
||||
let base = 2_f64.powi(log2_base as i32);
|
||||
let b2l = 2_f64.powi((log2_base * 2 * level) as i32);
|
||||
let q_square = 2_f64.powi((2 * ciphertext_modulus_log) as i32);
|
||||
|
||||
// res 2
|
||||
let res_2 = (q_square / (12. * b2l) - 1. / 12.)
|
||||
* (variance_key_coefficient_binary + square_expectation_key_coefficient_binary);
|
||||
|
||||
// res 3
|
||||
let res_3 = 1. / 4. * variance_key_coefficient_binary;
|
||||
|
||||
// res 4
|
||||
let res_4 = (level as f64) * variance_ksk * (square(base) + 2.) / 12.;
|
||||
|
||||
res_2 + res_3 + res_4
|
||||
}
|
||||
@@ -0,0 +1,16 @@
|
||||
use crate::gaussian_noise::conversion::modular_variance_to_variance;
|
||||
use crate::utils::square;
|
||||
|
||||
pub fn estimate_modulus_switching_noise_with_binary_key(
|
||||
internal_ks_output_lwe_dimension: u64,
|
||||
glwe_log2_polynomial_size: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
) -> f64 {
|
||||
let nb_msb = glwe_log2_polynomial_size + 1;
|
||||
|
||||
let w = 2_f64.powi(nb_msb as i32);
|
||||
let n = internal_ks_output_lwe_dimension as f64;
|
||||
|
||||
(1. / 12. + n / 24.) / square(w)
|
||||
+ modular_variance_to_variance(-1. / 12. + n / 48., ciphertext_modulus_log)
|
||||
}
|
||||
@@ -0,0 +1,36 @@
|
||||
use super::multi_bit_external_product_glwe::variance_multi_bit_external_product_glwe;
|
||||
|
||||
/// Final reduced noise generated by the final multi bit bootstrap step.
|
||||
/// Note that it does not depends from input noise, assuming the bootstrap is successful
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn variance_multi_bit_blind_rotate(
|
||||
in_lwe_dimension: u64,
|
||||
out_glwe_dimension: u64,
|
||||
out_polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
fft_precision: u32,
|
||||
variance_bsk: f64,
|
||||
grouping_factor: u32,
|
||||
jit_fft: bool,
|
||||
) -> f64 {
|
||||
assert_eq!(
|
||||
in_lwe_dimension % (grouping_factor as u64),
|
||||
0,
|
||||
"in_lwe_dimension ({in_lwe_dimension}) has \
|
||||
to be a multiple of grouping_factor ({grouping_factor})"
|
||||
);
|
||||
(in_lwe_dimension / (grouping_factor as u64)) as f64
|
||||
* variance_multi_bit_external_product_glwe(
|
||||
out_glwe_dimension,
|
||||
out_polynomial_size,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
fft_precision,
|
||||
variance_bsk,
|
||||
grouping_factor,
|
||||
jit_fft,
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,115 @@
|
||||
use crate::gaussian_noise::conversion::modular_variance_to_variance;
|
||||
use crate::utils::square;
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn variance_multi_bit_external_product_glwe(
|
||||
glwe_dimension: u64,
|
||||
polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
fft_precision: u32,
|
||||
variance_ggsw: f64,
|
||||
grouping_factor: u32,
|
||||
jit_fft: bool,
|
||||
) -> f64 {
|
||||
theoretical_variance_multi_bit_external_product_glwe(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
variance_ggsw,
|
||||
grouping_factor,
|
||||
) + fft_noise_variance_multi_bit_external_product_glwe(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
log2_base,
|
||||
level,
|
||||
ciphertext_modulus_log,
|
||||
fft_precision,
|
||||
grouping_factor,
|
||||
jit_fft,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn theoretical_variance_multi_bit_external_product_glwe(
|
||||
glwe_dimension: u64,
|
||||
polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
variance_ggsw: f64,
|
||||
grouping_factor: u32,
|
||||
) -> f64 {
|
||||
let variance_key_coefficient_binary: f64 =
|
||||
modular_variance_to_variance(1. / 4., ciphertext_modulus_log);
|
||||
|
||||
let square_expectation_key_coefficient_binary: f64 =
|
||||
modular_variance_to_variance(square(1. / 2.), ciphertext_modulus_log);
|
||||
|
||||
let k = glwe_dimension as f64;
|
||||
let b = 2_f64.powi(log2_base as i32);
|
||||
let b2l = 2_f64.powi((log2_base * 2 * level) as i32);
|
||||
let l = level as f64;
|
||||
let big_n = polynomial_size as f64;
|
||||
let q_square = 2_f64.powi(2 * ciphertext_modulus_log as i32);
|
||||
|
||||
let res_1 = l * (k + 1.) * big_n * (square(b) + 2.) / 12.
|
||||
* variance_ggsw
|
||||
* 2.0f64.powi(grouping_factor as i32);
|
||||
let res_2 = (q_square - b2l) / (24. * b2l)
|
||||
* (modular_variance_to_variance(1., ciphertext_modulus_log)
|
||||
+ k * big_n
|
||||
* (variance_key_coefficient_binary + square_expectation_key_coefficient_binary))
|
||||
+ k * big_n / 8. * variance_key_coefficient_binary
|
||||
+ 1. / 16. * square(1. - k * big_n) * square_expectation_key_coefficient_binary;
|
||||
|
||||
res_1 + res_2
|
||||
}
|
||||
|
||||
const FFT_SCALING_WEIGHTS: [(u32, f64); 3] = [
|
||||
(2, 0.265_753_885_551_084_5),
|
||||
(3, 1.350_324_550_016_489_8),
|
||||
(4, 2.475_036_769_207_096),
|
||||
];
|
||||
const JIT_FFT_SCALING_WEIGHT: f64 = -2.015_541_494_298_571_7;
|
||||
|
||||
/// Additional noise generated by fft computation
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn fft_noise_variance_multi_bit_external_product_glwe(
|
||||
glwe_dimension: u64,
|
||||
polynomial_size: u64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
fft_precision: u32,
|
||||
grouping_factor: u32,
|
||||
jit_fft: bool,
|
||||
) -> f64 {
|
||||
let b = 2_f64.powi(log2_base as i32);
|
||||
let l = level as f64;
|
||||
let big_n = polynomial_size as f64;
|
||||
let k = glwe_dimension;
|
||||
assert!(k > 0, "k = {k}");
|
||||
assert!(k < 7, "k = {k}");
|
||||
|
||||
let fft_scaling_weight = if jit_fft {
|
||||
JIT_FFT_SCALING_WEIGHT
|
||||
} else {
|
||||
let index = FFT_SCALING_WEIGHTS
|
||||
.binary_search_by_key(&grouping_factor, |&(factor, _)| factor)
|
||||
.unwrap_or_else(|_| {
|
||||
panic!("Could not find fft scaling weight for grouping factor {grouping_factor}.")
|
||||
});
|
||||
FFT_SCALING_WEIGHTS[index].1
|
||||
};
|
||||
|
||||
let lost_bits = ciphertext_modulus_log as i32 - fft_precision as i32;
|
||||
|
||||
let scale_margin = 2_f64.powi(2 * lost_bits);
|
||||
|
||||
let res =
|
||||
f64::exp2(fft_scaling_weight) * scale_margin * l * b * b * big_n.powi(2) * (k as f64 + 1.);
|
||||
modular_variance_to_variance(res, ciphertext_modulus_log)
|
||||
}
|
||||
@@ -0,0 +1,39 @@
|
||||
use crate::gaussian_noise::conversion::modular_variance_to_variance;
|
||||
use crate::utils::square;
|
||||
|
||||
// packing private keyswitch for WoP-PBS, described in algorithm 3 of https://eprint.iacr.org/2018/421.pdf (TFHE paper)
|
||||
pub fn estimate_packing_private_keyswitch(
|
||||
var_glwe: f64,
|
||||
var_ggsw: f64,
|
||||
log2_base: u64,
|
||||
level: u64,
|
||||
output_glwe_dimension: u64,
|
||||
output_polynomial_size: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
) -> f64 {
|
||||
let variance_key_coefficient_binary: f64 = 1. / 4.;
|
||||
let expectation_key_coefficient_binary: f64 = 1. / 2.;
|
||||
|
||||
let l = level as f64;
|
||||
let b = 2f64.powi(log2_base as i32);
|
||||
let n = (output_glwe_dimension * output_polynomial_size) as f64; // param.internal_lwe_dimension.0 as f64;
|
||||
let b2l = f64::powi(b, 2 * level as i32);
|
||||
let var_s_w = 1. / 4.;
|
||||
let mean_s_w = 1. / 2.;
|
||||
let res_1 = l * (n + 1.) * var_ggsw * (square(b) + 2.) / 12.;
|
||||
|
||||
#[allow(clippy::cast_possible_wrap)]
|
||||
let res_3 = (f64::powi(2., 2 * ciphertext_modulus_log as i32) - b2l) / (12. * b2l)
|
||||
* modular_variance_to_variance(
|
||||
1. + n * variance_key_coefficient_binary + square(expectation_key_coefficient_binary),
|
||||
ciphertext_modulus_log,
|
||||
)
|
||||
+ n / 4.
|
||||
* modular_variance_to_variance(variance_key_coefficient_binary, ciphertext_modulus_log)
|
||||
+ var_glwe * (var_s_w + square(mean_s_w));
|
||||
|
||||
let res_5 = modular_variance_to_variance(var_s_w, ciphertext_modulus_log) * 1. / 4.
|
||||
* square(1. - n * expectation_key_coefficient_binary);
|
||||
|
||||
res_1 + res_3 + res_5
|
||||
}
|
||||
24
concrete-cpu-noise-model/src/lib.rs
Normal file
24
concrete-cpu-noise-model/src/lib.rs
Normal file
@@ -0,0 +1,24 @@
|
||||
#![warn(clippy::nursery)]
|
||||
#![warn(clippy::pedantic)]
|
||||
#![warn(clippy::style)]
|
||||
#![allow(clippy::cast_lossless)]
|
||||
#![allow(clippy::cast_precision_loss)] // u64 to f64
|
||||
#![allow(clippy::cast_possible_truncation)] // u64 to usize
|
||||
#![allow(clippy::missing_panics_doc)]
|
||||
#![allow(clippy::module_name_repetitions)]
|
||||
#![allow(clippy::must_use_candidate)]
|
||||
#![allow(clippy::suboptimal_flops)]
|
||||
#![allow(clippy::cast_possible_wrap)]
|
||||
#![warn(unused_results)]
|
||||
|
||||
pub mod c_api;
|
||||
pub mod gaussian_noise;
|
||||
|
||||
pub(crate) mod utils {
|
||||
pub fn square<V>(v: V) -> V
|
||||
where
|
||||
V: std::ops::Mul<Output = V> + Copy,
|
||||
{
|
||||
v * v
|
||||
}
|
||||
}
|
||||
8
concrete-security-curves-rust/Cargo.toml
Normal file
8
concrete-security-curves-rust/Cargo.toml
Normal file
@@ -0,0 +1,8 @@
|
||||
[package]
|
||||
name = "concrete-security-curves"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
14
concrete-security-curves-rust/gen_table.py
Normal file
14
concrete-security-curves-rust/gen_table.py
Normal file
@@ -0,0 +1,14 @@
|
||||
import sys, json;
|
||||
|
||||
def print_curve(data):
|
||||
print(f' ({data["security_level"]}, SecurityWeights {{ slope: {data["slope"]}, bias: {data["bias"]}, minimal_lwe_dimension: {data["minimal_lwe_dimension"]} }}),')
|
||||
|
||||
|
||||
def print_rust_curves_declaration(datas):
|
||||
print("use super::security_weights::SecurityWeights;")
|
||||
print(f"pub const SECURITY_WEIGHTS_ARRAY: [(u64, SecurityWeights); {len(datas)}] = [")
|
||||
for data in datas:
|
||||
print_curve(data)
|
||||
print("];")
|
||||
|
||||
print_rust_curves_declaration(json.load(sys.stdin))
|
||||
35
concrete-security-curves-rust/src/gaussian/curves_gen.rs
Normal file
35
concrete-security-curves-rust/src/gaussian/curves_gen.rs
Normal file
@@ -0,0 +1,35 @@
|
||||
use super::security_weights::SecurityWeights;
|
||||
pub const SECURITY_WEIGHTS_ARRAY: [(u64, SecurityWeights); 4] = [
|
||||
(
|
||||
80,
|
||||
SecurityWeights {
|
||||
slope: -0.04045822621883835,
|
||||
bias: 1.7183812000404686,
|
||||
minimal_lwe_dimension: 450,
|
||||
},
|
||||
),
|
||||
(
|
||||
112,
|
||||
SecurityWeights {
|
||||
slope: -0.029881371645803536,
|
||||
bias: 2.6539316216894946,
|
||||
minimal_lwe_dimension: 450,
|
||||
},
|
||||
),
|
||||
(
|
||||
128,
|
||||
SecurityWeights {
|
||||
slope: -0.026599462343105267,
|
||||
bias: 2.981543184145991,
|
||||
minimal_lwe_dimension: 450,
|
||||
},
|
||||
),
|
||||
(
|
||||
192,
|
||||
SecurityWeights {
|
||||
slope: -0.018894148763647572,
|
||||
bias: 4.2700349965659115,
|
||||
minimal_lwe_dimension: 532,
|
||||
},
|
||||
),
|
||||
];
|
||||
3
concrete-security-curves-rust/src/gaussian/mod.rs
Normal file
3
concrete-security-curves-rust/src/gaussian/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod curves_gen;
|
||||
pub mod security;
|
||||
pub mod security_weights;
|
||||
70
concrete-security-curves-rust/src/gaussian/security.rs
Normal file
70
concrete-security-curves-rust/src/gaussian/security.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
use super::curves_gen::SECURITY_WEIGHTS_ARRAY;
|
||||
use super::security_weights::SecurityWeights;
|
||||
|
||||
pub fn supported_security_levels() -> impl std::iter::Iterator<Item = u64> {
|
||||
SECURITY_WEIGHTS_ARRAY
|
||||
.iter()
|
||||
.map(|(security_level, _)| *security_level)
|
||||
}
|
||||
|
||||
pub fn security_weight(security_level: u64) -> Option<SecurityWeights> {
|
||||
let index = SECURITY_WEIGHTS_ARRAY
|
||||
.binary_search_by_key(&security_level, |(security_level, _weights)| {
|
||||
*security_level
|
||||
})
|
||||
.ok()?;
|
||||
|
||||
Some(SECURITY_WEIGHTS_ARRAY[index].1)
|
||||
}
|
||||
|
||||
/// Noise ensuring security
|
||||
pub fn minimal_variance_lwe(
|
||||
lwe_dimension: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
security_level: u64,
|
||||
) -> f64 {
|
||||
minimal_variance_glwe(lwe_dimension, 1, ciphertext_modulus_log, security_level)
|
||||
}
|
||||
|
||||
/// Noise ensuring security
|
||||
pub fn minimal_variance_glwe(
|
||||
glwe_dimension: u64,
|
||||
polynomial_size: u64,
|
||||
ciphertext_modulus_log: u32,
|
||||
security_level: u64,
|
||||
) -> f64 {
|
||||
let equiv_lwe_dimension = glwe_dimension * polynomial_size;
|
||||
let security_weights = security_weight(security_level)
|
||||
.unwrap_or_else(|| panic!("{security_level} bits of security is not supported"));
|
||||
|
||||
let secure_log2_std =
|
||||
security_weights.secure_log2_std(equiv_lwe_dimension, ciphertext_modulus_log as f64);
|
||||
let log2_var = 2.0 * secure_log2_std;
|
||||
f64::exp2(log2_var)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn it_works() {
|
||||
let weight = security_weight(128).unwrap();
|
||||
|
||||
let secure_log_2_std = weight.secure_log2_std(512, 64.);
|
||||
|
||||
assert!((-12.0..-10.0).contains(&secure_log_2_std));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn security_security_glwe_variance_low() {
|
||||
let integer_size = 64;
|
||||
let golden_std_dev = 2.168_404_344_971_009e-19;
|
||||
let security_level = 128;
|
||||
|
||||
let actual_var = minimal_variance_glwe(10, 1 << 14, integer_size, security_level);
|
||||
let actual_std_dev = actual_var.sqrt();
|
||||
let expected_std_dev = (0.99 * golden_std_dev)..(1.01 * golden_std_dev);
|
||||
assert!(expected_std_dev.contains(&actual_std_dev));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,23 @@
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct SecurityWeights {
|
||||
pub(crate) slope: f64,
|
||||
pub(crate) bias: f64,
|
||||
pub minimal_lwe_dimension: u64,
|
||||
}
|
||||
|
||||
impl SecurityWeights {
|
||||
pub fn secure_log2_std(&self, lwe_dimension: u64, ciphertext_modulus_log: f64) -> f64 {
|
||||
// ensure to have a minimal on std deviation covering the 2 lowest bits on modular scale
|
||||
let epsilon_log2_std_modular = 2.0;
|
||||
let epsilon_log2_std = epsilon_log2_std_modular - (ciphertext_modulus_log);
|
||||
// ensure the requested lwe_dimension is bigger than the minimal lwe dimension
|
||||
if self.minimal_lwe_dimension <= lwe_dimension {
|
||||
f64::max(
|
||||
self.slope * lwe_dimension as f64 + self.bias,
|
||||
epsilon_log2_std,
|
||||
)
|
||||
} else {
|
||||
ciphertext_modulus_log
|
||||
}
|
||||
}
|
||||
}
|
||||
1
concrete-security-curves-rust/src/lib.rs
Normal file
1
concrete-security-curves-rust/src/lib.rs
Normal file
@@ -0,0 +1 @@
|
||||
pub mod gaussian;
|
||||
29
tfhe-rs-cost-model/Cargo.toml
Normal file
29
tfhe-rs-cost-model/Cargo.toml
Normal file
@@ -0,0 +1,29 @@
|
||||
[package]
|
||||
name = "tfhe-rs-cost-model"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
aligned-vec = { version = "0.5", features = ["serde"] }
|
||||
concrete-cpu-noise-model = { path = "../concrete-cpu-noise-model" }
|
||||
concrete-security-curves = { path = "../concrete-security-curves-rust" }
|
||||
clap = { version = "3.1", features = ["derive"] }
|
||||
itertools = "0.8.0"
|
||||
indicatif = "0.16.2"
|
||||
rand = "0.6.5"
|
||||
rand_chacha = "0.1.1"
|
||||
rayon = "1.9.0"
|
||||
|
||||
[target.'cfg(target_arch = "x86_64")'.dependencies.tfhe]
|
||||
path = "../tfhe"
|
||||
features = ["x86_64-unix"]
|
||||
|
||||
[target.'cfg(target_arch = "aarch64")'.dependencies.tfhe]
|
||||
path = "../tfhe"
|
||||
features = ["aarch64-unix"]
|
||||
|
||||
[features]
|
||||
nightly-avx512 = ["tfhe/nightly-avx512"]
|
||||
gpu = ["tfhe/gpu"]
|
||||
507
tfhe-rs-cost-model/src/external_product_correction.py
Normal file
507
tfhe-rs-cost-model/src/external_product_correction.py
Normal file
@@ -0,0 +1,507 @@
|
||||
import argparse
|
||||
import concurrent.futures
|
||||
import csv
|
||||
import dataclasses
|
||||
import datetime
|
||||
import json
|
||||
import pathlib
|
||||
import subprocess
|
||||
import functools
|
||||
from pathlib import Path
|
||||
|
||||
import numpy as np
|
||||
from scipy.optimize import curve_fit
|
||||
from sklearn.ensemble import IsolationForest
|
||||
|
||||
# Command used to run Rust program responsible to perform sampling on external product.
|
||||
BASE_COMMAND = 'RUSTFLAGS="-C target-cpu=native" cargo {} {} --release --features=nightly-avx512'
|
||||
# Leave toolchain empty at first
|
||||
BUILD_COMMAND = BASE_COMMAND.format("{}", "build")
|
||||
RUN_COMMAND = BASE_COMMAND.format("{}", "run") + " -- --tot {} --id {} {}"
|
||||
|
||||
SECS_PER_HOUR = 3600
|
||||
SECS_PER_MINUTES = 60
|
||||
|
||||
parser = argparse.ArgumentParser(description="Compute coefficient correction for external product")
|
||||
parser.add_argument(
|
||||
"--chunks",
|
||||
type=int,
|
||||
help="Total number of chunks the parameter grid is divided into."
|
||||
"Each chunk is run in a sub-process, to speed up processing make sure to"
|
||||
" have at least this number of CPU cores to allocate for this task",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--rust-toolchain",
|
||||
type=str,
|
||||
help="The rust toolchain to use",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--output-file",
|
||||
"-o",
|
||||
type=str,
|
||||
dest="output_filename",
|
||||
default="correction_coefficients.json",
|
||||
help="Output file containing correction coefficients, formatted as JSON"
|
||||
" (default: correction_coefficients.json)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--analysis-only",
|
||||
"-A",
|
||||
action="store_true",
|
||||
dest="analysis_only",
|
||||
help="If this flag is set, no sampling will be done, it will only try to"
|
||||
" analyze existing results",
|
||||
)
|
||||
parser.add_argument("--dir", type=str, default=".", help="Dir where acquisition files are stored.")
|
||||
parser.add_argument(
|
||||
"--worst-case-analysis",
|
||||
"-W",
|
||||
dest="worst_case_analysis",
|
||||
action="store_true",
|
||||
help="Perform a 1000 analysis pruning different outliers, "
|
||||
"selecting the wort-case parameter for the fft noise fitting",
|
||||
)
|
||||
parser.add_argument(
|
||||
"sampling_args",
|
||||
nargs=argparse.REMAINDER,
|
||||
help="Arguments directly passed to sampling program, to get an exhaustive list"
|
||||
" of options run command: `cargo run -- --help`",
|
||||
)
|
||||
|
||||
|
||||
@dataclasses.dataclass(init=False)
|
||||
class SamplingLine:
|
||||
"""
|
||||
Extract output variance parameter from a sampling result string.
|
||||
|
||||
:param line: :class:`str` formatted as ``polynomial_size, glwe_dimension,
|
||||
decomposition_level_count, decomposition_base_log, input_variance, output_variance,
|
||||
predicted_variance``
|
||||
"""
|
||||
|
||||
parameters: list
|
||||
input_variance: float
|
||||
output_variance_exp: float
|
||||
output_variance_th: float
|
||||
|
||||
def __init__(self, line: dict):
|
||||
self.input_variance = float(line["input_variance"])
|
||||
self.output_variance_exp = float(line["output_variance"])
|
||||
self.output_variance_th = float(line["predicted_variance"])
|
||||
self.parameters = [
|
||||
float(line["polynomial_size"]),
|
||||
float(line["glwe_dimension"]),
|
||||
float(line["decomposition_level_count"]),
|
||||
float(line["decomposition_base_log"]),
|
||||
]
|
||||
# polynomial_size, glwe_dimension, decomposition_level_count, decomposition_base_log
|
||||
ggsw_value = int(line["ggsw_encrypted_value"])
|
||||
if ggsw_value != 1:
|
||||
raise ValueError(f"GGSW value is not 1, it's: {ggsw_value}")
|
||||
|
||||
|
||||
def concatenate_result_files(dir_):
|
||||
"""
|
||||
Concatenate result files into a single one.
|
||||
|
||||
:param pattern: filename pattern as :class:`str`
|
||||
:return: concatenated filename as :class:`Path`
|
||||
"""
|
||||
dir_path = Path(dir_)
|
||||
results_filepath = dir_path / "concatenated_sampling_results"
|
||||
files = sorted(Path(dir_).glob("*.algo_sample_acquistion"))
|
||||
if results_filepath.exists():
|
||||
results_filepath.unlink()
|
||||
|
||||
first_file = files[0]
|
||||
with results_filepath.open("w", encoding="utf-8") as results:
|
||||
content = first_file.read_text()
|
||||
(header, sep, _content) = content.partition("\n")
|
||||
new_hader = (header + sep).replace(" ", "")
|
||||
results.write(new_hader)
|
||||
|
||||
with results_filepath.open("a", encoding="utf-8") as results:
|
||||
for file in files:
|
||||
content = file.read_text()
|
||||
(_header, _sep, content) = content.partition("\n")
|
||||
results.write(content.replace(" ", ""))
|
||||
|
||||
return results_filepath
|
||||
|
||||
|
||||
def extract_from_acquisitions(filename):
|
||||
"""
|
||||
Retrieve and parse data from sampling results.
|
||||
|
||||
:param filename: sampling results filename as :class:`Path`
|
||||
:return: :class:`tuple` of :class:`numpy.array`
|
||||
"""
|
||||
parameters = []
|
||||
exp_output_variance = []
|
||||
th_output_variance = []
|
||||
input_variance = []
|
||||
|
||||
with filename.open() as csvfile:
|
||||
csv_reader = csv.DictReader(csvfile, delimiter=",")
|
||||
|
||||
for line in csv_reader:
|
||||
try:
|
||||
sampled_line = SamplingLine(line)
|
||||
except Exception as err:
|
||||
# If an exception occurs when parsing a result line, we simply discard this one.
|
||||
print(f"Exception while parsing line (error: {err}, line: {line})")
|
||||
continue
|
||||
|
||||
exp_output_var = sampled_line.output_variance_exp
|
||||
th_output_var = sampled_line.output_variance_th
|
||||
input_var = sampled_line.input_variance
|
||||
params = sampled_line.parameters
|
||||
|
||||
if exp_output_var < 0.083:
|
||||
params.append(th_output_var)
|
||||
parameters.append(params)
|
||||
exp_output_variance.append(exp_output_var)
|
||||
th_output_variance.append(th_output_var)
|
||||
input_variance.append(input_var)
|
||||
|
||||
num_samples = len(parameters)
|
||||
|
||||
print(f"There is {num_samples} samples ...")
|
||||
|
||||
return (
|
||||
(
|
||||
np.array(parameters),
|
||||
np.array(exp_output_variance),
|
||||
np.array(th_output_variance),
|
||||
np.array(input_variance),
|
||||
)
|
||||
if num_samples != 0
|
||||
else None
|
||||
)
|
||||
|
||||
|
||||
def get_input(filename):
|
||||
"""
|
||||
:param filename: result filename as :class:`Path`
|
||||
:return: :class:`tuple` of X and Y values
|
||||
"""
|
||||
acquisition_samples = extract_from_acquisitions(filename)
|
||||
if acquisition_samples is None:
|
||||
return None
|
||||
|
||||
(
|
||||
parameters,
|
||||
exp_output_variance,
|
||||
_th_output_variance,
|
||||
input_variance,
|
||||
) = acquisition_samples
|
||||
y_values = np.maximum(0.0, (exp_output_variance - input_variance))
|
||||
x_values = parameters
|
||||
return x_values, y_values
|
||||
|
||||
|
||||
def get_input_without_outlier(filename, bits):
|
||||
inputs = get_input(filename)
|
||||
if inputs is None:
|
||||
return None
|
||||
return remove_outlier(bits, *inputs)
|
||||
|
||||
|
||||
def remove_outlier(bits, x_values, y_values):
|
||||
"""
|
||||
Remove outliers from a dataset using an isolation forest algorithm.
|
||||
|
||||
:param x_values: values for the first dimension as :class:`list`
|
||||
:param y_values: values for the second dimension as :class:`list`
|
||||
:return: cleaned dataset as :class:`tuple` which element storing values a dimension in a
|
||||
:class:`list`
|
||||
"""
|
||||
# identify outliers in the training dataset
|
||||
iso = IsolationForest(contamination=0.1) # Contamination value obtained by experience
|
||||
yhat = iso.fit_predict(x_values)
|
||||
|
||||
# select all rows that are not outliers
|
||||
mask = yhat != -1
|
||||
previous_size = len(x_values)
|
||||
x_values, y_values = x_values[mask, :], y_values[mask]
|
||||
new_size = len(x_values)
|
||||
print(f"Removing {previous_size - new_size} outliers ...")
|
||||
x_values = x_values.astype(np.float64)
|
||||
# Scale the values from variance to modular variance after the filtering was done to avoid
|
||||
# overflowing the isolation forest from sklearn
|
||||
x_values[:, -1] = x_values[:, -1] * np.float64(2 ** (bits * 2))
|
||||
y_values = y_values.astype(np.float64) * np.float64(2 ** (bits * 2))
|
||||
return x_values, y_values
|
||||
|
||||
|
||||
def fft_noise(x, a, log2_q):
|
||||
"""
|
||||
Noise formula for FFTW.
|
||||
"""
|
||||
# 53 bits of mantissa kept at most
|
||||
bits_lost_per_conversion = max(0, log2_q - 53)
|
||||
bit_lost_roundtrip = 2 * bits_lost_per_conversion
|
||||
|
||||
N = x[:, 0]
|
||||
k = x[:, 1]
|
||||
level = x[:, 2]
|
||||
logbase = x[:, 3]
|
||||
theoretical_var = x[:, -1]
|
||||
return (
|
||||
2**a * 2**bit_lost_roundtrip * (k + 1) * level * 2.0 ** (2 * logbase) * N**2
|
||||
+ theoretical_var
|
||||
)
|
||||
|
||||
|
||||
def fft_noise_128(x, a, log2_q):
|
||||
"""
|
||||
Noise formula for f128 fft
|
||||
"""
|
||||
# 106 bits of mantissa kept at most
|
||||
bits_lost_per_conversion = max(0, log2_q - 106)
|
||||
bit_lost_roundtrip = 2 * bits_lost_per_conversion
|
||||
|
||||
N = x[:, 0]
|
||||
k = x[:, 1]
|
||||
level = x[:, 2]
|
||||
logbase = x[:, 3]
|
||||
theoretical_var = x[:, -1]
|
||||
# we lose 2 * 11 bits of mantissa per conversion 22 * 2 = 44
|
||||
return (
|
||||
2**a * 2**bit_lost_roundtrip * (k + 1) * level * 2.0 ** (2 * logbase) * N**2
|
||||
+ theoretical_var
|
||||
)
|
||||
|
||||
def log_fft_noise_fun(x, a, fft_noise_fun):
|
||||
return np.log2(fft_noise_fun(x, a))
|
||||
|
||||
|
||||
def train(x_values, y_values, fft_noise_fun):
|
||||
weights, _ = curve_fit(
|
||||
lambda x, a: log_fft_noise_fun(x, a, fft_noise_fun), x_values, np.log2(y_values)
|
||||
)
|
||||
return weights
|
||||
|
||||
|
||||
def get_weights(filename, fft_noise_fun, bits):
|
||||
"""
|
||||
Get weights from sampling results.
|
||||
|
||||
:param filename: results filename as :class:`Path`
|
||||
:return: :class:`dict` of weights formatted as ``{"a": <float>}``
|
||||
"""
|
||||
inputs_without_outlier = get_input_without_outlier(filename, bits)
|
||||
if inputs_without_outlier is None:
|
||||
return None
|
||||
x_values, y_values = inputs_without_outlier
|
||||
weights = train(x_values, y_values, fft_noise_fun)
|
||||
test(x_values, y_values, weights, fft_noise_fun)
|
||||
return {"a": weights[0]}
|
||||
|
||||
|
||||
def write_to_file(filename, obj):
|
||||
"""
|
||||
Write the given ``obj``ect into a file formatted as JSON.
|
||||
|
||||
:param filename: filename to write into as :class:`str`
|
||||
:param obj: object to write as JSON
|
||||
"""
|
||||
filepath = Path(filename)
|
||||
try:
|
||||
with filepath.open("w", encoding="utf-8") as f:
|
||||
json.dump(obj, f)
|
||||
except Exception as err:
|
||||
print(f"Exception occurred while writing to {filename}: {err}")
|
||||
else:
|
||||
print(f"Results written to {filename}")
|
||||
|
||||
|
||||
def build_sampler(rust_toolchain) -> bool:
|
||||
"""
|
||||
Build sampling Rust program as a subprocess.
|
||||
"""
|
||||
start_time = datetime.datetime.now()
|
||||
print("Building sampling program")
|
||||
|
||||
build_command = BUILD_COMMAND.format(rust_toolchain)
|
||||
|
||||
process = subprocess.run(build_command, shell=True, capture_output=True, check=False)
|
||||
|
||||
elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
|
||||
|
||||
stderr = process.stderr.decode()
|
||||
stderr_formatted = f"STDERR: {stderr}" if stderr else ""
|
||||
print(
|
||||
f"Building failed after {elapsed_time} seconds\n"
|
||||
f"STDOUT: {process.stdout.decode()}\n"
|
||||
f"{stderr_formatted}"
|
||||
)
|
||||
|
||||
if process.returncode == 0:
|
||||
print(f"Building done in {elapsed_time} seconds")
|
||||
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def run_sampling_chunk(rust_toolchain, total_chunks, identity, input_args) -> bool:
|
||||
"""
|
||||
Run an external product sampling on a chunk of data as a subprocess.
|
||||
|
||||
:param total_chunks: number of chunks the parameter is divided into
|
||||
:param identity: chunk identifier as :class:`int`
|
||||
:param input_args: arguments passed to sampling program
|
||||
"""
|
||||
cmd = RUN_COMMAND.format(rust_toolchain, total_chunks, identity, input_args)
|
||||
start_time = datetime.datetime.now()
|
||||
|
||||
print(f"External product sampling chunk #{identity} starting")
|
||||
|
||||
process = subprocess.run(cmd, shell=True, capture_output=True, check=False)
|
||||
|
||||
elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
|
||||
hours = int(elapsed_time // SECS_PER_HOUR)
|
||||
minutes = int((elapsed_time % SECS_PER_HOUR) // SECS_PER_MINUTES)
|
||||
seconds = int(elapsed_time % SECS_PER_HOUR % SECS_PER_MINUTES)
|
||||
|
||||
if process.returncode == 0:
|
||||
print(
|
||||
f"External product sampling chunk #{identity} successfully done in"
|
||||
f" {hours}:{minutes}:{seconds}"
|
||||
)
|
||||
|
||||
return True
|
||||
else:
|
||||
stderr = process.stderr.decode()
|
||||
stderr_formatted = f"STDERR: {stderr}" if stderr else ""
|
||||
print(
|
||||
f"External product sampling chunk #{identity} failed after"
|
||||
f" {hours}:{minutes}:{seconds}\n"
|
||||
f"STDOUT: {process.stdout.decode()}\n"
|
||||
f"{stderr_formatted}"
|
||||
)
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def var_to_bit(variance):
|
||||
if variance <= 0:
|
||||
return np.nan
|
||||
return np.ceil(0.5 * np.log2(variance))
|
||||
|
||||
|
||||
def test(x_values, y_values, weights, fft_noise_fun):
|
||||
mse = 0.0
|
||||
mse_without_correction = 0.0
|
||||
count = 0
|
||||
for index in range(len(x_values)):
|
||||
params = np.array([x_values[index, :]])
|
||||
real_out = y_values[index]
|
||||
pred_out = max(fft_noise_fun(params, *list(weights))[0], 0.000001)
|
||||
if var_to_bit(real_out) >= var_to_bit(pred_out):
|
||||
mse += (var_to_bit(real_out) - var_to_bit(pred_out)) ** 2
|
||||
# print(
|
||||
# f"th: {var_to_bit(params[0, -1])}, pred_fft: {var_to_bit(pred_out)}, "
|
||||
# f"real: {var_to_bit(real_out)}"
|
||||
# )
|
||||
mse_without_correction += (var_to_bit(real_out) - var_to_bit(params[0, -1])) ** 2
|
||||
count += 1
|
||||
# print(var_to_bit(params[0, -1]))
|
||||
# mse_without_correction += (var_to_bit(real_out) ) ** 2
|
||||
|
||||
count = max(count, 1)
|
||||
|
||||
mse /= count # len(x_values)
|
||||
mse_without_correction /= count # len(x_values)
|
||||
print(f"mse: {mse} \nMSE without correction: {mse_without_correction}")
|
||||
return mse, mse_without_correction
|
||||
|
||||
|
||||
def main():
|
||||
args = parser.parse_args()
|
||||
rust_toolchain = args.rust_toolchain
|
||||
if rust_toolchain[0] != "+":
|
||||
rust_toolchain = f"+{rust_toolchain}"
|
||||
|
||||
sampling_args = list(filter(lambda x: x != "--", args.sampling_args))
|
||||
|
||||
bits = 64
|
||||
fft_noise_fun = fft_noise
|
||||
if any(arg in ["ext-prod-u128-split", "ext-prod-u128"] for arg in sampling_args):
|
||||
fft_noise_fun = fft_noise_128
|
||||
bits = 128
|
||||
|
||||
for idx, flag_or_value in enumerate(sampling_args):
|
||||
if flag_or_value in ["-q", "--modulus-log2"]:
|
||||
bits = int(sampling_args[idx + 1])
|
||||
break
|
||||
|
||||
sampling_args.extend(["--dir", args.dir])
|
||||
|
||||
fft_noise_fun = functools.partial(fft_noise_fun, log2_q=bits)
|
||||
dest_dir = Path(args.dir).resolve()
|
||||
|
||||
if not args.analysis_only:
|
||||
# if dest_dir.exists() and dest_dir.glob(args.output_filename):
|
||||
# user_input = input(
|
||||
# f"Warning directory {str(dest_dir)} already exists, "
|
||||
# "proceed and overwrite existing data? [y/N]\n"
|
||||
# )
|
||||
# if user_input.lower() != "y":
|
||||
# print("Aborting.")
|
||||
# exit(1)
|
||||
|
||||
dest_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
if not build_sampler(rust_toolchain):
|
||||
print("Error while building sampler. Exiting")
|
||||
exit(1)
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=args.chunks) as executor:
|
||||
futures = []
|
||||
for n in range(args.chunks):
|
||||
futures.append(
|
||||
executor.submit(
|
||||
run_sampling_chunk,
|
||||
rust_toolchain,
|
||||
args.chunks,
|
||||
n,
|
||||
" ".join(sampling_args),
|
||||
)
|
||||
)
|
||||
|
||||
# Wait for all sampling chunks to be completed.
|
||||
concurrent.futures.wait(futures)
|
||||
|
||||
execution_ok = True
|
||||
|
||||
for future in futures:
|
||||
execution_ok = execution_ok and future.result()
|
||||
|
||||
if not execution_ok:
|
||||
print("Error while running samplings processes. Check logs.")
|
||||
exit(1)
|
||||
|
||||
result_file = concatenate_result_files(args.dir)
|
||||
output_file = dest_dir / args.output_filename
|
||||
|
||||
if args.worst_case_analysis:
|
||||
weights = get_weights(result_file, fft_noise_fun, bits)
|
||||
if weights is None:
|
||||
print("Empty weights after outlier removal, exiting")
|
||||
return
|
||||
max_a = weights["a"]
|
||||
for _ in range(1000):
|
||||
weights = get_weights(result_file, fft_noise_fun, bits)
|
||||
max_a = max(max_a, weights["a"])
|
||||
write_to_file(output_file, {"a": max_a})
|
||||
else:
|
||||
weights = get_weights(result_file, fft_noise_fun, bits)
|
||||
if weights is None:
|
||||
print("Empty weights after outlier removal, exiting")
|
||||
return
|
||||
write_to_file(output_file, weights)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
1122 tfhe-rs-cost-model/src/ks_pbs_timing.rs (Normal file)
File diff suppressed because it is too large
797 tfhe-rs-cost-model/src/main.rs (Normal file)
@@ -0,0 +1,797 @@
|
||||
mod ks_pbs_timing;
|
||||
mod noise_estimation;
|
||||
mod operators;
|
||||
|
||||
use crate::operators::classic_pbs::{
|
||||
classic_pbs_external_product, classic_pbs_external_product_u128,
|
||||
classic_pbs_external_product_u128_split,
|
||||
};
|
||||
use crate::operators::multi_bit_pbs::{
|
||||
multi_bit_pbs_external_product, std_multi_bit_pbs_external_product,
|
||||
};
|
||||
use clap::Parser;
|
||||
use concrete_security_curves::gaussian::security::minimal_variance_glwe;
|
||||
use itertools::iproduct;
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::Write;
|
||||
use tfhe::core_crypto::algorithms::misc::torus_modular_diff;
|
||||
use tfhe::core_crypto::prelude::*;
|
||||
|
||||
pub const DEBUG: bool = false;
|
||||
pub const EXT_PROD_ALGO: &str = "ext-prod";
|
||||
pub const MULTI_BIT_EXT_PROD_ALGO: &str = "multi-bit-ext-prod";
|
||||
pub const STD_MULTI_BIT_EXT_PROD_ALGO: &str = "std-multi-bit-ext-prod";
|
||||
pub const EXT_PROD_U128_SPLIT_ALGO: &str = "ext-prod-u128-split";
|
||||
pub const EXT_PROD_U128_ALGO: &str = "ext-prod-u128";
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct GlweCiphertextGgswCiphertextExternalProductParameters<Scalar: UnsignedInteger> {
|
||||
pub ggsw_noise: Gaussian<f64>,
|
||||
pub glwe_noise: Gaussian<f64>,
|
||||
pub glwe_dimension: GlweDimension,
|
||||
pub ggsw_encrypted_value: Scalar,
|
||||
pub polynomial_size: PolynomialSize,
|
||||
pub decomposition_base_log: DecompositionBaseLog,
|
||||
pub decomposition_level_count: DecompositionLevelCount,
|
||||
pub ciphertext_modulus: CiphertextModulus<Scalar>,
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[clap(author, version, about, long_about = None)]
|
||||
struct Args {
|
||||
/// Total number of threads.
|
||||
#[clap(long, short)]
|
||||
tot: usize,
|
||||
/// Current Thread ID
|
||||
#[clap(long, short)]
|
||||
id: usize,
|
||||
/// Number of times a test is repeated for a single set of parameters.
|
||||
/// This indicates the number of different keys since, at each repetition, we re-sample
|
||||
/// everything
|
||||
#[clap(long, short, default_value_t = 10)]
|
||||
repetitions: usize,
|
||||
/// The size of the sample per key
|
||||
#[clap(long, short = 'S', default_value_t = 10)]
|
||||
sample_size: usize,
|
||||
/// Step used for testing levels beyond 20 in the hypercube.
/// Example: with a step of 3, the tested levels would be 1 through 20, then 21, 24, 27, etc.
|
||||
#[clap(long, short = 's', default_value_t = 1)]
|
||||
steps: usize,
|
||||
/// Which algorithm to measure FFT noise for
|
||||
#[clap(long, short = 'a', value_parser = [
|
||||
EXT_PROD_ALGO,
|
||||
MULTI_BIT_EXT_PROD_ALGO,
|
||||
STD_MULTI_BIT_EXT_PROD_ALGO,
|
||||
EXT_PROD_U128_SPLIT_ALGO,
|
||||
EXT_PROD_U128_ALGO,
|
||||
], default_value = "")]
|
||||
algorithm: String,
|
||||
multi_bit_grouping_factor: Option<usize>,
|
||||
#[clap(long, short = 'q')]
|
||||
modulus_log2: Option<u32>,
|
||||
#[clap(long, short = 'd', default_value = ".")]
|
||||
dir: String,
|
||||
#[clap(long, default_value_t = false)]
|
||||
timing_only: bool,
|
||||
}
|
||||
|
||||
fn variance_to_stddev(var: Variance) -> StandardDev {
|
||||
StandardDev::from_standard_dev(var.get_standard_dev())
|
||||
}
|
||||
|
||||
fn get_analysis_output_file(dir: &str, id: usize) -> std::fs::File {
|
||||
match OpenOptions::new()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.append(true)
|
||||
.create(true)
|
||||
.open(format!("{dir}/{id}.algo_sample_acquistion"))
|
||||
{
|
||||
Err(why) => panic!("{why}"),
|
||||
Ok(file) => file,
|
||||
}
|
||||
}
|
||||
|
||||
fn prepare_output_file_header(dir: &str, id: usize) {
|
||||
let mut file = get_analysis_output_file(dir, id);
|
||||
let header =
|
||||
"polynomial_size, glwe_dimension, decomposition_level_count, decomposition_base_log, \
|
||||
ggsw_encrypted_value, input_variance, output_variance, predicted_variance, mean_runtime_ns, \
|
||||
prep_time_ns\n";
|
||||
let _ = file.write(header.as_bytes()).unwrap();
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn write_to_file<Scalar: UnsignedInteger + std::fmt::Display>(
|
||||
params: &GlweCiphertextGgswCiphertextExternalProductParameters<Scalar>,
|
||||
input_stddev: StandardDev,
|
||||
output_stddev: StandardDev,
|
||||
pred_stddev: StandardDev,
|
||||
mean_runtime_ns: u128,
|
||||
mean_prep_time_ns: u128,
|
||||
dir: &str,
|
||||
id: usize,
|
||||
) {
|
||||
let data_to_save = format!(
|
||||
"{}, {}, {}, {}, {}, {}, {}, {}, {}, {}\n",
|
||||
params.polynomial_size.0,
|
||||
params.glwe_dimension.0,
|
||||
params.decomposition_level_count.0,
|
||||
params.decomposition_base_log.0,
|
||||
params.ggsw_encrypted_value,
|
||||
input_stddev.get_variance(),
|
||||
output_stddev.get_variance(),
|
||||
pred_stddev.get_variance(),
|
||||
mean_runtime_ns,
|
||||
mean_prep_time_ns,
|
||||
);
|
||||
|
||||
let mut file = get_analysis_output_file(dir, id);
|
||||
|
||||
let _ = file.write(data_to_save.as_bytes()).unwrap();
|
||||
}
|
||||
|
||||
fn minimal_variance_for_security(k: GlweDimension, size: PolynomialSize, modulus_log2: u32) -> f64 {
|
||||
minimal_variance_glwe(k.0 as u64, size.0 as u64, modulus_log2, 128)
|
||||
}
|
||||
|
||||
fn mean(data: &[f64]) -> Option<f64> {
|
||||
// adapted from https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
|
||||
let sum: f64 = data.iter().sum();
|
||||
let count = data.len();
|
||||
|
||||
match count {
|
||||
positive if positive > 0 => Some(sum / count as f64),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
fn std_deviation(data: &[f64]) -> Option<StandardDev> {
|
||||
// from https://rust-lang-nursery.github.io/rust-cookbook/science/mathematics/statistics.html
|
||||
// replacing the mean by 0. as we theoretically know it
|
||||
match (mean(data), data.len()) {
|
||||
(Some(_data_mean), count) if count > 0 => {
|
||||
let variance = data
|
||||
.iter()
|
||||
.map(|&value| {
|
||||
let diff = 0. - value;
|
||||
|
||||
diff * diff
|
||||
})
|
||||
.sum::<f64>()
|
||||
/ count as f64;
|
||||
|
||||
Some(StandardDev::from_standard_dev(variance.sqrt()))
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
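// A minimal sanity check of the zero-mean convention used above (an illustrative sketch, not
// part of the measurement tool): with samples [1.0, -1.0] the variance around 0 is 1.0.
#[cfg(test)]
mod std_deviation_sketch {
    use super::*;

    #[test]
    fn variance_is_taken_around_zero() {
        let std_dev = std_deviation(&[1.0, -1.0]).unwrap();
        assert!((std_dev.get_standard_dev() - 1.0).abs() < 1e-12);
    }
}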
|
||||
|
||||
fn compute_torus_diff<Scalar: UnsignedInteger>(
|
||||
errs: &mut [f64],
|
||||
output: Vec<Scalar>,
|
||||
input: Vec<Scalar>,
|
||||
ciphertext_modulus: CiphertextModulus<Scalar>,
|
||||
bit: Scalar,
|
||||
) {
|
||||
if bit == Scalar::ONE {
|
||||
for (&out, (&inp, err)) in output.iter().zip(input.iter().zip(errs.iter_mut())) {
|
||||
*err = torus_modular_diff(out, inp, ciphertext_modulus);
|
||||
}
|
||||
} else if bit == Scalar::ZERO {
|
||||
for (&out, err) in output.iter().zip(errs.iter_mut()) {
|
||||
*err = torus_modular_diff(out, Scalar::ZERO, ciphertext_modulus);
|
||||
}
|
||||
} else {
|
||||
panic!("Not a bit: {:?}", bit);
|
||||
}
|
||||
}
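// Convention of the error computation above: when the GGSW encrypts 1 the external product
// should reproduce the input GLWE, so the error is the torus distance between output and input;
// when it encrypts 0 the expected output is 0, so the error is the distance of the output to 0.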
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
struct BaseLevel {
|
||||
base: DecompositionBaseLog,
|
||||
level: DecompositionLevelCount,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
struct HyperCubeParams {
|
||||
glwe_dimension: GlweDimension,
|
||||
base_level: BaseLevel,
|
||||
polynomial_size: PolynomialSize,
|
||||
}
|
||||
|
||||
fn filter_b_l(bases: &[usize], levels: &[usize], preserved_mantissa: usize) -> Vec<BaseLevel> {
|
||||
let mut bases_levels = vec![];
|
||||
for (b, l) in iproduct!(bases, levels) {
|
||||
if b * l <= preserved_mantissa {
|
||||
bases_levels.push(BaseLevel {
|
||||
base: DecompositionBaseLog(*b),
|
||||
level: DecompositionLevelCount(*l),
|
||||
});
|
||||
}
|
||||
}
|
||||
bases_levels
|
||||
}
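// Illustrative check of the mantissa constraint above (a sketch, not part of the measurement
// tool): for 64-bit scalars the decomposition must fit in the 53-bit f64 mantissa, so
// base_log * level must not exceed 53.
#[cfg(test)]
mod filter_b_l_sketch {
    use super::*;

    #[test]
    fn keeps_only_pairs_fitting_in_the_mantissa() {
        let kept = filter_b_l(&[23], &[2, 3], 53);
        assert_eq!(kept.len(), 1);
        assert_eq!(
            kept[0],
            BaseLevel {
                base: DecompositionBaseLog(23),
                level: DecompositionLevelCount(2),
            }
        );
    }
}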
|
||||
|
||||
fn ggsw_scalar_size(k: GlweDimension, l: DecompositionLevelCount, n: PolynomialSize) -> usize {
|
||||
let (k, l, n) = (k.0, l.0, n.0);
|
||||
(k + 1).pow(2) * l * n
|
||||
}
|
||||
|
||||
fn scalar_muls_per_ext_prod(
|
||||
k: GlweDimension,
|
||||
l: DecompositionLevelCount,
|
||||
n: PolynomialSize,
|
||||
) -> usize {
|
||||
// Each coefficient of the ggsw is involved once in an fmadd operation
|
||||
ggsw_scalar_size(k, l, n)
|
||||
}
|
||||
|
||||
fn ext_prod_cost(k: GlweDimension, l: DecompositionLevelCount, n: PolynomialSize) -> usize {
|
||||
// Conversions going from integer to float and from float to integer
|
||||
let conversion_cost = 2 * k.to_glwe_size().0 * n.0;
|
||||
// Fwd and back
|
||||
let fft_cost = 2 * l.0 * k.to_glwe_size().0 * n.0 * n.0.ilog2() as usize;
|
||||
scalar_muls_per_ext_prod(k, l, n) + conversion_cost + fft_cost
|
||||
}
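// Worked example of the cost model above (a rough operation count, not wall-clock time), for
// k = 1, l = 2, N = 2048:
//   scalar muls: (k + 1)^2 * l * N                  = 4 * 2 * 2048           = 16_384
//   conversions: 2 * (k + 1) * N                    = 2 * 2 * 2048           = 8_192
//   fft (fwd + back): 2 * l * (k + 1) * N * log2(N) = 2 * 2 * 2 * 2048 * 11  = 180_224
//   total: 16_384 + 8_192 + 180_224                 = 204_800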
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn ks_cost(
|
||||
input_lwe_dimension: LweDimension,
|
||||
output_lwe_dimension: LweDimension,
|
||||
ks_level_count: DecompositionLevelCount,
|
||||
) -> usize {
|
||||
// times 2 as it's multiply and add
|
||||
2 * input_lwe_dimension.0 * ks_level_count.0 * output_lwe_dimension.0
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn pbs_cost(
|
||||
w: LweDimension,
|
||||
k: GlweDimension,
|
||||
l: DecompositionLevelCount,
|
||||
n: PolynomialSize,
|
||||
) -> usize {
|
||||
w.0 * ext_prod_cost(k, l, n)
|
||||
}
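// Note: under this model a full PBS over an input LWE dimension w performs w external products,
// hence the `w.0 *` factor above; the keyswitch and PBS costs are kept for reference even though
// only the external product is sampled here.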
|
||||
|
||||
fn main() {
|
||||
let args = Args::parse();
|
||||
let tot = args.tot;
|
||||
let id = args.id;
|
||||
let total_repetitions = args.repetitions;
|
||||
let base_sample_size = args.sample_size;
|
||||
let algo = args.algorithm;
|
||||
let dir = &args.dir;
|
||||
let timing_only = args.timing_only;
|
||||
|
||||
if algo.is_empty() {
|
||||
panic!("No algorithm provided")
|
||||
}
|
||||
|
||||
let grouping_factor = match algo.as_str() {
|
||||
MULTI_BIT_EXT_PROD_ALGO | STD_MULTI_BIT_EXT_PROD_ALGO => Some(LweBskGroupingFactor(
|
||||
args.multi_bit_grouping_factor
|
||||
.expect("Required multi_bit_grouping_factor when sampling multi bit alogrithms"),
|
||||
)),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
let modulus: u128 = match args.modulus_log2 {
|
||||
Some(modulus_log2) => {
|
||||
if modulus_log2 > 128 {
|
||||
panic!("Got modulus_log2 > 128, this is not supported");
|
||||
}
|
||||
|
||||
match algo.as_str() {
|
||||
EXT_PROD_ALGO | MULTI_BIT_EXT_PROD_ALGO | STD_MULTI_BIT_EXT_PROD_ALGO => {
|
||||
if modulus_log2 > 64 {
|
||||
panic!("Got modulus_log2 > 64, for 64 bits scalars");
|
||||
}
|
||||
|
||||
1u128 << modulus_log2
|
||||
}
|
||||
EXT_PROD_U128_SPLIT_ALGO | EXT_PROD_U128_ALGO => {
|
||||
if modulus_log2 == 128 {
|
||||
// native
|
||||
0
|
||||
} else {
|
||||
1u128 << modulus_log2
|
||||
}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
// Native
|
||||
None => 0,
|
||||
};
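// Example of the encoding above: `--modulus-log2 32` yields modulus = 2^32, while omitting the
// flag (or passing 128 for the u128 algorithms) selects the native modulus, encoded as 0 for
// `CiphertextModulus::try_new` below.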
|
||||
|
||||
// TODO manage moduli < 2^53
|
||||
let (stepped_levels_cutoff, max_base_log_inclusive, preserved_mantissa) = match algo.as_str() {
|
||||
EXT_PROD_U128_ALGO | EXT_PROD_U128_SPLIT_ALGO => (41, 128, 106),
|
||||
_ => (21, 64, 53),
|
||||
};
|
||||
|
||||
if timing_only {
|
||||
#[cfg(feature = "gpu")]
|
||||
return ks_pbs_timing::timing_experiment_gpu(&algo, preserved_mantissa, modulus);
|
||||
#[cfg(not(feature = "gpu"))]
|
||||
return ks_pbs_timing::timing_experiment(&algo, preserved_mantissa, modulus);
|
||||
}
|
||||
|
||||
assert_ne!(
|
||||
tot, 0,
|
||||
"Got tot = 0 for noise sampling experiment, unsupported"
|
||||
);
|
||||
|
||||
// Parameter Grid
|
||||
let polynomial_sizes = vec![
|
||||
PolynomialSize(1 << 8),
|
||||
/* PolynomialSize(1 << 9),
|
||||
PolynomialSize(1 << 10),
|
||||
PolynomialSize(1 << 11),
|
||||
PolynomialSize(1 << 12),
|
||||
PolynomialSize(1 << 13),
|
||||
PolynomialSize(1 << 14),*/
|
||||
];
|
||||
let max_polynomial_size = polynomial_sizes.iter().copied().max().unwrap();
|
||||
let glwe_dimensions = vec![
|
||||
GlweDimension(1),
|
||||
/*GlweDimension(2),
|
||||
GlweDimension(3),
|
||||
GlweDimension(4),
|
||||
GlweDimension(5),*/
|
||||
];
|
||||
|
||||
let base_logs: Vec<_> = (1..=max_base_log_inclusive).collect();
|
||||
let mut levels = (1..stepped_levels_cutoff).collect::<Vec<_>>();
|
||||
let mut stepped_levels = (stepped_levels_cutoff..=max_base_log_inclusive)
|
||||
.step_by(args.steps)
|
||||
.collect::<Vec<_>>();
|
||||
levels.append(&mut stepped_levels);
|
||||
let bases_levels = filter_b_l(&base_logs, &levels, preserved_mantissa);
|
||||
|
||||
let hypercube = iproduct!(glwe_dimensions, bases_levels, polynomial_sizes);
|
||||
let mut hypercube: Vec<HyperCubeParams> = hypercube
|
||||
.map(
|
||||
|(glwe_dimension, base_level, polynomial_size)| HyperCubeParams {
|
||||
glwe_dimension,
|
||||
base_level,
|
||||
polynomial_size,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
|
||||
hypercube.sort_by(|a, b| {
|
||||
let k_a = a.glwe_dimension;
|
||||
let l_a = a.base_level.level;
|
||||
let n_a = a.polynomial_size;
|
||||
|
||||
let k_b = b.glwe_dimension;
|
||||
let l_b = b.base_level.level;
|
||||
let n_b = b.polynomial_size;
|
||||
|
||||
let muls_a = ext_prod_cost(k_a, l_a, n_a);
|
||||
let muls_b = ext_prod_cost(k_b, l_b, n_b);
|
||||
|
||||
muls_a.cmp(&muls_b)
|
||||
});
|
||||
|
||||
// Pick elements of increasing complexity stepping by the number of threads to balance the
|
||||
// computation cost among threads
|
||||
let chunk: Vec<_> = hypercube.iter().skip(id).step_by(tot).collect();
|
||||
let chunk_size = chunk.len();
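// Sketch of the round-robin split above, assuming 8 parameter sets and tot = 3 threads:
//   thread 0 -> indices 0, 3, 6
//   thread 1 -> indices 1, 4, 7
//   thread 2 -> indices 2, 5
// Because the sets are sorted by increasing ext_prod_cost, each thread receives a comparable mix
// of cheap and expensive configurations.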
|
||||
|
||||
println!(
|
||||
"-> Thread #{id} computing chunk #{id} of length {chunk_size} \
|
||||
(processing elements #{id} + k * {tot})",
|
||||
);
|
||||
|
||||
prepare_output_file_header(dir, id);
|
||||
|
||||
let mut seeder = new_seeder();
|
||||
let seeder = seeder.as_mut();
|
||||
|
||||
let mut secret_random_generator =
|
||||
SecretRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed());
|
||||
let mut encryption_random_generator =
|
||||
EncryptionRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed(), seeder);
|
||||
|
||||
let u64_tool =
|
||||
|secret_rng: &mut SecretRandomGenerator<ActivatedRandomGenerator>,
|
||||
encrypt_rng: &mut EncryptionRandomGenerator<ActivatedRandomGenerator>| {
|
||||
for (
|
||||
curr_idx,
|
||||
HyperCubeParams {
|
||||
glwe_dimension,
|
||||
base_level:
|
||||
BaseLevel {
|
||||
base: decomposition_base_log,
|
||||
level: decomposition_level_count,
|
||||
},
|
||||
polynomial_size,
|
||||
},
|
||||
) in chunk.iter().enumerate()
|
||||
{
|
||||
let glwe_dimension = *glwe_dimension;
|
||||
let decomposition_base_log = *decomposition_base_log;
|
||||
let decomposition_level_count = *decomposition_level_count;
|
||||
let polynomial_size = *polynomial_size;
|
||||
let ciphertext_modulus = CiphertextModulus::try_new(modulus).unwrap();
|
||||
|
||||
let modulus_log2 = if ciphertext_modulus.is_native_modulus() {
|
||||
u64::BITS
|
||||
} else if ciphertext_modulus.is_power_of_two() {
|
||||
ciphertext_modulus.get_custom_modulus().ilog2()
|
||||
} else {
|
||||
todo!("Non power of 2 moduli are currently not supported")
|
||||
};
|
||||
|
||||
println!("Chunk part: {:?}/{chunk_size:?} done", curr_idx + 1);
|
||||
let sample_size = base_sample_size * max_polynomial_size.0 / polynomial_size.0;
|
||||
let ggsw_noise = Gaussian::from_dispersion_parameter(
|
||||
Variance::from_variance(minimal_variance_for_security(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
modulus_log2,
|
||||
)),
|
||||
0.0,
|
||||
);
|
||||
// We measure the noise added to a GLWE ciphertext, so here we can choose to have no
// input noise.
// It also avoids potential cases where the noise is so large that it gets decomposed
// during computations; this is an assumption we apparently already make ("small noise
// regime").
|
||||
let glwe_noise = Gaussian::from_dispersion_parameter(Variance(0.0), 0.0);
|
||||
// Variance::from_variance(minimal_variance_for_security_64(glwe_dimension,
|
||||
// poly_size));
|
||||
|
||||
let parameters = GlweCiphertextGgswCiphertextExternalProductParameters::<u64> {
|
||||
ggsw_noise,
|
||||
glwe_noise,
|
||||
glwe_dimension,
|
||||
ggsw_encrypted_value: 1,
|
||||
polynomial_size,
|
||||
decomposition_base_log,
|
||||
decomposition_level_count,
|
||||
ciphertext_modulus,
|
||||
};
|
||||
|
||||
println!("params: {parameters:?}");
|
||||
|
||||
let noise_prediction =
|
||||
match algo.as_str() {
|
||||
EXT_PROD_ALGO => noise_estimation::classic_pbs_estimate_external_product_noise_with_binary_ggsw_and_glwe(
|
||||
polynomial_size,
|
||||
glwe_dimension,
|
||||
ggsw_noise.standard_dev(),
|
||||
decomposition_base_log,
|
||||
decomposition_level_count,
|
||||
modulus_log2,
|
||||
),
|
||||
MULTI_BIT_EXT_PROD_ALGO => noise_estimation::multi_bit_pbs_estimate_external_product_noise_with_binary_ggsw_and_glwe(
|
||||
polynomial_size,
|
||||
glwe_dimension,
|
||||
ggsw_noise.standard_dev(),
|
||||
decomposition_base_log,
|
||||
decomposition_level_count,
|
||||
modulus_log2,
|
||||
grouping_factor.unwrap(),
|
||||
),
|
||||
STD_MULTI_BIT_EXT_PROD_ALGO => noise_estimation::multi_bit_pbs_estimate_external_product_noise_with_binary_ggsw_and_glwe(
|
||||
polynomial_size,
|
||||
glwe_dimension,
|
||||
ggsw_noise.standard_dev(),
|
||||
decomposition_base_log,
|
||||
decomposition_level_count,
|
||||
modulus_log2,
|
||||
grouping_factor.unwrap(),
|
||||
),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let fft = Fft::new(parameters.polynomial_size);
|
||||
let mut computation_buffers = ComputationBuffers::new();
|
||||
computation_buffers.resize(
|
||||
add_external_product_assign_mem_optimized_requirement::<u64>(
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
fft.as_view(),
|
||||
)
|
||||
.unwrap()
|
||||
.unaligned_bytes_required()
|
||||
.max(
|
||||
fft.as_view()
|
||||
.forward_scratch()
|
||||
.unwrap()
|
||||
.unaligned_bytes_required(),
|
||||
),
|
||||
);
|
||||
|
||||
let mut errors = vec![0.; sample_size * polynomial_size.0 * total_repetitions];
|
||||
|
||||
if noise_prediction.get_variance() < 1. / 12. {
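// 1/12 is the variance of a uniform random variable over the normalized torus, i.e. the
// variance of pure noise; predictions at or beyond that point are not worth sampling.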
|
||||
let mut total_runtime_ns = 0u128;
|
||||
let mut total_prep_time_ns = 0u128;
|
||||
|
||||
for (_, errs) in (0..total_repetitions)
|
||||
.zip(errors.chunks_mut(sample_size * polynomial_size.0))
|
||||
{
|
||||
let mut raw_inputs = Vec::with_capacity(sample_size);
|
||||
let mut outputs = Vec::with_capacity(sample_size);
|
||||
|
||||
let (sample_runtime_ns, prep_time_ns) = match algo.as_str() {
|
||||
EXT_PROD_ALGO => classic_pbs_external_product(
|
||||
¶meters,
|
||||
&mut raw_inputs,
|
||||
&mut outputs,
|
||||
sample_size,
|
||||
secret_rng,
|
||||
encrypt_rng,
|
||||
fft.as_view(),
|
||||
&mut computation_buffers,
|
||||
),
|
||||
MULTI_BIT_EXT_PROD_ALGO => multi_bit_pbs_external_product(
|
||||
¶meters,
|
||||
&mut raw_inputs,
|
||||
&mut outputs,
|
||||
sample_size,
|
||||
secret_rng,
|
||||
encrypt_rng,
|
||||
fft.as_view(),
|
||||
&mut computation_buffers,
|
||||
grouping_factor.unwrap(),
|
||||
),
|
||||
STD_MULTI_BIT_EXT_PROD_ALGO => std_multi_bit_pbs_external_product(
|
||||
¶meters,
|
||||
&mut raw_inputs,
|
||||
&mut outputs,
|
||||
sample_size,
|
||||
secret_rng,
|
||||
encrypt_rng,
|
||||
fft.as_view(),
|
||||
&mut computation_buffers,
|
||||
grouping_factor.unwrap(),
|
||||
),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
total_runtime_ns += sample_runtime_ns;
|
||||
total_prep_time_ns += prep_time_ns;
|
||||
|
||||
let raw_input_plaintext_vector =
|
||||
raw_inputs.into_iter().flatten().collect::<Vec<_>>();
|
||||
let output_plaintext_vector =
|
||||
outputs.into_iter().flatten().collect::<Vec<_>>();
|
||||
|
||||
compute_torus_diff(
|
||||
errs,
|
||||
output_plaintext_vector,
|
||||
raw_input_plaintext_vector,
|
||||
parameters.ciphertext_modulus,
|
||||
parameters.ggsw_encrypted_value,
|
||||
);
|
||||
}
|
||||
let _mean_err = mean(&errors).unwrap();
|
||||
let std_err = std_deviation(&errors).unwrap();
|
||||
let mean_runtime_ns =
|
||||
total_runtime_ns / ((total_repetitions * sample_size) as u128);
|
||||
// GGSW is prepared only once per sample
|
||||
let mean_prep_time_ns = total_prep_time_ns / (total_repetitions as u128);
|
||||
write_to_file(
|
||||
¶meters,
|
||||
parameters.glwe_noise.standard_dev(),
|
||||
std_err,
|
||||
variance_to_stddev(noise_prediction),
|
||||
mean_runtime_ns,
|
||||
mean_prep_time_ns,
|
||||
dir,
|
||||
id,
|
||||
);
|
||||
|
||||
// TODO output raw data
|
||||
} else {
|
||||
write_to_file(
|
||||
¶meters,
|
||||
parameters.glwe_noise.standard_dev(),
|
||||
variance_to_stddev(Variance::from_variance(1. / 12.)),
|
||||
variance_to_stddev(Variance::from_variance(1. / 12.)),
|
||||
0,
|
||||
0,
|
||||
dir,
|
||||
id,
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let u128_tool =
|
||||
|secret_rng: &mut SecretRandomGenerator<ActivatedRandomGenerator>,
|
||||
encrypt_rng: &mut EncryptionRandomGenerator<ActivatedRandomGenerator>| {
|
||||
for (
|
||||
curr_idx,
|
||||
HyperCubeParams {
|
||||
glwe_dimension,
|
||||
base_level:
|
||||
BaseLevel {
|
||||
base: decomposition_base_log,
|
||||
level: decomposition_level_count,
|
||||
},
|
||||
polynomial_size,
|
||||
},
|
||||
) in chunk.iter().enumerate()
|
||||
{
|
||||
let glwe_dimension = *glwe_dimension;
|
||||
let decomposition_base_log = *decomposition_base_log;
|
||||
let decomposition_level_count = *decomposition_level_count;
|
||||
let polynomial_size = *polynomial_size;
|
||||
let ciphertext_modulus = CiphertextModulus::try_new(modulus).unwrap();
|
||||
|
||||
let modulus_log2 = if ciphertext_modulus.is_native_modulus() {
|
||||
u128::BITS
|
||||
} else if ciphertext_modulus.is_power_of_two() {
|
||||
ciphertext_modulus.get_custom_modulus().ilog2()
|
||||
} else {
|
||||
todo!("Non power of 2 moduli are currently not supported")
|
||||
};
|
||||
|
||||
println!("Chunk part: {:?}/{chunk_size:?} done", curr_idx + 1);
|
||||
let sample_size = base_sample_size * max_polynomial_size.0 / polynomial_size.0;
|
||||
let ggsw_noise = Gaussian::from_dispersion_parameter(
|
||||
Variance::from_variance(minimal_variance_for_security(
|
||||
glwe_dimension,
|
||||
polynomial_size,
|
||||
modulus_log2,
|
||||
)),
|
||||
0.0,
|
||||
);
|
||||
// We measure the noise added to a GLWE ciphertext, so here we can choose to have no
// input noise.
// It also avoids potential cases where the noise is so large that it gets decomposed
// during computations; this is an assumption we apparently already make ("small noise
// regime").
|
||||
let glwe_noise = Gaussian::from_dispersion_parameter(Variance(0.0), 0.0);
|
||||
// Variance::from_variance(minimal_variance_for_security_64(glwe_dimension,
|
||||
// poly_size));
|
||||
|
||||
let parameters = GlweCiphertextGgswCiphertextExternalProductParameters::<u128> {
|
||||
ggsw_noise,
|
||||
glwe_noise,
|
||||
glwe_dimension,
|
||||
ggsw_encrypted_value: 1,
|
||||
polynomial_size,
|
||||
decomposition_base_log,
|
||||
decomposition_level_count,
|
||||
ciphertext_modulus,
|
||||
};
|
||||
|
||||
println!("params: {parameters:?}");
|
||||
|
||||
let noise_prediction =
|
||||
match algo.as_str() {
|
||||
EXT_PROD_U128_SPLIT_ALGO | EXT_PROD_U128_ALGO => noise_estimation::classic_pbs_estimate_external_product_noise_with_binary_ggsw_and_glwe(
|
||||
polynomial_size,
|
||||
glwe_dimension,
|
||||
ggsw_noise.standard_dev(),
|
||||
decomposition_base_log,
|
||||
decomposition_level_count,
|
||||
modulus_log2,
|
||||
),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let fft = Fft128::new(parameters.polynomial_size);
|
||||
let mut computation_buffers = ComputationBuffers::new();
|
||||
computation_buffers.resize(
|
||||
programmable_bootstrap_f128_lwe_ciphertext_mem_optimized_requirement::<u128>(
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
fft.as_view(),
|
||||
)
|
||||
.unwrap()
|
||||
.unaligned_bytes_required()
|
||||
.max(
|
||||
fft.as_view()
|
||||
.backward_scratch()
|
||||
.unwrap()
|
||||
.unaligned_bytes_required(),
|
||||
),
|
||||
);
|
||||
|
||||
let mut errors = vec![0.; sample_size * polynomial_size.0 * total_repetitions];
|
||||
|
||||
if noise_prediction.get_variance() < 1. / 12. {
|
||||
let mut total_runtime_ns = 0u128;
|
||||
let mut total_prep_time_ns = 0u128;
|
||||
|
||||
for (_, errs) in (0..total_repetitions)
|
||||
.zip(errors.chunks_mut(sample_size * polynomial_size.0))
|
||||
{
|
||||
let mut raw_inputs = Vec::with_capacity(sample_size);
|
||||
let mut outputs = Vec::with_capacity(sample_size);
|
||||
|
||||
let (sample_runtime_ns, prep_time_ns) = match algo.as_str() {
|
||||
EXT_PROD_U128_SPLIT_ALGO => classic_pbs_external_product_u128_split(
|
||||
¶meters,
|
||||
&mut raw_inputs,
|
||||
&mut outputs,
|
||||
sample_size,
|
||||
secret_rng,
|
||||
encrypt_rng,
|
||||
fft.as_view(),
|
||||
&mut computation_buffers,
|
||||
),
|
||||
EXT_PROD_U128_ALGO => classic_pbs_external_product_u128(
|
||||
¶meters,
|
||||
&mut raw_inputs,
|
||||
&mut outputs,
|
||||
sample_size,
|
||||
secret_rng,
|
||||
encrypt_rng,
|
||||
fft.as_view(),
|
||||
&mut computation_buffers,
|
||||
),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
total_runtime_ns += sample_runtime_ns;
|
||||
total_prep_time_ns += prep_time_ns;
|
||||
|
||||
let raw_input_plaintext_vector =
|
||||
raw_inputs.into_iter().flatten().collect::<Vec<_>>();
|
||||
let output_plaintext_vector =
|
||||
outputs.into_iter().flatten().collect::<Vec<_>>();
|
||||
|
||||
compute_torus_diff(
|
||||
errs,
|
||||
output_plaintext_vector,
|
||||
raw_input_plaintext_vector,
|
||||
parameters.ciphertext_modulus,
|
||||
parameters.ggsw_encrypted_value,
|
||||
);
|
||||
}
|
||||
let _mean_err = mean(&errors).unwrap();
|
||||
let std_err = std_deviation(&errors).unwrap();
|
||||
let mean_runtime_ns =
|
||||
total_runtime_ns / ((total_repetitions * sample_size) as u128);
|
||||
// GGSW is prepared only once per sample
|
||||
let mean_prep_time_ns = total_prep_time_ns / (total_repetitions as u128);
|
||||
write_to_file(
|
||||
¶meters,
|
||||
parameters.glwe_noise.standard_dev(),
|
||||
std_err,
|
||||
variance_to_stddev(noise_prediction),
|
||||
mean_runtime_ns,
|
||||
mean_prep_time_ns,
|
||||
dir,
|
||||
id,
|
||||
);
|
||||
|
||||
// TODO output raw data
|
||||
} else {
|
||||
write_to_file(
|
||||
¶meters,
|
||||
parameters.glwe_noise.standard_dev(),
|
||||
variance_to_stddev(Variance::from_variance(1. / 12.)),
|
||||
variance_to_stddev(Variance::from_variance(1. / 12.)),
|
||||
0,
|
||||
0,
|
||||
dir,
|
||||
id,
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match algo.as_str() {
|
||||
EXT_PROD_ALGO | MULTI_BIT_EXT_PROD_ALGO | STD_MULTI_BIT_EXT_PROD_ALGO => u64_tool(
|
||||
&mut secret_random_generator,
|
||||
&mut encryption_random_generator,
|
||||
),
|
||||
EXT_PROD_U128_ALGO | EXT_PROD_U128_SPLIT_ALGO => u128_tool(
|
||||
&mut secret_random_generator,
|
||||
&mut encryption_random_generator,
|
||||
),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
}
|
||||
50 tfhe-rs-cost-model/src/noise_estimation.rs (Normal file)
@@ -0,0 +1,50 @@
|
||||
use concrete_cpu_noise_model::gaussian_noise;
|
||||
use tfhe::core_crypto::prelude::*;
|
||||
|
||||
pub fn classic_pbs_estimate_external_product_noise_with_binary_ggsw_and_glwe<D1>(
|
||||
polynomial_size: PolynomialSize,
|
||||
glwe_dimension: GlweDimension,
|
||||
ggsw_noise: D1,
|
||||
base_log: DecompositionBaseLog,
|
||||
level: DecompositionLevelCount,
|
||||
log2_modulus: u32,
|
||||
) -> Variance
|
||||
where
|
||||
D1: DispersionParameter,
|
||||
{
|
||||
Variance(
|
||||
gaussian_noise::noise::external_product_glwe::theoretical_variance_external_product_glwe(
|
||||
glwe_dimension.0 as u64,
|
||||
polynomial_size.0 as u64,
|
||||
base_log.0 as u64,
|
||||
level.0 as u64,
|
||||
log2_modulus,
|
||||
ggsw_noise.get_variance(),
|
||||
),
|
||||
)
|
||||
}
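// Example call (hypothetical parameter values, for illustration only):
// let _variance = classic_pbs_estimate_external_product_noise_with_binary_ggsw_and_glwe(
//     PolynomialSize(2048),
//     GlweDimension(1),
//     StandardDev::from_standard_dev(2.0_f64.powi(-52)),
//     DecompositionBaseLog(23),
//     DecompositionLevelCount(1),
//     64,
// );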
|
||||
|
||||
pub fn multi_bit_pbs_estimate_external_product_noise_with_binary_ggsw_and_glwe<D1>(
|
||||
polynomial_size: PolynomialSize,
|
||||
glwe_dimension: GlweDimension,
|
||||
ggsw_noise: D1,
|
||||
base_log: DecompositionBaseLog,
|
||||
level: DecompositionLevelCount,
|
||||
log2_modulus: u32,
|
||||
grouping_factor: LweBskGroupingFactor,
|
||||
) -> Variance
|
||||
where
|
||||
D1: DispersionParameter,
|
||||
{
|
||||
Variance(
|
||||
gaussian_noise::noise::multi_bit_external_product_glwe::theoretical_variance_multi_bit_external_product_glwe(
|
||||
glwe_dimension.0 as u64,
|
||||
polynomial_size.0 as u64,
|
||||
base_log.0 as u64,
|
||||
level.0 as u64,
|
||||
log2_modulus,
|
||||
ggsw_noise.get_variance(),
|
||||
grouping_factor.0 as u32,
|
||||
),
|
||||
)
|
||||
}
|
||||
520 tfhe-rs-cost-model/src/operators/classic_pbs.rs (Normal file)
@@ -0,0 +1,520 @@
|
||||
use crate::GlweCiphertextGgswCiphertextExternalProductParameters;
|
||||
use aligned_vec::CACHELINE_ALIGN;
|
||||
use tfhe::core_crypto::commons::math::decomposition::SignedDecomposer;
|
||||
use tfhe::core_crypto::commons::parameters::{DecompositionBaseLog, DecompositionLevelCount};
|
||||
use tfhe::core_crypto::fft_impl::fft128::crypto::ggsw::{
|
||||
add_external_product_assign, Fourier128GgswCiphertext,
|
||||
};
|
||||
use tfhe::core_crypto::fft_impl::fft128_u128::crypto::ggsw::add_external_product_assign_split;
|
||||
use tfhe::core_crypto::fft_impl::fft128_u128::math::fft::Fft128View;
|
||||
use tfhe::core_crypto::fft_impl::fft64::crypto::ggsw::FourierGgswCiphertext;
|
||||
use tfhe::core_crypto::fft_impl::fft64::math::fft::FftView;
|
||||
use tfhe::core_crypto::prelude::{
|
||||
add_external_product_assign_mem_optimized, allocate_and_generate_new_binary_glwe_secret_key,
|
||||
convert_standard_ggsw_ciphertext_to_fourier_mem_optimized, decrypt_glwe_ciphertext,
|
||||
encrypt_constant_ggsw_ciphertext, encrypt_glwe_ciphertext, ActivatedRandomGenerator,
|
||||
CiphertextModulus, ComputationBuffers, EncryptionRandomGenerator, GgswCiphertext,
|
||||
GlweCiphertext, GlweCiphertextMutView, GlweCiphertextView, Numeric, Cleartext, PlaintextCount,
|
||||
PlaintextList, SecretRandomGenerator,
|
||||
};
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn classic_pbs_external_product(
|
||||
parameters: &GlweCiphertextGgswCiphertextExternalProductParameters<u64>,
|
||||
raw_inputs: &mut Vec<Vec<u64>>,
|
||||
outputs: &mut Vec<Vec<u64>>,
|
||||
sample_size: usize,
|
||||
secret_random_generator: &mut SecretRandomGenerator<ActivatedRandomGenerator>,
|
||||
encryption_random_generator: &mut EncryptionRandomGenerator<ActivatedRandomGenerator>,
|
||||
fft: FftView,
|
||||
computation_buffers: &mut ComputationBuffers,
|
||||
) -> (u128, u128) {
|
||||
let ciphertext_modulus = parameters.ciphertext_modulus;
|
||||
|
||||
let glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
|
||||
parameters.glwe_dimension,
|
||||
parameters.polynomial_size,
|
||||
secret_random_generator,
|
||||
);
|
||||
|
||||
let mut std_ggsw = GgswCiphertext::new(
|
||||
0u64,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
parameters.decomposition_base_log,
|
||||
parameters.decomposition_level_count,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
encrypt_constant_ggsw_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&mut std_ggsw,
|
||||
Cleartext(parameters.ggsw_encrypted_value),
|
||||
parameters.ggsw_noise,
|
||||
encryption_random_generator,
|
||||
);
|
||||
|
||||
let mut fourier_ggsw = FourierGgswCiphertext::new(
|
||||
std_ggsw.glwe_size(),
|
||||
std_ggsw.polynomial_size(),
|
||||
std_ggsw.decomposition_base_log(),
|
||||
std_ggsw.decomposition_level_count(),
|
||||
);
|
||||
|
||||
convert_standard_ggsw_ciphertext_to_fourier_mem_optimized(
|
||||
&std_ggsw,
|
||||
&mut fourier_ggsw,
|
||||
fft,
|
||||
computation_buffers.stack(),
|
||||
);
|
||||
|
||||
let mut sample_runtime_ns = 0u128;
|
||||
|
||||
for _ in 0..sample_size {
|
||||
let mut input_plaintext_list =
|
||||
PlaintextList::new(0u64, PlaintextCount(parameters.polynomial_size.0));
|
||||
encryption_random_generator
|
||||
.fill_slice_with_random_uniform_mask(input_plaintext_list.as_mut());
|
||||
let scaling_to_native_torus = parameters
|
||||
.ciphertext_modulus
|
||||
.get_power_of_two_scaling_to_native_torus();
|
||||
// Shift to match the behavior of the previous concrete-core fixtures
|
||||
// Divide as encryption will encode the power of two in the MSBs
|
||||
input_plaintext_list.as_mut().iter_mut().for_each(|x| {
|
||||
*x = (*x << (<u64 as Numeric>::BITS - parameters.decomposition_base_log.0))
|
||||
/ scaling_to_native_torus
|
||||
});
|
||||
|
||||
// Sanity check
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
let modulus: u64 = ciphertext_modulus.get_custom_modulus() as u64;
|
||||
assert!(input_plaintext_list.as_ref().iter().all(|x| *x < modulus));
|
||||
}
|
||||
|
||||
let mut input_glwe_ciphertext = GlweCiphertext::new(
|
||||
0u64,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
encrypt_glwe_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&mut input_glwe_ciphertext,
|
||||
&input_plaintext_list,
|
||||
parameters.glwe_noise,
|
||||
encryption_random_generator,
|
||||
);
|
||||
|
||||
let mut output_glwe_ciphertext = GlweCiphertext::new(
|
||||
0u64,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
add_external_product_assign_mem_optimized(
|
||||
&mut output_glwe_ciphertext,
|
||||
&fourier_ggsw,
|
||||
&input_glwe_ciphertext,
|
||||
fft,
|
||||
computation_buffers.stack(),
|
||||
);
|
||||
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
// When we convert back from the fourier domain, integer values will contain up to 53
|
||||
// MSBs with information. In our representation of power of 2 moduli < native modulus we
|
||||
// fill the MSBs and leave the LSBs empty; this usage of the signed decomposer allows us to
|
||||
// round while keeping the data in the MSBs
|
||||
let signed_decomposer = SignedDecomposer::new(
|
||||
DecompositionBaseLog(ciphertext_modulus.get_custom_modulus().ilog2() as usize),
|
||||
DecompositionLevelCount(1),
|
||||
);
|
||||
output_glwe_ciphertext
|
||||
.as_mut()
|
||||
.iter_mut()
|
||||
.for_each(|x| *x = signed_decomposer.closest_representable(*x));
|
||||
}
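// Example of the rounding above, for a 2^32 ciphertext modulus stored in the 32 MSBs of a u64:
// the decomposer with base_log = 32 and a single level rounds each coefficient to the closest
// multiple of 2^32, clearing the noisy low bits produced by the inverse FFT while keeping the
// data in the MSBs.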
|
||||
|
||||
let elapsed = start.elapsed().as_nanos();
|
||||
sample_runtime_ns += elapsed;
|
||||
|
||||
let mut output_plaintext_list = input_plaintext_list.clone();
|
||||
decrypt_glwe_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&output_glwe_ciphertext,
|
||||
&mut output_plaintext_list,
|
||||
);
|
||||
|
||||
// Sanity check
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
let modulus: u64 = ciphertext_modulus.get_custom_modulus() as u64;
|
||||
assert!(output_plaintext_list.as_ref().iter().all(|x| *x < modulus));
|
||||
}
|
||||
|
||||
raw_inputs.push(input_plaintext_list.into_container());
|
||||
outputs.push(output_plaintext_list.into_container());
|
||||
}
|
||||
|
||||
// No prep time in this case
|
||||
(sample_runtime_ns, 0)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn classic_pbs_external_product_u128_split(
|
||||
parameters: &GlweCiphertextGgswCiphertextExternalProductParameters<u128>,
|
||||
raw_inputs: &mut Vec<Vec<u128>>,
|
||||
outputs: &mut Vec<Vec<u128>>,
|
||||
sample_size: usize,
|
||||
secret_random_generator: &mut SecretRandomGenerator<ActivatedRandomGenerator>,
|
||||
encryption_random_generator: &mut EncryptionRandomGenerator<ActivatedRandomGenerator>,
|
||||
fft: Fft128View,
|
||||
computation_buffers: &mut ComputationBuffers,
|
||||
) -> (u128, u128) {
|
||||
let ciphertext_modulus = parameters.ciphertext_modulus;
|
||||
|
||||
let glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
|
||||
parameters.glwe_dimension,
|
||||
parameters.polynomial_size,
|
||||
secret_random_generator,
|
||||
);
|
||||
|
||||
let mut std_ggsw = GgswCiphertext::new(
|
||||
0u128,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
parameters.decomposition_base_log,
|
||||
parameters.decomposition_level_count,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
encrypt_constant_ggsw_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&mut std_ggsw,
|
||||
Cleartext(parameters.ggsw_encrypted_value),
|
||||
parameters.ggsw_noise,
|
||||
encryption_random_generator,
|
||||
);
|
||||
|
||||
let mut fourier_ggsw = Fourier128GgswCiphertext::new(
|
||||
std_ggsw.glwe_size(),
|
||||
std_ggsw.polynomial_size(),
|
||||
std_ggsw.decomposition_base_log(),
|
||||
std_ggsw.decomposition_level_count(),
|
||||
);
|
||||
|
||||
fourier_ggsw
|
||||
.as_mut_view()
|
||||
.fill_with_forward_fourier(&std_ggsw, fft);
|
||||
|
||||
let mut sample_runtime_ns = 0u128;
|
||||
|
||||
for _ in 0..sample_size {
|
||||
let mut input_plaintext_list =
|
||||
PlaintextList::new(0u128, PlaintextCount(parameters.polynomial_size.0));
|
||||
encryption_random_generator
|
||||
.fill_slice_with_random_uniform_mask(input_plaintext_list.as_mut());
|
||||
let scaling_to_native_torus = parameters
|
||||
.ciphertext_modulus
|
||||
.get_power_of_two_scaling_to_native_torus();
|
||||
// Shift to match the behavior of the previous concrete-core fixtures
|
||||
// Divide as encryption will encode the power of two in the MSBs
|
||||
input_plaintext_list.as_mut().iter_mut().for_each(|x| {
|
||||
*x = (*x << (<u128 as Numeric>::BITS - parameters.decomposition_base_log.0))
|
||||
/ scaling_to_native_torus
|
||||
});
|
||||
|
||||
// Sanity check
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
let modulus = ciphertext_modulus.get_custom_modulus();
|
||||
assert!(input_plaintext_list.as_ref().iter().all(|x| *x < modulus));
|
||||
}
|
||||
|
||||
let mut input_glwe_ciphertext = GlweCiphertext::new(
|
||||
0u128,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
encrypt_glwe_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&mut input_glwe_ciphertext,
|
||||
&input_plaintext_list,
|
||||
parameters.glwe_noise,
|
||||
encryption_random_generator,
|
||||
);
|
||||
|
||||
let mut output_glwe_ciphertext = GlweCiphertext::new(
|
||||
0u128,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
let stack = computation_buffers.stack();
|
||||
|
||||
let align = CACHELINE_ALIGN;
|
||||
|
||||
let (input_glwe_lo, stack) = stack.collect_aligned(
|
||||
align,
|
||||
input_glwe_ciphertext.as_ref().iter().map(|i| *i as u64),
|
||||
);
|
||||
let (input_glwe_hi, stack) = stack.collect_aligned(
|
||||
align,
|
||||
input_glwe_ciphertext
|
||||
.as_ref()
|
||||
.iter()
|
||||
.map(|i| (*i >> 64) as u64),
|
||||
);
|
||||
|
||||
let input_glwe_lo = GlweCiphertextView::from_container(
|
||||
&*input_glwe_lo,
|
||||
input_glwe_ciphertext.polynomial_size(),
|
||||
// Here we split a u128 to two u64 containers and the ciphertext modulus does not
|
||||
// match anymore in terms of the underlying Scalar type, so we'll provide a dummy
|
||||
// native modulus
|
||||
CiphertextModulus::new_native(),
|
||||
);
|
||||
let input_glwe_hi = GlweCiphertextView::from_container(
|
||||
&*input_glwe_hi,
|
||||
input_glwe_ciphertext.polynomial_size(),
|
||||
// Here we split a u128 to two u64 containers and the ciphertext modulus does not
|
||||
// match anymore in terms of the underlying Scalar type, so we'll provide a dummy
|
||||
// native modulus
|
||||
CiphertextModulus::new_native(),
|
||||
);
|
||||
|
||||
let (output_glwe_lo, stack) = stack.collect_aligned(
|
||||
align,
|
||||
output_glwe_ciphertext.as_ref().iter().map(|i| *i as u64),
|
||||
);
|
||||
let (output_glwe_hi, stack) = stack.collect_aligned(
|
||||
align,
|
||||
output_glwe_ciphertext
|
||||
.as_ref()
|
||||
.iter()
|
||||
.map(|i| (*i >> 64) as u64),
|
||||
);
|
||||
|
||||
let mut output_glwe_lo = GlweCiphertextMutView::from_container(
|
||||
&mut *output_glwe_lo,
|
||||
output_glwe_ciphertext.polynomial_size(),
|
||||
// Here we split a u128 to two u64 containers and the ciphertext modulus does not
|
||||
// match anymore in terms of the underlying Scalar type, so we'll provide a dummy
|
||||
// native modulus
|
||||
CiphertextModulus::new_native(),
|
||||
);
|
||||
let mut output_glwe_hi = GlweCiphertextMutView::from_container(
|
||||
&mut *output_glwe_hi,
|
||||
output_glwe_ciphertext.polynomial_size(),
|
||||
// Here we split a u128 to two u64 containers and the ciphertext modulus does not
|
||||
// match anymore in terms of the underlying Scalar type, so we'll provide a dummy
|
||||
// native modulus
|
||||
CiphertextModulus::new_native(),
|
||||
);
|
||||
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
add_external_product_assign_split(
|
||||
&mut output_glwe_lo,
|
||||
&mut output_glwe_hi,
|
||||
&fourier_ggsw,
|
||||
&input_glwe_lo,
|
||||
&input_glwe_hi,
|
||||
fft,
|
||||
stack,
|
||||
);
|
||||
|
||||
let elapsed = start.elapsed().as_nanos();
|
||||
sample_runtime_ns += elapsed;
|
||||
|
||||
output_glwe_ciphertext
|
||||
.as_mut()
|
||||
.iter_mut()
|
||||
.zip(
|
||||
output_glwe_lo
|
||||
.as_ref()
|
||||
.iter()
|
||||
.zip(output_glwe_hi.as_ref().iter()),
|
||||
)
|
||||
.for_each(|(out, (&lo, &hi))| *out = lo as u128 | ((hi as u128) << 64));
|
||||
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
// When we convert back from the fourier domain, integer values will contain up to 53
|
||||
// MSBs with information. In our representation of power of 2 moduli < native modulus we
|
||||
// fill the MSBs and leave the LSBs empty; this usage of the signed decomposer allows us to
|
||||
// round while keeping the data in the MSBs
|
||||
let signed_decomposer = SignedDecomposer::new(
|
||||
DecompositionBaseLog(ciphertext_modulus.get_custom_modulus().ilog2() as usize),
|
||||
DecompositionLevelCount(1),
|
||||
);
|
||||
output_glwe_ciphertext
|
||||
.as_mut()
|
||||
.iter_mut()
|
||||
.for_each(|x| *x = signed_decomposer.closest_representable(*x));
|
||||
}
|
||||
|
||||
let mut output_plaintext_list = input_plaintext_list.clone();
|
||||
decrypt_glwe_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&output_glwe_ciphertext,
|
||||
&mut output_plaintext_list,
|
||||
);
|
||||
|
||||
// Sanity check
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
let modulus = ciphertext_modulus.get_custom_modulus();
|
||||
assert!(output_plaintext_list.as_ref().iter().all(|x| *x < modulus));
|
||||
}
|
||||
|
||||
raw_inputs.push(input_plaintext_list.into_container());
|
||||
outputs.push(output_plaintext_list.into_container());
|
||||
}
|
||||
|
||||
// No prep time in this case
|
||||
(sample_runtime_ns, 0)
|
||||
}
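// Round-trip sketch of the u128 <-> (lo, hi) u64 split used above (standalone, illustration
// only):
#[cfg(test)]
mod u128_split_sketch {
    #[test]
    fn split_and_recombine_is_lossless() {
        let x: u128 = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210;
        let lo = x as u64;
        let hi = (x >> 64) as u64;
        assert_eq!(lo as u128 | ((hi as u128) << 64), x);
    }
}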
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn classic_pbs_external_product_u128(
|
||||
parameters: &GlweCiphertextGgswCiphertextExternalProductParameters<u128>,
|
||||
raw_inputs: &mut Vec<Vec<u128>>,
|
||||
outputs: &mut Vec<Vec<u128>>,
|
||||
sample_size: usize,
|
||||
secret_random_generator: &mut SecretRandomGenerator<ActivatedRandomGenerator>,
|
||||
encryption_random_generator: &mut EncryptionRandomGenerator<ActivatedRandomGenerator>,
|
||||
fft: Fft128View,
|
||||
computation_buffers: &mut ComputationBuffers,
|
||||
) -> (u128, u128) {
|
||||
let ciphertext_modulus = parameters.ciphertext_modulus;
|
||||
|
||||
let glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
|
||||
parameters.glwe_dimension,
|
||||
parameters.polynomial_size,
|
||||
secret_random_generator,
|
||||
);
|
||||
|
||||
let mut std_ggsw = GgswCiphertext::new(
|
||||
0u128,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
parameters.decomposition_base_log,
|
||||
parameters.decomposition_level_count,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
encrypt_constant_ggsw_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&mut std_ggsw,
|
||||
Cleartext(parameters.ggsw_encrypted_value),
|
||||
parameters.ggsw_noise,
|
||||
encryption_random_generator,
|
||||
);
|
||||
|
||||
let mut fourier_ggsw = Fourier128GgswCiphertext::new(
|
||||
std_ggsw.glwe_size(),
|
||||
std_ggsw.polynomial_size(),
|
||||
std_ggsw.decomposition_base_log(),
|
||||
std_ggsw.decomposition_level_count(),
|
||||
);
|
||||
|
||||
fourier_ggsw
|
||||
.as_mut_view()
|
||||
.fill_with_forward_fourier(&std_ggsw, fft);
|
||||
|
||||
let mut sample_runtime_ns = 0u128;
|
||||
|
||||
for _ in 0..sample_size {
|
||||
let mut input_plaintext_list =
|
||||
PlaintextList::new(0u128, PlaintextCount(parameters.polynomial_size.0));
|
||||
encryption_random_generator
|
||||
.fill_slice_with_random_uniform_mask(input_plaintext_list.as_mut());
|
||||
let scaling_to_native_torus = parameters
|
||||
.ciphertext_modulus
|
||||
.get_power_of_two_scaling_to_native_torus();
|
||||
// Shift to match the behavior of the previous concrete-core fixtures
|
||||
// Divide as encryption will encode the power of two in the MSBs
|
||||
input_plaintext_list.as_mut().iter_mut().for_each(|x| {
|
||||
*x = (*x << (<u128 as Numeric>::BITS - parameters.decomposition_base_log.0))
|
||||
/ scaling_to_native_torus
|
||||
});
|
||||
|
||||
// Sanity check
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
let modulus = ciphertext_modulus.get_custom_modulus();
|
||||
assert!(input_plaintext_list.as_ref().iter().all(|x| *x < modulus));
|
||||
}
|
||||
|
||||
let mut input_glwe_ciphertext = GlweCiphertext::new(
|
||||
0u128,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
encrypt_glwe_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&mut input_glwe_ciphertext,
|
||||
&input_plaintext_list,
|
||||
parameters.glwe_noise,
|
||||
encryption_random_generator,
|
||||
);
|
||||
|
||||
let mut output_glwe_ciphertext = GlweCiphertext::new(
|
||||
0u128,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
add_external_product_assign(
|
||||
&mut output_glwe_ciphertext,
|
||||
&fourier_ggsw,
|
||||
&input_glwe_ciphertext,
|
||||
fft,
|
||||
computation_buffers.stack(),
|
||||
);
|
||||
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
// When we convert back from the fourier domain, integer values will contain up to 53
|
||||
// MSBs with information. In our representation of power of 2 moduli < native modulus we
|
||||
// fill the MSBs and leave the LSBs empty; this usage of the signed decomposer allows us to
|
||||
// round while keeping the data in the MSBs
|
||||
let signed_decomposer = SignedDecomposer::new(
|
||||
DecompositionBaseLog(ciphertext_modulus.get_custom_modulus().ilog2() as usize),
|
||||
DecompositionLevelCount(1),
|
||||
);
|
||||
output_glwe_ciphertext
|
||||
.as_mut()
|
||||
.iter_mut()
|
||||
.for_each(|x| *x = signed_decomposer.closest_representable(*x));
|
||||
}
|
||||
|
||||
let elapsed = start.elapsed().as_nanos();
|
||||
sample_runtime_ns += elapsed;
|
||||
|
||||
let mut output_plaintext_list = input_plaintext_list.clone();
|
||||
decrypt_glwe_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&output_glwe_ciphertext,
|
||||
&mut output_plaintext_list,
|
||||
);
|
||||
|
||||
// Sanity check
|
||||
if !ciphertext_modulus.is_native_modulus() {
|
||||
let modulus = ciphertext_modulus.get_custom_modulus();
|
||||
assert!(output_plaintext_list.as_ref().iter().all(|x| *x < modulus));
|
||||
}
|
||||
|
||||
raw_inputs.push(input_plaintext_list.into_container());
|
||||
outputs.push(output_plaintext_list.into_container());
|
||||
}
|
||||
|
||||
// No prep time in this case
|
||||
(sample_runtime_ns, 0)
|
||||
}
|
||||
2 tfhe-rs-cost-model/src/operators/mod.rs (Normal file)
@@ -0,0 +1,2 @@
|
||||
pub mod classic_pbs;
|
||||
pub mod multi_bit_pbs;
|
||||
334 tfhe-rs-cost-model/src/operators/multi_bit_pbs.rs (Normal file)
@@ -0,0 +1,334 @@
|
||||
use crate::GlweCiphertextGgswCiphertextExternalProductParameters;
|
||||
use tfhe::core_crypto::algorithms::polynomial_algorithms;
|
||||
use tfhe::core_crypto::fft_impl::common::pbs_modulus_switch;
|
||||
use tfhe::core_crypto::fft_impl::fft64::crypto::ggsw::FourierGgswCiphertext;
|
||||
use tfhe::core_crypto::fft_impl::fft64::math::fft::FftView;
|
||||
use tfhe::core_crypto::fft_impl::fft64::math::polynomial::FourierPolynomial;
|
||||
use tfhe::core_crypto::prelude::{
|
||||
add_external_product_assign_mem_optimized, allocate_and_generate_new_binary_glwe_secret_key,
|
||||
allocate_and_generate_new_lwe_multi_bit_bootstrap_key,
|
||||
convert_standard_lwe_multi_bit_bootstrap_key_to_fourier_mem_optimized, decrypt_glwe_ciphertext,
|
||||
encrypt_glwe_ciphertext, modulus_switch_multi_bit, prepare_multi_bit_ggsw_mem_optimized,
|
||||
std_prepare_multi_bit_ggsw, ActivatedRandomGenerator, ComputationBuffers,
|
||||
ContiguousEntityContainer, EncryptionRandomGenerator, FourierLweMultiBitBootstrapKey,
|
||||
GgswCiphertext, GlweCiphertext, LweBskGroupingFactor, LweSecretKey, MonomialDegree, Numeric,
|
||||
PlaintextCount, PlaintextList, SecretRandomGenerator,
|
||||
};
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn multi_bit_pbs_external_product(
|
||||
parameters: &GlweCiphertextGgswCiphertextExternalProductParameters<u64>,
|
||||
raw_inputs: &mut Vec<Vec<u64>>,
|
||||
outputs: &mut Vec<Vec<u64>>,
|
||||
sample_size: usize,
|
||||
secret_random_generator: &mut SecretRandomGenerator<ActivatedRandomGenerator>,
|
||||
encryption_random_generator: &mut EncryptionRandomGenerator<ActivatedRandomGenerator>,
|
||||
fft: FftView,
|
||||
computation_buffers: &mut ComputationBuffers,
|
||||
grouping_factor: LweBskGroupingFactor,
|
||||
) -> (u128, u128) {
|
||||
let lwe_sk = LweSecretKey::from_container(vec![1u64; grouping_factor.0]);
|
||||
let glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
|
||||
parameters.glwe_dimension,
|
||||
parameters.polynomial_size,
|
||||
secret_random_generator,
|
||||
);
|
||||
|
||||
let bsk = allocate_and_generate_new_lwe_multi_bit_bootstrap_key(
|
||||
&lwe_sk,
|
||||
&glwe_secret_key,
|
||||
parameters.decomposition_base_log,
|
||||
parameters.decomposition_level_count,
|
||||
grouping_factor,
|
||||
parameters.ggsw_noise,
|
||||
parameters.ciphertext_modulus,
|
||||
encryption_random_generator,
|
||||
);
|
||||
|
||||
let mut fbsk = FourierLweMultiBitBootstrapKey::new(
|
||||
bsk.input_lwe_dimension(),
|
||||
bsk.glwe_size(),
|
||||
bsk.polynomial_size(),
|
||||
bsk.decomposition_base_log(),
|
||||
bsk.decomposition_level_count(),
|
||||
bsk.grouping_factor(),
|
||||
);
|
||||
|
||||
convert_standard_lwe_multi_bit_bootstrap_key_to_fourier_mem_optimized(
|
||||
&bsk,
|
||||
&mut fbsk,
|
||||
fft,
|
||||
computation_buffers.stack(),
|
||||
);
|
||||
|
||||
let ggsw_vec: Vec<_> = fbsk.ggsw_iter().collect();
|
||||
|
||||
let grouping_factor = fbsk.grouping_factor();
|
||||
let ggsw_per_multi_bit_element = grouping_factor.ggsw_per_multi_bit_element();
|
||||
|
||||
assert_eq!(ggsw_vec.len(), ggsw_per_multi_bit_element.0);
|
||||
|
||||
let mut random_mask = vec![0u64; grouping_factor.0];
|
||||
encryption_random_generator.fill_slice_with_random_uniform_mask(&mut random_mask);
|
||||
|
||||
// Recompute it here to rotate and negate the input or output vector to compute errors that make
|
||||
// sense
|
||||
let equivalent_monomial_degree = MonomialDegree(pbs_modulus_switch(
|
||||
random_mask.iter().sum::<u64>(),
|
||||
parameters.polynomial_size,
|
||||
));
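// With the all-ones LWE secret key above, the prepared GGSW encrypts (up to modulus-switch
// rounding) X^(switched sum of the random mask) rather than a constant, so the external product
// rotates the plaintext polynomial by `equivalent_monomial_degree`; the monomial division after
// decryption undoes that rotation before the errors are measured.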
|
||||
|
||||
let mut fourier_a_monomial = FourierPolynomial::new(fbsk.polynomial_size());
|
||||
|
||||
let mut fourier_ggsw = FourierGgswCiphertext::new(
|
||||
fbsk.glwe_size(),
|
||||
fbsk.polynomial_size(),
|
||||
fbsk.decomposition_base_log(),
|
||||
fbsk.decomposition_level_count(),
|
||||
);
|
||||
|
||||
let prep_start = std::time::Instant::now();
|
||||
prepare_multi_bit_ggsw_mem_optimized(
|
||||
&mut fourier_ggsw,
|
||||
&ggsw_vec,
|
||||
modulus_switch_multi_bit(
|
||||
fbsk.polynomial_size().to_blind_rotation_input_modulus_log(),
|
||||
grouping_factor,
|
||||
&random_mask,
|
||||
),
|
||||
&mut fourier_a_monomial,
|
||||
fft,
|
||||
);
|
||||
let prep_time_ns = prep_start.elapsed().as_nanos();
|
||||
|
||||
let mut sample_runtime_ns = 0u128;
|
||||
|
||||
for _ in 0..sample_size {
|
||||
let mut input_plaintext_list =
|
||||
PlaintextList::new(0u64, PlaintextCount(parameters.polynomial_size.0));
|
||||
encryption_random_generator
|
||||
.fill_slice_with_random_uniform_mask(input_plaintext_list.as_mut());
|
||||
// Shift to match the behavior of the previous concrete-core fixtures
|
||||
input_plaintext_list
|
||||
.as_mut()
|
||||
.iter_mut()
|
||||
.for_each(|x| *x <<= <u64 as Numeric>::BITS - parameters.decomposition_base_log.0);
|
||||
|
||||
let mut input_glwe_ciphertext = GlweCiphertext::new(
|
||||
0u64,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
parameters.ciphertext_modulus,
|
||||
);
|
||||
|
||||
encrypt_glwe_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&mut input_glwe_ciphertext,
|
||||
&input_plaintext_list,
|
||||
parameters.glwe_noise,
|
||||
encryption_random_generator,
|
||||
);
|
||||
|
||||
let mut output_glwe_ciphertext = GlweCiphertext::new(
|
||||
0u64,
|
||||
parameters.glwe_dimension.to_glwe_size(),
|
||||
parameters.polynomial_size,
|
||||
parameters.ciphertext_modulus,
|
||||
);
|
||||
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
add_external_product_assign_mem_optimized(
|
||||
&mut output_glwe_ciphertext,
|
||||
&fourier_ggsw,
|
||||
&input_glwe_ciphertext,
|
||||
fft,
|
||||
computation_buffers.stack(),
|
||||
);
|
||||
|
||||
let elapsed = start.elapsed().as_nanos();
|
||||
sample_runtime_ns += elapsed;
|
||||
|
||||
let mut output_plaintext_list = input_plaintext_list.clone();
|
||||
decrypt_glwe_ciphertext(
|
||||
&glwe_secret_key,
|
||||
&output_glwe_ciphertext,
|
||||
&mut output_plaintext_list,
|
||||
);
|
||||
|
||||
let mut output_pt_list_as_polynomial = output_plaintext_list.as_mut_polynomial();
|
||||
|
||||
// As we performed a monomial multiplication, we need to apply a monomial div to get outputs
|
||||
// in the right order
|
||||
polynomial_algorithms::polynomial_wrapping_monic_monomial_div_assign(
|
||||
&mut output_pt_list_as_polynomial,
|
||||
equivalent_monomial_degree,
|
||||
);
|
||||
|
||||
raw_inputs.push(input_plaintext_list.into_container());
|
||||
outputs.push(output_plaintext_list.into_container());
|
||||
}
|
||||
|
||||
(sample_runtime_ns, prep_time_ns)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
pub fn std_multi_bit_pbs_external_product(
    parameters: &GlweCiphertextGgswCiphertextExternalProductParameters<u64>,
    raw_inputs: &mut Vec<Vec<u64>>,
    outputs: &mut Vec<Vec<u64>>,
    sample_size: usize,
    secret_random_generator: &mut SecretRandomGenerator<ActivatedRandomGenerator>,
    encryption_random_generator: &mut EncryptionRandomGenerator<ActivatedRandomGenerator>,
    fft: FftView,
    computation_buffers: &mut ComputationBuffers,
    grouping_factor: LweBskGroupingFactor,
) -> (u128, u128) {
    let lwe_sk = LweSecretKey::from_container(vec![1u64; grouping_factor.0]);
    let glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
        parameters.glwe_dimension,
        parameters.polynomial_size,
        secret_random_generator,
    );

    let bsk = allocate_and_generate_new_lwe_multi_bit_bootstrap_key(
        &lwe_sk,
        &glwe_secret_key,
        parameters.decomposition_base_log,
        parameters.decomposition_level_count,
        grouping_factor,
        parameters.ggsw_noise,
        parameters.ciphertext_modulus,
        encryption_random_generator,
    );

    let ggsw_vec: Vec<_> = bsk.iter().collect();

    let grouping_factor = bsk.grouping_factor();
    let ggsw_per_multi_bit_element = grouping_factor.ggsw_per_multi_bit_element();

    assert_eq!(ggsw_vec.len(), ggsw_per_multi_bit_element.0);

    let mut random_mask = vec![0u64; grouping_factor.0];
    encryption_random_generator.fill_slice_with_random_uniform_mask(&mut random_mask);

    // Recompute it here to rotate and negate the input or output vector to compute errors that
    // make sense
    let equivalent_monomial_degree = MonomialDegree(pbs_modulus_switch(
        random_mask.iter().sum::<u64>(),
        parameters.polynomial_size,
    ));

    let mut fourier_ggsw = FourierGgswCiphertext::new(
        bsk.glwe_size(),
        bsk.polynomial_size(),
        bsk.decomposition_base_log(),
        bsk.decomposition_level_count(),
    );

    let mut std_ggsw = GgswCiphertext::new(
        0u64,
        bsk.glwe_size(),
        bsk.polynomial_size(),
        bsk.decomposition_base_log(),
        bsk.decomposition_level_count(),
        bsk.ciphertext_modulus(),
    );

    let mut tmp_std_ggsw = GgswCiphertext::new(
        0u64,
        bsk.glwe_size(),
        bsk.polynomial_size(),
        bsk.decomposition_base_log(),
        bsk.decomposition_level_count(),
        bsk.ciphertext_modulus(),
    );

    let prep_start = std::time::Instant::now();
    std_prepare_multi_bit_ggsw(
        &mut std_ggsw,
        &mut tmp_std_ggsw,
        &ggsw_vec,
        modulus_switch_multi_bit(
            bsk.polynomial_size().to_blind_rotation_input_modulus_log(),
            grouping_factor,
            &random_mask,
        ),
    );
    fourier_ggsw.as_mut_view().fill_with_forward_fourier(
        std_ggsw.as_view(),
        fft,
        computation_buffers.stack(),
    );
    let prep_time_ns = prep_start.elapsed().as_nanos();

    let mut sample_runtime_ns = 0u128;

    for _ in 0..sample_size {
        let mut input_plaintext_list =
            PlaintextList::new(0u64, PlaintextCount(parameters.polynomial_size.0));
        encryption_random_generator
            .fill_slice_with_random_uniform_mask(input_plaintext_list.as_mut());
        // Shift to match the behavior of the previous concrete-core fixtures
        input_plaintext_list
            .as_mut()
            .iter_mut()
            .for_each(|x| *x <<= <u64 as Numeric>::BITS - parameters.decomposition_base_log.0);

        let mut input_glwe_ciphertext = GlweCiphertext::new(
            0u64,
            parameters.glwe_dimension.to_glwe_size(),
            parameters.polynomial_size,
            parameters.ciphertext_modulus,
        );

        encrypt_glwe_ciphertext(
            &glwe_secret_key,
            &mut input_glwe_ciphertext,
            &input_plaintext_list,
            parameters.glwe_noise,
            encryption_random_generator,
        );

        let mut output_glwe_ciphertext = GlweCiphertext::new(
            0u64,
            parameters.glwe_dimension.to_glwe_size(),
            parameters.polynomial_size,
            parameters.ciphertext_modulus,
        );

        let start = std::time::Instant::now();

        add_external_product_assign_mem_optimized(
            &mut output_glwe_ciphertext,
            &fourier_ggsw,
            &input_glwe_ciphertext,
            fft,
            computation_buffers.stack(),
        );

        let elapsed = start.elapsed().as_nanos();
        sample_runtime_ns += elapsed;

        let mut output_plaintext_list = input_plaintext_list.clone();
        decrypt_glwe_ciphertext(
            &glwe_secret_key,
            &output_glwe_ciphertext,
            &mut output_plaintext_list,
        );

        let mut output_pt_list_as_polynomial = output_plaintext_list.as_mut_polynomial();

        // As we performed a monomial multiplication, we need to apply a monomial div to get
        // outputs in the right order
        polynomial_algorithms::polynomial_wrapping_monic_monomial_div_assign(
            &mut output_pt_list_as_polynomial,
            equivalent_monomial_degree,
        );

        raw_inputs.push(input_plaintext_list.into_container());
        outputs.push(output_plaintext_list.into_container());
    }

    (sample_runtime_ns, prep_time_ns)
}
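As a hedged sketch of how such a sampling routine might be driven (none of this harness code is in the diff; the function name, grouping factor value, and sample size are illustrative assumptions), the FFT plan and scratch buffers would typically be prepared once per parameter set before calling it:

// Illustrative harness, assuming `parameters` was built elsewhere by the acquisition tool.
fn run_one_parameter_set(
    parameters: &GlweCiphertextGgswCiphertextExternalProductParameters<u64>,
    sample_size: usize,
) -> (f64, u128) {
    let mut boxed_seeder = new_seeder();
    let seeder = boxed_seeder.as_mut();
    let mut secret_generator =
        SecretRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed());
    let mut encryption_generator =
        EncryptionRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed(), seeder);

    let fft = Fft::new(parameters.polynomial_size);
    let fft = fft.as_view();

    // Size the scratch buffer from the external-product requirement; the real tool may also
    // need to account for the forward-FFT scratch used when converting the GGSW.
    let mut buffers = ComputationBuffers::new();
    buffers.resize(
        add_external_product_assign_mem_optimized_requirement::<u64>(
            parameters.glwe_dimension.to_glwe_size(),
            parameters.polynomial_size,
            fft,
        )
        .unwrap()
        .unaligned_bytes_required(),
    );

    let mut raw_inputs = Vec::new();
    let mut outputs = Vec::new();

    let (total_ns, prep_ns) = std_multi_bit_pbs_external_product(
        parameters,
        &mut raw_inputs,
        &mut outputs,
        sample_size,
        &mut secret_generator,
        &mut encryption_generator,
        fft,
        &mut buffers,
        LweBskGroupingFactor(2), // placeholder grouping factor
    );

    (total_ns as f64 / sample_size as f64, prep_ns)
}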
3 tfhe-rs-cost-model/src/requirements.txt Normal file
@@ -0,0 +1,3 @@
numpy
scipy
scikit-learn
@@ -131,7 +131,7 @@ impl<G: ByteRandomGenerator> EncryptionRandomGenerator<G> {
    }

    // Fills the slice with random uniform values, using the mask generator.
    pub(crate) fn fill_slice_with_random_uniform_mask<Scalar>(&mut self, output: &mut [Scalar])
    pub fn fill_slice_with_random_uniform_mask<Scalar>(&mut self, output: &mut [Scalar])
    where
        Scalar: RandomGenerable<Uniform>,
    {
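The visibility widening above (pub(crate) to pub) is presumably what lets code outside the crate, such as the sampling functions earlier in this diff, fill plaintext containers with uniform randomness. A minimal, hypothetical helper illustrating that usage (the function name is a placeholder):

// Hypothetical helper in the sampling tool; relies on the widened visibility above.
fn random_plaintext_list(
    polynomial_size: PolynomialSize,
    encryption_generator: &mut EncryptionRandomGenerator<ActivatedRandomGenerator>,
) -> PlaintextList<Vec<u64>> {
    let mut plaintext_list = PlaintextList::new(0u64, PlaintextCount(polynomial_size.0));
    encryption_generator.fill_slice_with_random_uniform_mask(plaintext_list.as_mut());
    plaintext_list
}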
@@ -15,7 +15,8 @@ use crate::core_crypto::entities::ggsw_ciphertext::{
use crate::core_crypto::entities::glwe_ciphertext::{GlweCiphertext, GlweCiphertextView};
use crate::core_crypto::fft_impl::fft64::math::decomposition::TensorSignedDecompositionLendingIter;
use crate::core_crypto::prelude::ContainerMut;
use aligned_vec::CACHELINE_ALIGN;
use aligned_vec::{avec, ABox, CACHELINE_ALIGN};
//use concrete_fft::fft128::f128;
use dyn_stack::{PodStack, ReborrowMut, SizeOverflow, StackReq};
use tfhe_fft::fft128::f128;
use tfhe_versionable::Versionize;
@@ -170,6 +171,38 @@ impl<C: Container<Element = f64>> Fourier128GgswCiphertext<C> {
    }
}

pub type Fourier128GgswCiphertextOwned = Fourier128GgswCiphertext<ABox<[f64]>>;

impl Fourier128GgswCiphertext<ABox<[f64]>> {
    pub fn new(
        glwe_size: GlweSize,
        polynomial_size: PolynomialSize,
        decomposition_base_log: DecompositionBaseLog,
        decomposition_level_count: DecompositionLevelCount,
    ) -> Self {
        let container_len = polynomial_size.to_fourier_polynomial_size().0
            * decomposition_level_count.0
            * glwe_size.0
            * glwe_size.0;

        let boxed_re0 = avec![0.0f64; container_len].into_boxed_slice();
        let boxed_re1 = avec![0.0f64; container_len].into_boxed_slice();
        let boxed_im0 = avec![0.0f64; container_len].into_boxed_slice();
        let boxed_im1 = avec![0.0f64; container_len].into_boxed_slice();

        Fourier128GgswCiphertext::from_container(
            boxed_re0,
            boxed_re1,
            boxed_im0,
            boxed_im1,
            polynomial_size,
            glwe_size,
            decomposition_base_log,
            decomposition_level_count,
        )
    }
}

impl<C: Container<Element = f64>> Fourier128GgswLevelMatrix<C> {
    pub fn from_container(
        data_re0: C,
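A minimal usage sketch of the owned alias and constructor introduced above; the parameter values are placeholders, not taken from the repository:

// Allocates a zeroed 128-bit Fourier GGSW ciphertext using the new constructor;
// the sizes below are illustrative only.
fn allocate_example_fourier_128_ggsw() -> Fourier128GgswCiphertextOwned {
    Fourier128GgswCiphertextOwned::new(
        GlweSize(2),
        PolynomialSize(2048),
        DecompositionBaseLog(15),
        DecompositionLevelCount(2),
    )
}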
@@ -1,5 +1,4 @@
use super::super::super::{fft128, fft128_u128};
use super::super::math::fft::Fft128View;
use crate::core_crypto::fft_impl::common::tests::{
    gen_keys_or_get_from_cache_if_enabled, generate_keys,
};
@@ -6,4 +6,4 @@ pub mod common;
pub mod fft64;

pub mod fft128;
mod fft128_u128;
pub mod fft128_u128;
@@ -14,6 +14,6 @@ pub use super::commons::math::random::{ActivatedRandomGenerator, Gaussian, TUnif
pub use super::commons::parameters::*;
pub use super::commons::traits::*;
pub use super::entities::*;
pub use super::fft_impl::fft128::math::fft::Fft128;
pub use super::fft_impl::fft64::math::fft::Fft;
pub use super::fft_impl::fft128::math::fft::{Fft128, Fft128View};
pub use super::fft_impl::fft64::math::fft::{Fft, FftView};
pub use super::seeders::*;
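Re-exporting the view types alongside Fft and Fft128 lets downstream crates (such as the sampling tool above, whose functions take an fft: FftView parameter) name them straight from the prelude. A small, hedged illustration; the function is purely for demonstration:

// Assumes the `tfhe` crate with the updated prelude re-exports shown above.
use tfhe::core_crypto::prelude::{Fft, FftView, PolynomialSize};

fn with_fft_view(polynomial_size: PolynomialSize, f: impl FnOnce(FftView<'_>)) {
    // Build the FFT plan once, then hand out a borrowed view, as the sampling functions expect.
    let fft = Fft::new(polynomial_size);
    f(fft.as_view());
}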