Compare commits

...

55 Commits

Author SHA1 Message Date
Jakub Klemsa
1a9c0a8e3c commands that run FFT noise measurements 2025-04-07 13:42:45 +02:00
Jakub Klemsa
b86d649140 minor: graph bounds 2025-03-13 11:07:38 +01:00
Arthur Meyre
ed58de6114 fix: hack use bigger memory buffer for karatsuba ext prod 2025-02-06 16:16:17 +01:00
Jakub Klemsa
4bbe9b0d28 updated measurement-processing scripts for u128 2025-02-06 15:49:23 +01:00
Jakub Klemsa
774d8f2648 Modified multi-bit FFT noise measurement script to reflect the classic one. 2025-02-06 13:06:17 +01:00
Arthur Meyre
6fe72f822e fix memory issue for karatsuba and u128 2025-02-04 16:43:36 +01:00
Jakub Klemsa
b4de99499b increase decomp params for u128 2025-02-04 16:13:26 +01:00
Arthur Meyre
c675575374 test: add multi bit pbs 128 2025-02-04 14:49:17 +01:00
Arthur Meyre
1549a71ce6 fix pbs noise tests to work with u64 and u128 2025-02-03 18:20:09 +01:00
Arthur Meyre
6babaf0897 wip: make fmt 2025-02-03 15:51:53 +01:00
Jakub Klemsa
aa8f0d19ac WIP: ultimately broken transition towards the measurements of FFT-128. 2025-01-30 15:15:22 +01:00
Arthur Meyre
784351393f feat: add multi bit return noise for PBS 128 2025-01-27 18:15:59 +01:00
Arthur Meyre
4be3a82bc4 feat: add return noise variant for the classic PBS 128 2025-01-27 17:53:38 +01:00
Arthur Meyre
99e18e25c9 fix: fix tests 2025-01-24 18:38:07 +01:00
Arthur Meyre
824d6e22a0 feat: add multi bit pbs 128 2025-01-24 18:32:24 +01:00
Jakub Klemsa
1091679f5e Log-scale in FFT variance model fitting. 2025-01-22 22:22:40 +01:00
Jakub Klemsa
f24a7949d9 Multi-param curve fitting. 2025-01-21 17:30:39 +01:00
Jakub Klemsa
b2c180d7aa FFT noise: scripts for superlinear (k+1)*l. 2025-01-21 13:16:33 +01:00
Jakub Klemsa
6b04e8d7f1 fitting superlinear (k+1)l. 2025-01-16 18:00:55 +01:00
Jakub Klemsa
1db06750b9 Support for gf=1 measurements, new curve fits. 2025-01-15 11:08:52 +01:00
Arthur Meyre
48d55f0943 feat: add fft pbs which returns noise after each external product 2025-01-14 15:35:04 +01:00
Jakub Klemsa
826fd89103 WIP: refactor of noise test params, measurement tool for classic PBS. 2025-01-14 14:51:02 +01:00
Arthur Meyre
5f6212bab6 chore: fix karatsuba pbs returning noise 2025-01-14 10:55:14 +01:00
Arthur Meyre
b5b0333bcf feat: add karatsuba pbs which returns noise after each loop 2025-01-13 18:44:26 +01:00
Jakub Klemsa
4fa77bce8b FFT noise formula: find fit of a*N^b. FIXME in formulas (too much manual work for TUniform which is not used for experiments atm). 2025-01-13 18:19:27 +01:00
Jakub Klemsa
252bbc3047 Formats & ranges. 2025-01-13 14:02:34 +01:00
Jakub Klemsa
ad6362cc47 New measurement setup. 2025-01-03 11:10:31 +01:00
Jakub Klemsa
6a668d6f03 Minor: adjustment to variable (k,N). 2024-12-30 15:19:04 +01:00
Jakub Klemsa
1bc75a4086 Manual fix of sympy-to-rust issue. 2024-12-27 17:08:15 +01:00
Jakub Klemsa
b71dc7a094 Update noise formulas with FFT gap. 2024-12-27 16:25:13 +01:00
Jakub Klemsa
fc5da8524e Minor: towards gf=3,4. 2024-12-27 14:26:10 +01:00
Jakub Klemsa
4d70703ab5 Measurements for log(B) problem. 2024-12-24 15:17:22 +01:00
Jakub Klemsa
8e2b07b357 Removed unused stuff. 2024-12-23 11:51:24 +01:00
Jakub Klemsa
f0e9598941 Predicted noise can be generated separately from experiments. 2024-12-20 18:41:13 +01:00
Jakub Klemsa
738d1f908f Fix: noise formulas from the optimizer after unfixing the div-by-4 hotfix. 2024-12-20 18:40:36 +01:00
Jakub Klemsa
e2dd1d8a2f Added new TUniform params with various gf's. 2024-12-19 17:57:02 +01:00
Jakub Klemsa
dffb2c4992 Generalized for TUniform. 2024-12-19 16:52:16 +01:00
Jakub Klemsa
99782d88be Autogen GLWE noise, print value of a, fixed Gauss params with k > 1. 2024-12-18 12:51:58 +01:00
Jakub Klemsa
592c14746f TUniform params 2024-12-17 19:34:45 +01:00
Jakub Klemsa
241fddb4dd Pseudo-fix: div by 4 the GGSW-amplif term. Folder struct, more generic. 2024-12-17 17:28:20 +01:00
Jakub Klemsa
b98d62d30b Increased experiment scope. 2024-12-13 17:48:21 +01:00
Jakub Klemsa
700a287657 measured & expected noises outputted to json, then parsed by plotting script 2024-12-13 15:19:08 +01:00
Jakub Klemsa
685a6894ea WIP towards separate noise measurements: Kara <-> FFT. 2024-12-12 16:20:18 +01:00
Jakub Klemsa
96baf7c440 experiment that loops parameters 2024-12-12 10:41:40 +01:00
Jakub Klemsa
9a61b2f0bf WIP: fixed FFT noise model in a better way (from the whole blind-rot) 2024-12-11 12:50:44 +01:00
Jakub Klemsa
3c4950fa7a only 1 coeff per poly (to avoid correlation). plotting tool 2024-12-04 18:29:40 +01:00
Arthur Meyre
43386ba5af wip: add noiseless encryption of LUT 2024-12-04 12:06:54 +01:00
Arthur Meyre
457fc3f323 export noise as i64 2024-12-03 18:49:48 +01:00
Arthur Meyre
dfbfb3aadc add npy dump 2024-12-03 13:52:16 +01:00
Arthur Meyre
5afcb0bfb0 add as output all the intermediate noises after all external products for the multi bit noise test 2024-12-02 20:26:51 +01:00
Jakub Klemsa
372f1bc76e FFT hot-fix: y = 319 -> 321 and results are OK. 2024-11-21 13:51:00 +01:00
Arthur Meyre
84a4bb36cd test: have a full karatsuba multi bit 2024-11-20 10:13:25 +01:00
Jakub Klemsa
7f116708fe Updated keys, old formula in comment. 2024-11-19 18:10:48 +01:00
Agnes Leroy
f11340a93a Updated noise formula & params with Jakub's fixes 2024-11-19 14:18:27 +01:00
Agnes Leroy
52efc40173 chore(gpu): add noise test for the classical & multi-bit PBS 2024-11-19 14:18:27 +01:00
42 changed files with 8817 additions and 480 deletions

@@ -49,6 +49,7 @@ ron = "0.8"
tfhe-backward-compat-data = { git = "https://github.com/zama-ai/tfhe-backward-compat-data.git", branch = "v0.4", default-features = false, features = [
"load",
] }
npyz = "0.8"
[build-dependencies]
cbindgen = { version = "0.26.0", optional = true }

tfhe/cmd (new file, 27 lines)

@@ -0,0 +1,27 @@
Run Measurements Classic & Multi-Bit
================================================================================
$ RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- test_lwe_encrypt_pbs_decrypt_custom_mod_noise_test_params_4_bits_native_u64_132_bits --nocapture
$ RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- test_lwe_encrypt_pbs_decrypt_custom_mod_noise_test_params_4_bits_native_u128_132_bits --nocapture
$ RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- test_lwe_encrypt_multi_bit_pbs_decrypt_custom_mod_noise_test_params_multi_bit_4_bits_native_u64_132_bits --nocapture
$ RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- test_lwe_encrypt_multi_bit_pbs_decrypt_custom_mod_noise_test_params_multi_bit_4_bits_native_u128_132_bits --nocapture
Export Predicted Variances Classic & Multi-Bit
================================================================================
$ RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- test_export_noise_predictions_native_u64_132_bits --nocapture
$ RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- test_export_noise_predictions_native_u128_132_bits --nocapture
$ RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- test_export_multi_bit_noise_predictions_native_u64_132_bits --nocapture
$ RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- test_export_multi_bit_noise_predictions_native_u128_132_bits --nocapture
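The four export commands above differ only in the test name; a minimal shell sketch (same toolchain and flags as above, test names taken verbatim from this file) runs them in one go:
$ for T in test_export_noise_predictions_native_u64_132_bits \
           test_export_noise_predictions_native_u128_132_bits \
           test_export_multi_bit_noise_predictions_native_u64_132_bits \
           test_export_multi_bit_noise_predictions_native_u128_132_bits
  do
    RUSTFLAGS="-C target-cpu=native" cargo +stable test --tests --profile release --features=x86_64-unix -p tfhe -- "$T" --nocapture
  done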
Delete expected variances for non-existing measurements
================================================================================
irb> EXP_NAME = "u64-u128" ; distro = "GAUSSIAN" ; a = []
irb> for gf in [2,3,4] do
       for logbase in 5..30 do
         for level in 1..6 do
           next if logbase * level > 36
           for k,logN in [[4,9],[2,10],[1,11],[3,10],[2,11],[1,12],[1,13]] do
             ef = "./results/#{EXP_NAME}/expected-variances-gf=#{gf}-logB=#{logbase}-l=#{level}-k=#{k}-N=#{2**logN}-distro=#{distro}.json"
             ff = "./results/#{EXP_NAME}/samples/fft-id=0-gf=#{gf}-logB=#{logbase}-l=#{level}-k=#{k}-N=#{2**logN}-distro=#{distro}.npy"
             a << ef if File.exist?(ef) and not File.exist?(ff)
           end
         end
       end
     end
irb> puts a.join(" ") # to remove

tfhe/fft-vs-kara-SE.py (new executable file, 329 lines)

@@ -0,0 +1,329 @@
#!/usr/bin/env python3
import numpy as np
import os.path as osp
import json
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
EXP_NAME = "u64-u128" # wide-search-2000-gauss wide-search-2000-tuniform gpu-gauss gpu-tuniform log-b-problem
IN_FILE_FMT = "results/" + EXP_NAME + "/samples/%s-id=%d-gf=%d-logB=%d-l=%d-k=%d-N=%d-distro=%s-logQ=%d.npy"
GRAPH_FILE_FMT = "results/" + EXP_NAME + "/graphs/%s-gf=%d-logB=%d-l=%d-k=%d-N=%d-distro=%s-logQ=%d-nsamples=%d.png"
LOG_B_FILE_FMT = "results/" + EXP_NAME + "/graphs/logB_issue-gf=%d-distro=%s-logQ=%d.dat"
EXP_VAR_FILE_FMT = "results/" + EXP_NAME + "/expected-variances-gf=%d-logB=%d-l=%d-k=%d-N=%d-distro=%s-logQ=%d.json"
# ~ MEAS_VAR_FILE_FMT = "results/" + EXP_NAME + "/measured-variances-gf=%d-logB=%d-l=%d-k=%d-N=%d-distro=%s-logQ=%d.json"
FIG_W = 2400
FIG_H = 1200
DPI = 96
NB_TESTS_MAX = 2501
fft_noises = {}
kara_noises = {}
def log_B_bound(level, k, N, mantissa, nb_bodies=1):
# n.b. !!! in the optimizer, +7 is used instead, to avoid underestimations around the unstable plateau
# also, the curve fit was made with 5, not with 7 !!!
return (mantissa + 5 - np.log2(level*N*(k+nb_bodies))) / (level+1)
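# worked example with hypothetical parameters (not taken from the measurements below):
#   level=1, k=1, N=2048, mantissa=53 (f64)  =>  log_B_bound = (53 + 5 - log2(1*2048*2)) / 2
#                                                            = (58 - 12) / 2 = 23,
#   i.e. decomposition bases beyond B = 2^23 are expected to hit the unstable plateau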
def fft_var_base(base, log_mod, mantissa):
bits_lost = max(0, log_mod - mantissa)
return (base**2 * 2**(2*bits_lost)) / ((2**log_mod) ** 2)
# keep here a copy of the FFT noise prediction from the optimizer (required to make sure where the log-B-bound is)
def fft_variance(base, level, k, N, gf, log_mod, mantissa, nb_bodies=1):
# heuristically derived bound where the next round's decomposition reaches the end of the f64's mantissa (after iFFT of the previous round)
log_B_bnd = log_B_bound(level, k, N, mantissa, nb_bodies)
fft_base = fft_var_base(base, log_mod, mantissa)
plateau = fft_var_base(2**log_B_bnd, log_mod, mantissa)
match gf:
case 1: # unclear why no gap is visible around log-B-bound
ae1 = [0.007055857074515369, 1.2200341768234066, 1.0182758006937769] # post-bound
return ae1[0] * fft_base * (k*N)**ae1[1] * (level*(k+nb_bodies))**ae1[2] * N
case 2:
ae0 = [0.002198730742187336, 1.9454823914975672, 1.0414771149597082] # pre-bound
ae1 = [0.04408419079060492, 1.1863309572742167, 0.9520130331861396] # post-bound
case 3:
ae0 = [0.004919593456537255, 1.9072214291896599, 1.011101247104754] # pre-bound
ae1 = [0.08647998614194002, 1.1951694645748305, 0.9480625121016949] # post-bound
case 4:
ae0 = [0.008548835894699377, 1.9075907752971348, 1.0071546386211694] # pre-bound
ae1 = [0.25761688396443255, 1.1558681035803715, 0.9673568364804596] # post-bound
case _:
exit(f"!! Grouping factor {gf} not supported !!")
return min( \
ae0[0] * fft_base * (k*N)**ae0[1] * (level*(k+nb_bodies))**ae0[2] * N, \
max( \
ae0[0] * plateau * (k*N)**ae0[1] * (level*(k+nb_bodies))**ae0[2] * N, \
ae1[0] * fft_base * (k*N)**ae1[1] * (level*(k+nb_bodies))**ae1[2] * N) \
)
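# usage sketch with hypothetical parameter values (mirrors the call in the main loop below):
#   fft_var_pred = fft_variance(base=2**23, level=1, k=1, N=2048, gf=2, log_mod=64, mantissa=53)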
# FFT noise model for curve fitting: O(B^2 (kN)^e0 ((k+1)l)^e1 N log^e2(N)) ... however, not working with power of log(N)
# ~ def fft_log_var_model(params, a, eNk, ekl, eN):
def fft_log_var_model(params, a, eNk, ekl):
nb_bodies = 1
base, level, k, logN, bits_lost = params
# Python misbehaved here: no division ... return a * (base**2 * 2**(2*bits_lost)) / ((2**LOG_CT_MOD) ** 2) * (k*(2**logN))**eNk * (level*(k+nb_bodies))**ekl * (2**logN) * logN**eN
# ~ return np.log2(a * (base**2 * 2**(2*bits_lost)) * (k*(2**logN))**eNk * (level*(k+nb_bodies))**ekl * (2**logN) * logN**eN)
return np.log2(a * (base**2 * 2**(2*bits_lost)) * (k*(2**logN))**eNk * (level*(k+nb_bodies))**ekl * (2**logN))
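# in plain form, the model fitted below (fit done in log2 scale) is
#   a * B^2 * 2^(2*bits_lost) * (k*N)^eNk * (level*(k+1))^ekl * N  ~  (2^logQ)^2 * excess_slope,
# where excess_slope is the measured FFT-only variance slope stored in fft_vars_0 / fft_vars_1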
#TODO rework into 2-pass: first fit the curve, then generate the output files with updated predictions (now the values in fft_variance must be updated manually)
for logQ_mantissa in [[64,53],[128,104]]:
logQ = logQ_mantissa[0] ; mantissa = logQ_mantissa[1]
# ~ for distro in ["TUNIFORM", "GAUSSIAN"]:
for distro in ["GAUSSIAN"]:
print(f"# for logQ = {logQ}, mantissa = {mantissa}, distro = {distro}")
for gf in range(1,4+1):
fft_vars_0 = [] # pre-bound
fft_vars_1 = [] # post-bound
with open(LOG_B_FILE_FMT % (gf, distro, logQ), "w") as logB_file:
logB_file.write( "# Excess FFT noise\n")
logB_file.write( "# log B level k log N pred.slope avg.slope meas/pred bnd_flag\n")
for k in range(1,4+1):
for logN in range(9,13+1):
N = 1<<logN
for level in range(1,6+1):
# ~ for logbase in [3*i for i in range(3,10+1)]:
for logbase in range(5,30+1):
base = 1<<logbase
# ~ if logbase * level < 15 or logbase * level > 36:
# ~ continue
# Convert dictionary to tuple (sorted to make it deterministic)
params = tuple(sorted({
"gf": gf,
"logbase": logbase,
"level": level,
"k": k,
"logN": logN,
}.items()))
# load predicted noise
if not osp.isfile(EXP_VAR_FILE_FMT % (gf, logbase, level, k, N, distro, logQ)):
continue
with open(EXP_VAR_FILE_FMT % (gf, logbase, level, k, N, distro, logQ)) as file_exp_var:
exp_vars = json.load(file_exp_var)
y_dimension = exp_vars["lwe_dimension"] / gf
expected_variance_kara = exp_vars["expected_variance_kara"]
expected_variance_fft = exp_vars["expected_variance_fft"]
# load noise measurements into a single array
data_len = len(np.load(IN_FILE_FMT % ("fft", 0, gf, logbase, level, k, N, distro, logQ)))
fft_noises[params] = [np.array([]) for _ in range(0,data_len)]
kara_noises[params] = [np.array([]) for _ in range(0,data_len)]
for thread_id in range(0,NB_TESTS_MAX):
if not osp.isfile(IN_FILE_FMT % ("fft", thread_id, gf, logbase, level, k, N, distro, logQ)):
total_samples = thread_id
break
fi = np.load(IN_FILE_FMT % ("fft", thread_id, gf, logbase, level, k, N, distro, logQ)) # / CT_MOD ... needed when error was given as an int mod 2^64, now it's a float in the Torus scale
ki = np.load(IN_FILE_FMT % ("kara", thread_id, gf, logbase, level, k, N, distro, logQ)) # / CT_MOD
fft_noises[params] = np.column_stack([fft_noises[params],fi])
kara_noises[params] = np.column_stack([kara_noises[params],ki])
# ~ print(f"Processing {params} with {thread_id} samples ...")
# x-axis values: [1,2,3,4,...,321]
x_vals = np.arange(1,len(fft_noises[params])+1)
# compute diff (shall be aligned s.t. the same sample is calculated at respective index)
fk = fft_noises[params] - kara_noises[params]
# ~ # ==== Histograms for selected indexes ===================================
# ~ for i in [1, 2, 4, 8, 32, 128, 321]:
# ~ # for i in [1, 2]:
# ~ plt.figure(figsize=(FIG_W/DPI, FIG_H/DPI), dpi=DPI) ; plt.tight_layout() ; plt.grid() # ; plt.ylim(-1.5e-4,1.5e-4)
# ~ plt.hist(fft_noises[i-1], 50)
# ~ plt.title(f"FFT distro at step {i} {params}")
# ~ plt.savefig(---"FFT-distro-%d.png" % (i)) # , format="pdf", bbox_inches="tight"
# ~ # plt.show()
# ~ plt.close()
# ~ for i in [1, 2, 4, 8, 32, 128, 321]:
# ~ # for i in [1, 2]:
# ~ plt.figure(figsize=(FIG_W/DPI, FIG_H/DPI), dpi=DPI) ; plt.tight_layout() ; plt.grid() # ; plt.ylim(-1.5e-4,1.5e-4)
# ~ plt.hist(kara_noises[i-1], 50)
# ~ plt.title(f"Karatsuba distro at step {i} {params}")
# ~ plt.savefig(---"Karatsuba-distro-%d.png" % (i)) # , format="pdf", bbox_inches="tight"
# ~ # plt.show()
# ~ plt.close()
# ==== FFT ===============================================================
f_means = [np.mean(fi) for fi in fft_noises[params]]
f_vars = [np.var (fi) for fi in fft_noises[params]]
f_stdvs = [np.std (fi) for fi in fft_noises[params]]
# ~ # mean + std-dev error bars
# ~ plt.figure(figsize=(FIG_W/DPI, FIG_H/DPI), dpi=DPI) ; plt.tight_layout() ; plt.grid() # ; plt.ylim(-1.5e-4,1.5e-4)
# ~ plt.errorbar(
# ~ x_vals,
# ~ f_means,
# ~ yerr = f_stdvs,
# ~ fmt ='o',
# ~ )
# ~ plt.title(f"FFT mean & std-dev {params}")
# ~ plt.savefig(GRAPH_FILE_FMT % ("stddev-mean-fft", gf, logbase, level, k, N, distro, logQ, total_samples)) # , format="pdf", bbox_inches="tight"
# ~ # plt.show()
# ~ plt.close()
# ==== Karatsuba =========================================================
k_means = [np.mean(ki) for ki in kara_noises[params]]
k_vars = [np.var (ki) for ki in kara_noises[params]]
k_stdvs = [np.std (ki) for ki in kara_noises[params]]
# ~ # mean + std-dev error bars
# ~ plt.figure(figsize=(FIG_W/DPI, FIG_H/DPI), dpi=DPI) ; plt.tight_layout() ; plt.grid() # ; plt.ylim(-1.5e-4,1.5e-4)
# ~ plt.errorbar(
# ~ x_vals,
# ~ k_means,
# ~ yerr = k_stdvs,
# ~ fmt ='o', color='tab:orange',
# ~ )
# ~ plt.title(f"Karatsuba mean & std-dev {params}")
# ~ plt.savefig(GRAPH_FILE_FMT % ("stddev-mean-kara", gf, logbase, level, k, N, distro, logQ, total_samples)) # , format="pdf", bbox_inches="tight"
# ~ # plt.show()
# ~ plt.close()
# # ==== Diff ==============================================================
# # ... is not meaningful: starting from after the 2nd ext-prod, the FFT and Kara samples are completely different
# fk_means = [np.mean(fki) for fki in fk]
fk_vars = [np.var (fki) for fki in fk] # only used for a one-off sanity check
# fk_stdvs = [np.std (fki) for fki in fk]
# plt.figure(figsize=(FIG_W/DPI, FIG_H/DPI), dpi=DPI) ; plt.tight_layout() ; plt.grid() ; plt.ylim(-1.5e-4,1.5e-4)
# plt.errorbar(
# x_vals,
# fk_means,
# yerr = fk_stdvs,
# fmt ='o',
# )
# plt.title(f"(FFT-Kara) mean & std-dev {params}")
# plt.savefig(---"FFT-Kara-diff-mean-stddev.png") # , format="pdf", bbox_inches="tight"
# # plt.show()
# plt.close()
# ==== Both ==============================================================
# ~ kara_avg_slope = np.mean(np.array(k_vars)/x_vals)
# ~ fft_avg_slope = np.mean(np.array(f_vars)/x_vals)
kara_avg_slope_2nd_half = np.mean(np.array(k_vars[len(k_vars)//2:])/x_vals[len(k_vars)//2:])
fft_avg_slope_2nd_half = np.mean(np.array(f_vars[len(f_vars)//2:])/x_vals[len(f_vars)//2:])
# calc the value of FFT variance in various configs
fft_var_pred = fft_variance(base, level, k, N, gf, logQ, mantissa)
log_B_bnd = log_B_bound(level, k, N, mantissa)
# significant FFT contribution?
if fft_avg_slope_2nd_half/kara_avg_slope_2nd_half < 1.2:
logB_file.write("# ") # comment out anything insignificant
else:
logB_file.write(" ")
# values for curve fit
bnd_flag = 0
if logbase < log_B_bnd: # testing without -0.5
fft_vars_0.append([(fft_avg_slope_2nd_half - kara_avg_slope_2nd_half) * (2.0**logQ)**2, base, level, k, logN, max(0, logQ - mantissa)])
bnd_flag = -1
elif fft_var_pred > fft_variance(2**log_B_bnd, level, k, N, gf, logQ, mantissa) * 1.1:
fft_vars_1.append([(fft_avg_slope_2nd_half - kara_avg_slope_2nd_half) * (2.0**logQ)**2, base, level, k, logN, max(0, logQ - mantissa)])
bnd_flag = 1
# export values: # log B level k log N pred.slope avg.slope meas/pred bnd_flag
logB_file.write("%6d %7d %7d %7d %10.3e %10.3e %10.3e %11d\n" % (logbase, level, k, logN, fft_var_pred, fft_avg_slope_2nd_half - kara_avg_slope_2nd_half, (fft_avg_slope_2nd_half - kara_avg_slope_2nd_half) / fft_var_pred, bnd_flag))
continue # skip the per-configuration plotting and printing below ###########################################
plt.figure(figsize=(FIG_W/DPI, FIG_H/DPI), dpi=DPI)
plt.tight_layout() ; plt.grid() # ; plt.ylim(-.2e-9,5.0e-9) ; plt.gca().yaxis.set_major_locator(MultipleLocator(.5e-9))
plt.title(f"FFT vs. Kara var's {params}. FFT-only slope: {fft_avg_slope_2nd_half - kara_avg_slope_2nd_half}")
plt.plot(x_vals, f_vars, '.', label='meas FFT', color='tab:blue')
plt.plot([0,y_dimension], [0.0,expected_variance_fft], '.', label='exp FFT', color='tab:blue', linestyle='dotted', marker=',')
plt.plot([0,y_dimension], [0.0,fft_avg_slope_2nd_half*y_dimension], '.', label='avg. slope FFT', color='tab:blue', linestyle='dashed', marker=',')
plt.plot(x_vals, k_vars, '.', label='Karatsuba', color='tab:orange')
plt.plot([0,y_dimension], [0.0,expected_variance_kara], '.', label='exp Kara', color='tab:orange', linestyle='dotted', marker=',')
plt.plot([0,y_dimension], [0.0,kara_avg_slope_2nd_half*y_dimension], '.', label='avg. slope Kara', color='tab:orange', linestyle='dashed', marker=',')
plt.savefig(GRAPH_FILE_FMT % ("variances-FFT-Kara", gf, logbase, level, k, N, distro, logQ, total_samples)) # , format="pdf", bbox_inches="tight"
# plt.show()
plt.close()
# start: 1..3
plt.figure(figsize=(FIG_W/DPI/2, FIG_H/DPI/2), dpi=DPI)
plt.tight_layout() ; plt.grid() ; plt.xlim(-.2,3.2) # ; plt.ylim(-.2e-11,4.0e-11) ; plt.gca().yaxis.set_major_locator(MultipleLocator(.5e-11))
plt.title(f"FFT vs. Kara var's, start {params}")
plt.plot(x_vals[0:4], f_vars[0:4], marker='o', label='meas FFT', color='tab:blue')
plt.plot([0,4], [0.0,expected_variance_fft/y_dimension*4], '.', label='exp FFT', color='tab:blue', linestyle='dotted', marker=',')
plt.plot(x_vals[0:4], k_vars[0:4], marker='o', label='meas Karatsuba', color='tab:orange')
plt.plot([0,4], [0.0,expected_variance_kara/y_dimension*4], '.', label='exp Kara', color='tab:orange', linestyle='dotted', marker=',')
plt.ylim(bottom=0) # after plotting the data: https://stackoverflow.com/a/11745291/1869446
plt.savefig(GRAPH_FILE_FMT % ("variances-start-FFT-Kara", gf, logbase, level, k, N, distro, logQ, total_samples)) # , format="pdf", bbox_inches="tight"
# plt.show()
plt.close()
# diff growth
diff_vars = np.insert(np.array(f_vars) - np.array(k_vars), 0, 0.0)
diff_vars_growth = np.diff(diff_vars)
# ~ plt.figure(figsize=(FIG_W/DPI, FIG_H/DPI), dpi=DPI)
# ~ plt.tight_layout() ; plt.grid() # ; plt.ylim(-.2e-9,5.0e-10) ; plt.gca().yaxis.set_major_locator(MultipleLocator(.5e-10))
# ~ plt.title(f"Growth of diff: FFT - Kara {params}")
# ~ plt.plot(x_vals, diff_vars_growth, '.', label='Growth')
# ~ plt.savefig(GRAPH_FILE_FMT % ("growth-FFT-Kara", gf, logbase, level, k, N, distro, logQ, total_samples)) # , format="pdf", bbox_inches="tight"
# ~ # plt.show()
# ~ plt.close()
# just slope of diff
plt.figure(figsize=(FIG_W/DPI, FIG_H/DPI), dpi=DPI)
plt.tight_layout() ; plt.grid() # ; plt.ylim(-.2e-11,2.0e-11) ; plt.gca().yaxis.set_major_locator(MultipleLocator(.1e-11))
plt.title(f"FFT vs. Kara var's {params}")
plt.plot(x_vals, np.array(f_vars)/x_vals, '.', label='meas FFT', color='tab:blue')
plt.plot([0,y_dimension], [expected_variance_fft/y_dimension,expected_variance_fft/y_dimension], '.', label='exp FFT', color='tab:blue', linestyle='dotted', marker=',')
plt.plot([0,y_dimension], [fft_avg_slope_2nd_half,fft_avg_slope_2nd_half], '.', label='avg. slope FFT', color='tab:blue', linestyle='dashed', marker=',')
plt.plot(x_vals, np.array(k_vars)/x_vals, '.', label='meas Karatsuba', color='tab:orange')
plt.plot([0,y_dimension], [expected_variance_kara/y_dimension,expected_variance_kara/y_dimension], '.', label='exp Kara', color='tab:orange', linestyle='dotted', marker=',')
plt.plot([0,y_dimension], [kara_avg_slope_2nd_half,kara_avg_slope_2nd_half], '.', label='avg. slope Kara', color='tab:orange', linestyle='dashed', marker=',')
plt.ylim(bottom=0)
plt.savefig(GRAPH_FILE_FMT % ("variances-per-step-FFT-Kara", gf, logbase, level, k, N, distro, logQ, total_samples)) # , format="pdf", bbox_inches="tight"
# plt.show()
plt.close()
# print some values
print(f"\nParameters: {params}\n")
wk, _ = curve_fit(lambda x, a: a*x, x_vals, k_vars)
wf, _ = curve_fit(lambda x, a: a*x, x_vals, f_vars)
print("Kara linear fit:", wk[0])
print("Kara avg slope:", kara_avg_slope_2nd_half)
print("FFT linear fit:", wf[0])
print("FFT avg slope:", fft_avg_slope_2nd_half)
print("FFT-only as diff of linear fits:", (wf - wk)[0])
print("FFT-only as diff of avg slopes:", fft_avg_slope_2nd_half - kara_avg_slope_2nd_half)
# ~ print("----")
# ~ print("Kara first:", k_vars[0])
# ~ print("FFT first:", f_vars[0])
print("----")
print("Value of a:", fft_a)
print("Noise base w/o N^2:", fft_var_without_a_N)
print("----")
print("FFT excess 0..1:", diff_vars_growth[0])
# ~ print("FFT excess 0..1 from plain diff (close to prev?):", fk_vars[0])
print("FFT excess 1..2:", diff_vars_growth[1])
print("FFT excess 2..3:", diff_vars_growth[2])
# ~ print("----")
# ~ print("FFT excess growth mean (close to .. from slope?):", np.mean(diff_vars_growth))
print("=" * 80)
# for logQ, mantissa, distro, gf:
print(f" case {gf}:")
if len(fft_vars_0) > 4:
fft_vars_0 = np.array(fft_vars_0)
a3e_0, _ = curve_fit(fft_log_var_model, fft_vars_0[:, 1:].T, np.log2(fft_vars_0[:, 0]), p0=[0.0035, 1.9, 1.1])
print(f" ae0 = [{a3e_0[0]}, {a3e_0[1]}, {a3e_0[2]}] # pre-bound")
if len(fft_vars_1) > 4:
fft_vars_1 = np.array(fft_vars_1)
a3e_1, _ = curve_fit(fft_log_var_model, fft_vars_1[:, 1:].T, np.log2(fft_vars_1[:, 0]), p0=[0.08, 1.15, 0.8])
print(f" ae1 = [{a3e_1[0]}, {a3e_1[1]}, {a3e_1[2]}] # post-bound")

@@ -0,0 +1,102 @@
#!/usr/bin/gnuplot
file_exists(file) = system("[ -f '".file."' ] && echo '1' || echo '0'") + 0
IN_FMT = "logB_issue-gf=%d-distro=GAUSSIAN.dat"
OUT_LOG_B_FMT = "plot-logB-gf=%d-k=%d-N=%d-distro=GAUSSIAN.png"
OUT_LOG_N_FMT = "plot-logN-gf=%d-k=%d-distro=GAUSSIAN.png"
OUT_KL_FMT = "plot-(k+1)l-gf=%d-distro=GAUSSIAN.png"
set term pngcairo size 1200,900 linewidth 2
set logscale y
set datafile missing NaN
set grid
set key top left
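# data columns used below ($1..$8), as written by the measurement-processing script:
#   1: log B   2: level   3: k   4: log N   5: predicted slope   6: measured slope
#   7: measured/predicted   8: bnd_flag (-1 = before the log-B plateau, +1 = after)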
do for [gf=1:4] {
f = sprintf(IN_FMT, gf)
if (file_exists(f)) {
# log-B
set xrange [5:30]
set yrange [1e-21:1e-6]
do for [k=1:4] {
do for [logN=9:13] {
N = 1 << logN
set output sprintf(OUT_LOG_B_FMT, gf, k, N)
plot \
f u 1:(($2 == 1 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=1', \
f u 1:(($2 == 1 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 1 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 1 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=1', \
f u 1:(($2 == 2 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=2', \
f u 1:(($2 == 2 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 2 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 2 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=2', \
f u 1:(($2 == 3 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=3', \
f u 1:(($2 == 3 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 3 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 3 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=3', \
f u 1:(($2 == 4 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=4', \
f u 1:(($2 == 4 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 4 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 4 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=4', \
f u 1:(($2 == 5 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=5', \
f u 1:(($2 == 5 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 5 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 5 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=5', \
f u 1:(($2 == 6 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=6', \
f u 1:(($2 == 6 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 6 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 6 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=6'
}
}
# log-N
set xrange [8:15]
set yrange [1e-21:1e-6]
# logB_l = [[18,2], [28,1], [22,1], [14,2]]
do for [k=1:4] {
set output sprintf(OUT_LOG_N_FMT, gf, k)
plot \
f u 4:(($2 == 1 && $1 == 22 && $3 == k) ? $5 : NaN) w lp t 'pred. slope, logB=22, l=1', \
f u 4:(($2 == 1 && $1 == 22 && $3 == k) ? $6 : NaN) w lp t 'meas. slope, logB=22, l=1', \
f u 4:(($2 == 1 && $1 == 28 && $3 == k) ? $5 : NaN) w lp t 'pred. slope, logB=28, l=1', \
f u 4:(($2 == 1 && $1 == 28 && $3 == k) ? $6 : NaN) w lp t 'meas. slope, logB=28, l=1', \
f u 4:(($2 == 2 && $1 == 14 && $3 == k) ? $5 : NaN) w lp t 'pred. slope, logB=14, l=2', \
f u 4:(($2 == 2 && $1 == 14 && $3 == k) ? $6 : NaN) w lp t 'meas. slope, logB=14, l=2', \
f u 4:(($2 == 2 && $1 == 18 && $3 == k) ? $5 : NaN) w lp t 'pred. slope, logB=18, l=2', \
f u 4:(($2 == 2 && $1 == 18 && $3 == k) ? $6 : NaN) w lp t 'meas. slope, logB=18, l=2'
}
# (k+1)l
set xrange [1:11]
set yrange [1e-34:1e-31]
#~ do for [k=1:4] {
# TODO fix the noise model params
if (gf == 2) {
set output sprintf(OUT_KL_FMT, gf)
plot \
f u ($2*($3+1)):($8 == -1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.823854616672861) : NaN) w p t 'pred. slope before bound', \
f u ($2*($3+1)):($8 == -1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.823854616672861) : NaN) w p t 'meas. slope before bound', \
f u ($2*($3+1)):($8 == +1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.199976884576144) : NaN) w p t 'pred. slope after bound', \
f u ($2*($3+1)):($8 == +1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.199976884576144) : NaN) w p t 'meas. slope after bound'
}
if (gf == 3) {
set output sprintf(OUT_KL_FMT, gf)
plot \
f u ($2*($3+1)):($8 == -1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.9546582263796637) : NaN) w p t 'pred. slope before bound', \
f u ($2*($3+1)):($8 == -1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.9546582263796637) : NaN) w p t 'meas. slope before bound', \
f u ($2*($3+1)):($8 == +1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.186096703735851) : NaN) w p t 'pred. slope after bound', \
f u ($2*($3+1)):($8 == +1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.186096703735851) : NaN) w p t 'meas. slope after bound'
}
if (gf == 4) {
set output sprintf(OUT_KL_FMT, gf)
plot \
f u ($2*($3+1)):($8 == -1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.8850256339231044) : NaN) w p t 'pred. slope before bound', \
f u ($2*($3+1)):($8 == -1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.8850256339231044) : NaN) w p t 'meas. slope before bound', \
f u ($2*($3+1)):($8 == +1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.165413038755238) : NaN) w p t 'pred. slope after bound', \
f u ($2*($3+1)):($8 == +1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.165413038755238) : NaN) w p t 'meas. slope after bound'
}
}
}

@@ -0,0 +1,108 @@
#!/usr/bin/gnuplot
file_exists(file) = system("[ -f '".file."' ] && echo '1' || echo '0'") + 0
IN_FMT = "logB_issue-gf=%d-distro=GAUSSIAN-logQ=%d.dat"
OUT_LOG_B_FMT = "plot-logB-gf=%d-k=%d-N=%d-distro=GAUSSIAN-logQ=%d.png"
OUT_LOG_N_FMT = "plot-logN-gf=%d-k=%d-distro=GAUSSIAN-logQ=%d.png"
OUT_KL_FMT = "plot-(k+1)l-gf=%d-distro=GAUSSIAN-logQ=%d.png"
set term pngcairo size 1200,900 linewidth 2
set logscale y
set datafile missing NaN
set grid
set key top left
LOG_CT_MOD = 128
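# data columns used below ($1..$8), as written by the measurement-processing script:
#   1: log B   2: level   3: k   4: log N   5: predicted slope   6: measured slope
#   7: measured/predicted   8: bnd_flag (-1 = before the log-B plateau, +1 = after)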
do for [gf=1:4] {
f = sprintf(IN_FMT, gf, LOG_CT_MOD)
if (file_exists(f)) {
# log-B
set xrange [18:32]
#~ set yrange [1e-21:1e-6]
set yrange [1e-45:1e-37]
do for [k=1:4] {
do for [logN=9:13] {
N = 1 << logN
set output sprintf(OUT_LOG_B_FMT, gf, k, N, LOG_CT_MOD)
plot \
f u 1:(($2 == 1 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=1', \
f u 1:(($2 == 1 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 1 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 1 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=1', \
f u 1:(($2 == 2 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=2', \
f u 1:(($2 == 2 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 2 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 2 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=2', \
f u 1:(($2 == 3 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=3', \
f u 1:(($2 == 3 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 3 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 3 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=3', \
f u 1:(($2 == 4 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=4', \
f u 1:(($2 == 4 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 4 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 4 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=4', \
f u 1:(($2 == 5 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=5', \
f u 1:(($2 == 5 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 5 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 5 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=5', \
f u 1:(($2 == 6 && $3 == k && $4 == logN) ? $5 : NaN) w lp t 'pred. slope, l=6', \
f u 1:(($2 == 6 && $3 == k && $4 == logN && $8 == -1) ? $5 : NaN) w p pt 6 ps 2.5 lc 0 t 'before plateau', \
f u 1:(($2 == 6 && $3 == k && $4 == logN && $8 == +1) ? $5 : NaN) w p pt 4 ps 2.5 lc 0 t 'after plateau', \
f u 1:(($2 == 6 && $3 == k && $4 == logN) ? $6 : NaN) w lp t 'meas. slope, l=6'
}
}
# log-N
set xrange [8:15]
#~ set yrange [1e-21:1e-6]
set yrange [1e-45:1e-37]
# logB_l = [[18,2], [28,1], [22,1], [14,2]]
do for [k=1:4] {
set output sprintf(OUT_LOG_N_FMT, gf, k, LOG_CT_MOD)
plot \
f u 4:(($2 == 1 && $1 == 22 && $3 == k) ? $5 : NaN) w lp t 'pred. slope, logB=22, l=1', \
f u 4:(($2 == 1 && $1 == 22 && $3 == k) ? $6 : NaN) w lp t 'meas. slope, logB=22, l=1', \
f u 4:(($2 == 1 && $1 == 28 && $3 == k) ? $5 : NaN) w lp t 'pred. slope, logB=28, l=1', \
f u 4:(($2 == 1 && $1 == 28 && $3 == k) ? $6 : NaN) w lp t 'meas. slope, logB=28, l=1', \
f u 4:(($2 == 2 && $1 == 14 && $3 == k) ? $5 : NaN) w lp t 'pred. slope, logB=14, l=2', \
f u 4:(($2 == 2 && $1 == 14 && $3 == k) ? $6 : NaN) w lp t 'meas. slope, logB=14, l=2', \
f u 4:(($2 == 2 && $1 == 18 && $3 == k) ? $5 : NaN) w lp t 'pred. slope, logB=18, l=2', \
f u 4:(($2 == 2 && $1 == 18 && $3 == k) ? $6 : NaN) w lp t 'meas. slope, logB=18, l=2'
}
# (k+1)l
#~ set xrange [1:11]
set xrange [1:15]
#~ set yrange [1e-34:1e-31]
set yrange [1e-65:1e-62]
#~ do for [k=1:4] {
# TODO fix the noise model params
if (gf == 2) {
set output sprintf(OUT_KL_FMT, gf, LOG_CT_MOD)
plot \
f u ($2*($3+1)):($8 == -1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.823854616672861) : NaN) w p t 'pred. slope before bound', \
f u ($2*($3+1)):($8 == -1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.823854616672861) : NaN) w p t 'meas. slope before bound', \
f u ($2*($3+1)):($8 == +1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.199976884576144) : NaN) w p t 'pred. slope after bound', \
f u ($2*($3+1)):($8 == +1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.199976884576144) : NaN) w p t 'meas. slope after bound'
}
if (gf == 3) {
set output sprintf(OUT_KL_FMT, gf, LOG_CT_MOD)
plot \
f u ($2*($3+1)):($8 == -1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.9546582263796637) : NaN) w p t 'pred. slope before bound', \
f u ($2*($3+1)):($8 == -1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.9546582263796637) : NaN) w p t 'meas. slope before bound', \
f u ($2*($3+1)):($8 == +1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.186096703735851) : NaN) w p t 'pred. slope after bound', \
f u ($2*($3+1)):($8 == +1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.186096703735851) : NaN) w p t 'meas. slope after bound'
}
if (gf == 4) {
set output sprintf(OUT_KL_FMT, gf, LOG_CT_MOD)
plot \
f u ($2*($3+1)):($8 == -1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.8850256339231044) : NaN) w p t 'pred. slope before bound', \
f u ($2*($3+1)):($8 == -1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.8850256339231044) : NaN) w p t 'meas. slope before bound', \
f u ($2*($3+1)):($8 == +1 ? $5 / ((2**$1)**2 * $3 * (2**$4)**2.165413038755238) : NaN) w p t 'pred. slope after bound', \
f u ($2*($3+1)):($8 == +1 ? $6 / ((2**$1)**2 * $3 * (2**$4)**2.165413038755238) : NaN) w p t 'meas. slope after bound'
}
}
}

@@ -5,10 +5,13 @@
use crate::core_crypto::commons::computation_buffers::ComputationBuffers;
use crate::core_crypto::commons::traits::*;
use crate::core_crypto::entities::*;
use crate::core_crypto::fft_impl::fft128::math::fft::Fft128;
use crate::core_crypto::fft_impl::fft64::math::fft::{
par_convert_polynomials_list_to_fourier, Fft, FftView,
};
use dyn_stack::{PodStack, ReborrowMut, SizeOverflow, StackReq};
use rayon::prelude::*;
use tfhe_fft::c64;
/// Convert an [`LWE multi_bit bootstrap key`](`LweMultiBitBootstrapKey`) with standard
@@ -99,3 +102,47 @@ pub fn par_convert_standard_lwe_multi_bit_bootstrap_key_to_fourier<Scalar, Input
fft,
);
}
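/// Convert an [`LWE multi_bit bootstrap key`](`LweMultiBitBootstrapKey`) with standard
/// coefficients into a [`Fourier128LweMultiBitBootstrapKey`] (split re0/re1/im0/im1 `f64`
/// representation), converting the polynomials in parallel.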
pub fn par_convert_standard_lwe_multi_bit_bootstrap_key_to_fourier_128<
Scalar,
InputCont,
OutputCont,
>(
input_bsk: &LweMultiBitBootstrapKey<InputCont>,
output_bsk: &mut Fourier128LweMultiBitBootstrapKey<OutputCont>,
) where
Scalar: UnsignedTorus,
InputCont: Container<Element = Scalar>,
OutputCont: ContainerMut<Element = f64>,
{
let fft = Fft128::new(input_bsk.polynomial_size());
let fft = fft.as_view();
assert_eq!(input_bsk.polynomial_size(), output_bsk.polynomial_size());
let fourier_poly_size = output_bsk.polynomial_size().to_fourier_polynomial_size();
let (data_re0, data_re1, data_im0, data_im1) = output_bsk.as_mut_view().data();
data_re0
.par_chunks_exact_mut(fourier_poly_size.0)
.zip(
data_re1.par_chunks_exact_mut(fourier_poly_size.0).zip(
data_im0
.par_chunks_exact_mut(fourier_poly_size.0)
.zip(data_im1.par_chunks_exact_mut(fourier_poly_size.0)),
),
)
.zip(input_bsk.as_polynomial_list().par_iter())
.for_each(
|((fourier_re0, (fourier_re1, (fourier_im0, fourier_im1))), coef_poly)| {
fft.forward_as_torus(
fourier_re0,
fourier_re1,
fourier_im0,
fourier_im1,
coef_poly.as_ref(),
);
},
);
}

@@ -171,20 +171,27 @@ use dyn_stack::{PodStack, SizeOverflow, StackReq};
/// "Multiplication via PBS result is correct! Expected 6, got {pbs_multiplication_result}"
/// );
/// ```
pub fn programmable_bootstrap_f128_lwe_ciphertext<Scalar, InputCont, OutputCont, AccCont, KeyCont>(
pub fn programmable_bootstrap_f128_lwe_ciphertext<
InputScalar,
OutputScalar,
InputCont,
OutputCont,
AccCont,
KeyCont,
>(
input: &LweCiphertext<InputCont>,
output: &mut LweCiphertext<OutputCont>,
accumulator: &GlweCiphertext<AccCont>,
fourier_bsk: &Fourier128LweBootstrapKey<KeyCont>,
) where
// CastInto required for PBS modulus switch which returns a usize
Scalar: UnsignedTorus + CastInto<usize>,
InputCont: Container<Element = Scalar>,
OutputCont: ContainerMut<Element = Scalar>,
AccCont: Container<Element = Scalar>,
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
InputCont: Container<Element = InputScalar>,
OutputCont: ContainerMut<Element = OutputScalar>,
AccCont: Container<Element = OutputScalar>,
KeyCont: Container<Element = f64>,
{
assert_eq!(input.ciphertext_modulus(), output.ciphertext_modulus());
assert_eq!(
output.ciphertext_modulus(),
accumulator.ciphertext_modulus()
@@ -196,7 +203,7 @@ pub fn programmable_bootstrap_f128_lwe_ciphertext<Scalar, InputCont, OutputCont,
let fft = fft.as_view();
buffers.resize(
programmable_bootstrap_f128_lwe_ciphertext_mem_optimized_requirement::<Scalar>(
programmable_bootstrap_f128_lwe_ciphertext_mem_optimized_requirement::<OutputScalar>(
fourier_bsk.glwe_size(),
fourier_bsk.polynomial_size(),
fft,
@@ -222,7 +229,8 @@ pub fn programmable_bootstrap_f128_lwe_ciphertext<Scalar, InputCont, OutputCont,
/// having a capacity at least as large as the result of
/// [`programmable_bootstrap_f128_lwe_ciphertext_mem_optimized_requirement`].
pub fn programmable_bootstrap_f128_lwe_ciphertext_mem_optimized<
Scalar,
InputScalar,
OutputScalar,
InputCont,
OutputCont,
AccCont,
@@ -236,10 +244,11 @@ pub fn programmable_bootstrap_f128_lwe_ciphertext_mem_optimized<
stack: PodStack<'_>,
) where
// CastInto required for PBS modulus switch which returns a usize
Scalar: UnsignedTorus + CastInto<usize>,
InputCont: Container<Element = Scalar>,
OutputCont: ContainerMut<Element = Scalar>,
AccCont: Container<Element = Scalar>,
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
InputCont: Container<Element = InputScalar>,
OutputCont: ContainerMut<Element = OutputScalar>,
AccCont: Container<Element = OutputScalar>,
KeyCont: Container<Element = f64>,
{
fourier_bsk.bootstrap(output, input, accumulator, fft, stack);
@@ -253,3 +262,55 @@ pub fn programmable_bootstrap_f128_lwe_ciphertext_mem_optimized_requirement<Scal
) -> Result<StackReq, SizeOverflow> {
bootstrap_scratch_f128::<Scalar>(glwe_size, polynomial_size, fft)
}
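/// Variant of [`programmable_bootstrap_f128_lwe_ciphertext`] used by the noise-measurement
/// tests: performs the same PBS and additionally returns the intermediate noise collected
/// during the blind rotation, computed from the optional `debug_material`
/// (input LWE secret key, output GLWE secret key, clear accumulator).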
pub fn programmable_bootstrap_f128_lwe_ciphertext_return_noise<
InputScalar,
OutputScalar,
InputCont,
OutputCont,
AccCont,
KeyCont,
>(
input: &LweCiphertext<InputCont>,
output: &mut LweCiphertext<OutputCont>,
accumulator: &GlweCiphertext<AccCont>,
fourier_bsk: &Fourier128LweBootstrapKey<KeyCont>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<OutputScalar>,
&GlweCiphertextOwned<OutputScalar>,
)>,
) -> Vec<Vec<OutputScalar>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
InputCont: Container<Element = InputScalar>,
OutputCont: ContainerMut<Element = OutputScalar>,
AccCont: Container<Element = OutputScalar>,
KeyCont: Container<Element = f64>,
{
assert_eq!(
output.ciphertext_modulus(),
accumulator.ciphertext_modulus()
);
let mut buffers = ComputationBuffers::new();
let fft = Fft128::new(fourier_bsk.polynomial_size());
let fft = fft.as_view();
buffers.resize(
programmable_bootstrap_f128_lwe_ciphertext_mem_optimized_requirement::<OutputScalar>(
fourier_bsk.glwe_size(),
fourier_bsk.polynomial_size(),
fft,
)
.unwrap()
.unaligned_bytes_required(),
);
let stack = buffers.stack();
fourier_bsk.bootstrap_return_noise(output, input, accumulator, fft, stack, debug_material)
}

@@ -6,17 +6,29 @@ use crate::core_crypto::commons::computation_buffers::ComputationBuffers;
use crate::core_crypto::commons::math::decomposition::SignedDecomposer;
use crate::core_crypto::commons::parameters::*;
use crate::core_crypto::commons::traits::*;
use crate::core_crypto::commons::utils::izip;
use crate::core_crypto::entities::*;
use crate::core_crypto::fft_impl::common::pbs_modulus_switch;
use crate::core_crypto::fft_impl::fft64::crypto::bootstrap::{
batch_bootstrap_scratch, blind_rotate_assign_scratch, bootstrap_scratch,
};
use crate::core_crypto::fft_impl::fft64::crypto::ggsw::{
add_external_product_assign as impl_add_external_product_assign,
add_external_product_assign_scratch as impl_add_external_product_assign_scratch, cmux,
cmux_scratch,
cmux_scratch, karatsuba_add_external_product_assign,
karatsuba_add_external_product_assign as impl_karatsuba_add_external_product_assign,
};
use crate::core_crypto::fft_impl::fft64::math::fft::{Fft, FftView};
use dyn_stack::{PodStack, SizeOverflow, StackReq};
use crate::core_crypto::prelude::polynomial_algorithms::{
polynomial_wrapping_monic_monomial_div, polynomial_wrapping_monic_monomial_div_assign,
polynomial_wrapping_monic_monomial_mul_and_subtract,
polynomial_wrapping_monic_monomial_mul_assign,
};
use crate::core_crypto::prelude::{
decrypt_glwe_ciphertext, extract_lwe_sample_from_glwe_ciphertext,
};
use aligned_vec::CACHELINE_ALIGN;
use dyn_stack::{PodStack, ReborrowMut, SizeOverflow, StackReq};
use tfhe_fft::c64;
/// Perform a blind rotation given an input [`LWE ciphertext`](`LweCiphertext`), modifying a look-up
@@ -489,6 +501,48 @@ pub fn add_external_product_assign_mem_optimized<Scalar, OutputGlweCont, InputGl
}
}
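/// Karatsuba variant of [`add_external_product_assign_mem_optimized`]: the external product
/// is computed with exact Karatsuba polynomial multiplication instead of the FFT, which the
/// noise experiments use to separate the FFT rounding noise from the rest of the PBS noise.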
pub fn karatsuba_add_external_product_assign_mem_optimized<
Scalar,
OutputGlweCont,
InputGlweCont,
GgswCont,
>(
out: &mut GlweCiphertext<OutputGlweCont>,
ggsw: &GgswCiphertext<GgswCont>,
glwe: &GlweCiphertext<InputGlweCont>,
stack: PodStack<'_>,
) where
Scalar: UnsignedTorus,
OutputGlweCont: ContainerMut<Element = Scalar>,
GgswCont: Container<Element = Scalar>,
InputGlweCont: Container<Element = Scalar>,
{
assert_eq!(out.ciphertext_modulus(), glwe.ciphertext_modulus());
let ciphertext_modulus = out.ciphertext_modulus();
assert!(ciphertext_modulus.is_compatible_with_native_modulus());
impl_karatsuba_add_external_product_assign(
out.as_mut_view(),
ggsw.as_view(),
glwe.as_view(),
stack,
);
if !ciphertext_modulus.is_native_modulus() {
// When we convert back from the fourier domain, integer values will contain up to 53
// MSBs with information. In our representation of power of 2 moduli < native modulus we
// fill the MSBs and leave the LSBs empty, this usage of the signed decomposer allows to
// round while keeping the data in the MSBs
let signed_decomposer = SignedDecomposer::new(
DecompositionBaseLog(ciphertext_modulus.get_custom_modulus().ilog2() as usize),
DecompositionLevelCount(1),
);
out.as_mut()
.iter_mut()
.for_each(|x| *x = signed_decomposer.closest_representable(*x));
}
}
/// Return the required memory for [`add_external_product_assign_mem_optimized`].
pub fn add_external_product_assign_mem_optimized_requirement<Scalar>(
glwe_size: GlweSize,
@@ -1003,6 +1057,293 @@ pub fn programmable_bootstrap_lwe_ciphertext<
);
}
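/// Variant of [`programmable_bootstrap_lwe_ciphertext`] used by the noise-measurement tests:
/// performs the FFT-based PBS and additionally returns the noise of the intermediate
/// accumulator after each external product, computed from the optional `debug_material`
/// (input LWE secret key, output GLWE secret key, clear accumulator).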
pub fn programmable_bootstrap_lwe_ciphertext_return_noise<
InputScalar,
OutputScalar,
InputCont,
OutputCont,
AccCont,
KeyCont,
>(
input: &LweCiphertext<InputCont>,
output: &mut LweCiphertext<OutputCont>,
accumulator: &GlweCiphertext<AccCont>,
fourier_bsk: &FourierLweBootstrapKey<KeyCont>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<OutputScalar>,
&GlweCiphertextOwned<OutputScalar>,
)>,
) -> Vec<Vec<OutputScalar>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
InputCont: Container<Element = InputScalar>,
OutputCont: ContainerMut<Element = OutputScalar>,
AccCont: Container<Element = OutputScalar>,
KeyCont: Container<Element = c64>,
{
assert!(
input.ciphertext_modulus().is_power_of_two(),
"This operation requires the input to have a power of two modulus."
);
assert_eq!(
output.ciphertext_modulus(),
accumulator.ciphertext_modulus()
);
let mut buffers = ComputationBuffers::new();
let fft = Fft::new(fourier_bsk.polynomial_size());
let fft = fft.as_view();
buffers.resize(
programmable_bootstrap_lwe_ciphertext_mem_optimized_requirement::<OutputScalar>(
fourier_bsk.glwe_size(),
fourier_bsk.polynomial_size(),
fft,
)
.unwrap()
.unaligned_bytes_required(),
);
let stack = buffers.stack();
programmable_bootstrap_lwe_ciphertext_mem_optimized_return_noise(
input,
output,
accumulator,
fourier_bsk,
fft,
stack,
debug_material,
)
}
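/// Karatsuba counterpart of [`programmable_bootstrap_lwe_ciphertext_return_noise`]: runs the
/// blind rotation with [`karatsuba_add_external_product_assign`] on a standard-domain
/// [`LweBootstrapKey`]; when `debug_material` is provided, the accumulator is decrypted after
/// each external product and its difference to the tracked clear accumulator is returned.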
pub fn karatsuba_programmable_bootstrap_lwe_ciphertext_return_noise<
InputScalar,
OutputScalar,
InputCont,
OutputCont,
AccCont,
KeyCont,
>(
input: &LweCiphertext<InputCont>,
output: &mut LweCiphertext<OutputCont>,
accumulator: &GlweCiphertext<AccCont>,
bsk: &LweBootstrapKey<KeyCont>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<OutputScalar>,
&GlweCiphertextOwned<OutputScalar>,
)>,
) -> Vec<Vec<OutputScalar>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus + CastInto<usize>,
InputCont: Container<Element = InputScalar>,
OutputCont: ContainerMut<Element = OutputScalar>,
AccCont: Container<Element = OutputScalar>,
KeyCont: Container<Element = OutputScalar>,
{
assert!(
input.ciphertext_modulus().is_power_of_two(),
"This operation requires the input to have a power of two modulus."
);
assert_eq!(
output.ciphertext_modulus(),
accumulator.ciphertext_modulus()
);
let mut buffers = ComputationBuffers::new();
let fft = Fft::new(bsk.polynomial_size());
let fft = fft.as_view();
buffers.resize(
programmable_bootstrap_lwe_ciphertext_mem_optimized_requirement::<OutputScalar>(
bsk.glwe_size(),
bsk.polynomial_size(),
fft,
)
.unwrap()
.unaligned_bytes_required()
* 10, // hack: the Karatsuba external products need a bigger scratch buffer than the FFT path
);
let stack = buffers.stack();
assert_eq!(
accumulator.ciphertext_modulus(),
output.ciphertext_modulus(),
"Mismatched moduli between accumulator ({:?}) and output ({:?})",
accumulator.ciphertext_modulus(),
output.ciphertext_modulus()
);
assert_eq!(
bsk.input_lwe_dimension(),
input.lwe_size().to_lwe_dimension(),
"Mismatched input LweDimension. \
FourierLweBootstrapKey input LweDimension: {:?}, input LweCiphertext LweDimension {:?}.",
bsk.input_lwe_dimension(),
input.lwe_size().to_lwe_dimension(),
);
assert_eq!(
bsk.output_lwe_dimension(),
output.lwe_size().to_lwe_dimension(),
"Mismatched output LweDimension. \
FourierLweBootstrapKey input LweDimension: {:?}, input LweCiphertext LweDimension {:?}.",
bsk.output_lwe_dimension(),
output.lwe_size().to_lwe_dimension(),
);
let mut noise_vec = vec![];
{
assert!(input.ciphertext_modulus().is_power_of_two());
assert!(output.ciphertext_modulus().is_power_of_two());
assert_eq!(
output.ciphertext_modulus(),
accumulator.ciphertext_modulus()
);
let (local_accumulator_data, mut stack) =
stack.collect_aligned(CACHELINE_ALIGN, accumulator.as_ref().iter().copied());
let mut local_accumulator = GlweCiphertextMutView::from_container(
&mut *local_accumulator_data,
accumulator.polynomial_size(),
accumulator.ciphertext_modulus(),
);
{
let mut lut = local_accumulator.as_mut_view();
let (lwe_mask, lwe_body) = input.get_mask_and_body();
let lut_poly_size = lut.polynomial_size();
let ciphertext_modulus = lut.ciphertext_modulus();
assert!(ciphertext_modulus.is_compatible_with_native_modulus());
let monomial_degree = MonomialDegree(pbs_modulus_switch(*lwe_body.data, lut_poly_size));
let mut clear_accumulator = Polynomial::from_container(
debug_material.map_or(vec![], |x| x.2.get_body().as_ref().to_vec()),
);
lut.as_mut_polynomial_list()
.iter_mut()
.for_each(|mut poly| {
let (tmp_poly, _) = stack
.rb_mut()
.make_aligned_raw(poly.as_ref().len(), CACHELINE_ALIGN);
let mut tmp_poly = Polynomial::from_container(&mut *tmp_poly);
tmp_poly.as_mut().copy_from_slice(poly.as_ref());
polynomial_wrapping_monic_monomial_div(&mut poly, &tmp_poly, monomial_degree);
});
// Apply the same computation on the clear polynomial
polynomial_wrapping_monic_monomial_div_assign(&mut clear_accumulator, monomial_degree);
// We initialize the ct_0 used for the successive cmuxes
let mut ct0 = lut;
let (ct1, mut stack) = stack.make_aligned_raw(ct0.as_ref().len(), CACHELINE_ALIGN);
let mut ct1 =
GlweCiphertextMutView::from_container(&mut *ct1, lut_poly_size, ciphertext_modulus);
for (loop_idx, (lwe_mask_element, bootstrap_key_ggsw)) in
izip!(lwe_mask.as_ref().iter(), bsk.iter()).enumerate()
{
if *lwe_mask_element != InputScalar::ZERO {
let monomial_degree =
MonomialDegree(pbs_modulus_switch(*lwe_mask_element, lut_poly_size));
// we effectively inline the body of cmux here, merging the initial subtraction
// operation with the monic polynomial multiplication, then performing the
// external product manually
// We rotate ct_1 and subtract ct_0 (first step of cmux) by performing
// ct_1 <- (ct_0 * X^{a_hat}) - ct_0
for (mut ct1_poly, ct0_poly) in izip!(
ct1.as_mut_polynomial_list().iter_mut(),
ct0.as_polynomial_list().iter(),
) {
polynomial_wrapping_monic_monomial_mul_and_subtract(
&mut ct1_poly,
&ct0_poly,
monomial_degree,
);
}
// as_mut_view is required to keep borrow rules consistent
// second step of cmux
karatsuba_add_external_product_assign(
ct0.as_mut_view(),
bootstrap_key_ggsw,
ct1.as_view(),
stack.rb_mut(),
);
if let Some((lwe_secret_key, glwe_secret_key, _)) = &debug_material {
let lwe_key_bit: usize = lwe_secret_key.as_ref()[loop_idx].cast_into();
// Rotate the clear accumulator depending on the key bit value
polynomial_wrapping_monic_monomial_mul_assign(
&mut clear_accumulator,
MonomialDegree(monomial_degree.0 * lwe_key_bit),
);
let mut decrypted = PlaintextList::new(
OutputScalar::ZERO,
PlaintextCount(ct0.polynomial_size().0),
);
decrypt_glwe_ciphertext(glwe_secret_key, &ct0, &mut decrypted);
// println!("decrypted={:?}", decrypted.as_ref());
// println!("clear_accumulator={:?}", clear_accumulator.as_ref());
let diff_to_clear: Vec<_> = decrypted
.as_ref()
.iter()
.copied()
.zip(clear_accumulator.as_ref().iter().copied())
.map(|(dec, clear)| dec.wrapping_sub(clear))
.collect();
// println!("diff_to_clear={:?}", &diff_to_clear);
// assert!(diff_to_clear.iter().copied().all(|x| x == Scalar::ZERO));
noise_vec.push(diff_to_clear);
}
}
}
if !ciphertext_modulus.is_native_modulus() {
// When we convert back from the fourier domain, integer values will contain up to
// 53 MSBs with information. In our representation of power of 2
// moduli < native modulus we fill the MSBs and leave the LSBs
// empty, this usage of the signed decomposer allows to round while
// keeping the data in the MSBs
let signed_decomposer = SignedDecomposer::new(
DecompositionBaseLog(ciphertext_modulus.get_custom_modulus().ilog2() as usize),
DecompositionLevelCount(1),
);
ct0.as_mut()
.iter_mut()
.for_each(|x| *x = signed_decomposer.closest_representable(*x));
}
}
extract_lwe_sample_from_glwe_ciphertext(&local_accumulator, output, MonomialDegree(0));
}
noise_vec
}
/// Memory optimized version of [`programmable_bootstrap_lwe_ciphertext`], the caller must provide
/// a properly configured [`FftView`] object and a `PodStack` used as a memory buffer having a
/// capacity at least as large as the result of
@@ -1064,6 +1405,70 @@ pub fn programmable_bootstrap_lwe_ciphertext_mem_optimized<
);
}
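/// Memory optimized version of [`programmable_bootstrap_lwe_ciphertext_return_noise`]: the
/// caller must provide a properly configured [`FftView`] object and a `PodStack` buffer; the
/// intermediate noise after each external product is returned via `bootstrap_return_noise`,
/// driven by the optional `debug_material`.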
pub fn programmable_bootstrap_lwe_ciphertext_mem_optimized_return_noise<
InputScalar,
OutputScalar,
InputCont,
OutputCont,
AccCont,
KeyCont,
>(
input: &LweCiphertext<InputCont>,
output: &mut LweCiphertext<OutputCont>,
accumulator: &GlweCiphertext<AccCont>,
fourier_bsk: &FourierLweBootstrapKey<KeyCont>,
fft: FftView<'_>,
stack: PodStack<'_>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<OutputScalar>,
&GlweCiphertextOwned<OutputScalar>,
)>,
) -> Vec<Vec<OutputScalar>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
InputCont: Container<Element = InputScalar>,
OutputCont: ContainerMut<Element = OutputScalar>,
AccCont: Container<Element = OutputScalar>,
KeyCont: Container<Element = c64>,
{
assert_eq!(
accumulator.ciphertext_modulus(),
output.ciphertext_modulus(),
"Mismatched moduli between accumulator ({:?}) and output ({:?})",
accumulator.ciphertext_modulus(),
output.ciphertext_modulus()
);
assert_eq!(
fourier_bsk.input_lwe_dimension(),
input.lwe_size().to_lwe_dimension(),
"Mismatched input LweDimension. \
FourierLweBootstrapKey input LweDimension: {:?}, input LweCiphertext LweDimension {:?}.",
fourier_bsk.input_lwe_dimension(),
input.lwe_size().to_lwe_dimension(),
);
assert_eq!(
fourier_bsk.output_lwe_dimension(),
output.lwe_size().to_lwe_dimension(),
"Mismatched output LweDimension. \
FourierLweBootstrapKey input LweDimension: {:?}, input LweCiphertext LweDimension {:?}.",
fourier_bsk.output_lwe_dimension(),
output.lwe_size().to_lwe_dimension(),
);
fourier_bsk.as_view().bootstrap_return_noise(
output.as_mut_view(),
input.as_view(),
accumulator.as_view(),
fft,
stack,
debug_material,
)
}
/// Return the required memory for [`programmable_bootstrap_lwe_ciphertext_mem_optimized`].
pub fn programmable_bootstrap_lwe_ciphertext_mem_optimized_requirement<OutputScalar>(
glwe_size: GlweSize,
@@ -1072,7 +1477,6 @@ pub fn programmable_bootstrap_lwe_ciphertext_mem_optimized_requirement<OutputSca
) -> Result<StackReq, SizeOverflow> {
bootstrap_scratch::<OutputScalar>(glwe_size, polynomial_size, fft)
}
/// This function takes list as input and output and computes the programmable bootstrap for each
/// slot progressively loading the bootstrapping key only once. The caller must provide
/// a properly configured [`FftView`] object and a `PodStack` used as a memory buffer having a

@@ -1,10 +1,10 @@
pub mod fft128;
pub mod fft64;
pub mod ntt64;
pub mod fft128_pbs;
pub mod fft64_pbs;
pub mod ntt64_pbs;
pub use fft128::*;
pub use fft64::*;
pub use ntt64::*;
pub use fft128_pbs::*;
pub use fft64_pbs::*;
pub use ntt64_pbs::*;
use crate::core_crypto::algorithms::glwe_encryption::allocate_and_trivially_encrypt_new_glwe_ciphertext;
use crate::core_crypto::commons::parameters::*;

@@ -22,7 +22,7 @@ pub fn generate_keys<
// Create the LweSecretKey
let input_lwe_secret_key = allocate_and_generate_new_binary_lwe_secret_key(
params.input_lwe_dimension,
params.lwe_dimension,
&mut rsc.secret_random_generator,
);
let output_glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
@@ -36,9 +36,9 @@ pub fn generate_keys<
Scalar::ZERO,
params.glwe_dimension.to_glwe_size(),
params.polynomial_size,
params.decomp_base_log,
params.decomp_level_count,
params.input_lwe_dimension,
params.pbs_base_log,
params.pbs_level,
params.lwe_dimension,
params.grouping_factor,
params.ciphertext_modulus,
);
@@ -52,11 +52,11 @@ pub fn generate_keys<
);
let mut fbsk = FourierLweMultiBitBootstrapKey::new(
params.input_lwe_dimension,
params.lwe_dimension,
params.glwe_dimension.to_glwe_size(),
params.polynomial_size,
params.decomp_base_log,
params.decomp_level_count,
params.pbs_base_log,
params.pbs_level,
params.grouping_factor,
);
@@ -627,3 +627,327 @@ pub fn std_test_lwe_encrypt_multi_bit_deterministic_pbs_decrypt_factor_3_thread_
MULTI_BIT_2_2_3_CUSTOM_MOD_PARAMS,
);
}
fn std_lwe_encrypt_multi_bit_deterministic_pbs_f128_decrypt_custom_mod(
params: MultiBitTestParams<u128>,
) {
let lwe_noise_distribution = params.lwe_noise_distribution;
let ciphertext_modulus = params.ciphertext_modulus;
let message_modulus_log = params.message_modulus_log;
let msg_modulus = 1u128 << message_modulus_log.0;
let encoding_with_padding = get_encoding_with_padding(ciphertext_modulus);
let glwe_dimension = params.glwe_dimension;
let polynomial_size = params.polynomial_size;
let thread_count = params.thread_count;
let mut rsc = TestResources::new();
let f = |x| x;
let delta = encoding_with_padding / msg_modulus;
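// e.g. with the native u128 modulus and one padding bit, encoding_with_padding = 2^127,
// so message_modulus_log = 4 (msg_modulus = 16) gives delta = 2^123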
let mut msg = msg_modulus;
let accumulator = generate_programmable_bootstrap_glwe_lut(
polynomial_size,
glwe_dimension.to_glwe_size(),
msg_modulus.cast_into(),
ciphertext_modulus,
delta,
f,
);
assert!(check_encrypted_content_respects_mod(
&accumulator,
ciphertext_modulus
));
let (input_lwe_secret_key, output_lwe_secret_key, bsk) = {
// Create the LweSecretKey
let input_lwe_secret_key = allocate_and_generate_new_binary_lwe_secret_key(
params.lwe_dimension,
&mut rsc.secret_random_generator,
);
let output_glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
params.glwe_dimension,
params.polynomial_size,
&mut rsc.secret_random_generator,
);
let output_lwe_secret_key = output_glwe_secret_key.clone().into_lwe_secret_key();
let mut bsk = LweMultiBitBootstrapKey::new(
0u128,
params.glwe_dimension.to_glwe_size(),
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
params.lwe_dimension,
params.grouping_factor,
params.ciphertext_modulus,
);
par_generate_lwe_multi_bit_bootstrap_key(
&input_lwe_secret_key,
&output_glwe_secret_key,
&mut bsk,
params.glwe_noise_distribution,
&mut rsc.encryption_random_generator,
);
(input_lwe_secret_key, output_lwe_secret_key, bsk)
};
assert!(check_encrypted_content_respects_mod(
&*bsk,
ciphertext_modulus
));
while msg != 0 {
msg = msg.wrapping_sub(1);
for _ in 0..NB_TESTS {
let plaintext = Plaintext(msg * delta);
let lwe_ciphertext_in = allocate_and_encrypt_new_lwe_ciphertext(
&input_lwe_secret_key,
plaintext,
lwe_noise_distribution,
ciphertext_modulus,
&mut rsc.encryption_random_generator,
);
assert!(check_encrypted_content_respects_mod(
&lwe_ciphertext_in,
ciphertext_modulus
));
let out_pbs_ct = {
let mut out_pbs_ct = LweCiphertext::new(
0,
output_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
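// The trailing `true` below presumably selects deterministic execution, so the second PBS run
// further down must reproduce this ciphertext exactly (see the final assert_eq)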
std_multi_bit_programmable_bootstrap_f128_lwe_ciphertext(
&lwe_ciphertext_in,
&mut out_pbs_ct,
&accumulator,
&bsk,
thread_count,
true,
);
assert!(check_encrypted_content_respects_mod(
&out_pbs_ct,
ciphertext_modulus
));
let decrypted = decrypt_lwe_ciphertext(&output_lwe_secret_key, &out_pbs_ct);
let decoded = round_decode(decrypted.0, delta) % msg_modulus;
assert_eq!(decoded, f(msg));
out_pbs_ct
};
let out_pbs_ct_other = {
let mut out_pbs_ct = LweCiphertext::new(
0,
output_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
std_multi_bit_programmable_bootstrap_f128_lwe_ciphertext(
&lwe_ciphertext_in,
&mut out_pbs_ct,
&accumulator,
&bsk,
thread_count,
true,
);
out_pbs_ct
};
assert_eq!(out_pbs_ct_other, out_pbs_ct);
}
// In coverage, we break after one while loop iteration, as changing message values does not
// yield higher coverage
#[cfg(tarpaulin)]
break;
}
}
#[test]
pub fn test_std_lwe_encrypt_multi_bit_deterministic_pbs_f128_decrypt_factor_3_thread_12_native_mod()
{
std_lwe_encrypt_multi_bit_deterministic_pbs_f128_decrypt_custom_mod(
MULTI_BIT_2_2_3_PARAMS_U128,
);
}
fn lwe_encrypt_multi_bit_deterministic_pbs_f128_decrypt_custom_mod(
params: MultiBitTestParams<u128>,
) {
let lwe_noise_distribution = params.lwe_noise_distribution;
let ciphertext_modulus = params.ciphertext_modulus;
let message_modulus_log = params.message_modulus_log;
let msg_modulus = 1u128 << message_modulus_log.0;
let encoding_with_padding = get_encoding_with_padding(ciphertext_modulus);
let glwe_dimension = params.glwe_dimension;
let polynomial_size = params.polynomial_size;
let thread_count = params.thread_count;
let mut rsc = TestResources::new();
let f = |x| x;
let delta = encoding_with_padding / msg_modulus;
let mut msg = msg_modulus;
let accumulator = generate_programmable_bootstrap_glwe_lut(
polynomial_size,
glwe_dimension.to_glwe_size(),
msg_modulus.cast_into(),
ciphertext_modulus,
delta,
f,
);
assert!(check_encrypted_content_respects_mod(
&accumulator,
ciphertext_modulus
));
let (input_lwe_secret_key, output_lwe_secret_key, bsk, fbsk) = {
// Create the LweSecretKey
let input_lwe_secret_key = allocate_and_generate_new_binary_lwe_secret_key(
params.lwe_dimension,
&mut rsc.secret_random_generator,
);
let output_glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
params.glwe_dimension,
params.polynomial_size,
&mut rsc.secret_random_generator,
);
let output_lwe_secret_key = output_glwe_secret_key.clone().into_lwe_secret_key();
let mut bsk = LweMultiBitBootstrapKey::new(
0u128,
params.glwe_dimension.to_glwe_size(),
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
params.lwe_dimension,
params.grouping_factor,
params.ciphertext_modulus,
);
par_generate_lwe_multi_bit_bootstrap_key(
&input_lwe_secret_key,
&output_glwe_secret_key,
&mut bsk,
params.glwe_noise_distribution,
&mut rsc.encryption_random_generator,
);
let mut fbsk = Fourier128LweMultiBitBootstrapKey::new(
bsk.input_lwe_dimension(),
bsk.glwe_size(),
bsk.polynomial_size(),
bsk.decomposition_base_log(),
bsk.decomposition_level_count(),
bsk.grouping_factor(),
);
par_convert_standard_lwe_multi_bit_bootstrap_key_to_fourier_128(&bsk, &mut fbsk);
(input_lwe_secret_key, output_lwe_secret_key, bsk, fbsk)
};
assert!(check_encrypted_content_respects_mod(
&*bsk,
ciphertext_modulus
));
while msg != 0u128 {
msg = msg.wrapping_sub(1);
for _ in 0..NB_TESTS_LIGHT {
let plaintext = Plaintext(msg * delta);
let lwe_ciphertext_in = allocate_and_encrypt_new_lwe_ciphertext(
&input_lwe_secret_key,
plaintext,
lwe_noise_distribution,
ciphertext_modulus,
&mut rsc.encryption_random_generator,
);
assert!(check_encrypted_content_respects_mod(
&lwe_ciphertext_in,
ciphertext_modulus
));
let out_pbs_ct = {
let mut out_pbs_ct = LweCiphertext::new(
0u128,
output_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
multi_bit_programmable_bootstrap_f128_lwe_ciphertext(
&lwe_ciphertext_in,
&mut out_pbs_ct,
&accumulator,
&fbsk,
thread_count,
true,
);
assert!(check_encrypted_content_respects_mod(
&out_pbs_ct,
ciphertext_modulus
));
let decrypted = decrypt_lwe_ciphertext(&output_lwe_secret_key, &out_pbs_ct);
let decoded = round_decode(decrypted.0, delta) % msg_modulus;
assert_eq!(decoded, f(msg));
out_pbs_ct
};
let out_pbs_ct_other = {
let mut out_pbs_ct = LweCiphertext::new(
0u128,
output_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
multi_bit_programmable_bootstrap_f128_lwe_ciphertext(
&lwe_ciphertext_in,
&mut out_pbs_ct,
&accumulator,
&fbsk,
thread_count,
true,
);
out_pbs_ct
};
assert_eq!(out_pbs_ct_other, out_pbs_ct);
}
// In coverage, we break after one while loop iteration, as changing message values does not
// yield higher coverage
#[cfg(tarpaulin)]
break;
}
}
#[test]
pub fn test_lwe_encrypt_multi_bit_deterministic_pbs_f128_decrypt_factor_3_thread_12_native_mod() {
lwe_encrypt_multi_bit_deterministic_pbs_f128_decrypt_custom_mod(MULTI_BIT_2_2_3_PARAMS_U128);
}

View File

@@ -179,12 +179,12 @@ pub const DUMMY_31_U32: ClassicTestParams<u32> = ClassicTestParams {
};
pub const MULTI_BIT_2_2_2_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
input_lwe_dimension: LweDimension(818),
lwe_dimension: LweDimension(818),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
0.000002226459789930014,
)),
decomp_base_log: DecompositionBaseLog(22),
decomp_level_count: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
@@ -197,12 +197,12 @@ pub const MULTI_BIT_2_2_2_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
};
pub const MULTI_BIT_3_3_2_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
input_lwe_dimension: LweDimension(922),
lwe_dimension: LweDimension(922),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
0.0000003272369292345697,
)),
decomp_base_log: DecompositionBaseLog(14),
decomp_level_count: DecompositionLevelCount(2),
pbs_base_log: DecompositionBaseLog(14),
pbs_level: DecompositionLevelCount(2),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(8192),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
@@ -215,12 +215,12 @@ pub const MULTI_BIT_3_3_2_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
};
pub const MULTI_BIT_2_2_2_CUSTOM_MOD_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
input_lwe_dimension: LweDimension(818),
lwe_dimension: LweDimension(818),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
0.000002226459789930014,
)),
decomp_base_log: DecompositionBaseLog(22),
decomp_level_count: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
@@ -233,12 +233,12 @@ pub const MULTI_BIT_2_2_2_CUSTOM_MOD_PARAMS: MultiBitTestParams<u64> = MultiBitT
};
pub const MULTI_BIT_2_2_3_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
input_lwe_dimension: LweDimension(888),
lwe_dimension: LweDimension(888),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
0.0000006125031601933181,
)),
decomp_base_log: DecompositionBaseLog(21),
decomp_level_count: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(21),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
@@ -250,13 +250,31 @@ pub const MULTI_BIT_2_2_3_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
thread_count: ThreadCount(12),
};
pub const MULTI_BIT_2_2_3_PARAMS_U128: MultiBitTestParams<u128> = MultiBitTestParams {
lwe_dimension: LweDimension(888),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
0.0000006125031601933181 * 0.0000006125031601933181,
)),
pbs_base_log: DecompositionBaseLog(21),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
0.0000000000000003152931493498455 * 0.0000000000000003152931493498455,
)),
message_modulus_log: MessageModulusLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(3),
thread_count: ThreadCount(12),
};
pub const MULTI_BIT_3_3_3_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
input_lwe_dimension: LweDimension(972),
lwe_dimension: LweDimension(972),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
0.00000013016688349592805,
)),
decomp_base_log: DecompositionBaseLog(14),
decomp_level_count: DecompositionLevelCount(2),
pbs_base_log: DecompositionBaseLog(14),
pbs_level: DecompositionLevelCount(2),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(8192),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
@@ -269,12 +287,12 @@ pub const MULTI_BIT_3_3_3_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
};
pub const MULTI_BIT_2_2_3_CUSTOM_MOD_PARAMS: MultiBitTestParams<u64> = MultiBitTestParams {
input_lwe_dimension: LweDimension(888),
lwe_dimension: LweDimension(888),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
0.0000006125031601933181,
)),
decomp_base_log: DecompositionBaseLog(21),
decomp_level_count: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(21),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(

View File

@@ -0,0 +1,796 @@
use super::*;
use crate::core_crypto::commons::generators::DeterministicSeeder;
use crate::core_crypto::commons::math::random::Seed;
use crate::core_crypto::commons::noise_formulas::lwe_multi_bit_programmable_bootstrap::*;
use crate::core_crypto::commons::noise_formulas::secure_noise::*;
use crate::core_crypto::commons::test_tools::variance;
use crate::core_crypto::entities::{
Fourier128LweMultiBitBootstrapKeyOwned, FourierLweMultiBitBootstrapKeyOwned,
};
use npyz::{DType, WriterBuilder};
use rayon::prelude::*;
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::any::TypeId;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::mem::discriminant;
use std::path::PathBuf;
// This is 1 / 16 which is exactly representable in an f64 (even an f32)
// 1 / 32 is too strict and fails the tests
const RELATIVE_TOLERANCE: f64 = 0.0625;
const NB_TESTS: usize = 5;
const EXP_NAME: &str = "u64-u128"; // wide-search-2000-gauss gpu-gauss gpu-tuniform
#[derive(Clone, Debug)]
enum MultiBitFourierBsk {
F64(FourierLweMultiBitBootstrapKeyOwned),
F128(Fourier128LweMultiBitBootstrapKeyOwned),
}
fn lwe_encrypt_multi_bit_pbs_decrypt_custom_mod<
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize> + Serialize + DeserializeOwned,
>(
params: &MultiBitTestParams<Scalar>,
run_measurements: &bool,
) {
let lwe_dimension = params.lwe_dimension;
let lwe_noise_distribution = params.lwe_noise_distribution;
let glwe_noise_distribution = params.glwe_noise_distribution;
let ciphertext_modulus = params.ciphertext_modulus;
let message_modulus_log = params.message_modulus_log;
let msg_modulus = Scalar::ONE << message_modulus_log.0;
let encoding_with_padding = get_encoding_with_padding(ciphertext_modulus);
let glwe_dimension = params.glwe_dimension;
let polynomial_size = params.polynomial_size;
let pbs_decomposition_base_log = params.pbs_base_log;
let pbs_decomposition_level_count = params.pbs_level;
let grouping_factor = params.grouping_factor;
assert_eq!(
discriminant(&lwe_noise_distribution),
discriminant(&glwe_noise_distribution),
"Noises are not of the same variant"
);
let distro: &str = if let DynamicDistribution::Gaussian(_) = lwe_noise_distribution {
"GAUSSIAN"
} else if let DynamicDistribution::TUniform(_) = lwe_noise_distribution {
"TUNIFORM"
} else {
panic!("Unknown distribution: {lwe_noise_distribution:?}")
};
let modulus_as_f64 = if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
} else {
ciphertext_modulus.get_custom_modulus() as f64
};
// output predicted noises to JSON
export_noise_predictions::<Scalar>(params);
if !run_measurements {
return;
}
let mut rsc = {
let mut deterministic_seeder = Box::new(
DeterministicSeeder::<ActivatedRandomGenerator>::new(Seed(420)),
);
let encryption_random_generator = EncryptionRandomGenerator::new(
deterministic_seeder.seed(),
deterministic_seeder.as_mut(),
);
let secret_random_generator = SecretRandomGenerator::new(deterministic_seeder.seed());
TestResources {
seeder: deterministic_seeder,
encryption_random_generator,
secret_random_generator,
}
};
let f = |x: Scalar| x;
let delta: Scalar = encoding_with_padding / msg_modulus;
let mut msg = msg_modulus;
let num_samples = NB_TESTS * <Scalar as CastInto<usize>>::cast_into(msg);
let mut noise_samples_fft = Vec::with_capacity(num_samples);
let mut noise_samples_kara = Vec::with_capacity(num_samples);
// generate pseudo-random secret
let input_lwe_secret_key = allocate_and_generate_new_binary_lwe_secret_key(
lwe_dimension,
&mut rsc.secret_random_generator,
);
// shall not play any role
//~ // rewrite with fixed Hamming weight secret
//~ // (n.b., with odd dimension, this is not exactly 1/2 !!)
//~ input_lwe_secret_key.as_mut().fill(0);
//~ input_lwe_secret_key.as_mut()[..lwe_dimension/2].fill(1);
// generate pseudo-random secret
let output_glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
glwe_dimension,
polynomial_size,
&mut rsc.secret_random_generator,
);
// shall not play any role either
//~ // rewrite with fixed Hamming weight secret
//~ // (n.b., with odd dimension, this is not exactly 1/2 !!)
//~ let output_glwe_secret_key_len = output_glwe_secret_key.as_ref().len();
//~ output_glwe_secret_key.as_mut().fill(0);
//~ output_glwe_secret_key.as_mut()[..output_glwe_secret_key_len/2].fill(1);
let output_lwe_secret_key = output_glwe_secret_key.as_lwe_secret_key();
let (bsk, fbsk) = {
let bsk = allocate_and_generate_new_lwe_multi_bit_bootstrap_key(
&input_lwe_secret_key,
&output_glwe_secret_key,
pbs_decomposition_base_log,
pbs_decomposition_level_count,
grouping_factor,
glwe_noise_distribution,
ciphertext_modulus,
&mut rsc.encryption_random_generator,
);
assert!(check_encrypted_content_respects_mod(
&*bsk,
ciphertext_modulus
));
let fbsk: MultiBitFourierBsk = if TypeId::of::<Scalar>() == TypeId::of::<u128>() {
let mut inner_fbsk = Fourier128LweMultiBitBootstrapKey::new(
bsk.input_lwe_dimension(),
bsk.glwe_size(),
bsk.polynomial_size(),
bsk.decomposition_base_log(),
bsk.decomposition_level_count(),
bsk.grouping_factor(),
);
par_convert_standard_lwe_multi_bit_bootstrap_key_to_fourier_128(&bsk, &mut inner_fbsk);
MultiBitFourierBsk::F128(inner_fbsk)
} else {
let mut inner_fbsk = FourierLweMultiBitBootstrapKey::new(
bsk.input_lwe_dimension(),
bsk.glwe_size(),
bsk.polynomial_size(),
bsk.decomposition_base_log(),
bsk.decomposition_level_count(),
bsk.grouping_factor(),
);
par_convert_standard_lwe_multi_bit_bootstrap_key_to_fourier(&bsk, &mut inner_fbsk);
MultiBitFourierBsk::F64(inner_fbsk)
};
(bsk, fbsk)
};
let mut accumulator = generate_programmable_bootstrap_glwe_lut(
polynomial_size,
glwe_dimension.to_glwe_size(),
msg_modulus.cast_into(),
ciphertext_modulus,
delta,
f,
);
let reference_accumulator = accumulator.clone();
let ref_acc_plain = accumulator.get_body().as_ref().to_vec();
// noiseless GLWE encryption of LUT ... s.t. mask|body are random instead of zeros|plain-LUT
let zero_noise = Gaussian::from_dispersion_parameter(Variance(0.0), 0.0);
encrypt_glwe_ciphertext_assign(
&output_glwe_secret_key,
&mut accumulator,
zero_noise,
&mut rsc.encryption_random_generator,
);
let mut sanity_plain = PlaintextList::new(
Scalar::ZERO,
PlaintextCount(accumulator.polynomial_size().0),
);
decrypt_glwe_ciphertext(&output_glwe_secret_key, &accumulator, &mut sanity_plain);
let dec_sanity = sanity_plain.as_ref().to_vec();
assert_eq!(ref_acc_plain, dec_sanity);
assert!(check_encrypted_content_respects_mod(
&accumulator,
ciphertext_modulus
));
while msg != Scalar::ZERO {
// msg = msg.wrapping_sub(Scalar::ONE);
msg = Scalar::ZERO;
println!("Acquiring {NB_TESTS} samples for \"{EXP_NAME}\" experiment ...");
let current_run_samples_kara_fft: Vec<_> = (0..NB_TESTS)
.into_par_iter()
.map(|thread_id| {
let mut rsc = TestResources::new();
let plaintext = Plaintext(msg * delta);
let lwe_ciphertext_in = allocate_and_encrypt_new_lwe_ciphertext(
&input_lwe_secret_key,
plaintext,
lwe_noise_distribution,
ciphertext_modulus,
&mut rsc.encryption_random_generator,
);
assert!(check_encrypted_content_respects_mod(
&lwe_ciphertext_in,
ciphertext_modulus
));
let mut karatsuba_out_ct = LweCiphertext::new(
Scalar::ZERO,
output_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
// Karatsuba functions support both u64 & u128
let karatsuba_noise = karatsuba_multi_bit_programmable_bootstrap_lwe_ciphertext(
&lwe_ciphertext_in,
&mut karatsuba_out_ct,
&accumulator,
&bsk,
params.thread_count,
Some((
&input_lwe_secret_key,
&output_glwe_secret_key,
&reference_accumulator,
)),
);
let filename_kara = format!("./results/{EXP_NAME}/samples/kara-id={thread_id}-gf={}-logB={}-l={}-k={}-N={}-distro={}-logQ={}.npy", grouping_factor.0, pbs_decomposition_base_log.0, pbs_decomposition_level_count.0, glwe_dimension.0, polynomial_size.0, distro, Scalar::BITS);
let filename_kara_path: PathBuf = filename_kara.as_str().into();
let filename_kara_parent = filename_kara_path.parent().unwrap();
std::fs::create_dir_all(&filename_kara_parent).unwrap();
let mut file = OpenOptions::new()
.create(true)
.write(true)
.open(&filename_kara)
.unwrap();
let mut writer = {
npyz::WriteOptions::new()
// 8 == number of bytes
.dtype(DType::new_scalar(">f8".parse().unwrap()))
.shape(&[
karatsuba_noise.len() as u64,
//~ karatsuba_noise[0].len() as u64,
1u64,
])
.writer(&mut file)
.begin_nd()
.unwrap()
};
for row in karatsuba_noise.iter() {
//~ for col in row.iter().copied() {
//~ writer.push(&(col as i64)).unwrap();
//~ }
let noise_as_float: f64 = row[0].into_signed().cast_into() / modulus_as_f64;
writer.push(&(noise_as_float)).unwrap(); // essentially SE
}
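// The resulting .npy file is a single big-endian f64 column of shape [karatsuba_noise.len(), 1]:
// one rescaled noise sample per external product, keeping only the first coefficient of each row.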
let last_ext_prod_karatsuba_noise = karatsuba_noise.last().unwrap();
assert!(check_encrypted_content_respects_mod(
&karatsuba_out_ct,
ciphertext_modulus
));
let mut fft_out_ct = LweCiphertext::new(
Scalar::ZERO,
output_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
// different FFT functions for u64 & u128
let fft_noise = match &fbsk {
MultiBitFourierBsk::F64(fbsk) =>
multi_bit_programmable_bootstrap_lwe_ciphertext_return_noise(
&lwe_ciphertext_in,
&mut fft_out_ct,
&accumulator,
&fbsk,
params.thread_count,
Some((
&input_lwe_secret_key,
&output_glwe_secret_key,
&reference_accumulator,
)),
),
MultiBitFourierBsk::F128(fbsk) =>
multi_bit_programmable_bootstrap_f128_lwe_ciphertext_return_noise(
&lwe_ciphertext_in,
&mut fft_out_ct,
&accumulator,
&fbsk,
params.thread_count,
true,
Some((
&input_lwe_secret_key,
&output_glwe_secret_key,
&reference_accumulator,
)),
),
};
let filename_fft = format!("./results/{EXP_NAME}/samples/fft-id={thread_id}-gf={}-logB={}-l={}-k={}-N={}-distro={}-logQ={}.npy", grouping_factor.0, pbs_decomposition_base_log.0, pbs_decomposition_level_count.0, glwe_dimension.0, polynomial_size.0, distro, Scalar::BITS);
let filename_fft_path: PathBuf = filename_fft.as_str().into();
let filename_fft_parent = filename_fft_path.parent().unwrap();
std::fs::create_dir_all(&filename_fft_parent).unwrap();
let mut file = OpenOptions::new()
.create(true)
.write(true)
.open(&filename_fft)
.unwrap();
let mut writer = {
npyz::WriteOptions::new()
// 8 == number of bytes
.dtype(DType::new_scalar(">f8".parse().unwrap()))
.shape(&[
fft_noise.len() as u64,
//~ fft_noise[0].len() as u64,
1u64,
])
.writer(&mut file)
.begin_nd()
.unwrap()
};
for row in fft_noise.iter() {
//~ for col in row.iter().copied() {
//~ writer.push(&(col as i64)).unwrap();
//~ }
let noise_as_float: f64 = row[0].into_signed().cast_into() / modulus_as_f64;
writer.push(&(noise_as_float)).unwrap(); // essentially SE
}
let last_ext_prod_fft_noise = fft_noise.last().unwrap();
assert!(check_encrypted_content_respects_mod(
&fft_out_ct,
ciphertext_modulus
));
//TODO FIXME uncomment !!
//~ let decrypted = decrypt_lwe_ciphertext(&output_lwe_secret_key, &karatsuba_out_ct);
//~ let decoded = round_decode(decrypted.0, delta) % msg_modulus;
//~ assert_eq!(decoded, f(msg));
// output a tuple with (Kara-noises, FFT-noises)
(
last_ext_prod_karatsuba_noise
.into_iter()
.map(|x| {
let d: f64 = (*x).cast_into();
let d = d / modulus_as_f64;
if d > 0.5 {
d - 1.0
} else {
d
}
})
.collect::<Vec<_>>(),
last_ext_prod_fft_noise
.into_iter()
.map(|x| {
let d: f64 = (*x).cast_into();
let d = d / modulus_as_f64;
if d > 0.5 {
d - 1.0
} else {
d
}
})
.collect::<Vec<_>>()
)
})
.flatten()
.collect();
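// Sketch (not from the diff) of the per-sample mapping used by the two `map` closures above
// that post-process `last_ext_prod_*_noise`; the helper name `torus_to_centered_f64` is
// illustrative:
//
// fn torus_to_centered_f64(x: u64, modulus_as_f64: f64) -> f64 {
//     // divide by q to land in [0, 1), then re-centre into (-0.5, 0.5] so small negative
//     // torus noises become small negative floats instead of values close to 1
//     let d = x as f64 / modulus_as_f64;
//     if d > 0.5 { d - 1.0 } else { d }
// }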
noise_samples_kara.extend(
current_run_samples_kara_fft
.clone()
.into_iter()
.map(|s| s.0),
);
noise_samples_fft.extend(current_run_samples_kara_fft.into_iter().map(|s| s.1));
}
println!("Finished parameters {params:?}");
//TODO write these values somewhere?
//~ let measured_variance_fft = variance(&noise_samples_fft);
//~ let measured_variance_kara = variance(&noise_samples_kara);
//TODO uncomment, at some point
//~ let output_lwe_dimension = match &fbsk {
//~ MultiBitFourierBsk::F64(k) => k.output_lwe_dimension(),
//~ MultiBitFourierBsk::F128(k) => k.output_lwe_dimension(),
//~ };
//~ let minimal_variance = minimal_lwe_variance_for_132_bits_security_gaussian(
//~ output_lwe_dimension,
//~ if ciphertext_modulus.is_native_modulus() {
//~ 2.0f64.powi(Scalar::BITS as i32)
//~ } else {
//~ ciphertext_modulus.get_custom_modulus() as f64
//~ },
//~ );
//~ if measured_variance_fft.0 < expected_variance_fft.0 {
//~ // We are in the clear as long as we have at least the noise for security
//~ assert!(
//~ measured_variance_fft.0 >= minimal_variance.0,
//~ "Found insecure variance after PBS\n\
//~ measure_variance={measured_variance_fft:?}\n\
//~ minimal_variance={minimal_variance:?}"
//~ );
//~ } else {
//~ // Check we are not too far from the expected variance if we are bigger
//~ let var_abs_diff = (expected_variance_fft.0 - measured_variance_fft.0).abs();
//~ let tolerance_threshold = RELATIVE_TOLERANCE * expected_variance_fft.0;
//~ assert!(
//~ var_abs_diff < tolerance_threshold,
//~ "Absolute difference for variance: {var_abs_diff}, \
//~ tolerance threshold: {tolerance_threshold}, \
//~ got variance: {measured_variance_fft:?}, \
//~ expected variance w/ FFT: {expected_variance_fft:?}"
//~ );
//~ }
}
fn export_noise_predictions<Scalar: UnsignedInteger>(params: &MultiBitTestParams<Scalar>) {
// output predicted noises to JSON
let distro: &str = if let DynamicDistribution::Gaussian(_) = params.lwe_noise_distribution {
"GAUSSIAN"
} else if let DynamicDistribution::TUniform(_) = params.lwe_noise_distribution {
"TUNIFORM"
} else {
panic!("Unknown distribution: {}", params.lwe_noise_distribution)
};
let log_q = if params.ciphertext_modulus.is_native_modulus() {
Scalar::BITS as u32
} else {
params.ciphertext_modulus.get_custom_modulus().ilog2()
};
let filename_exp_var = format!(
"./results/{EXP_NAME}/expected-variances-gf={}-logB={}-l={}-k={}-N={}-distro={}-logQ={}.json",
params.grouping_factor.0,
params.pbs_base_log.0,
params.pbs_level.0,
params.glwe_dimension.0,
params.polynomial_size.0,
distro,
log_q,
);
let filename_exp_var_path: PathBuf = filename_exp_var.as_str().into();
let filename_exp_var_parent = filename_exp_var_path.parent().unwrap();
std::fs::create_dir_all(&filename_exp_var_parent).unwrap();
let mut file_exp_var = File::create(&filename_exp_var).unwrap();
let (expected_variance_kara, expected_variance_fft) =
noise_prediction_kara_fft::<Scalar>(params);
file_exp_var
.write_all(
format!(
r#"{{
"lwe_dimension": {},
"grouping_factor": {},
"log_base": {},
"level": {},
"glwe_dimension": {},
"polynomial_degree": {},
"distribution": "{}",
"expected_variance_kara": {},
"expected_variance_fft": {}
}}"#,
params.lwe_dimension.0,
params.grouping_factor.0,
params.pbs_base_log.0,
params.pbs_level.0,
params.glwe_dimension.0,
params.polynomial_size.0,
distro,
expected_variance_kara.0,
expected_variance_fft.0,
)
.as_bytes(),
)
.unwrap();
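// e.g. for gf=3, logB=21, l=1, k=1, N=2048, Gaussian noise and the native u64 modulus this writes
// ./results/u64-u128/expected-variances-gf=3-logB=21-l=1-k=1-N=2048-distro=GAUSSIAN-logQ=64.json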
}
//TODO make this somehow a bit more compact
fn noise_prediction_kara_fft<Scalar: UnsignedInteger>(
params: &MultiBitTestParams<Scalar>,
) -> (Variance, Variance) {
if !params.ciphertext_modulus.is_native_modulus() {
panic!("With FFT, only native modulus is supported.")
}
let modulus_as_f64 = 2.0f64.powi(Scalar::BITS as i32);
let mantissa_size_as_f64 = if Scalar::BITS == 64 {
53_f64
} else if Scalar::BITS == 128 {
104_f64 //TODO check this (also make sure Sarah's impl is used, not the quadruple type,
// the mantissa size of which is 112+1)
} else {
panic!(
"Unexpected bit-len of ciphertext modulus: {:?}",
Scalar::BITS
)
};
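// (The FFT formulas below take the mantissa size presumably because the relative rounding
//  error of each floating-point product is on the order of 2^-mantissa; 104 corresponds to
//  the split double-double f128 backend rather than IEEE binary128, whose 112+1-bit mantissa
//  the TODO above mentions.)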
return (
if let DynamicDistribution::Gaussian(_) = params.lwe_noise_distribution {
match params.grouping_factor.0 {
2 => pbs_variance_132_bits_security_gaussian_gf_2_exact_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
modulus_as_f64,
),
3 => pbs_variance_132_bits_security_gaussian_gf_3_exact_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
modulus_as_f64,
),
4 => pbs_variance_132_bits_security_gaussian_gf_4_exact_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
modulus_as_f64,
),
_ => panic!("Unsupported grouping factor: {}", params.grouping_factor.0),
}
} else if let DynamicDistribution::TUniform(_) = params.lwe_noise_distribution {
match params.grouping_factor.0 {
2 => pbs_variance_132_bits_security_tuniform_gf_2_exact_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
modulus_as_f64,
),
3 => pbs_variance_132_bits_security_tuniform_gf_3_exact_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
modulus_as_f64,
),
4 => pbs_variance_132_bits_security_tuniform_gf_4_exact_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
modulus_as_f64,
),
_ => panic!("Unsupported grouping factor: {}", params.grouping_factor.0),
}
} else {
panic!("Unknown distribution: {:?}", params.lwe_noise_distribution)
},
if let DynamicDistribution::Gaussian(_) = params.lwe_noise_distribution {
match params.grouping_factor.0 {
2 => pbs_variance_132_bits_security_gaussian_gf_2_fft_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
mantissa_size_as_f64,
modulus_as_f64,
),
3 => pbs_variance_132_bits_security_gaussian_gf_3_fft_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
mantissa_size_as_f64,
modulus_as_f64,
),
4 => pbs_variance_132_bits_security_gaussian_gf_4_fft_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
mantissa_size_as_f64,
modulus_as_f64,
),
_ => panic!("Unsupported grouping factor: {}", params.grouping_factor.0),
}
} else if let DynamicDistribution::TUniform(_) = params.lwe_noise_distribution {
match params.grouping_factor.0 {
2 => pbs_variance_132_bits_security_tuniform_gf_2_fft_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
mantissa_size_as_f64,
modulus_as_f64,
),
3 => pbs_variance_132_bits_security_tuniform_gf_3_fft_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
mantissa_size_as_f64,
modulus_as_f64,
),
4 => pbs_variance_132_bits_security_tuniform_gf_4_fft_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
mantissa_size_as_f64,
modulus_as_f64,
),
_ => panic!("Unsupported grouping factor: {}", params.grouping_factor.0),
}
} else {
panic!("Unknown distribution: {:?}", params.lwe_noise_distribution)
},
);
}
#[test]
fn test_lwe_encrypt_multi_bit_pbs_decrypt_custom_mod_noise_test_params_multi_bit_4_bits_native_u64_132_bits() {
test_impl::<u64>(true);
}
#[test]
fn test_lwe_encrypt_multi_bit_pbs_decrypt_custom_mod_noise_test_params_multi_bit_4_bits_native_u128_132_bits() {
test_impl::<u128>(true);
}
#[test]
fn test_export_multi_bit_noise_predictions_native_u64_132_bits() {
test_impl::<u64>(false);
}
#[test]
fn test_export_multi_bit_noise_predictions_native_u128_132_bits() {
test_impl::<u128>(false);
}
fn test_impl<
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize> + Serialize + DeserializeOwned,
>(
run_measurements: bool,
) where
usize: CastFrom<Scalar>,
{
//TODO FIXME: params need to be updated, cf. mod.rs where they are defined
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_2_2_BITS_NATIVE_U64_132_BITS_TUNIFORM);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_2_4_BITS_NATIVE_U64_132_BITS_TUNIFORM);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_2_6_BITS_NATIVE_U64_132_BITS_TUNIFORM);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_2_BITS_NATIVE_U64_132_BITS_TUNIFORM);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_4_BITS_NATIVE_U64_132_BITS_TUNIFORM);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_6_BITS_NATIVE_U64_132_BITS_TUNIFORM);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_4_2_BITS_NATIVE_U64_132_BITS_TUNIFORM);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_4_4_BITS_NATIVE_U64_132_BITS_TUNIFORM);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_4_6_BITS_NATIVE_U64_132_BITS_TUNIFORM); return;
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_2_BITS_NATIVE_U64_132_BITS_GAUSSIAN);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_4_BITS_NATIVE_U64_132_BITS_GAUSSIAN);
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&
//~ NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_6_BITS_NATIVE_U64_132_BITS_GAUSSIAN); return;
let ciphertext_modulus = CiphertextModulus::<Scalar>::new_native();
let modulus_as_f64 = if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
} else {
ciphertext_modulus.get_custom_modulus() as f64
};
let msg_mod_log = 4;
let (level_upbnd, logB_l_upbnd) = if Scalar::BITS == 64 {
    (6, 36)
} else if Scalar::BITS == 128 {
    (4, 96)
} else {
    panic!("Unexpected bit-len of ciphertext modulus: {:?}", Scalar::BITS)
};
for gf in [2, 3, 4] {
for logbase in 5..=30 {
for level in 1..=level_upbnd {
if logbase * level > logB_l_upbnd {
continue;
} // also used: logbase * level < 15
//~ for (k,logN) in [(3,9),(4,9),(1,10),(2,10),(1,11)].iter() {
for (k,logN) in [(4,9),(2,10),(1,11),(3,10),(2,11),(1,12),(1,13),].iter() {
//~ // skip those not interesting 1 is here to make a margin
//~ if ((logbase*(level+1)) as f64) < 53_f64 - *logN as f64 - (((k+1)*level) as f64).log2() - 1_f64 || logbase * level > 36 {
//~ println!("Early-discarded: l={level}, logB={logbase}, (k,N)=({k},{})", 1<<*logN);
//~ continue;
//~ }
// Gaussian noise
let glwe_var = minimal_glwe_variance_for_132_bits_security_gaussian(GlweDimension(*k), PolynomialSize(1<<logN), modulus_as_f64);
let gaussian_params: MultiBitTestParams<Scalar> = MultiBitTestParams {
lwe_dimension: LweDimension(100 * gf),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
1.4742441118914234e-06 // this shall play no role, right..?
)),
pbs_base_log: DecompositionBaseLog(logbase),
pbs_level: DecompositionLevelCount(level),
glwe_dimension: GlweDimension(*k),
polynomial_size: PolynomialSize(1 << logN),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(glwe_var.get_standard_dev())),
message_modulus_log: MessageModulusLog(msg_mod_log),
ciphertext_modulus,
grouping_factor: LweBskGroupingFactor(gf),
thread_count: ThreadCount(12),
};
// skip those that predict FFT noise <10% of the overall noise
let (exp_var_kara,exp_var_fft) = noise_prediction_kara_fft::<Scalar>(&gaussian_params);
// 3 sigma > half interval size (msg-mod + padding bit)
if 3.0*exp_var_fft.0.sqrt() > 0.5 / (2usize.pow(msg_mod_log as u32 + 1) as f64) {
println!("3-sigma-discarded: l={level}, logB={logbase}, (k,N)=({k},{})", 1<<logN);
continue;
}
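// Worked example for the 3-sigma filter above: with msg_mod_log = 4 the torus is split into
// 2^(4+1) = 32 intervals (message bits + padding bit), so half an interval is 0.5/32 = 0.015625;
// any parameter set whose predicted FFT standard deviation exceeds 0.015625/3 ~ 5.2e-3 would
// already risk decryption failures and is skipped.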
if exp_var_fft.0 < exp_var_kara.0 * 1.1 {
println!("FFT-ratio-discarded: l={level}, logB={logbase}, (k,N)=({k},{})", 1<<logN);
continue;
}
lwe_encrypt_multi_bit_pbs_decrypt_custom_mod::<Scalar>(&gaussian_params, &run_measurements);
//~ // TUniform noise
//~ let glwe_bnd = minimal_glwe_bound_for_132_bits_security_tuniform(GlweDimension(*k), PolynomialSize(1<<logN), 2.0_f64.powf(64.0));
//~ let tuniform_params: MultiBitTestParams<u64> = MultiBitTestParams {
//~ lwe_dimension: LweDimension(100 * gf),
//~ lwe_noise_distribution: DynamicDistribution::new_t_uniform(10), // this shall play no role, right..?
//~ decomp_base_log: DecompositionBaseLog(logbase),
//~ decomp_level_count: DecompositionLevelCount(level),
//~ glwe_dimension: GlweDimension(*k),
//~ polynomial_size: PolynomialSize(1 << logN),
//~ glwe_noise_distribution: DynamicDistribution::new_t_uniform(glwe_bnd),
//~ message_modulus_log: MessageModulusLog(4),
//~ ciphertext_modulus: CiphertextModulus::new_native(),
//~ grouping_factor: LweBskGroupingFactor(gf),
//~ thread_count: ThreadCount(12),
//~ };
//~ lwe_encrypt_multi_bit_pbs_decrypt_custom_mod(&tuniform_params, &run_measurements);
}
}
}
}
}

View File

@@ -1,30 +1,65 @@
use super::*;
use crate::core_crypto::commons::noise_formulas::lwe_programmable_bootstrap::pbs_variance_132_bits_security_gaussian;
use crate::core_crypto::commons::noise_formulas::secure_noise::minimal_lwe_variance_for_132_bits_security_gaussian;
use crate::core_crypto::commons::test_tools::{torus_modular_diff, variance};
use crate::core_crypto::commons::generators::DeterministicSeeder;
use crate::core_crypto::commons::math::random::{RandomGenerable, Seed};
use crate::core_crypto::commons::noise_formulas::lwe_programmable_bootstrap::*;
use crate::core_crypto::commons::noise_formulas::secure_noise::*;
use crate::core_crypto::commons::numeric;
use crate::core_crypto::commons::test_tools::variance;
use crate::core_crypto::prelude::UnsignedInteger;
use npyz::{DType, WriterBuilder};
use rayon::prelude::*;
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::any::TypeId;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::mem::discriminant;
use std::path::PathBuf;
// This is 1 / 16 which is exactly representable in an f64 (even an f32)
// 1 / 32 is too strict and fails the tests
const RELATIVE_TOLERANCE: f64 = 0.0625;
const NB_TESTS: usize = 1000;
const NB_TESTS: usize = 5;
const EXP_NAME: &str = "u64-u128"; // wide-search-2000-gauss gpu-gauss gpu-tuniform
fn lwe_encrypt_pbs_decrypt_custom_mod<Scalar>(params: ClassicTestParams<Scalar>)
where
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize>,
#[derive(Clone, Debug)]
enum FourierBsk {
F64(FourierLweBootstrapKeyOwned),
F128(Fourier128LweBootstrapKeyOwned),
}
fn lwe_encrypt_pbs_decrypt_custom_mod<
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize> + Serialize + DeserializeOwned,
>(
params: &ClassicTestParams<Scalar>,
run_measurements: &bool,
) where
usize: numeric::CastFrom<Scalar>,
{
let input_lwe_dimension = params.lwe_dimension;
let lwe_dimension = params.lwe_dimension;
let lwe_noise_distribution = params.lwe_noise_distribution;
let glwe_noise_distribution = params.glwe_noise_distribution;
let ciphertext_modulus = params.ciphertext_modulus;
let message_modulus_log = params.message_modulus_log;
let msg_modulus = Scalar::ONE.shl(message_modulus_log.0);
let msg_modulus = Scalar::ONE << message_modulus_log.0;
let encoding_with_padding = get_encoding_with_padding(ciphertext_modulus);
let glwe_dimension = params.glwe_dimension;
let polynomial_size = params.polynomial_size;
let pbs_decomposition_base_log = params.pbs_base_log;
let pbs_decomposition_level_count = params.pbs_level;
assert_eq!(
discriminant(&lwe_noise_distribution),
discriminant(&glwe_noise_distribution),
"Noises are not of the same variant"
);
let distro: &str = if let DynamicDistribution::Gaussian(_) = lwe_noise_distribution {
"GAUSSIAN"
} else if let DynamicDistribution::TUniform(_) = lwe_noise_distribution {
"TUNIFORM"
} else {
panic!("Unknown distribution: {lwe_noise_distribution:?}")
};
let modulus_as_f64 = if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
@@ -32,16 +67,27 @@ where
ciphertext_modulus.get_custom_modulus() as f64
};
let expected_variance = pbs_variance_132_bits_security_gaussian(
input_lwe_dimension,
glwe_dimension,
polynomial_size,
pbs_decomposition_base_log,
pbs_decomposition_level_count,
modulus_as_f64,
);
// output predicted noises to JSON
export_noise_predictions::<Scalar>(params);
if !run_measurements {
return;
}
let mut rsc = TestResources::new();
let mut rsc = {
let mut deterministic_seeder = Box::new(
DeterministicSeeder::<ActivatedRandomGenerator>::new(Seed(420)),
);
let encryption_random_generator = EncryptionRandomGenerator::new(
deterministic_seeder.seed(),
deterministic_seeder.as_mut(),
);
let secret_random_generator = SecretRandomGenerator::new(deterministic_seeder.seed());
TestResources {
seeder: deterministic_seeder,
encryption_random_generator,
secret_random_generator,
}
};
let f = |x: Scalar| x;
@@ -49,13 +95,16 @@ where
let mut msg = msg_modulus;
let num_samples = NB_TESTS * <Scalar as CastInto<usize>>::cast_into(msg);
let mut noise_samples = Vec::with_capacity(num_samples);
let mut noise_samples_fft = Vec::with_capacity(num_samples);
let mut noise_samples_kara = Vec::with_capacity(num_samples);
// generate pseudo-random secret
let input_lwe_secret_key = allocate_and_generate_new_binary_lwe_secret_key(
input_lwe_dimension,
lwe_dimension,
&mut rsc.secret_random_generator,
);
// generate pseudo-random secret
let output_glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
glwe_dimension,
polynomial_size,
@@ -64,7 +113,7 @@ where
let output_lwe_secret_key = output_glwe_secret_key.as_lwe_secret_key();
let fbsk = {
let (bsk, fbsk) = {
let bsk = allocate_and_generate_new_lwe_bootstrap_key(
&input_lwe_secret_key,
&output_glwe_secret_key,
@@ -80,20 +129,36 @@ where
ciphertext_modulus
));
let mut fbsk = FourierLweBootstrapKey::new(
bsk.input_lwe_dimension(),
bsk.glwe_size(),
bsk.polynomial_size(),
bsk.decomposition_base_log(),
bsk.decomposition_level_count(),
);
let fbsk: FourierBsk = if TypeId::of::<Scalar>() == TypeId::of::<u128>() {
let mut inner_fbsk = Fourier128LweBootstrapKey::new(
bsk.input_lwe_dimension(),
bsk.glwe_size(),
bsk.polynomial_size(),
bsk.decomposition_base_log(),
bsk.decomposition_level_count(),
);
par_convert_standard_lwe_bootstrap_key_to_fourier(&bsk, &mut fbsk);
convert_standard_lwe_bootstrap_key_to_fourier_128(&bsk, &mut inner_fbsk);
fbsk
FourierBsk::F128(inner_fbsk)
} else {
let mut inner_fbsk = FourierLweBootstrapKey::new(
bsk.input_lwe_dimension(),
bsk.glwe_size(),
bsk.polynomial_size(),
bsk.decomposition_base_log(),
bsk.decomposition_level_count(),
);
par_convert_standard_lwe_bootstrap_key_to_fourier(&bsk, &mut inner_fbsk);
FourierBsk::F64(inner_fbsk)
};
(bsk, fbsk)
};
let accumulator = generate_programmable_bootstrap_glwe_lut(
let mut accumulator = generate_programmable_bootstrap_glwe_lut(
polynomial_size,
glwe_dimension.to_glwe_size(),
msg_modulus.cast_into(),
@@ -102,17 +167,44 @@ where
f,
);
let reference_accumulator = accumulator.clone();
let ref_acc_plain = accumulator.get_body().as_ref().to_vec();
// noiseless GLWE encryption of LUT ... s.t. mask|body are random instead of zeros|plain-LUT
let zero_noise = Gaussian::from_dispersion_parameter(Variance(0.0), 0.0);
encrypt_glwe_ciphertext_assign(
&output_glwe_secret_key,
&mut accumulator,
zero_noise,
&mut rsc.encryption_random_generator,
);
let mut sanity_plain = PlaintextList::new(
Scalar::ZERO,
PlaintextCount(accumulator.polynomial_size().0),
);
decrypt_glwe_ciphertext(&output_glwe_secret_key, &accumulator, &mut sanity_plain);
let dec_sanity = sanity_plain.as_ref().to_vec();
assert_eq!(ref_acc_plain, dec_sanity);
assert!(check_encrypted_content_respects_mod(
&accumulator,
ciphertext_modulus
));
while msg != Scalar::ZERO {
msg = msg.wrapping_sub(Scalar::ONE);
// msg = msg.wrapping_sub(Scalar::ONE);
msg = Scalar::ZERO;
let current_run_samples: Vec<_> = (0..NB_TESTS)
println!("Acquiring {NB_TESTS} samples for \"{EXP_NAME}\" experiment ...");
let current_run_samples_kara_fft: Vec<_> = (0..NB_TESTS)
.into_par_iter()
.map(|_| {
.map(|thread_id| {
let mut rsc = TestResources::new();
let plaintext = Plaintext(msg * delta);
@@ -130,76 +222,458 @@ where
ciphertext_modulus
));
let mut out_pbs_ct = LweCiphertext::new(
let mut karatsuba_out_ct = LweCiphertext::new(
Scalar::ZERO,
output_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
programmable_bootstrap_lwe_ciphertext(
// Karatsuba functions support both u64 & u128
let karatsuba_noise = karatsuba_programmable_bootstrap_lwe_ciphertext_return_noise(
&lwe_ciphertext_in,
&mut out_pbs_ct,
&mut karatsuba_out_ct,
&accumulator,
&fbsk,
&bsk,
Some((
&input_lwe_secret_key,
&output_glwe_secret_key,
&reference_accumulator,
)),
);
let filename_kara = format!("./results/{EXP_NAME}/samples/kara-id={thread_id}-gf=1-logB={}-l={}-k={}-N={}-distro={}-logQ={}.npy", pbs_decomposition_base_log.0, pbs_decomposition_level_count.0, glwe_dimension.0, polynomial_size.0, distro, Scalar::BITS);
let filename_kara_path: PathBuf = filename_kara.as_str().into();
let filename_kara_parent = filename_kara_path.parent().unwrap();
std::fs::create_dir_all(&filename_kara_parent).unwrap();
let mut file = OpenOptions::new()
.create(true)
.write(true)
.open(&filename_kara)
.unwrap();
let mut writer = {
npyz::WriteOptions::new()
.dtype(DType::new_scalar(">f8".parse().unwrap()))
.shape(&[
karatsuba_noise.len() as u64,
//~ karatsuba_noise[0].len() as u64,
1u64,
])
.writer(&mut file)
.begin_nd()
.unwrap()
};
for row in karatsuba_noise.iter() {
//~ for col in row.iter().copied() {
//~ writer.push(&(col as i64)).unwrap();
//~ }
let noise_as_float: f64 = row[0].into_signed().cast_into() / modulus_as_f64;
writer.push(&(noise_as_float)).unwrap(); // essentially SE
}
let last_ext_prod_karatsuba_noise = karatsuba_noise.last().unwrap();
assert!(check_encrypted_content_respects_mod(
&out_pbs_ct,
&karatsuba_out_ct,
ciphertext_modulus
));
let decrypted = decrypt_lwe_ciphertext(&output_lwe_secret_key, &out_pbs_ct);
let mut fft_out_ct = LweCiphertext::new(
Scalar::ZERO,
output_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
let decoded = round_decode(decrypted.0, delta) % msg_modulus;
// different FFT functions for u64 & u128
let fft_noise = match &fbsk {
FourierBsk::F64(fbsk) => programmable_bootstrap_lwe_ciphertext_return_noise(
&lwe_ciphertext_in,
&mut fft_out_ct,
&accumulator,
&fbsk,
Some((
&input_lwe_secret_key,
&output_glwe_secret_key,
&reference_accumulator,
)),
),
FourierBsk::F128(fbsk) => programmable_bootstrap_f128_lwe_ciphertext_return_noise(
&lwe_ciphertext_in,
&mut fft_out_ct,
&accumulator,
&fbsk,
Some((
&input_lwe_secret_key,
&output_glwe_secret_key,
&reference_accumulator,
)),
),
};
assert_eq!(decoded, f(msg));
let filename_fft = format!("./results/{EXP_NAME}/samples/fft-id={thread_id}-gf=1-logB={}-l={}-k={}-N={}-distro={}-logQ={}.npy", pbs_decomposition_base_log.0, pbs_decomposition_level_count.0, glwe_dimension.0, polynomial_size.0, distro, Scalar::BITS);
torus_modular_diff(plaintext.0, decrypted.0, ciphertext_modulus)
let filename_fft_path: PathBuf = filename_fft.as_str().into();
let filename_fft_parent = filename_fft_path.parent().unwrap();
std::fs::create_dir_all(&filename_fft_parent).unwrap();
let mut file = OpenOptions::new()
.create(true)
.write(true)
.open(&filename_fft)
.unwrap();
let mut writer = {
npyz::WriteOptions::new()
.dtype(DType::new_scalar(">f8".parse().unwrap()))
.shape(&[
fft_noise.len() as u64,
//~ fft_noise[0].len() as u64,
1u64,
])
.writer(&mut file)
.begin_nd()
.unwrap()
};
for row in fft_noise.iter() {
//~ for col in row.iter().copied() {
//~ writer.push(&(col as i64)).unwrap();
//~ }
let noise_as_float: f64 = row[0].into_signed().cast_into() / modulus_as_f64;
writer.push(&(noise_as_float)).unwrap(); // essentially SE
}
let last_ext_prod_fft_noise = fft_noise.last().unwrap();
assert!(check_encrypted_content_respects_mod(
&fft_out_ct,
ciphertext_modulus
));
//TODO FIXME uncomment !!
//~ let decrypted = decrypt_lwe_ciphertext(&output_lwe_secret_key, &karatsuba_out_ct);
//~ let decoded = round_decode(decrypted.0, delta) % msg_modulus;
//~ assert_eq!(decoded, f(msg));
// output a tuple with (Kara-noises, FFT-noises)
(
last_ext_prod_karatsuba_noise
.into_iter()
.map(|x| {
let d: f64 = (*x).cast_into();
let d = d / modulus_as_f64;
if d > 0.5 {
d - 1.0
} else {
d
}
})
.collect::<Vec<_>>(),
last_ext_prod_fft_noise
.into_iter()
.map(|x| {
let d: f64 = (*x).cast_into();
let d = d / modulus_as_f64;
if d > 0.5 {
d - 1.0
} else {
d
}
})
.collect::<Vec<_>>()
)
})
.flatten()
.collect();
noise_samples.extend(current_run_samples);
noise_samples_kara.extend(
current_run_samples_kara_fft
.clone()
.into_iter()
.map(|s| s.0),
);
noise_samples_fft.extend(current_run_samples_kara_fft.into_iter().map(|s| s.1));
}
let measured_variance = variance(&noise_samples);
println!("Finished parameters {params:?}");
let minimal_variance = minimal_lwe_variance_for_132_bits_security_gaussian(
fbsk.output_lwe_dimension(),
if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
} else {
ciphertext_modulus.get_custom_modulus() as f64
},
);
//TODO write these values somewhere?
//~ let measured_variance_fft = variance(&noise_samples_fft);
//~ let measured_variance_kara = variance(&noise_samples_kara);
// Have a log even in tests so there is a trace in no-capture mode for eyeballing the variances
println!("measured_variance={measured_variance:?}");
println!("expected_variance={expected_variance:?}");
println!("minimal_variance={minimal_variance:?}");
if measured_variance.0 < expected_variance.0 {
// We are in the clear as long as we have at least the noise for security
assert!(
measured_variance.0 >= minimal_variance.0,
"Found insecure variance after PBS\n\
measure_variance={measured_variance:?}\n\
minimal_variance={minimal_variance:?}"
);
} else {
// Check we are not too far from the expected variance if we are bigger
let var_abs_diff = (expected_variance.0 - measured_variance.0).abs();
let tolerance_threshold = RELATIVE_TOLERANCE * expected_variance.0;
assert!(
var_abs_diff < tolerance_threshold,
"Absolute difference for variance: {var_abs_diff}, \
tolerance threshold: {tolerance_threshold}, \
got variance: {measured_variance:?}, \
expected variance: {expected_variance:?}"
);
}
//TODO add TUniform?
//TODO uncomment, at some point
//~ let minimal_variance = minimal_lwe_variance_for_132_bits_security_gaussian(
//~ fbsk.output_lwe_dimension(),
//~ modulus_as_f64,
//~ );
//~ if measured_variance_fft.0 < expected_variance_fft.0 {
//~ // We are in the clear as long as we have at least the noise for security
//~ assert!(
//~ measured_variance_fft.0 >= minimal_variance.0,
//~ "Found insecure variance after PBS\n\
//~ measure_variance={measured_variance_fft:?}\n\
//~ minimal_variance={minimal_variance:?}"
//~ );
//~ } else {
//~ // Check we are not too far from the expected variance if we are bigger
//~ let var_abs_diff = (expected_variance_fft.0 - measured_variance_fft.0).abs();
//~ let tolerance_threshold = RELATIVE_TOLERANCE * expected_variance_fft.0;
//~ assert!(
//~ var_abs_diff < tolerance_threshold,
//~ "Absolute difference for variance: {var_abs_diff}, \
//~ tolerance threshold: {tolerance_threshold}, \
//~ got variance: {measured_variance_fft:?}, \
//~ expected variance w/ FFT: {expected_variance_fft:?}"
//~ );
//~ }
}
create_parametrized_test!(lwe_encrypt_pbs_decrypt_custom_mod {
NOISE_TEST_PARAMS_4_BITS_NATIVE_U64_132_BITS_GAUSSIAN
});
fn export_noise_predictions<
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize> + Serialize + DeserializeOwned,
>(
params: &ClassicTestParams<Scalar>,
) {
// output predicted noises to JSON
let distro: &str = if let DynamicDistribution::Gaussian(_) = params.lwe_noise_distribution {
"GAUSSIAN"
} else if let DynamicDistribution::TUniform(_) = params.lwe_noise_distribution {
"TUNIFORM"
} else {
panic!("Unknown distribution: {}", params.lwe_noise_distribution)
};
let log_q = if params.ciphertext_modulus.is_native_modulus() {
Scalar::BITS as u32
} else {
params.ciphertext_modulus.get_custom_modulus().ilog2()
};
let filename_exp_var = format!(
"./results/{EXP_NAME}/expected-variances-gf=1-logB={}-l={}-k={}-N={}-distro={}-logQ={}.json",
params.pbs_base_log.0,
params.pbs_level.0,
params.glwe_dimension.0,
params.polynomial_size.0,
distro,
log_q,
);
let filename_exp_var_path: PathBuf = filename_exp_var.as_str().into();
let filename_exp_var_parent = filename_exp_var_path.parent().unwrap();
std::fs::create_dir_all(&filename_exp_var_parent).unwrap();
let mut file_exp_var = File::create(&filename_exp_var).unwrap();
let (expected_variance_kara, expected_variance_fft) =
noise_prediction_kara_fft::<Scalar>(params);
file_exp_var
.write_all(
format!(
r#"{{
"lwe_dimension": {},
"log_base": {},
"level": {},
"glwe_dimension": {},
"polynomial_degree": {},
"distribution": "{}",
"expected_variance_kara": {},
"expected_variance_fft": {}
}}"#,
params.lwe_dimension.0,
params.pbs_base_log.0,
params.pbs_level.0,
params.glwe_dimension.0,
params.polynomial_size.0,
distro,
expected_variance_kara.0,
expected_variance_fft.0,
)
.as_bytes(),
)
.unwrap();
}
//TODO make this somehow a bit more compact
fn noise_prediction_kara_fft<
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize> + Serialize + DeserializeOwned,
>(
params: &ClassicTestParams<Scalar>,
) -> (Variance, Variance) {
if !params.ciphertext_modulus.is_native_modulus() {
panic!("With FFT, only native modulus is supported.")
}
let modulus_as_f64 = 2.0f64.powi(Scalar::BITS as i32);
let mantissa_size_as_f64 = if Scalar::BITS == 64 {
53_f64
} else if Scalar::BITS == 128 {
104_f64 //TODO check this (also make sure Sarah's impl is used, not the quadruple type,
// the mantissa size of which is 112+1)
} else {
panic!(
"Unexpected bit-len of ciphertext modulus: {:?}",
Scalar::BITS
)
};
return (
if let DynamicDistribution::Gaussian(_) = params.lwe_noise_distribution {
pbs_variance_132_bits_security_gaussian_exact_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
modulus_as_f64,
)
} else if let DynamicDistribution::TUniform(_) = params.lwe_noise_distribution {
pbs_variance_132_bits_security_tuniform_exact_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
modulus_as_f64,
)
} else {
panic!("Unknown distribution: {:?}", params.lwe_noise_distribution)
},
if let DynamicDistribution::Gaussian(_) = params.lwe_noise_distribution {
pbs_variance_132_bits_security_gaussian_fft_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
mantissa_size_as_f64,
modulus_as_f64,
)
} else if let DynamicDistribution::TUniform(_) = params.lwe_noise_distribution {
pbs_variance_132_bits_security_tuniform_fft_mul(
params.lwe_dimension,
params.glwe_dimension,
params.polynomial_size,
params.pbs_base_log,
params.pbs_level,
mantissa_size_as_f64,
modulus_as_f64,
)
} else {
panic!("Unknown distribution: {:?}", params.lwe_noise_distribution)
},
);
}
#[test]
fn test_lwe_encrypt_pbs_decrypt_custom_mod_noise_test_params_4_bits_native_u64_132_bits() {
test_impl::<u64>(true);
}
#[test]
fn test_lwe_encrypt_pbs_decrypt_custom_mod_noise_test_params_4_bits_native_u128_132_bits() {
test_impl::<u128>(true);
}
#[test]
fn test_export_noise_predictions_native_u64_132_bits() {
test_impl::<u64>(false);
}
#[test]
fn test_export_noise_predictions_native_u128_132_bits() {
test_impl::<u128>(false);
}
fn test_impl<
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize> + Serialize + DeserializeOwned,
>(
run_measurements: bool,
) where
usize: CastFrom<Scalar>,
{
//TODO FIXME: params need to be updated, cf. mod.rs where they are defined
//~ lwe_encrypt_pbs_decrypt_custom_mod<Scalar>(&
//~ NOISE_TEST_PARAMS_2_BITS_NATIVE_U64_132_BITS_TUNIFORM); return;
let ciphertext_modulus = CiphertextModulus::<Scalar>::new_native();
let modulus_as_f64 = if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
} else {
ciphertext_modulus.get_custom_modulus() as f64
};
let msg_mod_log = 4;
let (level_upbnd, logB_l_upbnd) = if Scalar::BITS == 64 {
    (6, 36)
} else if Scalar::BITS == 128 {
    (4, 96)
} else {
    panic!("Unexpected bit-len of ciphertext modulus: {:?}", Scalar::BITS)
};
for logbase in 5..=30 {
for level in 1..=level_upbnd { // 6 for u64, 4 for u128
if logbase * level > logB_l_upbnd { // 36 for u64, 96 for u128
continue;
} // also used: logbase * level < 15
//~ for (k,logN) in [(3,9),(4,9),(1,10),(2,10),(1,11)].iter() {
for (k,logN) in [(4,9),(2,10),(1,11),(3,10),(2,11),(1,12),(1,13),].iter() {
//~ // skip those not interesting 1 is here to make a margin
//~ if ((logbase*(level+1)) as f64) < 53_f64 - *logN as f64 - (((k+1)*level) as f64).log2() - 1_f64 || logbase * level > 36 {
//~ println!("Early-discarded: l={level}, logB={logbase}, (k,N)=({k},{})", 1<<*logN);
//~ continue;
//~ }
// Gaussian noise
// TODO put after defining gaussian_params with CiphertextModulus::new_native() ???
let glwe_var = minimal_glwe_variance_for_132_bits_security_gaussian(
    GlweDimension(*k),
    PolynomialSize(1 << logN),
    modulus_as_f64,
);
let gaussian_params: ClassicTestParams<Scalar> = ClassicTestParams {
lwe_dimension: LweDimension(100),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
1.4742441118914234e-06 // this shall play no role, right..?
)),
pbs_base_log: DecompositionBaseLog(logbase),
pbs_level: DecompositionLevelCount(level),
glwe_dimension: GlweDimension(*k),
polynomial_size: PolynomialSize(1 << logN),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(glwe_var.get_standard_dev())),
message_modulus_log: MessageModulusLog(msg_mod_log),
ciphertext_modulus,
// unused param's
ks_level: DecompositionLevelCount(0),
ks_base_log: DecompositionBaseLog(0),
pfks_level: DecompositionLevelCount(0),
pfks_base_log: DecompositionBaseLog(0),
pfks_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(0.0)),
cbs_level: DecompositionLevelCount(0),
cbs_base_log: DecompositionBaseLog(0),
};
// skip param sets where the predicted FFT-induced noise adds less than ~10% on top of the exact (Karatsuba) noise
let (exp_var_kara, exp_var_fft) = noise_prediction_kara_fft::<Scalar>(&gaussian_params);
// 3 sigma > half interval size (msg-mod + padding bit)
if 3.0 * exp_var_fft.0.sqrt() > 0.5 / (2usize.pow(msg_mod_log as u32 + 1) as f64) {
println!("3-sigma-discarded: l={level}, logB={logbase}, (k,N)=({k},{})", 1<<logN);
continue;
}
if exp_var_fft.0 < exp_var_kara.0 * 1.1 {
println!("FFT-ratio-discarded: l={level}, logB={logbase}, (k,N)=({k},{})", 1<<logN);
continue;
}
lwe_encrypt_pbs_decrypt_custom_mod::<Scalar>(&gaussian_params, &run_measurements);
//~ // TUniform noise
//~ let glwe_bnd = minimal_glwe_bound_for_132_bits_security_tuniform(GlweDimension(*k), PolynomialSize(1<<logN), 2.0_f64.powf(64.0));
//~ let tuniform_params: ClassicTestParams<Scalar> = ClassicTestParams {
//~ lwe_dimension: LweDimension(100),
//~ lwe_noise_distribution: DynamicDistribution::new_t_uniform(10), // this shall play no role, right..?
//~ pbs_base_log: DecompositionBaseLog(logbase),
//~ pbs_level: DecompositionLevelCount(level),
//~ glwe_dimension: GlweDimension(*k),
//~ polynomial_size: PolynomialSize(1 << logN),
//~ glwe_noise_distribution: DynamicDistribution::new_t_uniform(glwe_bnd),
//~ message_modulus_log: MessageModulusLog(4),
//~ ciphertext_modulus: CiphertextModulus::<Scalar>::new_native(), //TODO remove generics?
//~ // unused param's
//~ ks_level: DecompositionLevelCount(0),
//~ ks_base_log: DecompositionBaseLog(0),
//~ pfks_level: DecompositionLevelCount(0),
//~ pfks_base_log: DecompositionBaseLog(0),
//~ pfks_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(0.0)),
//~ cbs_level: DecompositionLevelCount(0),
//~ cbs_base_log: DecompositionBaseLog(0),
//~ };
//~ lwe_encrypt_pbs_decrypt_custom_mod::<Scalar>(&tuniform_params, &run_measurements);
}
}
}
}
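Spelled out, the two filters inside the parameter sweep above are (with $m$ = message_modulus_log, and $\sigma^2_{\mathrm{FFT}}$, $\sigma^2_{\mathrm{Kara}}$ the predicted variances returned by noise_prediction_kara_fft):

$$3\,\sigma_{\mathrm{FFT}} > \frac{1}{2\cdot 2^{m+1}} \quad\text{(discard: the noise alone would exceed half a slot, padding bit included),}$$

$$\sigma^2_{\mathrm{FFT}} < 1.1\,\sigma^2_{\mathrm{Kara}} \quad\text{(discard: the FFT contribution adds less than about 10\% over the exact product, too small to measure).}$$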

View File

@@ -2,6 +2,7 @@ use super::*;
mod lwe_encryption_noise;
mod lwe_keyswitch_noise;
mod lwe_multi_bit_programmable_bootstrapping_noise;
mod lwe_programmable_bootstrapping_noise;
#[allow(clippy::excessive_precision)]
@@ -28,3 +29,213 @@ pub const NOISE_TEST_PARAMS_4_BITS_NATIVE_U64_132_BITS_GAUSSIAN: ClassicTestPara
message_modulus_log: MessageModulusLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
};
//TODO FIXME after 3af71b4 in the optimizer, param's changed, not updated here yet:
// ---- GAUSSIAN ---------------------------------------------------------
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_2_BITS_NATIVE_U64_132_BITS_GAUSSIAN:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(256 * 3),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
1.1098369627275701e-05,
)),
pbs_base_log: DecompositionBaseLog(17),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(3),
polynomial_size: PolynomialSize(512),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
1.9524392655548086e-11,
)),
message_modulus_log: MessageModulusLog(2),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(3),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_4_BITS_NATIVE_U64_132_BITS_GAUSSIAN:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(279 * 3),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
3.3747142481837397e-06,
)),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
2.845267479601915e-15,
)),
message_modulus_log: MessageModulusLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(3),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_6_BITS_NATIVE_U64_132_BITS_GAUSSIAN:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(326 * 3),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
2.962875621642539e-07,
)),
pbs_base_log: DecompositionBaseLog(14),
pbs_level: DecompositionLevelCount(2),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(8192),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
2.168404344971009e-19,
)),
message_modulus_log: MessageModulusLog(6),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(3),
thread_count: ThreadCount(12),
};
// ---- TUNIFORM ---------------------------------------------------------
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_2_2_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(400 * 2),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(48),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(4),
polynomial_size: PolynomialSize(512),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(17),
message_modulus_log: MessageModulusLog(2),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(2),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_2_4_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(440 * 2),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(46),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(17),
message_modulus_log: MessageModulusLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(2),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_2_6_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(499 * 2),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(43),
pbs_base_log: DecompositionBaseLog(14),
pbs_level: DecompositionLevelCount(2),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(8192),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(4),
message_modulus_log: MessageModulusLog(6),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(2),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_2_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(267 * 3),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(48),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(4),
polynomial_size: PolynomialSize(512),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(17),
message_modulus_log: MessageModulusLog(2),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(3),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_4_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(293 * 3),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(46),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(17),
message_modulus_log: MessageModulusLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(3),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_3_6_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(333 * 3),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(43),
pbs_base_log: DecompositionBaseLog(14),
pbs_level: DecompositionLevelCount(2),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(8192),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(4),
message_modulus_log: MessageModulusLog(6),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(3),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_4_2_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(200 * 4),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(48),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(4),
polynomial_size: PolynomialSize(512),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(17),
message_modulus_log: MessageModulusLog(2),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(4),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_4_4_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(220 * 4),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(46),
pbs_base_log: DecompositionBaseLog(21),
pbs_level: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(17),
message_modulus_log: MessageModulusLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(4),
thread_count: ThreadCount(12),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_MULTI_BIT_GROUP_4_6_BITS_NATIVE_U64_132_BITS_TUNIFORM:
MultiBitTestParams<u64> = MultiBitTestParams {
lwe_dimension: LweDimension(250 * 4),
lwe_noise_distribution: DynamicDistribution::new_t_uniform(43),
pbs_base_log: DecompositionBaseLog(14),
pbs_level: DecompositionLevelCount(2),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(8192),
glwe_noise_distribution: DynamicDistribution::new_t_uniform(4),
message_modulus_log: MessageModulusLog(6),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(4),
thread_count: ThreadCount(12),
};
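The LWE dimensions above are deliberately spelled as a multiple of the grouping factor (e.g. 250 * 4). A minimal sanity-check sketch, under the assumption that the multi-bit blind rotation requires the input LWE dimension to be divisible by the grouping factor (the helper name is hypothetical):

// Sketch only; `MultiBitTestParams` and `UnsignedInteger` as defined in this module.
fn assert_divisible_by_grouping_factor<Scalar: UnsignedInteger>(
    params: &MultiBitTestParams<Scalar>,
) {
    // Assumption: the LWE secret is processed in groups of `grouping_factor` elements.
    assert_eq!(params.lwe_dimension.0 % params.grouping_factor.0, 0);
}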

View File

@@ -66,10 +66,10 @@ pub struct ClassicTestParams<Scalar: UnsignedInteger> {
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct MultiBitTestParams<Scalar: UnsignedInteger> {
pub input_lwe_dimension: LweDimension,
pub lwe_dimension: LweDimension,
pub lwe_noise_distribution: DynamicDistribution<Scalar>,
pub decomp_base_log: DecompositionBaseLog,
pub decomp_level_count: DecompositionLevelCount,
pub pbs_base_log: DecompositionBaseLog,
pub pbs_level: DecompositionLevelCount,
pub glwe_dimension: GlweDimension,
pub polynomial_size: PolynomialSize,
pub glwe_noise_distribution: DynamicDistribution<Scalar>,
@@ -83,10 +83,10 @@ pub struct MultiBitTestParams<Scalar: UnsignedInteger> {
// to change its value in test without the need of regenerating keys in the key cache.
impl<Scalar: UnsignedInteger> PartialEq for MultiBitTestParams<Scalar> {
fn eq(&self, other: &Self) -> bool {
self.input_lwe_dimension == other.input_lwe_dimension
self.lwe_dimension == other.lwe_dimension
&& self.lwe_noise_distribution == other.lwe_noise_distribution
&& self.decomp_base_log == other.decomp_base_log
&& self.decomp_level_count == other.decomp_level_count
&& self.pbs_base_log == other.pbs_base_log
&& self.pbs_level == other.pbs_level
&& self.glwe_dimension == other.glwe_dimension
&& self.polynomial_size == other.polynomial_size
&& self.glwe_noise_distribution == other.glwe_noise_distribution
@@ -171,8 +171,8 @@ impl<Scalar: UnsignedInteger> NamedParam for MultiBitTestParams<Scalar> {
fn name(&self) -> String {
format!(
"PARAM_LWE_MULTI_BIT_BOOTSTRAP_glwe_{}_poly_{}_decomp_base_log_{}_decomp_level_{}_input_dim_{}_ct_modulus_{}_msg_modulus_log_{}_group_factor_{}",
self.glwe_dimension.0, self.polynomial_size.0, self.decomp_base_log.0,
self.decomp_level_count.0, self.input_lwe_dimension.0, self.ciphertext_modulus, self.message_modulus_log.0,
self.glwe_dimension.0, self.polynomial_size.0, self.pbs_base_log.0,
self.pbs_level.0, self.lwe_dimension.0, self.ciphertext_modulus, self.message_modulus_log.0,
self.grouping_factor.0,
)
}

View File

@@ -2,6 +2,7 @@ use tfhe_fft::c64;
use tfhe_versionable::deprecation::{Deprecable, Deprecated};
use tfhe_versionable::VersionsDispatch;
use crate::core_crypto::entities::lwe_multi_bit_bootstrap_key::Fourier128LweMultiBitBootstrapKey;
use crate::core_crypto::prelude::{
Container, FourierLweMultiBitBootstrapKey, LweMultiBitBootstrapKey, UnsignedInteger,
};
@@ -33,3 +34,8 @@ pub enum FourierLweMultiBitBootstrapKeyVersions<C: Container<Element = c64>> {
V0(Deprecated<FourierLweMultiBitBootstrapKey<C>>),
V1(FourierLweMultiBitBootstrapKey<C>),
}
#[derive(VersionsDispatch)]
pub enum Fourier128MultiBitLweBootstrapKeyVersions<C: Container<Element = f64>> {
V0(Fourier128LweMultiBitBootstrapKey<C>),
}

View File

@@ -33,12 +33,60 @@ pub fn keyswitch_additive_variance_132_bits_security_gaussian_impl(
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
(1_f64 / 3.0)
* decomposition_level_count
decomposition_level_count
* input_lwe_dimension
* ((5.31469187675068 - 0.0497829131652661 * output_lwe_dimension).exp2()
+ 16.0 * modulus.powf(-2.0))
* ((1_f64 / 4.0) * decomposition_base.powf(2.0) + 0.5)
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (5.31469187675068 - 0.0497829131652661 * output_lwe_dimension).exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
+ input_lwe_dimension
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667 * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn keyswitch_additive_variance_132_bits_security_tuniform(
input_lwe_dimension: LweDimension,
output_lwe_dimension: LweDimension,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(keyswitch_additive_variance_132_bits_security_tuniform_impl(
input_lwe_dimension.0 as f64,
output_lwe_dimension.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn keyswitch_additive_variance_132_bits_security_tuniform_impl(
input_lwe_dimension: f64,
output_lwe_dimension: f64,
decomposition_base: f64,
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
decomposition_level_count
* input_lwe_dimension
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_lwe_dimension
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
+ input_lwe_dimension
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667 * decomposition_base.powf(-2.0 * decomposition_level_count))
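Written out (reading 0.0208(3) and 0.0416(6) as 1/48 and 1/24, and 2.88539… and 1.44269… as 2/ln 2 and 1/ln 2; constants rounded), the TUniform keyswitch variance computed above is

$$\ell\, n_{\mathrm{in}} \left( \frac{16}{q^{2}} + \frac{1}{3 q^{2}} \left( 2^{\,2 \lceil \log_2 q \,-\, 0.0252\, n_{\mathrm{out}} \,+\, 4.1007 \rceil} + \frac{1}{2} \right) \right) \left( \frac{B^{2}}{12} + \frac{1}{6} \right) + n_{\mathrm{in}} \left( \frac{1}{48\, q^{2}} + \frac{B^{-2\ell}}{24} \right),$$

with $n_{\mathrm{in}}, n_{\mathrm{out}}$ the input/output LWE dimensions, $B$ the decomposition base, $\ell$ the level count and $q$ the modulus.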

View File

@@ -0,0 +1,911 @@
// This file was autogenerated, do not modify by hand.
use crate::core_crypto::commons::dispersion::Variance;
use crate::core_crypto::commons::parameters::*;
// FFT Multiplication
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_2_fft_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_gaussian_gf_2_fft_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_2_fft_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
(1_f64 / 2.0)
* input_lwe_dimension
* (4.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (0.00161646632509532
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count.powf(1.77234652499817)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.82385461667286)
* (output_glwe_dimension + 1.0).powf(1.77234652499817))
.min(
(0.0359920092044659
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count.powf(1.13417603503624)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.19997688457614)
* (output_glwe_dimension + 1.0).powf(1.13417603503624))
.max(
0.00161646632509532
* (2.0
* (58.0
- 1.44269504088896
* (decomposition_level_count
* output_polynomial_size
* (output_glwe_dimension + 1.0))
.ln())
* (decomposition_level_count + 1.0).recip())
.exp2()
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_level_count.powf(1.77234652499817)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.82385461667286)
* (output_glwe_dimension + 1.0).powf(1.77234652499817),
),
)
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
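Reading the nested min/max above as one expression: writing $F_1$ for the superlinear fit (coefficient ≈ 0.00162, exponent ≈ 1.772 on $\ell(k+1)$ and ≈ 2.824 on $N$), $F_2$ for the milder fit (≈ 0.0360, ≈ 1.134, ≈ 2.200), and $F_3$ for $F_1$ with the $B^2$ factor replaced by the saturation term $2^{\,2(58 - \log_2(\ell N (k+1)))/(\ell+1)}$, the FFT contribution used here is

$$\min\!\big(F_1,\ \max(F_2,\ F_3)\big).$$

The gf_3 and gf_4 variants below follow the same structure with their own fitted constants.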
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_3_fft_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_gaussian_gf_3_fft_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_3_fft_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
(1_f64 / 3.0)
* input_lwe_dimension
* (8.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (0.00199266437277764
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count.powf(1.56465014441664)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.95465822637966)
* (output_glwe_dimension + 1.0).powf(1.56465014441664))
.min(
(0.0871692998932619
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count.powf(1.10016173309229)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.18609670373585)
* (output_glwe_dimension + 1.0).powf(1.10016173309229))
.max(
0.00199266437277764
* (2.0
* (58.0
- 1.44269504088896
* (decomposition_level_count
* output_polynomial_size
* (output_glwe_dimension + 1.0))
.ln())
* (decomposition_level_count + 1.0).recip())
.exp2()
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_level_count.powf(1.56465014441664)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.95465822637966)
* (output_glwe_dimension + 1.0).powf(1.56465014441664),
),
)
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_4_fft_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_gaussian_gf_4_fft_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_4_fft_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
(1_f64 / 4.0)
* input_lwe_dimension
* (16.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (0.00783734686643254
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count.powf(1.4448158032271)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.8850256339231)
* (output_glwe_dimension + 1.0).powf(1.4448158032271))
.min(
(0.219460209700405
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count.powf(1.12084795039463)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.16541303875524)
* (output_glwe_dimension + 1.0).powf(1.12084795039463))
.max(
0.00783734686643254
* (2.0
* (58.0
- 1.44269504088896
* (decomposition_level_count
* output_polynomial_size
* (output_glwe_dimension + 1.0))
.ln())
* (decomposition_level_count + 1.0).recip())
.exp2()
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_level_count.powf(1.4448158032271)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.8850256339231)
* (output_glwe_dimension + 1.0).powf(1.4448158032271),
),
)
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_2_fft_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_tuniform_gf_2_fft_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_2_fft_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
1e42 + //TODO FIXME
(1_f64 / 2.0)
* input_lwe_dimension
* (4.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (
3.4 * (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0))
.min(( 0.18 * (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0))
.max(3.4 * (2.0 * (58.0
- 1.44269504088896
* (decomposition_level_count
* output_polynomial_size
* (output_glwe_dimension + 1.0))
.ln())
* (decomposition_level_count + 1.0).recip())
.exp2()
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0),
),
)
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_3_fft_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_tuniform_gf_3_fft_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_3_fft_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
1e42 + //TODO FIXME
(1_f64 / 3.0)
* input_lwe_dimension
* (8.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ ( 5.2 * (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0))
.min(( 0.36 * (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0))
.max(5.2 * (2.0 * (58.0
- 1.44269504088896
* (decomposition_level_count
* output_polynomial_size
* (output_glwe_dimension + 1.0))
.ln())
* (decomposition_level_count + 1.0).recip())
.exp2()
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0),
),
)
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_4_fft_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_tuniform_gf_4_fft_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_4_fft_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
1e42 + //TODO FIXME
(1_f64 / 4.0)
* input_lwe_dimension
* (16.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ ( 8.6 * (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0))
.min(( 0.8 * (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0))
.max(8.6 * (2.0 * (58.0
- 1.44269504088896
* (decomposition_level_count
* output_polynomial_size
* (output_glwe_dimension + 1.0))
.ln())
* (decomposition_level_count + 1.0).recip())
.exp2()
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0),
),
)
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
// Exact (Karatsuba) Multiplication
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_2_exact_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_gaussian_gf_2_exact_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_2_exact_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
(1_f64 / 2.0)
* input_lwe_dimension
* (4.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_3_exact_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_gaussian_gf_3_exact_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_3_exact_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
(1_f64 / 3.0)
* input_lwe_dimension
* (8.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_4_exact_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_gaussian_gf_4_exact_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_gf_4_exact_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
(1_f64 / 4.0)
* input_lwe_dimension
* (16.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_2_exact_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_tuniform_gf_2_exact_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_2_exact_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
(1_f64 / 2.0)
* input_lwe_dimension
* (4.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_3_exact_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_tuniform_gf_3_exact_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_3_exact_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
(1_f64 / 3.0)
* input_lwe_dimension
* (8.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_4_exact_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_tuniform_gf_4_exact_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_gf_4_exact_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
(1_f64 / 4.0)
* input_lwe_dimension
* (16.0
* decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
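As a usage sketch (the parameter values and the helper name are purely illustrative, not a recommended set), the FFT-induced share of the predicted PBS noise can be obtained by comparing the _fft_mul and _exact_mul variants of the same formula:

// Sketch only; relies on the imports at the top of this file.
fn example_fft_noise_share() -> f64 {
    let (n, k, poly, base_log, level) = (
        LweDimension(800),
        GlweDimension(1),
        PolynomialSize(2048),
        DecompositionBaseLog(23),
        DecompositionLevelCount(1),
    );
    let mantissa = 53.0; // f64 mantissa size
    let modulus = 2.0f64.powi(64); // native u64 modulus

    let fft = pbs_variance_132_bits_security_gaussian_gf_2_fft_mul(
        n, k, poly, base_log, level, mantissa, modulus,
    );
    let exact = pbs_variance_132_bits_security_gaussian_gf_2_exact_mul(
        n, k, poly, base_log, level, modulus,
    );

    // The difference of the two predictions isolates the FFT-induced contribution.
    (fft.0 - exact.0) / fft.0
}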

View File

@@ -0,0 +1,109 @@
// This file was autogenerated, do not modify by hand.
use crate::core_crypto::commons::dispersion::Variance;
use crate::core_crypto::commons::parameters::*;
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn packing_keyswitch_additive_variance_132_bits_security_gaussian(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
lwe_to_pack: f64,
modulus: f64,
) -> Variance {
Variance(
packing_keyswitch_additive_variance_132_bits_security_gaussian_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
lwe_to_pack,
modulus,
),
)
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn packing_keyswitch_additive_variance_132_bits_security_gaussian_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
lwe_to_pack: f64,
modulus: f64,
) -> f64 {
decomposition_level_count
* input_lwe_dimension
* lwe_to_pack
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0513935574229692 * output_glwe_dimension * output_polynomial_size
+ 5.35186274509803)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
+ 0.5
* input_lwe_dimension
* ((1_f64 / 6.0) * modulus.powf(-2.0)
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn packing_keyswitch_additive_variance_132_bits_security_tuniform(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
lwe_to_pack: f64,
modulus: f64,
) -> Variance {
Variance(
packing_keyswitch_additive_variance_132_bits_security_tuniform_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
lwe_to_pack,
modulus,
),
)
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn packing_keyswitch_additive_variance_132_bits_security_tuniform_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
lwe_to_pack: f64,
modulus: f64,
) -> f64 {
decomposition_level_count
* input_lwe_dimension
* lwe_to_pack
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0513935574229692 * output_glwe_dimension * output_polynomial_size
+ 5.35186274509803)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
+ 0.5
* input_lwe_dimension
* ((1_f64 / 6.0) * modulus.powf(-2.0)
+ (1_f64 / 12.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
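In closed form (constants rounded), the packing-keyswitch formula above, whose body is identical for the Gaussian and TUniform variants in this file, reads

$$\ell\, n_{\mathrm{in}}\, t \left( \frac{16}{q^{2}} + 2^{\,5.3519 - 0.0514\, k N} \right) \left( \frac{B^{2}}{12} + \frac{1}{6} \right) + \frac{n_{\mathrm{in}}}{2} \left( \frac{1}{6 q^{2}} + \frac{B^{-2\ell}}{12} \right),$$

with $t$ the number of LWEs packed and $kN$ the output GLWE dimension times the polynomial size.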

View File

@@ -2,11 +2,150 @@
use crate::core_crypto::commons::dispersion::Variance;
use crate::core_crypto::commons::parameters::*;
// FFT Multiplication
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian(
pub fn pbs_variance_132_bits_security_gaussian_fft_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_gaussian_fft_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
#[allow(clippy::suspicious_operation_groupings)]
pub fn pbs_variance_132_bits_security_gaussian_fft_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
input_lwe_dimension
* (0.00812383963561811
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count.powf(1.16546250805694)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.18681397422566)
* (output_glwe_dimension + 1.0).powf(1.16546250805694)
+ decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 12.0) * modulus.powf(-2.0)
+ (1_f64 / 2.0)
* output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 24.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
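The first summand above is the fitted FFT-error term; reading the constants off the code (exponents rounded), it contributes, per input LWE coefficient,

$$0.00812\;\frac{2^{\,2\max(0,\ \log_2 q - m)}}{q^{2}}\; B^{2}\, k\, N^{2.187}\, \big(\ell\,(k+1)\big)^{1.165},$$

multiplied by $n$ overall, where $m$ is the mantissa size passed in.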
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_fft_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_tuniform_fft_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_fft_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
input_lwe_dimension
* (0.00812383963561811
* (2.0 * 0.0_f64.max(1.44269504088896 * modulus.ln() - mantissa_size)).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count.powf(1.16546250805694)
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.18681397422566)
* (output_glwe_dimension + 1.0).powf(1.16546250805694)
+ decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 12.0) * modulus.powf(-2.0)
+ (1_f64 / 2.0)
* output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 24.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
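// Illustrative usage sketch (editor's addition, not part of the autogenerated
// file): evaluating the FFT-multiplication variance predictors above, either
// through a newtype wrapper or through a raw-f64 `_impl` function. All
// parameter values are hypothetical and only demonstrate the plumbing; the
// mantissa size 53.0 assumes a plain f64 FFT backend and 2^64 plays the role
// of the ciphertext modulus.
#[allow(dead_code)]
fn example_fft_mul_pbs_variance() {
    let tuniform = pbs_variance_132_bits_security_tuniform_fft_mul(
        LweDimension(742),
        GlweDimension(1),
        PolynomialSize(2048),
        DecompositionBaseLog(23),
        DecompositionLevelCount(1),
        53.0,
        2.0f64.powi(64),
    );
    // The `_impl` variant takes plain f64 inputs, with the decomposition base
    // already exponentiated (2^23 rather than the base log 23).
    let gaussian = pbs_variance_132_bits_security_gaussian_fft_mul_impl(
        742.0,
        1.0,
        2048.0,
        2.0f64.powi(23),
        1.0,
        53.0,
        2.0f64.powi(64),
    );
    assert!(tuniform.0.is_finite() && gaussian.is_finite());
}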
// Exact (Karatsuba) Multiplication
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_exact_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
@@ -14,7 +153,7 @@ pub fn pbs_variance_132_bits_security_gaussian(
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_gaussian_impl(
Variance(pbs_variance_132_bits_security_gaussian_exact_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
@@ -28,7 +167,8 @@ pub fn pbs_variance_132_bits_security_gaussian(
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_gaussian_impl(
#[allow(clippy::suspicious_operation_groupings)]
pub fn pbs_variance_132_bits_security_gaussian_exact_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
@@ -37,20 +177,73 @@ pub fn pbs_variance_132_bits_security_gaussian_impl(
modulus: f64,
) -> f64 {
input_lwe_dimension
* (2.06537277069845e-33
* decomposition_base.powf(2.0)
* decomposition_level_count
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 3.0)
* decomposition_level_count
* output_polynomial_size
* ((-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
* (decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2()
+ 16.0 * modulus.powf(-2.0))
* ((1_f64 / 4.0) * decomposition_base.powf(2.0) + 0.5)
* (output_glwe_dimension + 1.0)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 12.0) * modulus.powf(-2.0)
+ (1_f64 / 2.0)
* output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 24.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_exact_mul(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
modulus: f64,
) -> Variance {
Variance(pbs_variance_132_bits_security_tuniform_exact_mul_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_variance_132_bits_security_tuniform_exact_mul_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
modulus: f64,
) -> f64 {
input_lwe_dimension
* (decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 12.0) * modulus.powf(-2.0)
+ (1_f64 / 2.0)
* output_glwe_dimension

View File

@@ -0,0 +1,138 @@
// This file was autogenerated, do not modify by hand.
use crate::core_crypto::commons::dispersion::Variance;
use crate::core_crypto::commons::parameters::*;
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_128_variance_132_bits_security_gaussian(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_128_variance_132_bits_security_gaussian_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
#[allow(clippy::suspicious_operation_groupings)]
pub fn pbs_128_variance_132_bits_security_gaussian_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
input_lwe_dimension
* (0.0686891258869679
* (2.0 * 0.0_f64.max(-1.0 * mantissa_size + 1.44269504088896 * modulus.ln())).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0)
+ decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (-0.0497829131652661 * output_glwe_dimension * output_polynomial_size
+ 5.31469187675068)
.exp2())
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 12.0) * modulus.powf(-2.0)
+ (1_f64 / 2.0)
* output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 24.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_128_variance_132_bits_security_tuniform(
input_lwe_dimension: LweDimension,
output_glwe_dimension: GlweDimension,
output_polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
mantissa_size: f64,
modulus: f64,
) -> Variance {
Variance(pbs_128_variance_132_bits_security_tuniform_impl(
input_lwe_dimension.0 as f64,
output_glwe_dimension.0 as f64,
output_polynomial_size.0 as f64,
2.0f64.powi(decomposition_base_log.0 as i32),
decomposition_level_count.0 as f64,
mantissa_size,
modulus,
))
}
/// This formula is only valid if the proper noise distributions are used and
/// if the keys used are encrypted using secure noise given by the
/// [`minimal_glwe_variance`](`super::secure_noise`)
/// and [`minimal_lwe_variance`](`super::secure_noise`) family of functions.
pub fn pbs_128_variance_132_bits_security_tuniform_impl(
input_lwe_dimension: f64,
output_glwe_dimension: f64,
output_polynomial_size: f64,
decomposition_base: f64,
decomposition_level_count: f64,
mantissa_size: f64,
modulus: f64,
) -> f64 {
input_lwe_dimension
* (0.0686891258869679
* (2.0 * 0.0_f64.max(-1.0 * mantissa_size + 1.44269504088896 * modulus.ln())).exp2()
* decomposition_base.powf(2.0)
* decomposition_level_count
* modulus.powf(-2.0)
* output_glwe_dimension
* output_polynomial_size.powf(2.0)
* (output_glwe_dimension + 1.0)
+ decomposition_level_count
* output_polynomial_size
* ((4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * output_glwe_dimension * output_polynomial_size
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5))
* ((1_f64 / 12.0) * decomposition_base.powf(2.0) + 0.166666666666667)
* (output_glwe_dimension + 1.0)
+ (1_f64 / 12.0) * modulus.powf(-2.0)
+ (1_f64 / 2.0)
* output_glwe_dimension
* output_polynomial_size
* (0.0208333333333333 * modulus.powf(-2.0)
+ 0.0416666666666667
* decomposition_base.powf(-2.0 * decomposition_level_count))
+ (1_f64 / 24.0) * decomposition_base.powf(-2.0 * decomposition_level_count))
}
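// Illustrative usage sketch (editor's addition, not part of the autogenerated
// file): calling the 128-bit PBS variance predictors above. The parameter
// values and the effective mantissa size passed for the split-f64 FFT backend
// are assumptions of this example, not values fixed by the file.
#[allow(dead_code)]
fn example_pbs_128_variance() {
    let modulus = 2.0f64.powi(128); // ciphertext modulus q = 2^128
    let predicted = pbs_128_variance_132_bits_security_tuniform(
        LweDimension(918),
        GlweDimension(1),
        PolynomialSize(2048),
        DecompositionBaseLog(15),
        DecompositionLevelCount(2),
        100.0, // hypothetical effective mantissa size of the FFT-128 backend
        modulus,
    );
    assert!(predicted.0.is_finite() && predicted.0 > 0.0);
}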

View File

@@ -1,4 +1,8 @@
// This file was autogenerated, do not modify by hand.
pub mod lwe_keyswitch;
pub mod lwe_multi_bit_programmable_bootstrap;
pub mod lwe_packing_keyswitch;
pub mod lwe_programmable_bootstrap;
pub mod lwe_programmable_bootstrap_128;
pub mod modulus_switch;
pub mod secure_noise;

View File

@@ -0,0 +1,28 @@
// This file was autogenerated, do not modify by hand.
use crate::core_crypto::commons::dispersion::Variance;
use crate::core_crypto::commons::parameters::*;
/// This formula is only valid when going from a larger to a smaller modulus
pub fn modulus_switch_additive_variance(
input_lwe_dimension: LweDimension,
modulus: f64,
new_modulus: f64,
) -> Variance {
Variance(modulus_switch_additive_variance_impl(
input_lwe_dimension.0 as f64,
modulus,
new_modulus,
))
}
/// This formula is only valid when going from a larger to a smaller modulus
pub fn modulus_switch_additive_variance_impl(
input_lwe_dimension: f64,
modulus: f64,
new_modulus: f64,
) -> f64 {
input_lwe_dimension
* (0.0208333333333333 * modulus.powf(-2.0) + 0.0416666666666667 * new_modulus.powf(-2.0))
+ (1_f64 / 6.0) * modulus.powf(-2.0)
+ (1_f64 / 12.0) * new_modulus.powf(-2.0)
}
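// Illustrative usage sketch (editor's addition): the additive modulus-switch
// variance above, evaluated for a switch from q = 2^64 down to 2N = 2^12
// (hypothetical values). With q much larger than the target modulus, the
// result is dominated by the (n / 24 + 1 / 12) / new_modulus^2 terms.
#[allow(dead_code)]
fn example_modulus_switch_variance() {
    let variance = modulus_switch_additive_variance(
        LweDimension(742),
        2.0f64.powi(64), // original modulus q
        2.0f64.powi(12), // target modulus 2N for a PBS with N = 2048
    );
    assert!(variance.0 > 0.0);
}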

View File

@@ -25,5 +25,62 @@ pub fn minimal_variance_for_132_bits_security_gaussian_impl(
lwe_dimension: f64,
modulus: f64,
) -> f64 {
(5.31469187675068 - 0.0497829131652661 * lwe_dimension).exp2() + 16.0 * modulus.powf(-2.0)
(4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (5.31469187675068 - 0.0497829131652661 * lwe_dimension).exp2()
}
pub fn minimal_glwe_variance_for_132_bits_security_tuniform(
glwe_dimension: GlweDimension,
polynomial_size: PolynomialSize,
modulus: f64,
) -> Variance {
let lwe_dimension = glwe_dimension.to_equivalent_lwe_dimension(polynomial_size);
minimal_lwe_variance_for_132_bits_security_tuniform(lwe_dimension, modulus)
}
pub fn minimal_lwe_variance_for_132_bits_security_tuniform(
lwe_dimension: LweDimension,
modulus: f64,
) -> Variance {
Variance(minimal_variance_for_132_bits_security_tuniform_impl(
lwe_dimension.0 as f64,
modulus,
))
}
pub fn minimal_variance_for_132_bits_security_tuniform_impl(
lwe_dimension: f64,
modulus: f64,
) -> f64 {
(4.0 - 2.88539008177793 * modulus.ln()).exp2()
+ (1_f64 / 3.0)
* modulus.powf(-2.0)
* ((2.0
* (-0.025167785 * lwe_dimension
+ 1.44269504088896 * modulus.ln()
+ 4.10067100000001)
.ceil())
.exp2()
+ 0.5)
}
pub fn minimal_glwe_bound_for_132_bits_security_tuniform(
glwe_dimension: GlweDimension,
polynomial_size: PolynomialSize,
modulus: f64,
) -> u32 {
let lwe_dimension = glwe_dimension.to_equivalent_lwe_dimension(polynomial_size);
minimal_lwe_bound_for_132_bits_security_tuniform(lwe_dimension, modulus)
}
pub fn minimal_lwe_bound_for_132_bits_security_tuniform(
lwe_dimension: LweDimension,
modulus: f64,
) -> u32 {
minimal_bound_for_132_bits_security_tuniform_impl(lwe_dimension.0 as f64, modulus)
}
pub fn minimal_bound_for_132_bits_security_tuniform_impl(lwe_dimension: f64, modulus: f64) -> u32 {
(-0.025167785 * lwe_dimension + 1.44269504088896 * modulus.ln() + 4.10067100000001).ceil()
as u32
}
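// Illustrative sketch (editor's addition): the TUniform helpers above are
// consistent with each other. The integer bound b returned by
// `minimal_lwe_bound_for_132_bits_security_tuniform` is the exponent feeding
// the (2^(2b) + 0.5) / (3 q^2) term of the variance formula, which
// additionally contains a 16 / q^2 floor term. The dimension and modulus are
// hypothetical.
#[allow(dead_code)]
fn example_secure_tuniform_noise() {
    let lwe_dimension = LweDimension(918);
    let modulus = 2.0f64.powi(64);
    let bound = minimal_lwe_bound_for_132_bits_security_tuniform(lwe_dimension, modulus);
    let variance = minimal_lwe_variance_for_132_bits_security_tuniform(lwe_dimension, modulus);
    let from_bound = (2.0f64.powi(2 * bound as i32) + 0.5) / (3.0 * modulus * modulus);
    // The full formula adds the floor term on top of the bound-derived part.
    assert!(variance.0 >= from_bound);
}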

View File

@@ -46,6 +46,20 @@ pub fn ggsw_ciphertext_list_size(
ciphertext_count.0 * ggsw_ciphertext_size(glwe_size, polynomial_size, decomp_level_count)
}
pub fn fourier_ggsw_ciphertext_list_size(
ciphertext_count: GgswCiphertextCount,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomp_level_count: DecompositionLevelCount,
) -> usize {
ciphertext_count.0
* fourier_ggsw_ciphertext_size(
glwe_size,
polynomial_size.to_fourier_polynomial_size(),
decomp_level_count,
)
}
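// Illustrative sketch (editor's addition): the Fourier GGSW list size above
// amounts to `count * glwe_size^2 * level_count * N / 2` values, since a
// Fourier polynomial stores half as many coefficients as its standard-domain
// counterpart of size N. The sizes below are hypothetical.
#[allow(dead_code)]
fn example_fourier_ggsw_list_size() {
    let size = fourier_ggsw_ciphertext_list_size(
        GgswCiphertextCount(3),
        GlweSize(2),
        PolynomialSize(2048),
        DecompositionLevelCount(1),
    );
    assert_eq!(size, 3 * 2 * 2 * 1 * (2048 / 2));
}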
pub fn ggsw_ciphertext_list_encryption_fork_config<Scalar, MaskDistribution, NoiseDistribution>(
ggsw_ciphertext_count: GgswCiphertextCount,
glwe_size: GlweSize,

View File

@@ -0,0 +1,206 @@
use super::fourier_lwe_multi_bit_bootstrap_key_size;
use crate::core_crypto::backward_compatibility::entities::lwe_multi_bit_bootstrap_key::Fourier128MultiBitLweBootstrapKeyVersions;
use crate::core_crypto::commons::parameters::{
DecompositionBaseLog, DecompositionLevelCount, GlweSize, LweBskGroupingFactor, LweDimension,
PolynomialSize,
};
use crate::core_crypto::commons::traits::{Container, Split};
use crate::core_crypto::commons::utils::izip;
use crate::core_crypto::fft_impl::fft128::crypto::ggsw::Fourier128GgswCiphertext;
use aligned_vec::{avec, ABox};
use tfhe_versionable::Versionize;
#[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize, Versionize)]
#[versionize(Fourier128MultiBitLweBootstrapKeyVersions)]
pub struct Fourier128LweMultiBitBootstrapKey<C: Container<Element = f64>> {
data_re0: C,
data_re1: C,
data_im0: C,
data_im1: C,
polynomial_size: PolynomialSize,
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
}
impl<C: Container<Element = f64>> Fourier128LweMultiBitBootstrapKey<C> {
#[allow(clippy::too_many_arguments)]
pub fn from_container(
data_re0: C,
data_re1: C,
data_im0: C,
data_im1: C,
polynomial_size: PolynomialSize,
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Self {
let container_len = fourier_lwe_multi_bit_bootstrap_key_size(
input_lwe_dimension,
glwe_size,
polynomial_size,
decomposition_level_count,
grouping_factor,
)
.unwrap();
assert_eq!(data_re0.container_len(), container_len);
assert_eq!(data_re1.container_len(), container_len);
assert_eq!(data_im0.container_len(), container_len);
assert_eq!(data_im1.container_len(), container_len);
Self {
data_re0,
data_re1,
data_im0,
data_im1,
polynomial_size,
input_lwe_dimension,
glwe_size,
decomposition_base_log,
decomposition_level_count,
grouping_factor,
}
}
/// Return an iterator over the GGSW ciphertexts composing the key.
pub fn into_ggsw_iter(self) -> impl DoubleEndedIterator<Item = Fourier128GgswCiphertext<C>>
where
C: Split,
{
let multi_bit_lwe_dim = self.multi_bit_input_lwe_dimension();
let ggsw_count =
multi_bit_lwe_dim.0 * self.grouping_factor().ggsw_per_multi_bit_element().0;
izip!(
self.data_re0.split_into(ggsw_count),
self.data_re1.split_into(ggsw_count),
self.data_im0.split_into(ggsw_count),
self.data_im1.split_into(ggsw_count),
)
.map(move |(data_re0, data_re1, data_im0, data_im1)| {
Fourier128GgswCiphertext::from_container(
data_re0,
data_re1,
data_im0,
data_im1,
self.polynomial_size,
self.glwe_size,
self.decomposition_base_log,
self.decomposition_level_count,
)
})
}
pub fn input_lwe_dimension(&self) -> LweDimension {
self.input_lwe_dimension
}
pub fn polynomial_size(&self) -> PolynomialSize {
self.polynomial_size
}
pub fn glwe_size(&self) -> GlweSize {
self.glwe_size
}
pub fn decomposition_base_log(&self) -> DecompositionBaseLog {
self.decomposition_base_log
}
pub fn decomposition_level_count(&self) -> DecompositionLevelCount {
self.decomposition_level_count
}
pub fn output_lwe_dimension(&self) -> LweDimension {
LweDimension((self.glwe_size.0 - 1) * self.polynomial_size().0)
}
pub fn grouping_factor(&self) -> LweBskGroupingFactor {
self.grouping_factor
}
pub fn multi_bit_input_lwe_dimension(&self) -> LweDimension {
LweDimension(self.input_lwe_dimension().0 / self.grouping_factor.0)
}
pub fn data(self) -> (C, C, C, C) {
(self.data_re0, self.data_re1, self.data_im0, self.data_im1)
}
pub fn as_view(&self) -> Fourier128LweMultiBitBootstrapKey<&[C::Element]> {
Fourier128LweMultiBitBootstrapKey {
data_re0: self.data_re0.as_ref(),
data_re1: self.data_re1.as_ref(),
data_im0: self.data_im0.as_ref(),
data_im1: self.data_im1.as_ref(),
polynomial_size: self.polynomial_size,
input_lwe_dimension: self.input_lwe_dimension,
glwe_size: self.glwe_size,
decomposition_base_log: self.decomposition_base_log,
decomposition_level_count: self.decomposition_level_count,
grouping_factor: self.grouping_factor,
}
}
pub fn as_mut_view(&mut self) -> Fourier128LweMultiBitBootstrapKey<&mut [C::Element]>
where
C: AsMut<[C::Element]>,
{
Fourier128LweMultiBitBootstrapKey {
data_re0: self.data_re0.as_mut(),
data_re1: self.data_re1.as_mut(),
data_im0: self.data_im0.as_mut(),
data_im1: self.data_im1.as_mut(),
polynomial_size: self.polynomial_size,
input_lwe_dimension: self.input_lwe_dimension,
glwe_size: self.glwe_size,
decomposition_base_log: self.decomposition_base_log,
decomposition_level_count: self.decomposition_level_count,
grouping_factor: self.grouping_factor,
}
}
}
pub type Fourier128LweMultiBitBootstrapKeyOwned = Fourier128LweMultiBitBootstrapKey<ABox<[f64]>>;
impl Fourier128LweMultiBitBootstrapKey<ABox<[f64]>> {
pub fn new(
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Self {
let container_len = fourier_lwe_multi_bit_bootstrap_key_size(
input_lwe_dimension,
glwe_size,
polynomial_size,
decomposition_level_count,
grouping_factor,
)
.unwrap();
let boxed_re0 = avec![0.0f64; container_len].into_boxed_slice();
let boxed_re1 = avec![0.0f64; container_len].into_boxed_slice();
let boxed_im0 = avec![0.0f64; container_len].into_boxed_slice();
let boxed_im1 = avec![0.0f64; container_len].into_boxed_slice();
Fourier128LweMultiBitBootstrapKey::from_container(
boxed_re0,
boxed_re1,
boxed_im0,
boxed_im1,
polynomial_size,
input_lwe_dimension,
glwe_size,
decomposition_base_log,
decomposition_level_count,
grouping_factor,
)
}
}
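// Illustrative usage sketch (editor's addition): allocating an empty
// Fourier128 multi-bit bootstrap key with the constructor above and walking
// its GGSW ciphertexts. All parameters are hypothetical; the input LWE
// dimension has to be a multiple of the grouping factor, otherwise the size
// helper errors and the constructor panics on `unwrap`.
#[allow(dead_code)]
fn example_fourier128_multi_bit_bsk() {
    let bsk = Fourier128LweMultiBitBootstrapKeyOwned::new(
        LweDimension(932), // divisible by the grouping factor below
        GlweSize(2),
        PolynomialSize(2048),
        DecompositionBaseLog(15),
        DecompositionLevelCount(2),
        LweBskGroupingFactor(2),
    );
    // One group of GGSW ciphertexts per multi-bit chunk of the input LWE key.
    let ggsw_count = bsk.multi_bit_input_lwe_dimension().0
        * bsk.grouping_factor().ggsw_per_multi_bit_element().0;
    assert_eq!(bsk.as_view().into_ggsw_iter().count(), ggsw_count);
}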

View File

@@ -0,0 +1,257 @@
use super::{lwe_multi_bit_bootstrap_key_size, MultiBitBootstrapKeyConformanceParams};
use crate::conformance::ParameterSetConformant;
use crate::core_crypto::backward_compatibility::entities::lwe_multi_bit_bootstrap_key::FourierLweMultiBitBootstrapKeyVersions;
use crate::core_crypto::commons::parameters::*;
use crate::core_crypto::commons::traits::*;
use crate::core_crypto::entities::*;
use crate::core_crypto::fft_impl::fft64::math::fft::FourierPolynomialList;
use aligned_vec::{avec, ABox};
use tfhe_fft::c64;
use tfhe_versionable::Versionize;
#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize, Versionize)]
#[serde(bound(deserialize = "C: IntoContainerOwned"))]
#[versionize(FourierLweMultiBitBootstrapKeyVersions)]
pub struct FourierLweMultiBitBootstrapKey<C: Container<Element = c64>> {
fourier: FourierPolynomialList<C>,
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
}
pub type FourierLweMultiBitBootstrapKeyOwned = FourierLweMultiBitBootstrapKey<ABox<[c64]>>;
pub type FourierLweMultiBitBootstrapKeyView<'a> = FourierLweMultiBitBootstrapKey<&'a [c64]>;
pub type FourierLweMultiBitBootstrapKeyMutView<'a> = FourierLweMultiBitBootstrapKey<&'a mut [c64]>;
impl<C: Container<Element = c64>> FourierLweMultiBitBootstrapKey<C> {
pub fn from_container(
data: C,
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Self {
assert!(
input_lwe_dimension.0 % grouping_factor.0 == 0,
"Multi Bit BSK requires input LWE dimension to be a multiple of {}",
grouping_factor.0
);
let equivalent_multi_bit_dimension = input_lwe_dimension.0 / grouping_factor.0;
let ggsw_count =
equivalent_multi_bit_dimension * grouping_factor.ggsw_per_multi_bit_element().0;
let expected_container_size = ggsw_count
* fourier_ggsw_ciphertext_size(
glwe_size,
polynomial_size.to_fourier_polynomial_size(),
decomposition_level_count,
);
assert_eq!(data.container_len(), expected_container_size);
Self {
fourier: FourierPolynomialList {
data,
polynomial_size,
},
input_lwe_dimension,
glwe_size,
decomposition_base_log,
decomposition_level_count,
grouping_factor,
}
}
/// Return an iterator over the GGSW ciphertexts composing the key.
pub fn ggsw_iter(
&self,
) -> impl DoubleEndedIterator<Item = FourierGgswCiphertext<&'_ [C::Element]>> {
self.fourier
.data
.as_ref()
.chunks_exact(fourier_ggsw_ciphertext_size(
self.glwe_size,
self.fourier.polynomial_size.to_fourier_polynomial_size(),
self.decomposition_level_count,
))
.map(move |slice| {
FourierGgswCiphertext::from_container(
slice,
self.glwe_size,
self.fourier.polynomial_size,
self.decomposition_base_log,
self.decomposition_level_count,
)
})
}
pub fn input_lwe_dimension(&self) -> LweDimension {
self.input_lwe_dimension
}
pub fn multi_bit_input_lwe_dimension(&self) -> LweDimension {
LweDimension(self.input_lwe_dimension().0 / self.grouping_factor.0)
}
pub fn polynomial_size(&self) -> PolynomialSize {
self.fourier.polynomial_size
}
pub fn glwe_size(&self) -> GlweSize {
self.glwe_size
}
pub fn decomposition_base_log(&self) -> DecompositionBaseLog {
self.decomposition_base_log
}
pub fn decomposition_level_count(&self) -> DecompositionLevelCount {
self.decomposition_level_count
}
pub fn output_lwe_dimension(&self) -> LweDimension {
LweDimension((self.glwe_size.0 - 1) * self.polynomial_size().0)
}
pub fn grouping_factor(&self) -> LweBskGroupingFactor {
self.grouping_factor
}
pub fn data(self) -> C {
self.fourier.data
}
pub fn as_view(&self) -> FourierLweMultiBitBootstrapKeyView<'_> {
FourierLweMultiBitBootstrapKeyView {
fourier: FourierPolynomialList {
data: self.fourier.data.as_ref(),
polynomial_size: self.fourier.polynomial_size,
},
input_lwe_dimension: self.input_lwe_dimension,
glwe_size: self.glwe_size,
decomposition_base_log: self.decomposition_base_log,
decomposition_level_count: self.decomposition_level_count,
grouping_factor: self.grouping_factor,
}
}
pub fn as_mut_view(&mut self) -> FourierLweMultiBitBootstrapKeyMutView<'_>
where
C: AsMut<[c64]>,
{
FourierLweMultiBitBootstrapKeyMutView {
fourier: FourierPolynomialList {
data: self.fourier.data.as_mut(),
polynomial_size: self.fourier.polynomial_size,
},
input_lwe_dimension: self.input_lwe_dimension,
glwe_size: self.glwe_size,
decomposition_base_log: self.decomposition_base_log,
decomposition_level_count: self.decomposition_level_count,
grouping_factor: self.grouping_factor,
}
}
pub fn as_polynomial_list(&self) -> FourierPolynomialList<&'_ [c64]> {
FourierPolynomialList {
data: self.fourier.data.as_ref(),
polynomial_size: self.fourier.polynomial_size,
}
}
pub fn as_mut_polynomial_list(&mut self) -> FourierPolynomialList<&'_ mut [c64]>
where
C: AsMut<[c64]>,
{
FourierPolynomialList {
data: self.fourier.data.as_mut(),
polynomial_size: self.fourier.polynomial_size,
}
}
}
impl FourierLweMultiBitBootstrapKeyOwned {
pub fn new(
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Self {
assert!(
input_lwe_dimension.0 % grouping_factor.0 == 0,
"Multi Bit BSK requires input LWE dimension ({}) to be a multiple of {}",
input_lwe_dimension.0,
grouping_factor.0
);
let equivalent_multi_bit_dimension = input_lwe_dimension.0 / grouping_factor.0;
let ggsw_count =
equivalent_multi_bit_dimension * grouping_factor.ggsw_per_multi_bit_element().0;
let container_size = ggsw_count
* fourier_ggsw_ciphertext_size(
glwe_size,
polynomial_size.to_fourier_polynomial_size(),
decomposition_level_count,
);
let boxed = avec![
c64::default();
container_size
]
.into_boxed_slice();
Self {
fourier: FourierPolynomialList {
data: boxed,
polynomial_size,
},
input_lwe_dimension,
glwe_size,
decomposition_base_log,
decomposition_level_count,
grouping_factor,
}
}
}
impl<C: Container<Element = c64>> ParameterSetConformant for FourierLweMultiBitBootstrapKey<C> {
type ParameterSet = MultiBitBootstrapKeyConformanceParams;
fn is_conformant(&self, parameter_set: &Self::ParameterSet) -> bool {
let Self {
fourier:
FourierPolynomialList {
data,
polynomial_size,
},
input_lwe_dimension,
glwe_size,
decomposition_base_log,
decomposition_level_count,
grouping_factor,
} = self;
if input_lwe_dimension.0 % grouping_factor.0 != 0 {
return false;
}
data.container_len()
== lwe_multi_bit_bootstrap_key_size(
*input_lwe_dimension,
*glwe_size,
*polynomial_size,
*decomposition_level_count,
*grouping_factor,
)
.unwrap()
&& *grouping_factor == parameter_set.grouping_factor
&& *decomposition_base_log == parameter_set.decomp_base_log
&& *decomposition_level_count == parameter_set.decomp_level_count
&& *input_lwe_dimension == parameter_set.input_lwe_dimension
&& *glwe_size == parameter_set.output_glwe_size
&& *polynomial_size == parameter_set.polynomial_size
}
}
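// Illustrative usage sketch (editor's addition): the 64-bit Fourier multi-bit
// key above exposes the same dimension accessors as the 128-bit variant. The
// parameter values are hypothetical.
#[allow(dead_code)]
fn example_fourier64_multi_bit_bsk() {
    let bsk = FourierLweMultiBitBootstrapKeyOwned::new(
        LweDimension(932),
        GlweSize(2),
        PolynomialSize(2048),
        DecompositionBaseLog(15),
        DecompositionLevelCount(2),
        LweBskGroupingFactor(2),
    );
    // Output LWE dimension is (glwe_size - 1) * N; the multi-bit dimension is
    // the input dimension divided by the grouping factor.
    assert_eq!(bsk.output_lwe_dimension(), LweDimension(2048));
    assert_eq!(bsk.multi_bit_input_lwe_dimension(), LweDimension(466));
}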

View File

@@ -0,0 +1,77 @@
//! Module containing the definition of the [`LweMultiBitBootstrapKey`].
pub mod fft128_lwe_multi_bit_bootstrap_key;
pub mod fft64_lwe_multi_bit_bootstrap_key;
pub mod standard_lwe_multi_bit_bootstrap_key;
pub use fft128_lwe_multi_bit_bootstrap_key::{
Fourier128LweMultiBitBootstrapKey, Fourier128LweMultiBitBootstrapKeyOwned,
};
pub use fft64_lwe_multi_bit_bootstrap_key::{
FourierLweMultiBitBootstrapKey, FourierLweMultiBitBootstrapKeyMutView,
FourierLweMultiBitBootstrapKeyOwned, FourierLweMultiBitBootstrapKeyView,
};
pub use standard_lwe_multi_bit_bootstrap_key::{
lwe_multi_bit_bootstrap_key_fork_config, LweMultiBitBootstrapKey, LweMultiBitBootstrapKeyOwned,
MultiBitBootstrapKeyConformanceParams,
};
use crate::core_crypto::commons::parameters::{
DecompositionLevelCount, GgswCiphertextCount, GlweSize, LweBskGroupingFactor, LweDimension,
PolynomialSize,
};
use crate::core_crypto::entities::ggsw_ciphertext_list::{
fourier_ggsw_ciphertext_list_size, ggsw_ciphertext_list_size,
};
pub fn equivalent_multi_bit_lwe_dimension(
input_lwe_dimension: LweDimension,
grouping_factor: LweBskGroupingFactor,
) -> Result<LweDimension, &'static str> {
if input_lwe_dimension.0 % grouping_factor.0 != 0 {
return Err("equivalent_multi_bit_lwe_dimension error: \
input_lwe_dimension is required to be a multiple of grouping_factor");
}
Ok(LweDimension(input_lwe_dimension.0 / grouping_factor.0))
}
pub fn lwe_multi_bit_bootstrap_key_size(
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomp_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Result<usize, &'static str> {
let equivalent_multi_bit_lwe_dimension =
equivalent_multi_bit_lwe_dimension(input_lwe_dimension, grouping_factor)?;
let ggsw_count =
equivalent_multi_bit_lwe_dimension.0 * grouping_factor.ggsw_per_multi_bit_element().0;
Ok(ggsw_ciphertext_list_size(
GgswCiphertextCount(ggsw_count),
glwe_size,
polynomial_size,
decomp_level_count,
))
}
pub fn fourier_lwe_multi_bit_bootstrap_key_size(
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomp_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Result<usize, &'static str> {
let equivalent_multi_bit_lwe_dimension =
equivalent_multi_bit_lwe_dimension(input_lwe_dimension, grouping_factor)?;
let ggsw_count =
equivalent_multi_bit_lwe_dimension.0 * grouping_factor.ggsw_per_multi_bit_element().0;
Ok(fourier_ggsw_ciphertext_list_size(
GgswCiphertextCount(ggsw_count),
glwe_size,
polynomial_size,
decomp_level_count,
))
}
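// Illustrative usage sketch (editor's addition): the two size helpers above
// differ only in the per-GGSW size (standard vs. Fourier domain, the latter
// holding N / 2 coefficients per polynomial), and both reject an input LWE
// dimension that is not a multiple of the grouping factor. Values are
// hypothetical.
#[allow(dead_code)]
fn example_multi_bit_key_sizes() {
    assert!(
        equivalent_multi_bit_lwe_dimension(LweDimension(931), LweBskGroupingFactor(2)).is_err()
    );
    let standard = lwe_multi_bit_bootstrap_key_size(
        LweDimension(932),
        GlweSize(2),
        PolynomialSize(2048),
        DecompositionLevelCount(2),
        LweBskGroupingFactor(2),
    )
    .unwrap();
    let fourier = fourier_lwe_multi_bit_bootstrap_key_size(
        LweDimension(932),
        GlweSize(2),
        PolynomialSize(2048),
        DecompositionLevelCount(2),
        LweBskGroupingFactor(2),
    )
    .unwrap();
    // Fourier polynomials store half as many coefficients as standard ones.
    assert_eq!(standard, 2 * fourier);
}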

View File

@@ -1,17 +1,10 @@
//! Module containing the definition of the [`LweMultiBitBootstrapKey`].
use crate::conformance::ParameterSetConformant;
use crate::core_crypto::backward_compatibility::entities::lwe_multi_bit_bootstrap_key::{
FourierLweMultiBitBootstrapKeyVersions, LweMultiBitBootstrapKeyVersions,
};
use crate::core_crypto::backward_compatibility::entities::lwe_multi_bit_bootstrap_key::LweMultiBitBootstrapKeyVersions;
use crate::core_crypto::commons::generators::EncryptionRandomGeneratorForkConfig;
use crate::core_crypto::commons::math::random::{Distribution, RandomGenerable};
use crate::core_crypto::commons::parameters::*;
use crate::core_crypto::commons::traits::*;
use crate::core_crypto::entities::*;
use crate::core_crypto::fft_impl::fft64::math::fft::FourierPolynomialList;
use aligned_vec::{avec, ABox};
use tfhe_fft::c64;
use tfhe_versionable::Versionize;
#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize, Versionize)]
@@ -45,30 +38,6 @@ impl<Scalar: UnsignedInteger, C: ContainerMut<Element = Scalar>> std::ops::Deref
}
}
pub fn lwe_multi_bit_bootstrap_key_size(
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomp_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Result<usize, &'static str> {
if input_lwe_dimension.0 % grouping_factor.0 != 0 {
return Err("lwe_multi_bit_bootstrap_key_size error: \
input_lwe_dimension is required to be a multiple of grouping_factor");
}
let equivalent_multi_bit_dimension = input_lwe_dimension.0 / grouping_factor.0;
let ggsw_count =
equivalent_multi_bit_dimension * grouping_factor.ggsw_per_multi_bit_element().0;
Ok(ggsw_ciphertext_list_size(
GgswCiphertextCount(ggsw_count),
glwe_size,
polynomial_size,
decomp_level_count,
))
}
#[allow(clippy::too_many_arguments)]
pub fn lwe_multi_bit_bootstrap_key_fork_config<Scalar, MaskDistribution, NoiseDistribution>(
input_lwe_dimension: LweDimension,
@@ -388,213 +357,6 @@ impl<Scalar: UnsignedInteger> LweMultiBitBootstrapKeyOwned<Scalar> {
}
}
#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize, Versionize)]
#[serde(bound(deserialize = "C: IntoContainerOwned"))]
#[versionize(FourierLweMultiBitBootstrapKeyVersions)]
pub struct FourierLweMultiBitBootstrapKey<C: Container<Element = c64>> {
fourier: FourierPolynomialList<C>,
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
}
pub type FourierLweMultiBitBootstrapKeyOwned = FourierLweMultiBitBootstrapKey<ABox<[c64]>>;
pub type FourierLweMultiBitBootstrapKeyView<'a> = FourierLweMultiBitBootstrapKey<&'a [c64]>;
pub type FourierLweMultiBitBootstrapKeyMutView<'a> = FourierLweMultiBitBootstrapKey<&'a mut [c64]>;
impl<C: Container<Element = c64>> FourierLweMultiBitBootstrapKey<C> {
pub fn from_container(
data: C,
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Self {
assert!(
input_lwe_dimension.0 % grouping_factor.0 == 0,
"Multi Bit BSK requires input LWE dimension to be a multiple of {}",
grouping_factor.0
);
let equivalent_multi_bit_dimension = input_lwe_dimension.0 / grouping_factor.0;
let ggsw_count =
equivalent_multi_bit_dimension * grouping_factor.ggsw_per_multi_bit_element().0;
let expected_container_size = ggsw_count
* fourier_ggsw_ciphertext_size(
glwe_size,
polynomial_size.to_fourier_polynomial_size(),
decomposition_level_count,
);
assert_eq!(data.container_len(), expected_container_size);
Self {
fourier: FourierPolynomialList {
data,
polynomial_size,
},
input_lwe_dimension,
glwe_size,
decomposition_base_log,
decomposition_level_count,
grouping_factor,
}
}
/// Return an iterator over the GGSW ciphertexts composing the key.
pub fn ggsw_iter(
&self,
) -> impl DoubleEndedIterator<Item = FourierGgswCiphertext<&'_ [C::Element]>> {
self.fourier
.data
.as_ref()
.chunks_exact(fourier_ggsw_ciphertext_size(
self.glwe_size,
self.fourier.polynomial_size.to_fourier_polynomial_size(),
self.decomposition_level_count,
))
.map(move |slice| {
FourierGgswCiphertext::from_container(
slice,
self.glwe_size,
self.fourier.polynomial_size,
self.decomposition_base_log,
self.decomposition_level_count,
)
})
}
pub fn input_lwe_dimension(&self) -> LweDimension {
self.input_lwe_dimension
}
pub fn multi_bit_input_lwe_dimension(&self) -> LweDimension {
LweDimension(self.input_lwe_dimension().0 / self.grouping_factor.0)
}
pub fn polynomial_size(&self) -> PolynomialSize {
self.fourier.polynomial_size
}
pub fn glwe_size(&self) -> GlweSize {
self.glwe_size
}
pub fn decomposition_base_log(&self) -> DecompositionBaseLog {
self.decomposition_base_log
}
pub fn decomposition_level_count(&self) -> DecompositionLevelCount {
self.decomposition_level_count
}
pub fn output_lwe_dimension(&self) -> LweDimension {
LweDimension((self.glwe_size.0 - 1) * self.polynomial_size().0)
}
pub fn grouping_factor(&self) -> LweBskGroupingFactor {
self.grouping_factor
}
pub fn data(self) -> C {
self.fourier.data
}
pub fn as_view(&self) -> FourierLweMultiBitBootstrapKeyView<'_> {
FourierLweMultiBitBootstrapKeyView {
fourier: FourierPolynomialList {
data: self.fourier.data.as_ref(),
polynomial_size: self.fourier.polynomial_size,
},
input_lwe_dimension: self.input_lwe_dimension,
glwe_size: self.glwe_size,
decomposition_base_log: self.decomposition_base_log,
decomposition_level_count: self.decomposition_level_count,
grouping_factor: self.grouping_factor,
}
}
pub fn as_mut_view(&mut self) -> FourierLweMultiBitBootstrapKeyMutView<'_>
where
C: AsMut<[c64]>,
{
FourierLweMultiBitBootstrapKeyMutView {
fourier: FourierPolynomialList {
data: self.fourier.data.as_mut(),
polynomial_size: self.fourier.polynomial_size,
},
input_lwe_dimension: self.input_lwe_dimension,
glwe_size: self.glwe_size,
decomposition_base_log: self.decomposition_base_log,
decomposition_level_count: self.decomposition_level_count,
grouping_factor: self.grouping_factor,
}
}
pub fn as_polynomial_list(&self) -> FourierPolynomialList<&'_ [c64]> {
FourierPolynomialList {
data: self.fourier.data.as_ref(),
polynomial_size: self.fourier.polynomial_size,
}
}
pub fn as_mut_polynomial_list(&mut self) -> FourierPolynomialList<&'_ mut [c64]>
where
C: AsMut<[c64]>,
{
FourierPolynomialList {
data: self.fourier.data.as_mut(),
polynomial_size: self.fourier.polynomial_size,
}
}
}
impl FourierLweMultiBitBootstrapKeyOwned {
pub fn new(
input_lwe_dimension: LweDimension,
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
grouping_factor: LweBskGroupingFactor,
) -> Self {
assert!(
input_lwe_dimension.0 % grouping_factor.0 == 0,
"Multi Bit BSK requires input LWE dimension ({}) to be a multiple of {}",
input_lwe_dimension.0,
grouping_factor.0
);
let equivalent_multi_bit_dimension = input_lwe_dimension.0 / grouping_factor.0;
let ggsw_count =
equivalent_multi_bit_dimension * grouping_factor.ggsw_per_multi_bit_element().0;
let container_size = ggsw_count
* fourier_ggsw_ciphertext_size(
glwe_size,
polynomial_size.to_fourier_polynomial_size(),
decomposition_level_count,
);
let boxed = avec![
c64::default();
container_size
]
.into_boxed_slice();
Self {
fourier: FourierPolynomialList {
data: boxed,
polynomial_size,
},
input_lwe_dimension,
glwe_size,
decomposition_base_log,
decomposition_level_count,
grouping_factor,
}
}
}
pub struct MultiBitBootstrapKeyConformanceParams {
pub decomp_base_log: DecompositionBaseLog,
pub decomp_level_count: DecompositionLevelCount,
@@ -604,42 +366,3 @@ pub struct MultiBitBootstrapKeyConformanceParams {
pub grouping_factor: LweBskGroupingFactor,
pub ciphertext_modulus: CiphertextModulus<u64>,
}
impl<C: Container<Element = c64>> ParameterSetConformant for FourierLweMultiBitBootstrapKey<C> {
type ParameterSet = MultiBitBootstrapKeyConformanceParams;
fn is_conformant(&self, parameter_set: &Self::ParameterSet) -> bool {
let Self {
fourier:
FourierPolynomialList {
data,
polynomial_size,
},
input_lwe_dimension,
glwe_size,
decomposition_base_log,
decomposition_level_count,
grouping_factor,
} = self;
if input_lwe_dimension.0 % grouping_factor.0 != 0 {
return false;
}
data.container_len()
== lwe_multi_bit_bootstrap_key_size(
*input_lwe_dimension,
*glwe_size,
*polynomial_size,
*decomposition_level_count,
*grouping_factor,
)
.unwrap()
&& *grouping_factor == parameter_set.grouping_factor
&& *decomposition_base_log == parameter_set.decomp_base_log
&& *decomposition_level_count == parameter_set.decomp_level_count
&& *input_lwe_dimension == parameter_set.input_lwe_dimension
&& *glwe_size == parameter_set.output_glwe_size
&& *polynomial_size == parameter_set.polynomial_size
}
}

View File

@@ -245,24 +245,30 @@ where
Cont: Container<Element = f64>,
{
// CastInto required for PBS modulus switch which returns a usize
pub fn blind_rotate_assign<Scalar, ContLut, ContLwe>(
pub fn blind_rotate_assign<InputScalar, OutputScalar, ContLut, ContLwe>(
&self,
lut: &mut GlweCiphertext<ContLut>,
lwe: &LweCiphertext<ContLwe>,
fft: Fft128View<'_>,
stack: PodStack<'_>,
) where
Scalar: UnsignedTorus + CastInto<usize>,
ContLut: ContainerMut<Element = Scalar>,
ContLwe: Container<Element = Scalar>,
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
ContLut: ContainerMut<Element = OutputScalar>,
ContLwe: Container<Element = InputScalar>,
{
fn implementation<Scalar: UnsignedTorus + CastInto<usize>>(
fn implementation<InputScalar, OutputScalar>(
this: Fourier128LweBootstrapKey<&[f64]>,
mut lut: GlweCiphertext<&mut [Scalar]>,
lwe: LweCiphertext<&[Scalar]>,
mut lut: GlweCiphertext<&mut [OutputScalar]>,
lwe: LweCiphertext<&[InputScalar]>,
fft: Fft128View<'_>,
mut stack: PodStack<'_>,
) {
) where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
{
let lwe = lwe.as_ref();
let (lwe_body, lwe_mask) = lwe.split_last().unwrap();
@@ -286,7 +292,7 @@ where
for (lwe_mask_element, bootstrap_key_ggsw) in
izip!(lwe_mask.iter(), this.into_ggsw_iter())
{
if *lwe_mask_element != Scalar::ZERO {
if *lwe_mask_element != InputScalar::ZERO {
let stack = stack.rb_mut();
// We copy ct_0 to ct_1
let (ct1, stack) =
@@ -329,7 +335,7 @@ where
implementation(self.as_view(), lut.as_mut_view(), lwe.as_view(), fft, stack);
}
pub fn bootstrap<Scalar, ContLweOut, ContLweIn, ContAcc>(
pub fn bootstrap<InputScalar, OutputScalar, ContLweOut, ContLweIn, ContAcc>(
&self,
lwe_out: &mut LweCiphertext<ContLweOut>,
lwe_in: &LweCiphertext<ContLweIn>,
@@ -338,24 +344,28 @@ where
stack: PodStack<'_>,
) where
// CastInto required for PBS modulus switch which returns a usize
Scalar: UnsignedTorus + CastInto<usize>,
ContLweOut: ContainerMut<Element = Scalar>,
ContLweIn: Container<Element = Scalar>,
ContAcc: Container<Element = Scalar>,
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
ContLweOut: ContainerMut<Element = OutputScalar>,
ContLweIn: Container<Element = InputScalar>,
ContAcc: Container<Element = OutputScalar>,
{
fn implementation<Scalar: UnsignedTorus + CastInto<usize>>(
fn implementation<InputScalar, OutputScalar>(
this: Fourier128LweBootstrapKey<&[f64]>,
mut lwe_out: LweCiphertext<&mut [Scalar]>,
lwe_in: LweCiphertext<&[Scalar]>,
accumulator: GlweCiphertext<&[Scalar]>,
mut lwe_out: LweCiphertext<&mut [OutputScalar]>,
lwe_in: LweCiphertext<&[InputScalar]>,
accumulator: GlweCiphertext<&[OutputScalar]>,
fft: Fft128View<'_>,
stack: PodStack<'_>,
) {
) where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
{
// We type check dynamically with TypeId
#[allow(clippy::transmute_undefined_repr)]
if TypeId::of::<Scalar>() == TypeId::of::<u128>() {
if TypeId::of::<OutputScalar>() == TypeId::of::<u128>() {
let mut lwe_out: LweCiphertext<&mut [u128]> = unsafe { transmute(lwe_out) };
let lwe_in: LweCiphertext<&[u128]> = unsafe { transmute(lwe_in) };
let accumulator: GlweCiphertext<&[u128]> = unsafe { transmute(accumulator) };
return this.bootstrap_u128(&mut lwe_out, &lwe_in, &accumulator, fft, stack);
@@ -385,6 +395,102 @@ where
stack,
);
}
pub fn bootstrap_return_noise<InputScalar, OutputScalar, ContLweOut, ContLweIn, ContAcc>(
&self,
lwe_out: &mut LweCiphertext<ContLweOut>,
lwe_in: &LweCiphertext<ContLweIn>,
accumulator: &GlweCiphertext<ContAcc>,
fft: Fft128View<'_>,
stack: PodStack<'_>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<OutputScalar>,
&GlweCiphertextOwned<OutputScalar>,
)>,
) -> Vec<Vec<OutputScalar>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
ContLweOut: ContainerMut<Element = OutputScalar>,
ContLweIn: Container<Element = InputScalar>,
ContAcc: Container<Element = OutputScalar>,
{
#[allow(clippy::transmute_ptr_to_ptr)]
#[allow(clippy::missing_transmute_annotations)]
fn implementation<InputScalar, OutputScalar>(
this: Fourier128LweBootstrapKey<&[f64]>,
lwe_out: LweCiphertext<&mut [OutputScalar]>,
lwe_in: LweCiphertext<&[InputScalar]>,
accumulator: GlweCiphertext<&[OutputScalar]>,
fft: Fft128View<'_>,
stack: PodStack<'_>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<OutputScalar>,
&GlweCiphertextOwned<OutputScalar>,
)>,
) -> Vec<Vec<OutputScalar>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
{
// We type check dynamically with TypeId
#[allow(clippy::transmute_undefined_repr)]
if TypeId::of::<OutputScalar>() == TypeId::of::<u128>() {
let mut lwe_out: LweCiphertext<&mut [u128]> = unsafe { transmute(lwe_out) };
let accumulator: GlweCiphertext<&[u128]> = unsafe { transmute(accumulator) };
let debug_material = debug_material.map(|debug_material| {
(
debug_material.0,
unsafe { transmute(debug_material.1) },
unsafe { transmute(debug_material.2) },
)
});
let result = this.bootstrap_u128_return_noise(
&mut lwe_out,
&lwe_in,
&accumulator,
fft,
stack,
debug_material,
);
return unsafe { transmute::<Vec<Vec<u128>>, Vec<Vec<OutputScalar>>>(result) };
}
// This is only OK for our testing tools, to make sure we don't make a mistake
panic!("This is not a u128 PBS, did you make a mistake?")
// let (local_accumulator_data, stack) =
// stack.collect_aligned(CACHELINE_ALIGN, accumulator.as_ref().iter().copied());
// let mut local_accumulator = GlweCiphertextMutView::from_container(
// local_accumulator_data,
// accumulator.polynomial_size(),
// accumulator.ciphertext_modulus(),
// );
// this.blind_rotate_assign(&mut local_accumulator.as_mut_view(), &lwe_in, fft, stack);
// extract_lwe_sample_from_glwe_ciphertext(
// &local_accumulator,
// &mut lwe_out,
// MonomialDegree(0),
// );
}
implementation(
self.as_view(),
lwe_out.as_mut_view(),
lwe_in.as_view(),
accumulator.as_view(),
fft,
stack,
debug_material,
)
}
}
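// Illustrative sketch (editor's addition): one way the Vec<Vec<OutputScalar>>
// returned by `bootstrap_return_noise` could be post-processed for the u128
// case. Each inner vector holds the decrypted-minus-expected accumulator
// coefficients after one cmux; re-centering them as signed values and scaling
// by 2^-128 (assuming the usual MSB-aligned torus representation) yields one
// sample variance per external product.
#[allow(dead_code)]
fn noise_samples_to_variances(noise_per_cmux: &[Vec<u128>]) -> Vec<f64> {
    noise_per_cmux
        .iter()
        .map(|coeffs| {
            let errors: Vec<f64> = coeffs
                .iter()
                .map(|&e| (e as i128) as f64 / 2.0f64.powi(128))
                .collect();
            let mean = errors.iter().sum::<f64>() / errors.len() as f64;
            errors.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / errors.len() as f64
        })
        .collect()
}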
impl<Scalar> FourierBootstrapKey<Scalar> for Fourier128LweBootstrapKeyOwned

View File

@@ -10,12 +10,13 @@ use crate::core_crypto::commons::traits::{
};
use crate::core_crypto::commons::utils::izip;
use crate::core_crypto::entities::ggsw_ciphertext::{
fourier_ggsw_level_matrix_size, GgswCiphertext,
fourier_ggsw_ciphertext_size, fourier_ggsw_level_matrix_size, GgswCiphertext,
};
use crate::core_crypto::entities::glwe_ciphertext::{GlweCiphertext, GlweCiphertextView};
use crate::core_crypto::fft_impl::fft64::math::decomposition::TensorSignedDecompositionLendingIter;
use crate::core_crypto::prelude::ContainerMut;
use aligned_vec::CACHELINE_ALIGN;
use aligned_vec::{avec, ABox, CACHELINE_ALIGN};
use dyn_stack::{PodStack, ReborrowMut, SizeOverflow, StackReq};
use tfhe_fft::fft128::f128;
use tfhe_versionable::Versionize;
@@ -35,6 +36,39 @@ pub struct Fourier128GgswCiphertext<C: Container<Element = f64>> {
decomposition_level_count: DecompositionLevelCount,
}
pub type Fourier128GgswCiphertextOwned = Fourier128GgswCiphertext<ABox<[f64]>>;
impl Fourier128GgswCiphertext<ABox<[f64]>> {
pub fn new(
glwe_size: GlweSize,
polynomial_size: PolynomialSize,
decomposition_base_log: DecompositionBaseLog,
decomposition_level_count: DecompositionLevelCount,
) -> Self {
let container_len = fourier_ggsw_ciphertext_size(
glwe_size,
polynomial_size.to_fourier_polynomial_size(),
decomposition_level_count,
);
let boxed_re0 = avec![0.0f64; container_len].into_boxed_slice();
let boxed_re1 = avec![0.0f64; container_len].into_boxed_slice();
let boxed_im0 = avec![0.0f64; container_len].into_boxed_slice();
let boxed_im1 = avec![0.0f64; container_len].into_boxed_slice();
Fourier128GgswCiphertext::from_container(
boxed_re0,
boxed_re1,
boxed_im0,
boxed_im1,
polynomial_size,
glwe_size,
decomposition_base_log,
decomposition_level_count,
)
}
}
/// A matrix containing a single level of gadget decomposition, in the Fourier domain.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Fourier128GgswLevelMatrix<C: Container<Element = f64>> {
@@ -73,10 +107,11 @@ impl<C: Container<Element = f64>> Fourier128GgswCiphertext<C> {
decomposition_level_count: DecompositionLevelCount,
) -> Self {
assert_eq!(polynomial_size.0 % 2, 0);
let container_len = polynomial_size.to_fourier_polynomial_size().0
* glwe_size.0
* glwe_size.0
* decomposition_level_count.0;
let container_len = fourier_ggsw_ciphertext_size(
glwe_size,
polynomial_size.to_fourier_polynomial_size(),
decomposition_level_count,
);
assert_eq!(data_re0.container_len(), container_len);
Self {

View File

@@ -1 +1,2 @@
pub mod fft;
pub mod polynomial;

View File

@@ -0,0 +1,92 @@
use crate::core_crypto::commons::parameters::*;
use crate::core_crypto::commons::traits::*;
use aligned_vec::{avec, ABox};
//--------------------------------------------------------------------------------
// Structure definitions
//--------------------------------------------------------------------------------
/// Polynomial in the Fourier128 domain.
///
/// # Note
///
/// Polynomials in the Fourier128 domain have half the size of the corresponding polynomials in
/// the standard domain.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Fourier128Polynomial<C: Container> {
pub data_re0: C,
pub data_re1: C,
pub data_im0: C,
pub data_im1: C,
}
pub type Fourier128PolynomialView<'a> = Fourier128Polynomial<&'a [f64]>;
pub type Fourier128PolynomialMutView<'a> = Fourier128Polynomial<&'a mut [f64]>;
pub type Fourier128PolynomialOwned = Fourier128Polynomial<ABox<[f64]>>;
impl Fourier128Polynomial<ABox<[f64]>> {
pub fn new(polynomial_size: PolynomialSize) -> Self {
let boxed_re0 = avec![
f64::default();
polynomial_size.to_fourier_polynomial_size().0
]
.into_boxed_slice();
let boxed_re1 = avec![
f64::default();
polynomial_size.to_fourier_polynomial_size().0
]
.into_boxed_slice();
let boxed_im0 = avec![
f64::default();
polynomial_size.to_fourier_polynomial_size().0
]
.into_boxed_slice();
let boxed_im1 = avec![
f64::default();
polynomial_size.to_fourier_polynomial_size().0
]
.into_boxed_slice();
Fourier128Polynomial {
data_re0: boxed_re0,
data_re1: boxed_re1,
data_im0: boxed_im0,
data_im1: boxed_im1,
}
}
}
impl<C: Container<Element = f64>> Fourier128Polynomial<C> {
pub fn as_view(&self) -> Fourier128PolynomialView<'_> {
Fourier128Polynomial {
data_re0: self.data_re0.as_ref(),
data_re1: self.data_re1.as_ref(),
data_im0: self.data_im0.as_ref(),
data_im1: self.data_im1.as_ref(),
}
}
pub fn as_mut_view(&mut self) -> Fourier128PolynomialMutView<'_>
where
C: AsMut<[f64]>,
{
let Self {
data_re0,
data_re1,
data_im0,
data_im1,
} = self;
Fourier128Polynomial {
data_re0: data_re0.as_mut(),
data_re1: data_re1.as_mut(),
data_im0: data_im0.as_mut(),
data_im1: data_im1.as_mut(),
}
}
pub fn polynomial_size(&self) -> PolynomialSize {
PolynomialSize(self.data_re0.container_len() * 2)
}
}
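// Illustrative usage sketch (editor's addition): the Fourier128 polynomial
// round-trips its size through the N / 2 storage convention described in the
// note above.
#[allow(dead_code)]
fn example_fourier128_polynomial_size() {
    let poly = Fourier128Polynomial::new(PolynomialSize(2048));
    // Each of the four f64 buffers holds N / 2 = 1024 values, so
    // `polynomial_size()` reconstructs the standard-domain size.
    assert_eq!(poly.data_re0.container_len(), 1024);
    assert_eq!(poly.polynomial_size(), PolynomialSize(2048));
}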

View File

@@ -1,9 +1,17 @@
use super::super::math::fft::{wrapping_neg, Fft128View};
use super::ggsw::cmux_split;
use crate::core_crypto::algorithms::extract_lwe_sample_from_glwe_ciphertext;
use crate::core_crypto::algorithms::polynomial_algorithms::{
polynomial_wrapping_monic_monomial_div_assign, polynomial_wrapping_monic_monomial_mul_assign,
};
use crate::core_crypto::algorithms::{
decrypt_glwe_ciphertext, extract_lwe_sample_from_glwe_ciphertext,
};
use crate::core_crypto::commons::math::decomposition::SignedDecomposer;
use crate::core_crypto::commons::math::torus::UnsignedTorus;
use crate::core_crypto::commons::numeric::CastInto;
use crate::core_crypto::commons::parameters::{
CiphertextModulus, DecompositionBaseLog, DecompositionLevelCount, MonomialDegree,
PlaintextCount,
};
use crate::core_crypto::commons::traits::ContiguousEntityContainerMut;
use crate::core_crypto::commons::utils::izip;
@@ -58,7 +66,7 @@ impl<Cont> Fourier128LweBootstrapKey<Cont>
where
Cont: Container<Element = f64>,
{
pub fn blind_rotate_assign_split<ContLutLo, ContLutHi, ContLwe>(
pub fn blind_rotate_assign_split<InputScalar, ContLutLo, ContLutHi, ContLwe>(
&self,
lut_lo: &mut GlweCiphertext<ContLutLo>,
lut_hi: &mut GlweCiphertext<ContLutHi>,
@@ -66,18 +74,23 @@ where
fft: Fft128View<'_>,
stack: PodStack<'_>,
) where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
ContLutLo: ContainerMut<Element = u64>,
ContLutHi: ContainerMut<Element = u64>,
ContLwe: Container<Element = u128>,
ContLwe: Container<Element = InputScalar>,
{
fn implementation(
fn implementation<InputScalar>(
this: Fourier128LweBootstrapKey<&[f64]>,
mut lut_lo: GlweCiphertext<&mut [u64]>,
mut lut_hi: GlweCiphertext<&mut [u64]>,
lwe: LweCiphertext<&[u128]>,
lwe: LweCiphertext<&[InputScalar]>,
fft: Fft128View<'_>,
mut stack: PodStack<'_>,
) {
) where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
{
let lwe = lwe.as_ref();
let (lwe_body, lwe_mask) = lwe.split_last().unwrap();
@@ -102,7 +115,7 @@ where
for (lwe_mask_element, bootstrap_key_ggsw) in
izip!(lwe_mask.iter(), this.into_ggsw_iter())
{
if *lwe_mask_element != 0 {
if *lwe_mask_element != InputScalar::ZERO {
let stack = stack.rb_mut();
// We copy ct_0 to ct_1
let (ct1_lo, stack) =
@@ -154,7 +167,189 @@ where
);
}
pub fn bootstrap_u128<ContLweOut, ContLweIn, ContAcc>(
pub fn blind_rotate_assign_split_return_noise<InputScalar, ContLutLo, ContLutHi, ContLwe>(
&self,
lut_lo: &mut GlweCiphertext<ContLutLo>,
lut_hi: &mut GlweCiphertext<ContLutHi>,
lwe: &LweCiphertext<ContLwe>,
fft: Fft128View<'_>,
stack: PodStack<'_>,
acc_ciphertext_modulus: CiphertextModulus<u128>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<u128>,
&GlweCiphertextOwned<u128>,
)>,
) -> Vec<Vec<u128>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
ContLutLo: ContainerMut<Element = u64>,
ContLutHi: ContainerMut<Element = u64>,
ContLwe: Container<Element = InputScalar>,
{
fn implementation<InputScalar>(
this: Fourier128LweBootstrapKey<&[f64]>,
mut lut_lo: GlweCiphertext<&mut [u64]>,
mut lut_hi: GlweCiphertext<&mut [u64]>,
lwe: LweCiphertext<&[InputScalar]>,
fft: Fft128View<'_>,
mut stack: PodStack<'_>,
acc_ciphertext_modulus: CiphertextModulus<u128>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<u128>,
&GlweCiphertextOwned<u128>,
)>,
) -> Vec<Vec<u128>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
{
let mut noise_vec = vec![];
let lwe = lwe.as_ref();
let (lwe_body, lwe_mask) = lwe.split_last().unwrap();
let lut_poly_size = lut_lo.polynomial_size();
let monomial_degree = MonomialDegree(pbs_modulus_switch(*lwe_body, lut_poly_size));
let mut clear_accumulator = Polynomial::from_container(
debug_material.map_or(vec![], |x| x.2.get_body().as_ref().to_vec()),
);
for (poly_lo, poly_hi) in izip!(
lut_lo.as_mut_polynomial_list().iter_mut(),
lut_hi.as_mut_polynomial_list().iter_mut(),
) {
polynomial_wrapping_monic_monomial_div_assign_split(
poly_lo,
poly_hi,
monomial_degree,
);
}
// Apply the same monomial division to the clear accumulator polynomial
polynomial_wrapping_monic_monomial_div_assign(&mut clear_accumulator, monomial_degree);
// We initialize the ct_0 used for the successive cmuxes
let mut ct0_lo = lut_lo;
let mut ct0_hi = lut_hi;
for (loop_idx, (lwe_mask_element, bootstrap_key_ggsw)) in
izip!(lwe_mask.iter(), this.into_ggsw_iter()).enumerate()
{
if *lwe_mask_element != InputScalar::ZERO {
let stack = stack.rb_mut();
// We copy ct_0 to ct_1
let (ct1_lo, stack) =
stack.collect_aligned(CACHELINE_ALIGN, ct0_lo.as_ref().iter().copied());
let (ct1_hi, stack) =
stack.collect_aligned(CACHELINE_ALIGN, ct0_hi.as_ref().iter().copied());
let mut ct1_lo = GlweCiphertextMutView::from_container(
&mut *ct1_lo,
ct0_lo.polynomial_size(),
ct0_lo.ciphertext_modulus(),
);
let mut ct1_hi = GlweCiphertextMutView::from_container(
&mut *ct1_hi,
ct0_lo.polynomial_size(),
ct0_lo.ciphertext_modulus(),
);
let monomial_degree =
MonomialDegree(pbs_modulus_switch(*lwe_mask_element, lut_poly_size));
// We rotate ct_1 by performing ct_1 <- ct_1 * X^{a_hat}
for (poly_lo, poly_hi) in izip!(
ct1_lo.as_mut_polynomial_list().iter_mut(),
ct1_hi.as_mut_polynomial_list().iter_mut(),
) {
polynomial_wrapping_monic_monomial_mul_assign_split(
poly_lo,
poly_hi,
monomial_degree,
);
}
cmux_split(
&mut ct0_lo,
&mut ct0_hi,
&mut ct1_lo,
&mut ct1_hi,
&bootstrap_key_ggsw,
fft,
stack,
);
if let Some((lwe_secret_key, glwe_secret_key, _)) = &debug_material {
let lwe_key_bit: usize = lwe_secret_key.as_ref()[loop_idx].cast_into();
// Rotate the clear accumulator depending on the key bit value
polynomial_wrapping_monic_monomial_mul_assign(
&mut clear_accumulator,
MonomialDegree(monomial_degree.0 * lwe_key_bit),
);
let mut decrypted =
PlaintextList::new(0u128, PlaintextCount(ct0_hi.polynomial_size().0));
let mut reconstructed_acc = GlweCiphertext::new(
0u128,
ct0_hi.glwe_size(),
ct0_hi.polynomial_size(),
acc_ciphertext_modulus,
);
for (out, (&lo, &hi)) in reconstructed_acc
.as_mut()
.iter_mut()
.zip(ct0_lo.as_ref().iter().zip(ct0_hi.as_ref().iter()))
{
*out = ((hi as u128) << 64) | (lo as u128);
}
decrypt_glwe_ciphertext(
glwe_secret_key,
&reconstructed_acc,
&mut decrypted,
);
// println!("decrypted={:?}", decrypted.as_ref());
// println!("clear_accumulator={:?}", clear_accumulator.as_ref());
let diff_to_clear: Vec<_> = decrypted
.as_ref()
.iter()
.copied()
.zip(clear_accumulator.as_ref().iter().copied())
.map(|(dec, clear)| dec.wrapping_sub(clear))
.collect();
// println!("diff_to_clear={:?}", &diff_to_clear);
// assert!(diff_to_clear.iter().copied().all(|x| x == Scalar::ZERO));
noise_vec.push(diff_to_clear);
}
}
}
noise_vec
}
implementation(
self.as_view(),
lut_lo.as_mut_view(),
lut_hi.as_mut_view(),
lwe.as_view(),
fft,
stack,
acc_ciphertext_modulus,
debug_material,
)
}
pub fn bootstrap_u128<InputScalar, ContLweOut, ContLweIn, ContAcc>(
&self,
lwe_out: &mut LweCiphertext<ContLweOut>,
lwe_in: &LweCiphertext<ContLweIn>,
@@ -162,18 +357,23 @@ where
fft: Fft128View<'_>,
stack: PodStack<'_>,
) where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
ContLweOut: ContainerMut<Element = u128>,
ContLweIn: Container<Element = u128>,
ContLweIn: Container<Element = InputScalar>,
ContAcc: Container<Element = u128>,
{
fn implementation(
fn implementation<InputScalar>(
this: Fourier128LweBootstrapKey<&[f64]>,
mut lwe_out: LweCiphertext<&mut [u128]>,
lwe_in: LweCiphertext<&[u128]>,
lwe_in: LweCiphertext<&[InputScalar]>,
accumulator: GlweCiphertext<&[u128]>,
fft: Fft128View<'_>,
stack: PodStack<'_>,
) {
) where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
{
let align = CACHELINE_ALIGN;
let ciphertext_modulus = accumulator.ciphertext_modulus();
@@ -250,4 +450,123 @@ where
stack,
);
}
pub fn bootstrap_u128_return_noise<InputScalar, ContLweOut, ContLweIn, ContAcc>(
&self,
lwe_out: &mut LweCiphertext<ContLweOut>,
lwe_in: &LweCiphertext<ContLweIn>,
accumulator: &GlweCiphertext<ContAcc>,
fft: Fft128View<'_>,
stack: PodStack<'_>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<u128>,
&GlweCiphertextOwned<u128>,
)>,
) -> Vec<Vec<u128>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
ContLweOut: ContainerMut<Element = u128>,
ContLweIn: Container<Element = InputScalar>,
ContAcc: Container<Element = u128>,
{
fn implementation<InputScalar>(
this: Fourier128LweBootstrapKey<&[f64]>,
mut lwe_out: LweCiphertext<&mut [u128]>,
lwe_in: LweCiphertext<&[InputScalar]>,
accumulator: GlweCiphertext<&[u128]>,
fft: Fft128View<'_>,
stack: PodStack<'_>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<u128>,
&GlweCiphertextOwned<u128>,
)>,
) -> Vec<Vec<u128>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
{
let align = CACHELINE_ALIGN;
let ciphertext_modulus = accumulator.ciphertext_modulus();
let (local_accumulator_lo, stack) =
stack.collect_aligned(align, accumulator.as_ref().iter().map(|i| *i as u64));
let (local_accumulator_hi, mut stack) = stack.collect_aligned(
align,
accumulator.as_ref().iter().map(|i| (*i >> 64) as u64),
);
let mut local_accumulator_lo = GlweCiphertextMutView::from_container(
&mut *local_accumulator_lo,
accumulator.polynomial_size(),
// Here we split a u128 into two u64 containers; the ciphertext modulus no longer
// matches in terms of the underlying Scalar type, so we provide a dummy
// native modulus
CiphertextModulus::new_native(),
);
let mut local_accumulator_hi = GlweCiphertextMutView::from_container(
&mut *local_accumulator_hi,
accumulator.polynomial_size(),
// Here we split a u128 into two u64 containers; the ciphertext modulus no longer
// matches in terms of the underlying Scalar type, so we provide a dummy
// native modulus
CiphertextModulus::new_native(),
);
let noise_vec = this.blind_rotate_assign_split_return_noise(
&mut local_accumulator_lo,
&mut local_accumulator_hi,
&lwe_in,
fft,
stack.rb_mut(),
ciphertext_modulus,
debug_material,
);
let (local_accumulator, _) = stack.collect_aligned(
align,
izip!(local_accumulator_lo.as_ref(), local_accumulator_hi.as_ref())
.map(|(&lo, &hi)| lo as u128 | ((hi as u128) << 64)),
);
let mut local_accumulator = GlweCiphertextMutView::from_container(
&mut *local_accumulator,
accumulator.polynomial_size(),
accumulator.ciphertext_modulus(),
);
assert!(ciphertext_modulus.is_compatible_with_native_modulus());
if !ciphertext_modulus.is_native_modulus() {
// When we convert back from the fourier domain, integer values will contain up to
// about 100 MSBs with information. In our representation of power-of-2
// moduli < native modulus we fill the MSBs and leave the LSBs
// empty; this usage of the signed decomposer rounds while
// keeping the data in the MSBs
let signed_decomposer = SignedDecomposer::new(
DecompositionBaseLog(ciphertext_modulus.get_custom_modulus().ilog2() as usize),
DecompositionLevelCount(1),
);
local_accumulator
.as_mut()
.iter_mut()
.for_each(|x| *x = signed_decomposer.closest_representable(*x));
}
extract_lwe_sample_from_glwe_ciphertext(
&local_accumulator,
&mut lwe_out,
MonomialDegree(0),
);
noise_vec
}
implementation(
self.as_view(),
lwe_out.as_mut_view(),
lwe_in.as_view(),
accumulator.as_view(),
fft,
stack,
debug_material,
)
}
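// A standalone sketch, not part of this impl, of the u128 <-> two-u64 split used by the
// 128-bit path above: the accumulator is processed as separate low and high limbs and
// recombined as (hi << 64) | lo before sample extraction. Helper names are illustrative.
fn split_u128(x: u128) -> (u64, u64) {
    (x as u64, (x >> 64) as u64)
}
fn recombine_u128(lo: u64, hi: u64) -> u128 {
    ((hi as u128) << 64) | (lo as u128)
}
fn main() {
    let x = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210u128;
    let (lo, hi) = split_u128(x);
    assert_eq!(recombine_u128(lo, hi), x);
}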
}

View File

@@ -1,15 +1,17 @@
use super::super::math::fft::{Fft, FftView, FourierPolynomialList};
use super::ggsw::*;
use crate::conformance::ParameterSetConformant;
use crate::core_crypto::algorithms::extract_lwe_sample_from_glwe_ciphertext;
use crate::core_crypto::algorithms::polynomial_algorithms::*;
use crate::core_crypto::algorithms::{
decrypt_glwe_ciphertext, extract_lwe_sample_from_glwe_ciphertext,
};
use crate::core_crypto::backward_compatibility::fft_impl::FourierLweBootstrapKeyVersions;
use crate::core_crypto::commons::math::decomposition::SignedDecomposer;
use crate::core_crypto::commons::math::torus::UnsignedTorus;
use crate::core_crypto::commons::numeric::CastInto;
use crate::core_crypto::commons::parameters::{
DecompositionBaseLog, DecompositionLevelCount, GlweSize, LweDimension, MonomialDegree,
PolynomialSize,
PlaintextCount, PolynomialSize,
};
use crate::core_crypto::commons::traits::{
Container, ContiguousEntityContainer, ContiguousEntityContainerMut, IntoContainerOwned, Split,
@@ -369,6 +371,144 @@ impl<'a> FourierLweBootstrapKeyView<'a> {
}
}
// CastInto required for PBS modulus switch which returns a usize
pub fn blind_rotate_assign_return_noise<InputScalar, OutputScalar>(
self,
mut lut: GlweCiphertextMutView<'_, OutputScalar>,
lwe: LweCiphertextView<'_, InputScalar>,
fft: FftView<'_>,
mut stack: PodStack<'_>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<OutputScalar>,
&GlweCiphertextOwned<OutputScalar>,
)>,
) -> Vec<Vec<OutputScalar>>
where
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
{
let mut noise_vec = vec![];
let (lwe_mask, lwe_body) = lwe.get_mask_and_body();
let lut_poly_size = lut.polynomial_size();
let ciphertext_modulus = lut.ciphertext_modulus();
assert!(ciphertext_modulus.is_compatible_with_native_modulus());
let monomial_degree = MonomialDegree(pbs_modulus_switch(*lwe_body.data, lut_poly_size));
let mut clear_accumulator = Polynomial::from_container(
debug_material.map_or(vec![], |x| x.2.get_body().as_ref().to_vec()),
);
lut.as_mut_polynomial_list()
.iter_mut()
.for_each(|mut poly| {
let (tmp_poly, _) = stack
.rb_mut()
.make_aligned_raw(poly.as_ref().len(), CACHELINE_ALIGN);
let mut tmp_poly = Polynomial::from_container(&mut *tmp_poly);
tmp_poly.as_mut().copy_from_slice(poly.as_ref());
polynomial_wrapping_monic_monomial_div(&mut poly, &tmp_poly, monomial_degree);
});
// Apply the same computation on the clear polynomial
polynomial_wrapping_monic_monomial_div_assign(&mut clear_accumulator, monomial_degree);
// We initialize the ct_0 used for the successive cmuxes
let mut ct0 = lut;
let (ct1, mut stack) = stack.make_aligned_raw(ct0.as_ref().len(), CACHELINE_ALIGN);
let mut ct1 =
GlweCiphertextMutView::from_container(&mut *ct1, lut_poly_size, ciphertext_modulus);
for (loop_idx, (lwe_mask_element, bootstrap_key_ggsw)) in
izip!(lwe_mask.as_ref().iter(), self.into_ggsw_iter()).enumerate()
{
if *lwe_mask_element != InputScalar::ZERO {
let monomial_degree =
MonomialDegree(pbs_modulus_switch(*lwe_mask_element, lut_poly_size));
// we effectively inline the body of cmux here, merging the initial subtraction
// operation with the monic polynomial multiplication, then performing the external
// product manually
// We rotate ct_1 and subtract ct_0 (first step of cmux) by performing
// ct_1 <- (ct_0 * X^{a_hat}) - ct_0
for (mut ct1_poly, ct0_poly) in izip!(
ct1.as_mut_polynomial_list().iter_mut(),
ct0.as_polynomial_list().iter(),
) {
polynomial_wrapping_monic_monomial_mul_and_subtract(
&mut ct1_poly,
&ct0_poly,
monomial_degree,
);
}
// as_mut_view is required to keep borrow rules consistent
// second step of cmux
add_external_product_assign(
ct0.as_mut_view(),
bootstrap_key_ggsw,
ct1.as_view(),
fft,
stack.rb_mut(),
);
if let Some((lwe_secret_key, glwe_secret_key, _)) = &debug_material {
let lwe_key_bit: usize = lwe_secret_key.as_ref()[loop_idx].cast_into();
// Rotate the clear accumulator depending on the key bit value
polynomial_wrapping_monic_monomial_mul_assign(
&mut clear_accumulator,
MonomialDegree(monomial_degree.0 * lwe_key_bit),
);
let mut decrypted = PlaintextList::new(
OutputScalar::ZERO,
PlaintextCount(ct0.polynomial_size().0),
);
decrypt_glwe_ciphertext(glwe_secret_key, &ct0, &mut decrypted);
// println!("decrypted={:?}", decrypted.as_ref());
// println!("clear_accumulator={:?}", clear_accumulator.as_ref());
let diff_to_clear: Vec<_> = decrypted
.as_ref()
.iter()
.copied()
.zip(clear_accumulator.as_ref().iter().copied())
.map(|(dec, clear)| dec.wrapping_sub(clear))
.collect();
// println!("diff_to_clear={:?}", &diff_to_clear);
// assert!(diff_to_clear.iter().copied().all(|x| x == Scalar::ZERO));
noise_vec.push(diff_to_clear);
}
}
}
if !ciphertext_modulus.is_native_modulus() {
// When we convert back from the fourier domain, integer values will contain up to 53
// MSBs with information. In our representation of power-of-2 moduli < native modulus we
// fill the MSBs and leave the LSBs empty; this usage of the signed decomposer rounds
// while keeping the data in the MSBs
let signed_decomposer = SignedDecomposer::new(
DecompositionBaseLog(ciphertext_modulus.get_custom_modulus().ilog2() as usize),
DecompositionLevelCount(1),
);
ct0.as_mut()
.iter_mut()
.for_each(|x| *x = signed_decomposer.closest_representable(*x));
}
noise_vec
}
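// A standalone sketch, not part of this impl, of the selection performed by the inlined
// cmux above: ct0 <- ct0 + ExtProd(GGSW(s_i), ct0 * X^{a_hat} - ct0). On clear values
// with a key bit b in {0, 1}, the same identity reduces to c0 + b * (c1 - c0), i.e. it
// keeps c0 when b = 0 and switches to the rotated value c1 when b = 1.
fn cmux_clear(c0: i64, c1: i64, key_bit: i64) -> i64 {
    // In the encrypted version, `key_bit *` is replaced by an external product with a
    // GGSW encryption of the key bit.
    c0 + key_bit * (c1 - c0)
}
fn main() {
    assert_eq!(cmux_clear(7, 42, 0), 7);
    assert_eq!(cmux_clear(7, 42, 1), 42);
}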
// CastInto required for PBS modulus switch which returns a usize
pub fn batch_blind_rotate_assign<InputScalar, OutputScalar>(
self,
@@ -512,6 +652,55 @@ impl<'a> FourierLweBootstrapKeyView<'a> {
);
}
pub fn bootstrap_return_noise<InputScalar, OutputScalar>(
self,
mut lwe_out: LweCiphertextMutView<'_, OutputScalar>,
lwe_in: LweCiphertextView<'_, InputScalar>,
accumulator: GlweCiphertextView<'_, OutputScalar>,
fft: FftView<'_>,
stack: PodStack<'_>,
debug_material: Option<(
&LweSecretKeyOwned<InputScalar>,
&GlweSecretKeyOwned<OutputScalar>,
&GlweCiphertextOwned<OutputScalar>,
)>,
) -> Vec<Vec<OutputScalar>>
where
// CastInto required for PBS modulus switch which returns a usize
InputScalar: UnsignedTorus + CastInto<usize>,
OutputScalar: UnsignedTorus,
{
assert!(lwe_in.ciphertext_modulus().is_power_of_two());
assert!(lwe_out.ciphertext_modulus().is_power_of_two());
assert_eq!(
lwe_out.ciphertext_modulus(),
accumulator.ciphertext_modulus()
);
let (local_accumulator_data, stack) =
stack.collect_aligned(CACHELINE_ALIGN, accumulator.as_ref().iter().copied());
let mut local_accumulator = GlweCiphertextMutView::from_container(
&mut *local_accumulator_data,
accumulator.polynomial_size(),
accumulator.ciphertext_modulus(),
);
let noise = self.blind_rotate_assign_return_noise(
local_accumulator.as_mut_view(),
lwe_in.as_view(),
fft,
stack,
debug_material,
);
extract_lwe_sample_from_glwe_ciphertext(
&local_accumulator,
&mut lwe_out,
MonomialDegree(0),
);
noise
}
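// A standalone sketch, not part of this impl, of the rounding done with the
// SignedDecomposer(base_log = log2(q), level = 1) above for a power-of-two modulus
// q < 2^64 held in the MSBs: the low 64 - log2(q) bits are rounded away. This mirrors
// the intent of `closest_representable`, not its exact code path.
fn round_to_top_bits(x: u64, modulus_log2: u32) -> u64 {
    debug_assert!(modulus_log2 > 0 && modulus_log2 < 64);
    let shift = 64 - modulus_log2;
    // Add half of the discarded range, then clear the low bits (wrapping mod 2^64).
    x.wrapping_add(1u64 << (shift - 1)) & !((1u64 << shift) - 1)
}
fn main() {
    // With a 2^40 modulus in the MSBs, the low 24 bits are rounded to the nearest step.
    assert_eq!(round_to_top_bits(0x0000_0000_00ff_ffff, 40), 1u64 << 24);
}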
pub fn batch_bootstrap<InputScalar, OutputScalar>(
self,
mut lwe_out: LweCiphertextListMutView<'_, OutputScalar>,

View File

@@ -1,6 +1,9 @@
use super::super::math::decomposition::TensorSignedDecompositionLendingIter;
use super::super::math::fft::{FftView, FourierPolynomialList};
use super::super::math::polynomial::FourierPolynomialMutView;
use crate::core_crypto::algorithms::polynomial_algorithms::{
polynomial_wrapping_add_assign, polynomial_wrapping_add_mul_assign, polynomial_wrapping_mul,
};
use crate::core_crypto::backward_compatibility::fft_impl::FourierGgswCiphertextVersions;
use crate::core_crypto::commons::math::decomposition::{DecompositionLevel, SignedDecomposer};
use crate::core_crypto::commons::math::torus::UnsignedTorus;
@@ -15,6 +18,7 @@ use crate::core_crypto::entities::ggsw_ciphertext::{
fourier_ggsw_level_matrix_size, GgswCiphertextView,
};
use crate::core_crypto::entities::glwe_ciphertext::{GlweCiphertextMutView, GlweCiphertextView};
use crate::core_crypto::entities::polynomial::Polynomial;
use aligned_vec::{avec, ABox, CACHELINE_ALIGN};
use dyn_stack::{PodStack, ReborrowMut, SizeOverflow, StackReq};
use tfhe_fft::c64;
@@ -601,6 +605,145 @@ pub fn add_external_product_assign<Scalar>(
}
}
/// Perform the external product of `ggsw` and `glwe` without going through the FFT, and add the result to `out`.
#[cfg_attr(feature = "__profiling", inline(never))]
pub fn karatsuba_add_external_product_assign<Scalar>(
mut out: GlweCiphertextMutView<'_, Scalar>,
ggsw: GgswCiphertextView<Scalar>,
glwe: GlweCiphertextView<Scalar>,
stack: PodStack<'_>,
) where
Scalar: UnsignedTorus,
{
// we check that the polynomial sizes match
debug_assert_eq!(ggsw.polynomial_size(), glwe.polynomial_size());
debug_assert_eq!(ggsw.polynomial_size(), out.polynomial_size());
// we check that the glwe sizes match
debug_assert_eq!(ggsw.glwe_size(), glwe.glwe_size());
debug_assert_eq!(ggsw.glwe_size(), out.glwe_size());
let align = CACHELINE_ALIGN;
let poly_size = ggsw.polynomial_size().0;
// we round the input mask and body
let decomposer = SignedDecomposer::<Scalar>::new(
ggsw.decomposition_base_log(),
ggsw.decomposition_level_count(),
);
let (output_buffer, mut substack0) =
stack.make_aligned_raw::<Scalar>(poly_size * ggsw.glwe_size().0, align);
// output_buffer is initially uninitialized, considered to be implicitly zero, to avoid
// the cost of filling it up with zeros. `is_output_uninit` is set to `false` once
// it has been fully initialized for the first time.
let output_buffer = &mut *output_buffer;
let mut is_output_uninit = true;
{
// ------------------------------------------------- EXTERNAL PRODUCT IN THE STANDARD DOMAIN
// In this section, we perform the external product directly in the standard domain with
// wrapping polynomial multiplications, and accumulate the result in the output_buffer variable.
let (mut decomposition, mut substack1) = TensorSignedDecompositionLendingIter::new(
glwe.as_ref()
.iter()
.map(|s| decomposer.init_decomposer_state(*s)),
DecompositionBaseLog(decomposer.base_log),
DecompositionLevelCount(decomposer.level_count),
substack0.rb_mut(),
);
// We loop through the levels (we reverse to match the order of the decomposition iterator.)
ggsw.iter().for_each(|ggsw_decomp_matrix| {
// We retrieve the decomposition of this level.
let (_glwe_level, glwe_decomp_term, _substack2) =
collect_next_term(&mut decomposition, &mut substack1, align);
let glwe_decomp_term = GlweCiphertextView::from_container(
&*glwe_decomp_term,
ggsw.polynomial_size(),
out.ciphertext_modulus(),
);
// For each level we have to add the result of the vector-matrix product between the
// decomposition of the glwe, and the ggsw level matrix to the output. To do so, we
// iteratively add to the output, the product between every line of the matrix, and
// the corresponding (scalar) polynomial in the glwe decomposition:
//
// ggsw_mat ggsw_mat
// glwe_dec | - - - - | < glwe_dec | - - - - |
// | - - - | x | - - - - | | - - - | x | - - - - | <
// ^ | - - - - | ^ | - - - - |
//
// t = 1 t = 2 ...
izip!(
ggsw_decomp_matrix.as_glwe_list().iter(),
glwe_decomp_term.as_polynomial_list().iter()
)
.for_each(|(ggsw_row, glwe_poly)| {
// let (fourier, substack3) =
// substack2.rb_mut().make_aligned_raw::<c64>(poly_size, align);
// // We perform the forward fft transform for the glwe polynomial
// let fourier = fft
// .forward_as_integer(
// FourierPolynomialMutView { data: fourier },
// glwe_poly,
// substack3,
// )
// .data;
// // Now we loop through the polynomials of the output, and add the
// // corresponding product of polynomials.
// update_with_fmadd(
// output_buffer,
// ggsw_row.data(),
// fourier,
// is_output_uninit,
// poly_size,
// );
// // we initialized `output_fft_buffer, so we can set this to false
// is_output_uninit = false;
let row_as_poly_list = ggsw_row.as_polynomial_list();
if is_output_uninit {
for (mut output_poly, row_poly) in output_buffer
.chunks_exact_mut(poly_size)
.map(Polynomial::from_container)
.zip(row_as_poly_list.iter())
{
polynomial_wrapping_mul(&mut output_poly, &row_poly, &glwe_poly);
}
} else {
for (mut output_poly, row_poly) in output_buffer
.chunks_exact_mut(poly_size)
.map(Polynomial::from_container)
.zip(row_as_poly_list.iter())
{
polynomial_wrapping_add_mul_assign(&mut output_poly, &row_poly, &glwe_poly);
}
}
is_output_uninit = false;
});
});
}
// ------------------------------------------------ ACCUMULATION OF THE RESULT INTO THE OUTPUT
// In this section, the accumulated product is already in the standard domain, so we
// simply add it to the output.
//
// We iterate over the polynomials in the output.
if !is_output_uninit {
izip!(
out.as_mut_polynomial_list().iter_mut(),
output_buffer
.into_chunks(poly_size)
.map(Polynomial::from_container),
)
.for_each(|(mut out, res)| polynomial_wrapping_add_assign(&mut out, &res));
}
}
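// A standalone sketch, not part of this impl, of the decompose / multiply / accumulate
// structure driven by the level loop above, on clear integers instead of polynomials: a
// value is cut into `level_count` digits of `base_log` bits taken from the MSBs, and
// recomposing the digits recovers the top base_log * level_count bits. Digits here are
// unsigned for brevity, so this only approximates the signed decomposition actually used.
fn decompose_msb_first(x: u64, base_log: u32, level_count: u32) -> Vec<u64> {
    // Most significant digit first (level 1 is closest to the MSBs).
    (1..=level_count)
        .map(|level| (x >> (64 - level * base_log)) & ((1u64 << base_log) - 1))
        .collect()
}
fn recompose(digits: &[u64], base_log: u32) -> u64 {
    digits.iter().enumerate().fold(0u64, |acc, (j, &d)| {
        acc.wrapping_add(d << (64 - (j as u32 + 1) * base_log))
    })
}
fn main() {
    let x = 0xABCD_EF01_2345_6789u64;
    let digits = decompose_msb_first(x, 8, 3);
    // Recomposition recovers the 24 most significant bits of x.
    assert_eq!(recompose(&digits, 8), x & 0xFFFF_FF00_0000_0000);
}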
#[cfg_attr(feature = "__profiling", inline(never))]
pub(crate) fn collect_next_term<'a, Scalar: UnsignedTorus>(
decomposition: &mut TensorSignedDecompositionLendingIter<'_, Scalar>,

View File

@@ -6,6 +6,7 @@ mod lwe_linear_algebra;
mod lwe_multi_bit_programmable_bootstrapping;
mod lwe_packing_keyswitch;
mod lwe_programmable_bootstrapping;
mod noise_distribution;
pub struct CudaPackingKeySwitchKeys<Scalar: UnsignedInteger> {
pub lwe_sk: LweSecretKey<Vec<Scalar>>,

View File

@@ -0,0 +1,251 @@
use super::*;
use crate::core_crypto::commons::noise_formulas::lwe_multi_bit_programmable_bootstrap::multi_bit_pbs_variance_132_bits_security_gaussian_gf_3;
use crate::core_crypto::commons::noise_formulas::secure_noise::minimal_lwe_variance_for_132_bits_security_gaussian;
use crate::core_crypto::commons::test_tools::{torus_modular_diff, variance};
use crate::core_crypto::gpu::glwe_ciphertext_list::CudaGlweCiphertextList;
use crate::core_crypto::gpu::lwe_bootstrap_key::CudaLweBootstrapKey;
use crate::core_crypto::gpu::lwe_ciphertext_list::CudaLweCiphertextList;
use crate::core_crypto::gpu::lwe_multi_bit_bootstrap_key::CudaLweMultiBitBootstrapKey;
use crate::core_crypto::gpu::vec::CudaVec;
use crate::core_crypto::gpu::{cuda_multi_bit_programmable_bootstrap_lwe_ciphertext, CudaStreams};
use itertools::Itertools;
use rayon::prelude::*;
// This is 1 / 16 which is exactly representable in an f64 (even an f32)
// 1 / 32 is too strict and fails the tests
const RELATIVE_TOLERANCE: f64 = 0.0625;
const NB_TESTS: usize = 1000;
fn lwe_encrypt_multi_bit_pbs_decrypt_custom_mod<Scalar>(params: MultiBitTestParams<Scalar>)
where
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize>,
{
let input_lwe_dimension = params.input_lwe_dimension;
let lwe_noise_distribution = params.lwe_noise_distribution;
let glwe_noise_distribution = params.glwe_noise_distribution;
let ciphertext_modulus = params.ciphertext_modulus;
let message_modulus_log = params.message_modulus_log;
let msg_modulus = Scalar::ONE.shl(message_modulus_log.0);
let encoding_with_padding = get_encoding_with_padding(ciphertext_modulus);
let glwe_dimension = params.glwe_dimension;
let polynomial_size = params.polynomial_size;
let pbs_decomposition_base_log = params.decomp_base_log;
let pbs_decomposition_level_count = params.decomp_level_count;
let grouping_factor = params.grouping_factor;
let number_of_messages = 1;
let gpu_index = 0;
let stream = CudaStreams::new_single_gpu(gpu_index);
let modulus_as_f64 = if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
} else {
ciphertext_modulus.get_custom_modulus() as f64
};
let expected_variance = multi_bit_pbs_variance_132_bits_security_gaussian_gf_3(
input_lwe_dimension,
glwe_dimension,
polynomial_size,
pbs_decomposition_base_log,
pbs_decomposition_level_count,
modulus_as_f64,
);
let mut rsc = TestResources::new();
let f = |x: Scalar| x;
let delta: Scalar = encoding_with_padding / msg_modulus;
let mut msg = msg_modulus;
let num_samples = NB_TESTS * <Scalar as CastInto<usize>>::cast_into(msg);
let mut noise_samples = Vec::with_capacity(num_samples);
let input_lwe_secret_key = allocate_and_generate_new_binary_lwe_secret_key(
input_lwe_dimension,
&mut rsc.secret_random_generator,
);
let output_glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
glwe_dimension,
polynomial_size,
&mut rsc.secret_random_generator,
);
let output_lwe_secret_key = output_glwe_secret_key.as_lwe_secret_key();
let output_lwe_dimension = output_lwe_secret_key.lwe_dimension();
let accumulator = generate_programmable_bootstrap_glwe_lut(
polynomial_size,
glwe_dimension.to_glwe_size(),
msg_modulus.cast_into(),
ciphertext_modulus,
delta,
f,
);
assert!(check_encrypted_content_respects_mod(
&accumulator,
ciphertext_modulus
));
let mut bsk = LweMultiBitBootstrapKey::new(
Scalar::ZERO,
glwe_dimension.to_glwe_size(),
polynomial_size,
pbs_decomposition_base_log,
pbs_decomposition_level_count,
input_lwe_dimension,
grouping_factor,
ciphertext_modulus,
);
par_generate_lwe_multi_bit_bootstrap_key(
&input_lwe_secret_key,
&output_glwe_secret_key,
&mut bsk,
glwe_noise_distribution,
&mut rsc.encryption_random_generator,
);
assert!(check_encrypted_content_respects_mod(
&*bsk,
ciphertext_modulus
));
let d_bsk = CudaLweMultiBitBootstrapKey::from_lwe_multi_bit_bootstrap_key(&bsk, &stream);
while msg != Scalar::ZERO {
msg = msg.wrapping_sub(Scalar::ONE);
let current_run_samples: Vec<_> = (0..NB_TESTS)
.into_par_iter()
.map(|_| {
let mut rsc = TestResources::new();
let plaintext = Plaintext(msg * delta);
let lwe_ciphertext_in = allocate_and_encrypt_new_lwe_ciphertext(
&input_lwe_secret_key,
plaintext,
lwe_noise_distribution,
ciphertext_modulus,
&mut rsc.encryption_random_generator,
);
assert!(check_encrypted_content_respects_mod(
&lwe_ciphertext_in,
ciphertext_modulus
));
let d_lwe_ciphertext_in =
CudaLweCiphertextList::from_lwe_ciphertext(&lwe_ciphertext_in, &stream);
let mut d_out_pbs_ct = CudaLweCiphertextList::new(
output_lwe_dimension,
LweCiphertextCount(1),
ciphertext_modulus,
&stream,
);
let d_accumulator =
CudaGlweCiphertextList::from_glwe_ciphertext(&accumulator, &stream);
let mut test_vector_indexes: Vec<Scalar> = vec![Scalar::ZERO; number_of_messages];
for (i, ind) in test_vector_indexes.iter_mut().enumerate() {
*ind = <usize as CastInto<Scalar>>::cast_into(i);
}
let mut d_test_vector_indexes =
unsafe { CudaVec::<Scalar>::new_async(number_of_messages, &stream, 0) };
unsafe {
d_test_vector_indexes.copy_from_cpu_async(&test_vector_indexes, &stream, 0)
};
let num_blocks = d_lwe_ciphertext_in.0.lwe_ciphertext_count.0;
let lwe_indexes_usize: Vec<usize> = (0..num_blocks).collect_vec();
let lwe_indexes = lwe_indexes_usize
.iter()
.map(|&x| <usize as CastInto<Scalar>>::cast_into(x))
.collect_vec();
let mut d_output_indexes =
unsafe { CudaVec::<Scalar>::new_async(num_blocks, &stream, 0) };
let mut d_input_indexes =
unsafe { CudaVec::<Scalar>::new_async(num_blocks, &stream, 0) };
unsafe {
d_input_indexes.copy_from_cpu_async(&lwe_indexes, &stream, 0);
d_output_indexes.copy_from_cpu_async(&lwe_indexes, &stream, 0);
}
cuda_multi_bit_programmable_bootstrap_lwe_ciphertext(
&d_lwe_ciphertext_in,
&mut d_out_pbs_ct,
&d_accumulator,
&d_test_vector_indexes,
&d_output_indexes,
&d_input_indexes,
&d_bsk,
&stream,
);
let out_pbs_ct = d_out_pbs_ct.into_lwe_ciphertext(&stream);
assert!(check_encrypted_content_respects_mod(
&out_pbs_ct,
ciphertext_modulus
));
let decrypted = decrypt_lwe_ciphertext(&output_lwe_secret_key, &out_pbs_ct);
let decoded = round_decode(decrypted.0, delta) % msg_modulus;
assert_eq!(decoded, f(msg));
torus_modular_diff(plaintext.0, decrypted.0, ciphertext_modulus)
})
.collect();
noise_samples.extend(current_run_samples);
}
let measured_variance = variance(&noise_samples);
let minimal_variance = minimal_lwe_variance_for_132_bits_security_gaussian(
bsk.output_lwe_dimension(),
if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
} else {
ciphertext_modulus.get_custom_modulus() as f64
},
);
// Log even in a test so there is a trace in no-capture mode to eyeball the variances
println!("measured_variance={measured_variance:?}");
println!("expected_variance={expected_variance:?}");
println!("minimal_variance={minimal_variance:?}");
if measured_variance.0 < expected_variance.0 {
// We are in the clear as long as we have at least the noise for security
assert!(
measured_variance.0 >= minimal_variance.0,
"Found insecure variance after PBS\n\
measured_variance={measured_variance:?}\n\
minimal_variance={minimal_variance:?}"
);
} else {
// Check we are not too far from the expected variance if we are bigger
let var_abs_diff = (expected_variance.0 - measured_variance.0).abs();
let tolerance_threshold = RELATIVE_TOLERANCE * expected_variance.0;
assert!(
var_abs_diff < tolerance_threshold,
"Absolute difference for variance: {var_abs_diff}, \
tolerance threshold: {tolerance_threshold}, \
got variance: {measured_variance:?}, \
expected variance: {expected_variance:?}"
);
}
}
create_parametrized_test!(lwe_encrypt_multi_bit_pbs_decrypt_custom_mod {
NOISE_TEST_PARAMS_GPU_MULTI_BIT_GROUP_3_4_BITS_NATIVE_U64_132_BITS_GAUSSIAN
});
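// A standalone sketch, not part of this test, of how the collected samples are read:
// `torus_modular_diff` is assumed here to return the signed distance between decrypted
// and expected plaintexts normalised to the torus, and the empirical variance of those
// samples is what gets compared against the noise-formula prediction. The helpers below
// assume a native 2^64 modulus and are illustrative only.
fn signed_torus_diff(measured: u64, expected: u64) -> f64 {
    // Interpret the wrapping difference as a signed 64-bit value, then scale to the
    // torus [-0.5, 0.5) by dividing by 2^64.
    (measured.wrapping_sub(expected) as i64) as f64 / 2.0f64.powi(64)
}
fn sample_variance(samples: &[f64]) -> f64 {
    let n = samples.len() as f64;
    let mean = samples.iter().sum::<f64>() / n;
    samples.iter().map(|x| (x - mean).powi(2)).sum::<f64>() / (n - 1.0)
}
fn main() {
    let samples: Vec<f64> = [(5u64, 0u64), (u64::MAX - 2, 0), (3, 0)]
        .iter()
        .map(|&(m, e)| signed_torus_diff(m, e))
        .collect();
    println!("variance = {:e}", sample_variance(&samples));
}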

View File

@@ -0,0 +1,248 @@
use super::*;
use crate::core_crypto::commons::noise_formulas::lwe_programmable_bootstrap::pbs_variance_132_bits_security_gaussian;
use crate::core_crypto::commons::noise_formulas::secure_noise::minimal_lwe_variance_for_132_bits_security_gaussian;
use crate::core_crypto::commons::test_tools::{torus_modular_diff, variance};
use crate::core_crypto::gpu::glwe_ciphertext_list::CudaGlweCiphertextList;
use crate::core_crypto::gpu::lwe_bootstrap_key::CudaLweBootstrapKey;
use crate::core_crypto::gpu::lwe_ciphertext_list::CudaLweCiphertextList;
use crate::core_crypto::gpu::vec::CudaVec;
use crate::core_crypto::gpu::{cuda_programmable_bootstrap_lwe_ciphertext, CudaStreams};
use itertools::Itertools;
use rayon::prelude::*;
// This is 1 / 16 which is exactly representable in an f64 (even an f32)
// 1 / 32 is too strict and fails the tests
const RELATIVE_TOLERANCE: f64 = 0.0625;
const NB_TESTS: usize = 1000;
fn lwe_encrypt_pbs_decrypt_custom_mod<Scalar>(params: ClassicTestParams<Scalar>)
where
Scalar: UnsignedTorus + Sync + Send + CastFrom<usize> + CastInto<usize>,
{
let input_lwe_dimension = params.lwe_dimension;
let lwe_noise_distribution = params.lwe_noise_distribution;
let glwe_noise_distribution = params.glwe_noise_distribution;
let ciphertext_modulus = params.ciphertext_modulus;
let message_modulus_log = params.message_modulus_log;
let msg_modulus = Scalar::ONE.shl(message_modulus_log.0);
let encoding_with_padding = get_encoding_with_padding(ciphertext_modulus);
let glwe_dimension = params.glwe_dimension;
let polynomial_size = params.polynomial_size;
let pbs_decomposition_base_log = params.pbs_base_log;
let pbs_decomposition_level_count = params.pbs_level;
let number_of_messages = 1;
let gpu_index = 0;
let stream = CudaStreams::new_single_gpu(gpu_index);
let modulus_as_f64 = if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
} else {
ciphertext_modulus.get_custom_modulus() as f64
};
let expected_variance = pbs_variance_132_bits_security_gaussian(
input_lwe_dimension,
glwe_dimension,
polynomial_size,
pbs_decomposition_base_log,
pbs_decomposition_level_count,
modulus_as_f64,
);
let mut rsc = TestResources::new();
let f = |x: Scalar| x;
let delta: Scalar = encoding_with_padding / msg_modulus;
let mut msg = msg_modulus;
let num_samples = NB_TESTS * <Scalar as CastInto<usize>>::cast_into(msg);
let mut noise_samples = Vec::with_capacity(num_samples);
let input_lwe_secret_key = allocate_and_generate_new_binary_lwe_secret_key(
input_lwe_dimension,
&mut rsc.secret_random_generator,
);
let output_glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
glwe_dimension,
polynomial_size,
&mut rsc.secret_random_generator,
);
let output_lwe_secret_key = output_glwe_secret_key.as_lwe_secret_key();
let output_lwe_dimension = output_lwe_secret_key.lwe_dimension();
let accumulator = generate_programmable_bootstrap_glwe_lut(
polynomial_size,
glwe_dimension.to_glwe_size(),
msg_modulus.cast_into(),
ciphertext_modulus,
delta,
f,
);
assert!(check_encrypted_content_respects_mod(
&accumulator,
ciphertext_modulus
));
let mut bsk = LweBootstrapKey::new(
Scalar::ZERO,
glwe_dimension.to_glwe_size(),
polynomial_size,
pbs_decomposition_base_log,
pbs_decomposition_level_count,
input_lwe_dimension,
ciphertext_modulus,
);
par_generate_lwe_bootstrap_key(
&input_lwe_secret_key,
&output_glwe_secret_key,
&mut bsk,
glwe_noise_distribution,
&mut rsc.encryption_random_generator,
);
assert!(check_encrypted_content_respects_mod(
&*bsk,
ciphertext_modulus
));
let d_bsk = CudaLweBootstrapKey::from_lwe_bootstrap_key(&bsk, &stream);
while msg != Scalar::ZERO {
msg = msg.wrapping_sub(Scalar::ONE);
let current_run_samples: Vec<_> = (0..NB_TESTS)
.into_par_iter()
.map(|_| {
let mut rsc = TestResources::new();
let plaintext = Plaintext(msg * delta);
let lwe_ciphertext_in = allocate_and_encrypt_new_lwe_ciphertext(
&input_lwe_secret_key,
plaintext,
lwe_noise_distribution,
ciphertext_modulus,
&mut rsc.encryption_random_generator,
);
assert!(check_encrypted_content_respects_mod(
&lwe_ciphertext_in,
ciphertext_modulus
));
let d_lwe_ciphertext_in =
CudaLweCiphertextList::from_lwe_ciphertext(&lwe_ciphertext_in, &stream);
let mut d_out_pbs_ct = CudaLweCiphertextList::new(
output_lwe_dimension,
LweCiphertextCount(1),
ciphertext_modulus,
&stream,
);
let d_accumulator =
CudaGlweCiphertextList::from_glwe_ciphertext(&accumulator, &stream);
let mut test_vector_indexes: Vec<Scalar> = vec![Scalar::ZERO; number_of_messages];
for (i, ind) in test_vector_indexes.iter_mut().enumerate() {
*ind = <usize as CastInto<Scalar>>::cast_into(i);
}
let mut d_test_vector_indexes =
unsafe { CudaVec::<Scalar>::new_async(number_of_messages, &stream, 0) };
unsafe {
d_test_vector_indexes.copy_from_cpu_async(&test_vector_indexes, &stream, 0)
};
let num_blocks = d_lwe_ciphertext_in.0.lwe_ciphertext_count.0;
let lwe_indexes_usize: Vec<usize> = (0..num_blocks).collect_vec();
let lwe_indexes = lwe_indexes_usize
.iter()
.map(|&x| <usize as CastInto<Scalar>>::cast_into(x))
.collect_vec();
let mut d_output_indexes =
unsafe { CudaVec::<Scalar>::new_async(num_blocks, &stream, 0) };
let mut d_input_indexes =
unsafe { CudaVec::<Scalar>::new_async(num_blocks, &stream, 0) };
unsafe {
d_input_indexes.copy_from_cpu_async(&lwe_indexes, &stream, 0);
d_output_indexes.copy_from_cpu_async(&lwe_indexes, &stream, 0);
}
cuda_programmable_bootstrap_lwe_ciphertext(
&d_lwe_ciphertext_in,
&mut d_out_pbs_ct,
&d_accumulator,
&d_test_vector_indexes,
&d_output_indexes,
&d_input_indexes,
LweCiphertextCount(num_blocks),
&d_bsk,
&stream,
);
let out_pbs_ct = d_out_pbs_ct.into_lwe_ciphertext(&stream);
assert!(check_encrypted_content_respects_mod(
&out_pbs_ct,
ciphertext_modulus
));
let decrypted = decrypt_lwe_ciphertext(&output_lwe_secret_key, &out_pbs_ct);
let decoded = round_decode(decrypted.0, delta) % msg_modulus;
assert_eq!(decoded, f(msg));
torus_modular_diff(plaintext.0, decrypted.0, ciphertext_modulus)
})
.collect();
noise_samples.extend(current_run_samples);
}
let measured_variance = variance(&noise_samples);
let minimal_variance = minimal_lwe_variance_for_132_bits_security_gaussian(
bsk.output_lwe_dimension(),
if ciphertext_modulus.is_native_modulus() {
2.0f64.powi(Scalar::BITS as i32)
} else {
ciphertext_modulus.get_custom_modulus() as f64
},
);
// Log even in a test so there is a trace in no-capture mode to eyeball the variances
println!("measured_variance={measured_variance:?}");
println!("expected_variance={expected_variance:?}");
println!("minimal_variance={minimal_variance:?}");
if measured_variance.0 < expected_variance.0 {
// We are in the clear as long as we have at least the noise for security
assert!(
measured_variance.0 >= minimal_variance.0,
"Found insecure variance after PBS\n\
measured_variance={measured_variance:?}\n\
minimal_variance={minimal_variance:?}"
);
} else {
// Check we are not too far from the expected variance if we are bigger
let var_abs_diff = (expected_variance.0 - measured_variance.0).abs();
let tolerance_threshold = RELATIVE_TOLERANCE * expected_variance.0;
assert!(
var_abs_diff < tolerance_threshold,
"Absolute difference for variance: {var_abs_diff}, \
tolerance threshold: {tolerance_threshold}, \
got variance: {measured_variance:?}, \
expected variance: {expected_variance:?}"
);
}
}
create_parametrized_test!(lwe_encrypt_pbs_decrypt_custom_mod {
NOISE_TEST_PARAMS_4_BITS_NATIVE_U64_132_BITS_GAUSSIAN
});
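// A standalone sketch, not part of this test, of the acceptance logic shared by both GPU
// noise tests above: a measured variance below the prediction only has to clear the
// security floor, while an overshoot must stay within RELATIVE_TOLERANCE (1/16) of the
// predicted variance.
fn noise_check_passes(measured: f64, expected: f64, minimal: f64, rel_tol: f64) -> bool {
    if measured < expected {
        // Under-shooting the prediction is fine as long as the variance stays secure.
        measured >= minimal
    } else {
        // Over-shooting is only tolerated up to rel_tol * expected.
        (measured - expected).abs() < rel_tol * expected
    }
}
fn main() {
    assert!(noise_check_passes(1.0e-9, 1.2e-9, 1.0e-12, 0.0625));
    assert!(!noise_check_passes(2.0e-9, 1.2e-9, 1.0e-12, 0.0625));
}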

View File

@@ -0,0 +1,48 @@
use super::*;
mod lwe_multi_bit_programmable_bootstrapping_noise;
mod lwe_programmable_bootstrapping_noise;
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_4_BITS_NATIVE_U64_132_BITS_GAUSSIAN: ClassicTestParams<u64> =
ClassicTestParams {
lwe_dimension: LweDimension(841),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
3.1496674685772435e-06,
)),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
2.845267479601915e-15,
)),
pbs_base_log: DecompositionBaseLog(22),
pbs_level: DecompositionLevelCount(1),
ks_level: DecompositionLevelCount(5),
ks_base_log: DecompositionBaseLog(3),
pfks_level: DecompositionLevelCount(0),
pfks_base_log: DecompositionBaseLog(0),
pfks_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(0.0)),
cbs_level: DecompositionLevelCount(0),
cbs_base_log: DecompositionBaseLog(0),
message_modulus_log: MessageModulusLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
};
#[allow(clippy::excessive_precision)]
pub const NOISE_TEST_PARAMS_GPU_MULTI_BIT_GROUP_3_4_BITS_NATIVE_U64_132_BITS_GAUSSIAN:
MultiBitTestParams<u64> = MultiBitTestParams {
input_lwe_dimension: LweDimension(909),
lwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
9.743962418842028e-07,
)),
decomp_base_log: DecompositionBaseLog(21),
decomp_level_count: DecompositionLevelCount(1),
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
glwe_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(StandardDev(
2.845267479601915e-15,
)),
message_modulus_log: MessageModulusLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
grouping_factor: LweBskGroupingFactor(3),
thread_count: ThreadCount(1),
};