Compare commits

...

37 Commits

Author SHA1 Message Date
yuxizama
8599f15a93 chore(docs): fix the survey format 2024-10-31 11:23:16 +01:00
Nicolas Sarlin
b4083b92c7 chore(tfhe): prepare release 0.9.1 2024-10-30 15:36:33 +01:00
Arthur Meyre
21a4c1d879 chore(ci): update chrome version 2024-10-30 15:36:33 +01:00
Nicolas Sarlin
a726a1f716 chore(zk): bump version to 0.3.1 2024-10-30 15:36:33 +01:00
Nicolas Sarlin
70444e7693 fix(zk): proof compatibility between 32/64b platforms 2024-10-30 15:36:33 +01:00
Arthur Meyre
9c971e3269 chore(data)!: breaking data changes for future compatibility
- invert the LweKeyswitchKey level order and propagate change
- remove dependency on unsupported wopbs keys for the HL keys
2024-10-22 17:44:33 +02:00
Arthur Meyre
27dd30f3f8 chore(cuda): bump version to 0.5.0 2024-10-22 17:44:33 +02:00
Arthur Meyre
004d4ef807 chore(tfhe): bump version to 0.9.0 2024-10-22 17:44:33 +02:00
Nicolas Sarlin
eb09518679 fix(wasm): fix size used for serialization in benches 2024-10-22 16:47:15 +02:00
Nicolas Sarlin
d15f4dd178 chore(tfhe): prepare release 0.8.6 2024-10-22 13:37:05 +02:00
Nicolas Sarlin
57273cb47c feat(wasm): export safe_deserialize for CompactPkePublicParams 2024-10-22 13:37:05 +02:00
Nicolas Sarlin
9bc70ff4b9 chore(tfhe): prepare release 0.8.5 2024-10-21 14:31:18 +02:00
Nicolas Sarlin
62aed00846 fix(serialization): safe_serialization with unlimited size 2024-10-21 14:31:18 +02:00
Nicolas Sarlin
0d8ac5a066 fix(serialization): serialized_size_limit includes the header 2024-10-21 14:31:18 +02:00
Nicolas Sarlin
bc98374830 chore(tfhe): prepare release 0.8.4 2024-10-16 17:53:29 +02:00
Nicolas Sarlin
430c2ffdd1 chore(all): update MSRV to 1.81 2024-10-16 17:53:29 +02:00
Nicolas Sarlin
26aeb315a3 feat(serialization): add safe_serialized_size 2024-10-16 17:53:29 +02:00
Arthur Meyre
e27fd60960 chore(tfhe): bump version to 0.8.3 2024-10-10 17:13:04 +02:00
Arthur Meyre
98ab8b1c36 chore(cuda): bump version to 0.4.1 2024-10-10 17:13:04 +02:00
Pedro Alves
e86efc9393 fix(compression): update compression parameters, fix compression on GPU and improve test
- the new compression parameters went through a noise check to verify constraints
- CPU and GPU compression tests are improved and the same
- implement Debug, Eq, PartialEq to CompressedCiphertextList
- fix gpu compression when a radix ciphertext is split through more than one compact GLWE
2024-10-10 17:13:04 +02:00
Pedro Alves
d9e81e780d fix(gpu): fix the indexes used in compression
- also general minor fixes to compression
2024-10-10 17:13:04 +02:00
Agnes Leroy
5465065919 chore(gpu): do not unwrap in blocks_of, to have the same behavior as the CPU 2024-10-10 17:13:04 +02:00
Agnes Leroy
b50f94650f chore(hl): fix clippy error in test 2024-10-10 17:13:04 +02:00
Arthur Meyre
c7d9663f69 chore(ci): the original build fix was not conservative enough
- this makes sure we honour the original requirement while making sure we
don't pull the broken dep in
2024-10-10 17:13:04 +02:00
Arthur Meyre
54b2fcb59c chore(ci): wasm-bindgen introduced a bug in 0.2.94 preventing our build
- 0.2.93 works properly, changing the requirement to allow wasm to build
2024-10-10 17:13:04 +02:00
Nicolas Sarlin
3738f35b48 feat(core_crypto): impl Named for LweSecretKey and GlweSecretKey 2024-10-10 14:15:55 +02:00
Nicolas Sarlin
8f53e1851f doc: update serialization doc 2024-10-10 09:34:06 +02:00
Nicolas Sarlin
f37f55cbd0 feat(hl): create server key conformance from config 2024-10-10 09:34:06 +02:00
Arthur Meyre
7cbe12d9a2 chore(tfhe): bump version to 0.8.2 2024-10-08 13:29:41 +02:00
Arthur Meyre
b2c8338065 feat(integer): construct proven ct list conformance from another source
- allows to use ZK parameters directly
2024-10-08 13:29:41 +02:00
tmontaigu
81edfbc51b chore(ci): bump version to 0.8.1 2024-10-03 19:37:41 +02:00
tmontaigu
102fdef9e8 fix(hlapi): pub use HlCompressible,HlExpandable
Pub re-export the `HlCompressible` and `HlExpandable`
traits, as users may need them to write generic code
that manipulates CompressedCiphertextList/Builder
2024-10-03 19:37:41 +02:00
Agnes Leroy
9ca132dd1f chore(doc): add compression tutorial on GPU 2024-10-03 16:22:35 +02:00
Arthur Meyre
4b5f6d998a chore(doc): add a bit more substance to the array documentation 2024-10-03 16:22:35 +02:00
Arthur Meyre
ba38983e1e chore(doc): add make command to print parameters used in doc benchmarks 2024-10-03 16:22:35 +02:00
Arthur Meyre
68219c6fec chore(docs): improve getting started page following feedback
- add more details to set-up a rust project from 0 and add TFHE-rs as a
dependency
2024-10-03 16:22:35 +02:00
Agnes Leroy
7e75e9ae2f chore(gpu): add ks/pbs benchmarks in the documentation 2024-10-03 16:22:35 +02:00
76 changed files with 1594 additions and 1230 deletions

View File

@@ -21,7 +21,7 @@ BENCH_OP_FLAVOR?=DEFAULT
NODE_VERSION=22.6
FORWARD_COMPAT?=OFF
BACKWARD_COMPAT_DATA_URL=https://github.com/zama-ai/tfhe-backward-compat-data.git
BACKWARD_COMPAT_DATA_BRANCH?=v0.2
BACKWARD_COMPAT_DATA_BRANCH?=v0.3
BACKWARD_COMPAT_DATA_PROJECT=tfhe-backward-compat-data
BACKWARD_COMPAT_DATA_DIR=$(BACKWARD_COMPAT_DATA_PROJECT)
TFHE_SPEC:=tfhe
@@ -163,16 +163,16 @@ install_web_resource:
rm checksum && \
unzip $(filename)
install_chrome_browser: url = "https://storage.googleapis.com/chrome-for-testing-public/128.0.6613.137/linux64/chrome-linux64.zip"
install_chrome_browser: checksum = "c5d7da679f3a353ae4e4420ab113de06d4bd459152f5b17558390c02d9520566"
install_chrome_browser: url = "https://storage.googleapis.com/chrome-for-testing-public/130.0.6723.69/linux64/chrome-linux64.zip"
install_chrome_browser: checksum = "f789d53911a50cfa4a2bc1f09cde57567247f52515436d92b1aa9de93c2787d0"
install_chrome_browser: dest = "$(WEB_RUNNER_DIR)/chrome"
install_chrome_browser: filename = "chrome-linux64.zip"
.PHONY: install_chrome_browser # Install Chrome browser for Linux
install_chrome_browser: install_web_resource
install_chrome_web_driver: url = "https://storage.googleapis.com/chrome-for-testing-public/128.0.6613.137/linux64/chromedriver-linux64.zip"
install_chrome_web_driver: checksum = "f041092f403fb7455a6da2871070b6587c32814a3e3c2b0a794d3d4aa4739151"
install_chrome_web_driver: url = "https://storage.googleapis.com/chrome-for-testing-public/130.0.6723.69/linux64/chromedriver-linux64.zip"
install_chrome_web_driver: checksum = "90fe8dedf33eefe4b72704f626fa9f5834427c042235cfeb4251f18c9f0336ea"
install_chrome_web_driver: dest = "$(WEB_RUNNER_DIR)/chrome"
install_chrome_web_driver: filename = "chromedriver-linux64.zip"
@@ -542,6 +542,13 @@ test_integer_gpu: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --doc --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),integer,gpu -p $(TFHE_SPEC) -- integer::gpu::server_key::
.PHONY: test_integer_compression
test_integer_compression: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),integer -p $(TFHE_SPEC) -- integer::ciphertext::compressed_ciphertext_list::tests::
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --doc --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),integer -p $(TFHE_SPEC) -- integer::ciphertext::compress
.PHONY: test_integer_compression_gpu
test_integer_compression_gpu: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
@@ -947,6 +954,11 @@ dieharder_csprng: install_dieharder build_concrete_csprng
# Benchmarks
#
.PHONY: print_doc_bench_parameters # Print parameters used in doc benchmarks
print_doc_bench_parameters:
RUSTFLAGS="" cargo run --example print_doc_bench_parameters \
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p tfhe
.PHONY: bench_integer # Run benchmarks for unsigned integer
bench_integer: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \

View File

@@ -1,6 +1,6 @@
[package]
name = "tfhe-cuda-backend"
version = "0.4.0"
version = "0.5.0"
edition = "2021"
authors = ["Zama team"]
license = "BSD-3-Clause-Clear"

View File

@@ -18,7 +18,7 @@ void cuda_convert_lwe_ciphertext_vector_to_cpu_64(void *stream,
void cuda_glwe_sample_extract_64(void *stream, uint32_t gpu_index,
void *lwe_array_out, void *glwe_array_in,
uint32_t *nth_array, uint32_t num_glwes,
uint32_t *nth_array, uint32_t num_nths,
uint32_t glwe_dimension,
uint32_t polynomial_size);
};

View File

@@ -8,7 +8,7 @@ void scratch_cuda_integer_compress_radix_ciphertext_64(
void **streams, uint32_t *gpu_indexes, uint32_t gpu_count, int8_t **mem_ptr,
uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t num_lwes, uint32_t message_modulus, uint32_t carry_modulus,
uint32_t num_radix_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, uint32_t lwe_per_glwe, uint32_t storage_log_modulus,
bool allocate_gpu_memory);
@@ -17,7 +17,7 @@ void scratch_cuda_integer_decompress_radix_ciphertext_64(
uint32_t encryption_glwe_dimension, uint32_t encryption_polynomial_size,
uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
uint32_t lwe_dimension, uint32_t pbs_level, uint32_t pbs_base_log,
uint32_t num_lwes, uint32_t message_modulus, uint32_t carry_modulus,
uint32_t num_radix_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, uint32_t storage_log_modulus, uint32_t body_count,
bool allocate_gpu_memory);
@@ -96,7 +96,7 @@ template <typename Torus> struct int_decompression {
uint32_t storage_log_modulus;
uint32_t num_lwes;
uint32_t num_radix_blocks;
uint32_t body_count;
Torus *tmp_extracted_glwe;
@@ -113,7 +113,7 @@ template <typename Torus> struct int_decompression {
this->encryption_params = encryption_params;
this->compression_params = compression_params;
this->storage_log_modulus = storage_log_modulus;
this->num_lwes = num_radix_blocks;
this->num_radix_blocks = num_radix_blocks;
this->body_count = body_count;
if (allocate_gpu_memory) {
@@ -134,7 +134,7 @@ template <typename Torus> struct int_decompression {
tmp_extracted_lwe = (Torus *)cuda_malloc_async(
num_radix_blocks * lwe_accumulator_size * sizeof(Torus), streams[0],
gpu_indexes[0]);
// Decompression
// Carry extract LUT
auto carry_extract_f = [encryption_params](Torus x) -> Torus {
return x / encryption_params.message_modulus;
@@ -157,7 +157,7 @@ template <typename Torus> struct int_decompression {
cuda_drop_async(tmp_indexes_array, streams[0], gpu_indexes[0]);
carry_extract_lut->release(streams, gpu_indexes, gpu_count);
delete (carry_extract_lut);
delete carry_extract_lut;
}
};
#endif

View File

@@ -23,7 +23,7 @@ void cuda_convert_lwe_ciphertext_vector_to_cpu_64(void *stream,
void cuda_glwe_sample_extract_64(void *stream, uint32_t gpu_index,
void *lwe_array_out, void *glwe_array_in,
uint32_t *nth_array, uint32_t num_glwes,
uint32_t *nth_array, uint32_t num_nths,
uint32_t glwe_dimension,
uint32_t polynomial_size) {
@@ -31,43 +31,43 @@ void cuda_glwe_sample_extract_64(void *stream, uint32_t gpu_index,
case 256:
host_sample_extract<uint64_t, AmortizedDegree<256>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_glwes,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_nths,
glwe_dimension);
break;
case 512:
host_sample_extract<uint64_t, AmortizedDegree<512>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_glwes,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_nths,
glwe_dimension);
break;
case 1024:
host_sample_extract<uint64_t, AmortizedDegree<1024>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_glwes,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_nths,
glwe_dimension);
break;
case 2048:
host_sample_extract<uint64_t, AmortizedDegree<2048>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_glwes,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_nths,
glwe_dimension);
break;
case 4096:
host_sample_extract<uint64_t, AmortizedDegree<4096>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_glwes,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_nths,
glwe_dimension);
break;
case 8192:
host_sample_extract<uint64_t, AmortizedDegree<8192>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_glwes,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_nths,
glwe_dimension);
break;
case 16384:
host_sample_extract<uint64_t, AmortizedDegree<16384>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_glwes,
(uint64_t *)glwe_array_in, (uint32_t *)nth_array, num_nths,
glwe_dimension);
break;
default:

View File

@@ -75,7 +75,8 @@ keyswitch(Torus *lwe_array_out, const Torus *__restrict__ lwe_output_indexes,
level_count);
Torus state = a_i >> (sizeof(Torus) * 8 - base_log * level_count);
for (int j = 0; j < level_count; j++) {
for (int j = level_count - 1; j >= 0; j--) {
// Levels are stored in reverse order
auto ksk_block =
get_ith_block(ksk, i, j, lwe_dimension_out, level_count);
Torus decomposed = decompose_one<Torus>(state, mask_mod_b, base_log);
@@ -207,7 +208,8 @@ __device__ void packing_keyswitch_lwe_ciphertext_into_glwe_ciphertext(
// block of key for current lwe coefficient (cur_input_lwe[i])
auto ksk_block = &fp_ksk[i * ksk_block_size];
for (int j = 0; j < level_count; j++) {
for (int j = level_count - 1; j >= 0; j--) {
// Levels are stored in reverse order
auto ksk_glwe = &ksk_block[j * glwe_size * polynomial_size];
// Iterate through each level and multiply by the ksk piece
auto ksk_glwe_chunk = &ksk_glwe[poly_id * coef_per_block];

View File

@@ -4,7 +4,7 @@ void scratch_cuda_integer_compress_radix_ciphertext_64(
void **streams, uint32_t *gpu_indexes, uint32_t gpu_count, int8_t **mem_ptr,
uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t num_lwes, uint32_t message_modulus, uint32_t carry_modulus,
uint32_t num_radix_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, uint32_t lwe_per_glwe, uint32_t storage_log_modulus,
bool allocate_gpu_memory) {
@@ -16,15 +16,16 @@ void scratch_cuda_integer_compress_radix_ciphertext_64(
scratch_cuda_compress_integer_radix_ciphertext<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
(int_compression<uint64_t> **)mem_ptr, num_lwes, compression_params,
lwe_per_glwe, storage_log_modulus, allocate_gpu_memory);
(int_compression<uint64_t> **)mem_ptr, num_radix_blocks,
compression_params, lwe_per_glwe, storage_log_modulus,
allocate_gpu_memory);
}
void scratch_cuda_integer_decompress_radix_ciphertext_64(
void **streams, uint32_t *gpu_indexes, uint32_t gpu_count, int8_t **mem_ptr,
uint32_t encryption_glwe_dimension, uint32_t encryption_polynomial_size,
uint32_t compression_glwe_dimension, uint32_t compression_polynomial_size,
uint32_t lwe_dimension, uint32_t pbs_level, uint32_t pbs_base_log,
uint32_t num_lwes, uint32_t message_modulus, uint32_t carry_modulus,
uint32_t num_radix_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, uint32_t storage_log_modulus, uint32_t body_count,
bool allocate_gpu_memory) {
@@ -41,7 +42,7 @@ void scratch_cuda_integer_decompress_radix_ciphertext_64(
scratch_cuda_integer_decompress_radix_ciphertext<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
(int_decompression<uint64_t> **)mem_ptr, num_lwes, body_count,
(int_decompression<uint64_t> **)mem_ptr, num_radix_blocks, body_count,
encryption_params, compression_params, storage_log_modulus,
allocate_gpu_memory);
}

View File

@@ -12,7 +12,7 @@
template <typename Torus>
__global__ void pack(Torus *array_out, Torus *array_in, uint32_t log_modulus,
uint32_t num_glwes, uint32_t in_len, uint32_t out_len) {
uint32_t num_coeffs, uint32_t in_len, uint32_t out_len) {
auto nbits = sizeof(Torus) * 8;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
@@ -21,7 +21,7 @@ __global__ void pack(Torus *array_out, Torus *array_in, uint32_t log_modulus,
auto chunk_array_in = array_in + glwe_index * in_len;
auto chunk_array_out = array_out + glwe_index * out_len;
if (tid < num_glwes * out_len) {
if (tid < num_coeffs) {
auto k = nbits * i / log_modulus;
auto j = k;
@@ -44,11 +44,15 @@ __global__ void pack(Torus *array_out, Torus *array_in, uint32_t log_modulus,
template <typename Torus>
__host__ void host_pack(cudaStream_t stream, uint32_t gpu_index,
Torus *array_out, Torus *array_in, uint32_t num_glwes,
int_compression<Torus> *mem_ptr) {
uint32_t num_lwes, int_compression<Torus> *mem_ptr) {
if (array_in == array_out)
PANIC("Cuda error: Input and output must be different");
cudaSetDevice(gpu_index);
auto compression_params = mem_ptr->compression_params;
auto log_modulus = mem_ptr->storage_log_modulus;
// [0..num_glwes-1) GLWEs
auto in_len = (compression_params.glwe_dimension + 1) *
compression_params.polynomial_size;
auto number_bits_to_pack = in_len * log_modulus;
@@ -56,20 +60,27 @@ __host__ void host_pack(cudaStream_t stream, uint32_t gpu_index,
// number_bits_to_pack.div_ceil(Scalar::BITS)
auto out_len = (number_bits_to_pack + nbits - 1) / nbits;
// Last GLWE
number_bits_to_pack = in_len * log_modulus;
auto last_out_len = (number_bits_to_pack + nbits - 1) / nbits;
auto num_coeffs = (num_glwes - 1) * out_len + last_out_len;
int num_blocks = 0, num_threads = 0;
getNumBlocksAndThreads(num_glwes * out_len, 1024, num_blocks, num_threads);
getNumBlocksAndThreads(num_coeffs, 1024, num_blocks, num_threads);
dim3 grid(num_blocks);
dim3 threads(num_threads);
pack<Torus><<<grid, threads, 0, stream>>>(array_out, array_in, log_modulus,
num_glwes, in_len, out_len);
num_coeffs, in_len, out_len);
check_cuda_error(cudaGetLastError());
}
template <typename Torus>
__host__ void host_integer_compress(cudaStream_t *streams,
uint32_t *gpu_indexes, uint32_t gpu_count,
Torus *glwe_array_out, Torus *lwe_array_in,
Torus **fp_ksk, uint32_t num_lwes,
Torus **fp_ksk, uint32_t num_radix_blocks,
int_compression<Torus> *mem_ptr) {
auto compression_params = mem_ptr->compression_params;
@@ -80,21 +91,23 @@ __host__ void host_integer_compress(cudaStream_t *streams,
host_cleartext_multiplication<Torus>(
streams[0], gpu_indexes[0], lwe_shifted, lwe_array_in,
(uint64_t)compression_params.message_modulus, input_lwe_dimension,
num_lwes);
num_radix_blocks);
uint32_t lwe_in_size = input_lwe_dimension + 1;
uint32_t glwe_out_size = (compression_params.glwe_dimension + 1) *
compression_params.polynomial_size;
uint32_t num_glwes = num_lwes / mem_ptr->lwe_per_glwe + 1;
uint32_t num_glwes_for_compression =
num_radix_blocks / mem_ptr->lwe_per_glwe + 1;
// Keyswitch LWEs to GLWE
auto tmp_glwe_array_out = mem_ptr->tmp_glwe_array_out;
cuda_memset_async(tmp_glwe_array_out, 0,
num_glwes * (compression_params.glwe_dimension + 1) *
num_glwes_for_compression *
(compression_params.glwe_dimension + 1) *
compression_params.polynomial_size * sizeof(Torus),
streams[0], gpu_indexes[0]);
auto fp_ks_buffer = mem_ptr->fp_ks_buffer;
auto rem_lwes = num_lwes;
auto rem_lwes = num_radix_blocks;
auto lwe_subset = lwe_shifted;
auto glwe_out = tmp_glwe_array_out;
@@ -115,13 +128,13 @@ __host__ void host_integer_compress(cudaStream_t *streams,
// Modulus switch
host_modulus_switch_inplace<Torus>(
streams[0], gpu_indexes[0], tmp_glwe_array_out,
num_glwes * (compression_params.glwe_dimension + 1) *
num_glwes_for_compression * (compression_params.glwe_dimension + 1) *
compression_params.polynomial_size,
mem_ptr->storage_log_modulus);
check_cuda_error(cudaGetLastError());
host_pack<Torus>(streams[0], gpu_indexes[0], glwe_array_out,
tmp_glwe_array_out, num_glwes, mem_ptr);
tmp_glwe_array_out, num_glwes_for_compression,
num_radix_blocks, mem_ptr);
}
template <typename Torus>
@@ -160,11 +173,15 @@ __global__ void extract(Torus *glwe_array_out, Torus *array_in, uint32_t index,
}
}
/// Extracts the glwe_index-nth GLWE ciphertext
template <typename Torus>
__host__ void host_extract(cudaStream_t stream, uint32_t gpu_index,
Torus *glwe_array_out, Torus *array_in,
uint32_t glwe_index,
int_decompression<Torus> *mem_ptr) {
if (array_in == glwe_array_out)
PANIC("Cuda error: Input and output must be different");
cudaSetDevice(gpu_index);
auto compression_params = mem_ptr->compression_params;
@@ -221,7 +238,10 @@ host_integer_decompress(cudaStream_t *streams, uint32_t *gpu_indexes,
"be smaller than "
"polynomial_size.")
auto num_lwes = h_mem_ptr->num_lwes;
auto num_radix_blocks = h_mem_ptr->num_radix_blocks;
if (num_radix_blocks != indexes_array_size)
PANIC("Cuda error: wrong number of LWEs in decompress: the number of LWEs "
"should be the same as indexes_array_size.")
// the first element is the last index in h_indexes_array that lies in the
// related GLWE
@@ -251,23 +271,23 @@ host_integer_decompress(cudaStream_t *streams, uint32_t *gpu_indexes,
}
}
// Sample extract all LWEs
Torus lwe_accumulator_size =
(compression_params.glwe_dimension * compression_params.polynomial_size +
1);
Torus lwe_accumulator_size = compression_params.small_lwe_dimension + 1;
auto extracted_lwe = h_mem_ptr->tmp_extracted_lwe;
uint32_t current_idx = 0;
auto d_indexes_array_chunk = d_indexes_array;
for (const auto &max_idx_and_glwe : glwe_vec) {
uint32_t max_idx = max_idx_and_glwe.first;
uint32_t last_idx = max_idx_and_glwe.first;
extracted_glwe = max_idx_and_glwe.second;
cuda_glwe_sample_extract_64(
streams[0], gpu_indexes[0], extracted_lwe, extracted_glwe,
d_indexes_array, max_idx + 1 - current_idx,
compression_params.glwe_dimension, compression_params.polynomial_size);
extracted_lwe += lwe_accumulator_size;
current_idx = max_idx;
auto num_lwes = last_idx + 1 - current_idx;
cuda_glwe_sample_extract_64(streams[0], gpu_indexes[0], extracted_lwe,
extracted_glwe, d_indexes_array_chunk, num_lwes,
compression_params.glwe_dimension,
compression_params.polynomial_size);
d_indexes_array_chunk += num_lwes;
extracted_lwe += num_lwes * lwe_accumulator_size;
current_idx = last_idx;
}
// Reset
@@ -280,9 +300,8 @@ host_integer_decompress(cudaStream_t *streams, uint32_t *gpu_indexes,
/// dimension to a big LWE dimension
auto encryption_params = h_mem_ptr->encryption_params;
auto lut = h_mem_ptr->carry_extract_lut;
auto active_gpu_count = get_active_gpu_count(num_lwes, gpu_count);
auto active_gpu_count = get_active_gpu_count(num_radix_blocks, gpu_count);
if (active_gpu_count == 1) {
execute_pbs_async<Torus>(
streams, gpu_indexes, active_gpu_count, d_lwe_array_out,
lut->lwe_indexes_out, lut->lut_vec, lut->lut_indexes_vec, extracted_lwe,
@@ -291,7 +310,7 @@ host_integer_decompress(cudaStream_t *streams, uint32_t *gpu_indexes,
compression_params.small_lwe_dimension,
encryption_params.polynomial_size, encryption_params.pbs_base_log,
encryption_params.pbs_level, encryption_params.grouping_factor,
num_lwes, encryption_params.pbs_type, lut_count, lut_stride);
num_radix_blocks, encryption_params.pbs_type, lut_count, lut_stride);
} else {
/// For multi GPU execution we create vectors of pointers for inputs and
/// outputs
@@ -306,7 +325,7 @@ host_integer_decompress(cudaStream_t *streams, uint32_t *gpu_indexes,
/// gather data to GPU 0 we can copy back to the original indexing
multi_gpu_scatter_lwe_async<Torus>(
streams, gpu_indexes, active_gpu_count, lwe_array_in_vec, extracted_lwe,
lut->h_lwe_indexes_in, lut->using_trivial_lwe_indexes, num_lwes,
lut->h_lwe_indexes_in, lut->using_trivial_lwe_indexes, num_radix_blocks,
compression_params.small_lwe_dimension + 1);
/// Apply PBS
@@ -318,14 +337,14 @@ host_integer_decompress(cudaStream_t *streams, uint32_t *gpu_indexes,
compression_params.small_lwe_dimension,
encryption_params.polynomial_size, encryption_params.pbs_base_log,
encryption_params.pbs_level, encryption_params.grouping_factor,
num_lwes, encryption_params.pbs_type, lut_count, lut_stride);
num_radix_blocks, encryption_params.pbs_type, lut_count, lut_stride);
/// Copy data back to GPU 0 and release vecs
multi_gpu_gather_lwe_async<Torus>(streams, gpu_indexes, active_gpu_count,
d_lwe_array_out, lwe_after_pbs_vec,
lut->h_lwe_indexes_out,
lut->using_trivial_lwe_indexes, num_lwes,
encryption_params.big_lwe_dimension + 1);
multi_gpu_gather_lwe_async<Torus>(
streams, gpu_indexes, active_gpu_count, d_lwe_array_out,
lwe_after_pbs_vec, lut->h_lwe_indexes_out,
lut->using_trivial_lwe_indexes, num_radix_blocks,
encryption_params.big_lwe_dimension + 1);
/// Synchronize all GPUs
for (uint i = 0; i < active_gpu_count; i++) {
@@ -337,24 +356,25 @@ host_integer_decompress(cudaStream_t *streams, uint32_t *gpu_indexes,
template <typename Torus>
__host__ void scratch_cuda_compress_integer_radix_ciphertext(
cudaStream_t *streams, uint32_t *gpu_indexes, uint32_t gpu_count,
int_compression<Torus> **mem_ptr, uint32_t num_lwes,
int_compression<Torus> **mem_ptr, uint32_t num_radix_blocks,
int_radix_params compression_params, uint32_t lwe_per_glwe,
uint32_t storage_log_modulus, bool allocate_gpu_memory) {
*mem_ptr = new int_compression<Torus>(
streams, gpu_indexes, gpu_count, compression_params, num_lwes,
streams, gpu_indexes, gpu_count, compression_params, num_radix_blocks,
lwe_per_glwe, storage_log_modulus, allocate_gpu_memory);
}
template <typename Torus>
__host__ void scratch_cuda_integer_decompress_radix_ciphertext(
cudaStream_t *streams, uint32_t *gpu_indexes, uint32_t gpu_count,
int_decompression<Torus> **mem_ptr, uint32_t num_lwes, uint32_t body_count,
int_radix_params encryption_params, int_radix_params compression_params,
uint32_t storage_log_modulus, bool allocate_gpu_memory) {
int_decompression<Torus> **mem_ptr, uint32_t num_radix_blocks,
uint32_t body_count, int_radix_params encryption_params,
int_radix_params compression_params, uint32_t storage_log_modulus,
bool allocate_gpu_memory) {
*mem_ptr = new int_decompression<Torus>(
streams, gpu_indexes, gpu_count, encryption_params, compression_params,
num_lwes, body_count, storage_log_modulus, allocate_gpu_memory);
num_radix_blocks, body_count, storage_log_modulus, allocate_gpu_memory);
}
#endif

View File

@@ -96,7 +96,7 @@ extern "C" {
lwe_dimension: u32,
ks_level: u32,
ks_base_log: u32,
num_lwes: u32,
num_radix_blocks: u32,
message_modulus: u32,
carry_modulus: u32,
pbs_type: u32,
@@ -117,7 +117,7 @@ extern "C" {
lwe_dimension: u32,
pbs_level: u32,
pbs_base_log: u32,
num_lwes: u32,
num_radix_blocks: u32,
message_modulus: u32,
carry_modulus: u32,
pbs_type: u32,

View File

@@ -1,6 +1,6 @@
[package]
name = "tfhe-zk-pok"
version = "0.3.0"
version = "0.3.1"
edition = "2021"
keywords = ["zero", "knowledge", "proof", "vector-commitments"]
homepage = "https://zama.ai/"

View File

@@ -595,7 +595,7 @@ pub fn prove<G: Curve>(
let x_bytes = &*[
q.to_le_bytes().as_slice(),
d.to_le_bytes().as_slice(),
(d as u64).to_le_bytes().as_slice(),
b_i.to_le_bytes().as_slice(),
t.to_le_bytes().as_slice(),
msbs_zero_padding_bit_count.to_le_bytes().as_slice(),
@@ -1041,7 +1041,7 @@ pub fn verify<G: Curve>(
let x_bytes = &*[
q.to_le_bytes().as_slice(),
d.to_le_bytes().as_slice(),
(d as u64).to_le_bytes().as_slice(),
b_i.to_le_bytes().as_slice(),
t.to_le_bytes().as_slice(),
msbs_zero_padding_bit_count.to_le_bytes().as_slice(),

View File

@@ -817,7 +817,7 @@ pub fn prove<G: Curve>(
let x_bytes = &*[
q.to_le_bytes().as_slice(),
d.to_le_bytes().as_slice(),
(d as u64).to_le_bytes().as_slice(),
B.to_le_bytes().as_slice(),
t_input.to_le_bytes().as_slice(),
msbs_zero_padding_bit_count.to_le_bytes().as_slice(),
@@ -1852,7 +1852,7 @@ pub fn verify<G: Curve>(
let x_bytes = &*[
q.to_le_bytes().as_slice(),
d.to_le_bytes().as_slice(),
(d as u64).to_le_bytes().as_slice(),
B.to_le_bytes().as_slice(),
t_input.to_le_bytes().as_slice(),
msbs_zero_padding_bit_count.to_le_bytes().as_slice(),

View File

@@ -225,7 +225,7 @@ pub fn prove<G: Curve>(
core::slice::from_mut(s),
&[
hash_s,
&i.to_le_bytes(),
&(i as u64).to_le_bytes(),
v_hat.to_le_bytes().as_ref(),
c_hat.to_le_bytes().as_ref(),
c_y.to_le_bytes().as_ref(),
@@ -336,7 +336,7 @@ pub fn verify<G: Curve>(
core::slice::from_mut(s),
&[
hash_s,
&i.to_le_bytes(),
&(i as u64).to_le_bytes(),
v_hat.to_le_bytes().as_ref(),
c_hat.to_le_bytes().as_ref(),
c_y.to_le_bytes().as_ref(),

View File

@@ -1,6 +1,6 @@
[package]
name = "tfhe"
version = "0.8.0"
version = "0.9.1"
edition = "2021"
readme = "../README.md"
keywords = ["fully", "homomorphic", "encryption", "fhe", "cryptography"]
@@ -17,7 +17,7 @@ exclude = [
"/js_on_wasm_tests/",
"/web_wasm_parallel_tests/",
]
rust-version = "1.76"
rust-version = "1.81"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -46,7 +46,7 @@ hex = "0.4.3"
# End regex-engine deps
# Used for backward compatibility test metadata
ron = "0.8"
tfhe-backward-compat-data = { git = "https://github.com/zama-ai/tfhe-backward-compat-data.git", branch = "v0.2", default-features = false, features = [
tfhe-backward-compat-data = { git = "https://github.com/zama-ai/tfhe-backward-compat-data.git", branch = "v0.3", default-features = false, features = [
"load",
] }
@@ -65,7 +65,7 @@ bincode = "1.3.3"
concrete-fft = { version = "0.5.1", features = ["serde", "fft128"] }
concrete-ntt = { version = "0.2.0" }
pulp = "0.18.22"
tfhe-cuda-backend = { version = "0.4.0", path = "../backends/tfhe-cuda-backend", optional = true }
tfhe-cuda-backend = { version = "0.5.0", path = "../backends/tfhe-cuda-backend", optional = true }
aligned-vec = { version = "0.5", features = ["serde"] }
dyn-stack = { version = "0.10" }
paste = "1.0.7"
@@ -75,11 +75,11 @@ sha3 = { version = "0.10", optional = true }
# While we wait for repeat_n in rust standard library
itertools = "0.11.0"
rand_core = { version = "0.6.4", features = ["std"] }
tfhe-zk-pok = { version = "0.3.0", path = "../tfhe-zk-pok", optional = true }
tfhe-zk-pok = { version = "0.3.1", path = "../tfhe-zk-pok", optional = true }
tfhe-versionable = { version = "0.3.0", path = "../utils/tfhe-versionable" }
# wasm deps
wasm-bindgen = { version = "0.2.86", features = [
wasm-bindgen = { version = ">=0.2.86,<0.2.94", features = [
"serde-serialize",
], optional = true }
wasm-bindgen-rayon = { version = "1.0", optional = true }
@@ -295,6 +295,11 @@ name = "write_params_to_file"
path = "examples/utilities/params_to_file.rs"
required-features = ["boolean", "shortint", "internal-keycache"]
[[example]]
name = "print_doc_bench_parameters"
path = "examples/utilities/print_doc_bench_parameters.rs"
required-features = ["shortint", "internal-keycache"]
# Real use-case examples
[[example]]

View File

@@ -106,6 +106,7 @@ fn multi_bit_benchmark_parameters_64bits(
vec![
PARAM_GPU_MULTI_BIT_MESSAGE_1_CARRY_1_GROUP_3_KS_PBS,
PARAM_GPU_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_3_KS_PBS,
PARAM_GPU_MULTI_BIT_MESSAGE_3_CARRY_3_GROUP_3_KS_PBS,
]
} else {
vec![

View File

@@ -61,4 +61,8 @@ Collaborate with us to advance the FHE spaces and drive innovation together.
***
We value your feedback! [Take a 5-question developer survey](https://www.zama.ai/developer-survey) to improve the TFHE-rs library and the documentation and help other developers use FHE.
{% hint style="success" %}
**Zama 5-Question Developer Survey**
We want to hear from you! Take 1 minute to share your thoughts and help us enhance our documentation and libraries. **👉** [**Click here**](https://www.zama.ai/developer-survey) to participate.
{% endhint %}

View File

@@ -59,16 +59,19 @@ The following example shows how to compress and decompress a list containing 4 m
```rust
use tfhe::prelude::*;
use tfhe::shortint::parameters::{COMP_PARAM_MESSAGE_2_CARRY_2, PARAM_MESSAGE_2_CARRY_2};
use tfhe::shortint::parameters::{
COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64, PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
};
use tfhe::{
set_server_key, CompressedCiphertextList, CompressedCiphertextListBuilder, FheBool,
FheInt64, FheUint16, FheUint2, FheUint32,
};
fn main() {
let config = tfhe::ConfigBuilder::with_custom_parameters(PARAM_MESSAGE_2_CARRY_2)
.enable_compression(COMP_PARAM_MESSAGE_2_CARRY_2)
.build();
let config =
tfhe::ConfigBuilder::with_custom_parameters(PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64)
.enable_compression(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64)
.build();
let ck = tfhe::ClientKey::generate(config);
let sk = tfhe::ServerKey::new(&ck);

View File

@@ -2,20 +2,163 @@
This document explains the `serialization` and `deserialization` features that are useful to send data to a server to perform the computations.
## Serialization/deserialization
## Safe serialization/deserialization
When dealing with sensitive types, it's important to implement safe serialization and safe deserialization functions to prevent runtime errors and enhance security. **TFHE-rs** provides easy-to-use functions for this purpose, such as `safe_serialize`, `safe_deserialize` and `safe_deserialize_conformant`.
Here is a basic example on how to use it:
```rust
// main.rs
// Minimal example: safely serialize a server key, then deserialize it back
// with a conformance check against the configuration it was generated with.
use tfhe::safe_serialization::{safe_deserialize_conformant, safe_serialize};
use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
use tfhe::ServerKey;
use tfhe::{generate_keys, ConfigBuilder};
fn main() {
    let params_1 = PARAM_MESSAGE_2_CARRY_2_KS_PBS;
    let config = ConfigBuilder::with_custom_parameters(params_1).build();
    let (client_key, server_key) = generate_keys(config);
    let mut buffer = vec![];
    // The last argument is the max allowed size for the serialized buffer
    safe_serialize(&server_key, &mut buffer, 1 << 30).unwrap();
    // Deserialization errors out if the type, version, size limit or crypto
    // parameters of the stored data do not match what is expected here.
    let _server_key_deser: ServerKey =
        safe_deserialize_conformant(buffer.as_slice(), 1 << 30, &config.into()).unwrap();
}
```
The safe deserialization must take the output of a safe-serialization as input. During the process, the following validation occurs:
* **Type match**: deserializing `type A` from a serialized `type B` raises an error indicating "On deserialization, expected type A, got type B".
* **Version compatibility**: data serialized in previous versions of **TFHE-rs** are automatically upgraded to the latest version using the [data versioning](../guides/data\_versioning.md) feature.
* **Parameter compatibility**: deserializing an object of `type A` with one set of crypto parameters from an object of `type A` with another set of crypto parameters raises an error indicating "Deserialized object of type A not conformant with given parameter set"
* If both parameter sets have the same LWE dimension for ciphertexts, a ciphertext from param 1 may not fail this deserialization check with param 2.
* This check can't distinguish ciphertexts/server keys from independent client keys with the same parameters.
* This check is meant to prevent runtime errors in server homomorphic operations by checking that server keys and ciphertexts are compatible with the same parameter set.
* You can use the standalone `is_conformant` method to check parameter compatibility. Besides, the `safe_deserialize_conformant` function includes the parameter compatibility check, and the `safe_deserialize` function does not include the compatibility check.
* **Size limit**: both serialization and deserialization processes expect a size limit (measured in bytes) for the serialized data:
* On serialization, an error is raised if the serialized output exceeds the specific limit.
* On deserialization, an error is raised if the serialized input exceeds the specific limit.
This feature aims to gracefully return an error in case of an attacker trying to cause an out-of-memory error on deserialization.
Here is a more complete example:
```rust
// main.rs
use tfhe::conformance::ParameterSetConformant;
use tfhe::prelude::*;
use tfhe::safe_serialization::{safe_serialize, safe_deserialize_conformant};
use tfhe::shortint::parameters::{PARAM_MESSAGE_2_CARRY_2_KS_PBS, PARAM_MESSAGE_2_CARRY_2_PBS_KS};
use tfhe::conformance::ListSizeConstraint;
use tfhe::{
generate_keys, FheUint8, CompactCiphertextList, FheUint8ConformanceParams,
CompactPublicKey, ConfigBuilder, CompactCiphertextListConformanceParams
};
fn main() {
let params_1 = PARAM_MESSAGE_2_CARRY_2_KS_PBS;
let params_2 = PARAM_MESSAGE_2_CARRY_2_PBS_KS;
let config = ConfigBuilder::with_custom_parameters(params_1).build();
let (client_key, server_key) = generate_keys(config);
let conformance_params_1 = FheUint8ConformanceParams::from(params_1);
let conformance_params_2 = FheUint8ConformanceParams::from(params_2);
let public_key = CompactPublicKey::new(&client_key);
let msg = 27u8;
let ct = FheUint8::try_encrypt(msg, &client_key).unwrap();
assert!(ct.is_conformant(&conformance_params_1));
assert!(!ct.is_conformant(&conformance_params_2));
let mut buffer = vec![];
safe_serialize(&ct, &mut buffer, 1 << 20).unwrap();
assert!(safe_deserialize_conformant::<FheUint8>(buffer.as_slice(), 1 << 20, &conformance_params_2)
.is_err());
let ct2: FheUint8 = safe_deserialize_conformant(buffer.as_slice(), 1 << 20, &conformance_params_1)
.unwrap();
let dec: u8 = ct2.decrypt(&client_key);
assert_eq!(msg, dec);
// Example with a compact list:
let msgs = [27, 188u8];
let mut builder = CompactCiphertextList::builder(&public_key);
builder.extend(msgs.iter().copied());
let compact_list = builder.build();
let mut buffer = vec![];
safe_serialize(&compact_list, &mut buffer, 1 << 20).unwrap();
let conformance_params = CompactCiphertextListConformanceParams {
shortint_params: params_1.to_shortint_conformance_param(),
num_elements_constraint: ListSizeConstraint::exact_size(2),
};
safe_deserialize_conformant::<CompactCiphertextList>(buffer.as_slice(), 1 << 20, &conformance_params)
.unwrap();
}
```
The safe serialization and deserialization use `bincode` internally.
To selectively disable some of the features of the safe serialization, you can use `SerializationConfig`/`DeserializationConfig` builders.
For example, it is possible to disable the data versioning:
```rust
// main.rs
use tfhe::safe_serialization::{safe_deserialize_conformant, SerializationConfig};
use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
use tfhe::ServerKey;
use tfhe::{generate_keys, ConfigBuilder};
fn main() {
let params_1 = PARAM_MESSAGE_2_CARRY_2_KS_PBS;
let config = ConfigBuilder::with_custom_parameters(params_1).build();
let (client_key, server_key) = generate_keys(config);
let mut buffer = vec![];
SerializationConfig::new(1 << 30).disable_versioning().serialize_into(&server_key, &mut buffer).unwrap();
// You will still be able to load this item with `safe_deserialize_conformant`, but only using the current version of TFHE-rs
let _server_key_deser: ServerKey =
safe_deserialize_conformant(buffer.as_slice(), 1 << 30, &config.into()).unwrap();
}
```
## Serialization/deserialization using serde
**TFHE-rs** uses the [Serde](https://crates.io/crates/serde) framework and implements Serde's `Serialize` and `Deserialize` traits.
To serialize the data, you need to choose a [data format](https://serde.rs/#data-formats). In the following example, we use [bincode](https://crates.io/crates/bincode) for its binary format.
This allows you to serialize into any [data format](https://serde.rs/#data-formats) supported by serde.
However, this is a more bare-bones approach, as none of the checks described in the previous section will be performed for you.
Here is a full example:
In the following example, we use [bincode](https://crates.io/crates/bincode) for its binary format:
```toml
# Cargo.toml
[dependencies]
# ...
tfhe = { version = "0.8.0", features = ["integer","x86_64-unix"]}
tfhe = { version = "0.9.1", features = ["integer", "x86_64-unix"] }
bincode = "1.3.3"
```
@@ -70,92 +213,3 @@ fn server_function(serialized_data: &[u8]) -> Result<Vec<u8>, Box<dyn std::error
Ok(serialized_result)
}
```
## Safe serialization/deserialization
When dealing with sensitive types, it's important to implement safe serialization and safe deserialization functions to prevent runtime errors and enhance security. The safe serialization and deserialization use `bincode` internally.
The safe deserialization must take the output of a safe-serialization as input. During the process, the following validation occurs:
* **Type match**: deserializing `type A` from a serialized `type B` raises an error indicating "On deserialization, expected type A, got type B".
* **Version compatibility**: data serialized in previous versions of **TFHE-rs** are automatically upgraded to the latest version using the [data versioning](../guides/data\_versioning.md) feature.
* **Parameter compatibility**: deserializing an object of `type A` with one set of crypto parameters from an object of `type A` with another set of crypto parameters raises an error indicating "Deserialized object of type A not conformant with given parameter set"
* If both parameter sets have the same LWE dimension for ciphertexts, a ciphertext from param 1 may not fail this deserialization check with param 2.
* This check can't distinguish ciphertexts/server keys from independent client keys with the same parameters.
* This check is meant to prevent runtime errors in server homomorphic operations by checking that server keys and ciphertexts are compatible with the same parameter set.
* You can use the standalone `is_conformant` method to check parameter compatibility. Besides, the `safe_deserialize_conformant` function includes the parameter compatibility check, and the `safe_deserialize` function does not include the compatibility check.
* **Size limit**: both serialization and deserialization processes expect a size limit (measured in bytes) for the serialized data:
* On serialization, an error is raised if the serialized output exceeds the specific limit.
* On deserialization, an error is raised if the serialized input exceeds the specific limit.
This feature aims to gracefully return an error in case of an attacker trying to cause an out-of-memory error on deserialization.
Here is an example:
```rust
// main.rs
use tfhe::conformance::ParameterSetConformant;
use tfhe::prelude::*;
use tfhe::safe_serialization::{SerializationConfig, DeserializationConfig};
use tfhe::shortint::parameters::{PARAM_MESSAGE_2_CARRY_2_KS_PBS, PARAM_MESSAGE_2_CARRY_2_PBS_KS};
use tfhe::conformance::ListSizeConstraint;
use tfhe::{
generate_keys, FheUint8, CompactCiphertextList, FheUint8ConformanceParams,
CompactPublicKey, ConfigBuilder, CompactCiphertextListConformanceParams
};
fn main() {
let params_1 = PARAM_MESSAGE_2_CARRY_2_KS_PBS;
let params_2 = PARAM_MESSAGE_2_CARRY_2_PBS_KS;
let config = ConfigBuilder::with_custom_parameters(params_1).build();
let (client_key, server_key) = generate_keys(config);
let conformance_params_1 = FheUint8ConformanceParams::from(params_1);
let conformance_params_2 = FheUint8ConformanceParams::from(params_2);
let public_key = CompactPublicKey::new(&client_key);
let msg = 27u8;
let ct = FheUint8::try_encrypt(msg, &client_key).unwrap();
assert!(ct.is_conformant(&conformance_params_1));
assert!(!ct.is_conformant(&conformance_params_2));
let mut buffer = vec![];
SerializationConfig::new(1 << 20).serialize_into(&ct, &mut buffer).unwrap();
assert!(DeserializationConfig::new(1 << 20)
.deserialize_from::<FheUint8>(buffer.as_slice(), &conformance_params_2)
.is_err());
let ct2 = DeserializationConfig::new(1 << 20)
.deserialize_from::<FheUint8>(buffer.as_slice(), &conformance_params_1)
.unwrap();
let dec: u8 = ct2.decrypt(&client_key);
assert_eq!(msg, dec);
// Example with a compact list:
let msgs = [27, 188u8];
let mut builder = CompactCiphertextList::builder(&public_key);
builder.extend(msgs.iter().copied());
let compact_list = builder.build();
let mut buffer = vec![];
SerializationConfig::new(1 << 20).serialize_into(&compact_list, &mut buffer).unwrap();
let conformance_params = CompactCiphertextListConformanceParams {
shortint_params: params_1.to_shortint_conformance_param(),
num_elements_constraint: ListSizeConstraint::exact_size(2),
};
DeserializationConfig::new(1 << 20)
.deserialize_from::<CompactCiphertextList>(buffer.as_slice(), &conformance_params)
.unwrap();
}
```

View File

@@ -25,3 +25,9 @@ The following table shows the performance when the inputs of the benchmarked ope
The following table shows the performance when the left input of the benchmarked operation is encrypted and the other is a clear scalar of the same size:
{% embed url="https://docs.google.com/spreadsheets/d/1nLPt_m1MbkSdhMop0iKDnSN_c605l_JdMpK5JC90N_Q/edit?usp=sharing" %}
## Programmable bootstrapping
The next table shows the execution time of a keyswitch followed by a programmable bootstrapping depending on the precision of the input message. The associated parameter set is given.
{% embed url="https://docs.google.com/spreadsheets/d/1UrjB6MdwMmp6SL8nHx1uB4TKieT1OTei-hoSN5j-UzI/edit?gid=398722698#gid=398722698" %}

View File

@@ -2,6 +2,12 @@
This document summarizes the timings of some homomorphic operations over 64-bit encrypted integers, depending on the hardware. More details are given for [the CPU](cpu\_benchmarks.md), [the GPU](gpu\_benchmarks.md), or [zero-knowledge proofs](zk\_proof\_benchmarks.md).
You can get the parameters used for benchmarks by cloning the repository and checking out the commit you want to use (starting with the v0.9.0 release) and run the following make command:
```console
make print_doc_bench_parameters
```
### Operation time (ms) over FheUint 64
{% embed url="https://docs.google.com/spreadsheets/d/1ZbgsKnFH8eKrFjy9khFeaLYnUhbSV8Xu4H6rwulo0o8/edit?usp=sharing" %}

View File

@@ -9,13 +9,13 @@ First, add **TFHE-rs** as a dependency in your `Cargo.toml`.
**For `x86_64` machine running a Unix-like OS:**
```toml
tfhe = { version = "0.8.0", features = [ "boolean", "shortint", "integer", "x86_64-unix" ] }
tfhe = { version = "0.9.1", features = [ "boolean", "shortint", "integer", "x86_64-unix" ] }
```
**For `ARM` machine running a Unix-like OS:**
```toml
tfhe = { version = "0.8.0", features = [ "boolean", "shortint", "integer", "aarch64-unix" ] }
tfhe = { version = "0.9.1", features = [ "boolean", "shortint", "integer", "aarch64-unix" ] }
```
**For `x86_64` machines with the** [**`rdseed instruction`**](https://en.wikipedia.org/wiki/RDRAND) **running Windows:**

View File

@@ -2,11 +2,83 @@
This document explains the basic steps of using the high-level API of **TFHE-rs.**
## Workflow explanation
## Setting up a Rust project
These are the steps to use the **TFHE-rs** high-level API:
If you already know how to set up a Rust project, feel free to go directly to the next [section](#using-tfhe-rs-and-its-apis).
1. [Import the **TFHE-rs** prelude](quick\_start.md#imports)
First, install the Rust programming language tools. Visit https://rustup.rs/ and follow the instructions. For alternative installation methods, refer to the [official Rust installation page](https://rust-lang.github.io/rustup/installation/other.html).
After installing Rust, you can call the build and package manager `Cargo`:
```console
$ cargo --version
cargo 1.81.0 (2dbb1af80 2024-08-20)
```
Your version may differ depending on when you installed Rust. To update your installation, invoke `rustup update`.
Now you can invoke `Cargo` and create a new default Rust project:
```console
$ cargo new tfhe-example
Creating binary (application) `tfhe-example` package
note: see more `Cargo.toml` keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
```
This will create a `tfhe-example` directory and populate it with the following:
```console
$ tree tfhe-example/
tfhe-example/
├── Cargo.toml
└── src
└── main.rs
1 directory, 2 files
```
You now have a minimal Rust project.
In the next section, we'll explain how to add **TFHE-rs** as a dependency to the project and start using it to perform FHE computations.
## Using TFHE-rs and its APIs
To use **TFHE-rs**, you need to add it as a dependency to `tfhe-example`.
The `Cargo.toml` file is located at the root of the project. Initially, the file is minimal and doesn't contain any dependencies:
```toml
[package]
name = "tfhe-example"
version = "0.1.0"
edition = "2021"
[dependencies]
```
For x86 Unix systems, add the following configuration to include **TFHE-rs**:
```toml
tfhe = { version = "0.9.1", features = ["integer", "x86_64-unix"]}
```
Your updated `Cargo.toml` file should look like this:
```toml
[package]
name = "tfhe-example"
version = "0.1.0"
edition = "2021"
[dependencies]
tfhe = { version = "0.9.1", features = ["integer", "x86_64-unix"]}
```
If you are on a different platform please refer to the [installation documentation](installation.md) for configuration options of other supported platforms.
Now that the project has **TFHE-rs** as a dependency here are the detailed steps to use its high-level API:
1. Import the **TFHE-rs** prelude with the following Rust code: `use tfhe::prelude::*;`
2. Client-side: [configure and generate keys](../fundamentals/configure-and-generate-keys.md)
3. Client-side: [encrypt data](../fundamentals/encrypt-data.md)
4. Server-side: [set the server key](../fundamentals/set-the-server-key.md)
@@ -44,20 +116,4 @@ fn main() {
}
```
The default configuration for x86 Unix machines is as follows:
```toml
tfhe = { version = "0.8.0", features = ["integer", "x86_64-unix"]}
```
Refer to the [installation documentation](installation.md) for configuration options of different platforms. Learn more about homomorphic type features in the [configuration documentation.](../guides/rust\_configuration.md)
## Step1: Importing
**TFHE-rs** uses `traits` to implement consistent APIs and generic functions. To use `traits`, they must be in scope.
The `prelude` pattern provides a convenient way to globally import all important **TFHE-rs** traits at once. This approach saves time and avoids confusion.
```rust
use tfhe::prelude::*;
```
You can learn more about homomorphic types and associated compilation features in the [configuration documentation.](../guides/rust\_configuration.md)

View File

@@ -2,11 +2,24 @@
This document describes the array types provided by the High-level API.
These new encrypted types allow you to easily perform array and tensor operations on encrypted data, taking care of the iteration and shape logic for you.
It also implements efficient algorithms in some cases, like summing elements of an array.
The following example shows a complete workflow of working with encrypted arrays, including:
- Generating keys
- Encrypting arrays of integers
- Performing operations such as:
- slicing arrays
- computing on a sub array, adding encrypted data to it
- computing on a sub array, adding clear data to it
- Decrypting the result, getting back a Rust `Vec` of decrypted values
```toml
# Cargo.toml
[dependencies]
tfhe = { version = "0.8.0", features = ["integer", "x86_64-unix"] }
tfhe = { version = "0.9.1", features = ["integer", "x86_64-unix"] }
```
```rust

View File

@@ -16,7 +16,7 @@ You can load serialized data with the `unversionize` function, even in newer ver
[dependencies]
# ...
tfhe = { version = "0.8.0", features = ["integer","x86_64-unix"]}
tfhe = { version = "0.9.1", features = ["integer","x86_64-unix"]}
tfhe-versionable = "0.2.0"
bincode = "1.3.3"
```
@@ -83,67 +83,6 @@ When possible, data will be upgraded automatically without any kind of interract
You will find below a list of breaking changes and how to upgrade them.
# 0.6 -> 0.7
- `tfhe::integer::ciphertext::CompactCiphertextList`:
in 0.6, these lists of ciphertext were statically typed and homogeneous. Since 0.7, they are heterogeneous. The new version stores, for each element, information about its type (Signed, Unsigned or Boolean). Since this information was not stored before, the list is set to be made of `Unsigned` integers by default. If that is not the case, you can set its type using the following snippet:
# 0.8 -> 0.9
```rust
use std::io::Cursor;
use tfhe::integer::ciphertext::{
CompactCiphertextList, DataKind, IntegerCompactCiphertextListExpansionMode,
SignedRadixCiphertext,
};
use tfhe::integer::{ClientKey, CompactPublicKey};
use tfhe::shortint::parameters::classic::compact_pk::PARAM_MESSAGE_2_CARRY_2_COMPACT_PK_KS_PBS;
use tfhe_versionable::{Unversionize, Versionize};
pub fn main() {
let fhe_params = PARAM_MESSAGE_2_CARRY_2_COMPACT_PK_KS_PBS;
let num_blocks = 4usize;
let serialized_data = {
let client_key = ClientKey::new(fhe_params);
let pk = CompactPublicKey::new(&client_key);
// Encrypt a negative value
let compact_ct = CompactCiphertextList::builder(&pk).push(u8::MAX).build();
// Versionize the data and store it
let mut serialized_data = Vec::new();
let versioned_client_key = client_key.versionize();
let versioned_ct = compact_ct.versionize();
bincode::serialize_into(&mut serialized_data, &versioned_client_key).unwrap();
bincode::serialize_into(&mut serialized_data, &versioned_ct).unwrap();
serialized_data
};
// Now load the data, after potential breaking changes in the data format
let mut serialized_data = Cursor::new(serialized_data);
let versioned_client_key = bincode::deserialize_from(&mut serialized_data).unwrap();
let versioned_ct = bincode::deserialize_from(&mut serialized_data).unwrap();
let client_key = ClientKey::unversionize(versioned_client_key).unwrap();
let mut compact_ct = CompactCiphertextList::unversionize(versioned_ct).unwrap();
// Reinterpret the data as needed after the load, here we simulate the need to load Unsigned
// data
compact_ct
.reinterpret_data(&[DataKind::Signed(num_blocks)])
.unwrap();
let expander = compact_ct
.expand(IntegerCompactCiphertextListExpansionMode::NoCastingAndNoUnpacking)
.unwrap();
let expanded = expander.get::<SignedRadixCiphertext>(0).unwrap().unwrap();
let decrypted: i8 = client_key.decrypt_signed_radix(&expanded);
// -1i8 == u8::MAX
assert_eq!(-1i8, decrypted);
}
```
- `tfhe::{CompactFheInt, CompactFheUint, CompactFheIntList, CompactFheUintList}`:
The types have been deprecated; they are only kept in **TFHE-rs** for backward compatibility. They can now be accessed using the `tfhe::high_level_api::backward_compatibility::integers` module. The only functionality that is still supported is to unversionize them and expand them into regular `FheInt`, `FheUint`, `Vec<FheInt>` and `Vec<FheUint>`:
```Rust
let loaded_ct = CompactFheUint8::unversionize(versioned_ct).unwrap();
let ct = loaded_ct.expand();
```
Starting with v0.7, this compact list functionality is provided by the `tfhe::CompactCiphertextList` type.
We will share code to manage outdated data for this breaking change here shortly.

View File

@@ -19,13 +19,13 @@ To use the **TFHE-rs** GPU backend in your project, add the following dependency
If you are using an `x86` machine:
```toml
tfhe = { version = "0.8.0", features = [ "boolean", "shortint", "integer", "x86_64-unix", "gpu" ] }
tfhe = { version = "0.9.1", features = [ "boolean", "shortint", "integer", "x86_64-unix", "gpu" ] }
```
If you are using an `ARM` machine:
```toml
tfhe = { version = "0.8.0", features = [ "boolean", "shortint", "integer", "aarch64-unix", "gpu" ] }
tfhe = { version = "0.9.1", features = [ "boolean", "shortint", "integer", "aarch64-unix", "gpu" ] }
```
{% hint style="success" %}
@@ -183,3 +183,75 @@ Please refer to the [GPU benchmarks](../getting_started/benchmarks/gpu_benchmark
## Warning
When measuring GPU times on your own on Linux, set the environment variable `CUDA_MODULE_LOADING=EAGER` to avoid CUDA API overheads during the first kernel execution.
## Compressing ciphertexts after some homomorphic computation on the GPU
You can compress ciphertexts using the GPU, even after computations, just like on the [CPU](../fundamentals/compress.md#compression-ciphertexts-after-some-homomorphic-computation).
The way to do it is very similar to how it's done on the CPU.
The following example shows how to compress and decompress a list containing 4 messages:
- One 32-bits integer
- One 64-bit integer
- One Boolean
- One 2-bit integer
```rust
use tfhe::prelude::*;
use tfhe::shortint::parameters::{
    COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64, PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
};
use tfhe::{
    set_server_key, CompressedCiphertextList, CompressedCiphertextListBuilder, FheBool,
    FheInt64, FheUint16, FheUint2, FheUint32,
};
fn main() {
    // Enable compression support in addition to the compute parameters.
    let config =
        tfhe::ConfigBuilder::with_custom_parameters(PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64)
            .enable_compression(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64)
            .build();
    let ck = tfhe::ClientKey::generate(config);
    // Decompress the server key directly onto the GPU; subsequent homomorphic
    // operations on this thread will run there.
    let compressed_server_key = tfhe::CompressedServerKey::new(&ck);
    let gpu_key = compressed_server_key.decompress_to_gpu();
    set_server_key(gpu_key);
    // Encrypt four values of different types.
    let ct1 = FheUint32::encrypt(17_u32, &ck);
    let ct2 = FheInt64::encrypt(-1i64, &ck);
    let ct3 = FheBool::encrypt(false, &ck);
    let ct4 = FheUint2::encrypt(3u8, &ck);
    // Pack the heterogeneous ciphertexts into a single compressed list.
    let compressed_list = CompressedCiphertextListBuilder::new()
        .push(ct1)
        .push(ct2)
        .push(ct3)
        .push(ct4)
        .build()
        .unwrap();
    // The compressed list can be serialized like any other ciphertext.
    let serialized = bincode::serialize(&compressed_list).unwrap();
    println!("Serialized size: {} bytes", serialized.len());
    let compressed_list: CompressedCiphertextList = bincode::deserialize(&serialized).unwrap();
    // Extract each element back at its index, with the expected type.
    let a: FheUint32 = compressed_list.get(0).unwrap().unwrap();
    let b: FheInt64 = compressed_list.get(1).unwrap().unwrap();
    let c: FheBool = compressed_list.get(2).unwrap().unwrap();
    let d: FheUint2 = compressed_list.get(3).unwrap().unwrap();
    // Decrypt and check that compression/decompression round-tripped the values.
    let a: u32 = a.decrypt(&ck);
    assert_eq!(a, 17);
    let b: i64 = b.decrypt(&ck);
    assert_eq!(b, -1);
    let c = c.decrypt(&ck);
    assert!(!c);
    let d: u8 = d.decrypt(&ck);
    assert_eq!(d, 3);
}
```

View File

@@ -9,7 +9,7 @@ Welcome to this tutorial about `TFHE-rs` `core_crypto` module.
To use `TFHE-rs`, it first has to be added as a dependency in the `Cargo.toml`:
```toml
tfhe = { version = "0.8.0", features = [ "x86_64-unix" ] }
tfhe = { version = "0.9.1", features = [ "x86_64-unix" ] }
```
This enables the `x86_64-unix` feature to have efficient implementations of various algorithms for `x86_64` CPUs on a Unix-like system. The 'unix' suffix indicates that the `UnixSeeder`, which uses `/dev/random` to generate random numbers, is activated as a fallback if no hardware number generator is available (like `rdseed` on `x86_64` or if the [`Randomization Services`](https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc) on Apple platforms are not available). To avoid having the `UnixSeeder` as a potential fallback or to run on non-Unix systems (e.g., Windows), the `x86_64` feature is sufficient.
@@ -19,19 +19,19 @@ For Apple Silicon, the `aarch64-unix` or `aarch64` feature should be enabled. `a
In short: For `x86_64`-based machines running Unix-like OSes:
```toml
tfhe = { version = "0.8.0", features = ["x86_64-unix"] }
tfhe = { version = "0.9.1", features = ["x86_64-unix"] }
```
For Apple Silicon or aarch64-based machines running Unix-like OSes:
```toml
tfhe = { version = "0.8.0", features = ["aarch64-unix"] }
tfhe = { version = "0.9.1", features = ["aarch64-unix"] }
```
For `x86_64`-based machines with the [`rdseed instruction`](https://en.wikipedia.org/wiki/RDRAND) running Windows:
```toml
tfhe = { version = "0.8.0", features = ["x86_64"] }
tfhe = { version = "0.9.1", features = ["x86_64"] }
```
### Commented code to double a 2-bit message in a leveled fashion and using a PBS with the `core_crypto` module.

View File

@@ -25,7 +25,7 @@ To use the `FheUint8` type, enable the `integer` feature:
[dependencies]
# Default configuration for x86 Unix machines:
tfhe = { version = "0.8.0", features = ["integer", "x86_64-unix"]}
tfhe = { version = "0.9.1", features = ["integer", "x86_64-unix"]}
```
Refer to the [installation guide](../getting\_started/installation.md) for other configurations.

View File

@@ -18,7 +18,7 @@ This function returns a Boolean (`true` or `false`) so that the total count of `
# Cargo.toml
# Default configuration for x86 Unix machines:
tfhe = { version = "0.8.0", features = ["integer", "x86_64-unix"]}
tfhe = { version = "0.9.1", features = ["integer", "x86_64-unix"]}
```
Refer to the [installation](../getting\_started/installation.md) for other configurations.

View File

@@ -0,0 +1,82 @@
// Prints the cryptographic parameter sets referenced by the documentation
// benchmarks: CPU/GPU integer parameters, CPU/GPU PBS parameters per
// precision, and the ZK-PoK related parameter sets.
use tfhe::keycache::NamedParam;
use tfhe::shortint::parameters::classic::gaussian::p_fail_2_minus_64::ks_pbs::{
    PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M64, PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64,
    PARAM_MESSAGE_3_CARRY_3_KS_PBS_GAUSSIAN_2M64, PARAM_MESSAGE_4_CARRY_4_KS_PBS_GAUSSIAN_2M64,
};
use tfhe::shortint::parameters::classic::tuniform::p_fail_2_minus_64::ks_pbs::PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use tfhe::shortint::parameters::compact_public_key_only::p_fail_2_minus_64::ks_pbs::PARAM_PKE_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use tfhe::shortint::parameters::key_switching::p_fail_2_minus_64::ks_pbs::PARAM_KEYSWITCH_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use tfhe::shortint::parameters::multi_bit::p_fail_2_minus_64::ks_pbs_gpu::{
    PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M64,
    PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64,
    PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_3_CARRY_3_KS_PBS_GAUSSIAN_2M64,
};

pub fn main() {
    // Separators between the major sections of the report; the "wide" form
    // leaves two blank lines above, the "narrow" form only one.
    let wide_break =
        "\n\n===========================================================================\n\n";
    let narrow_break =
        "\n===========================================================================\n\n";

    println!("CPU Integer parameters:\n");
    println!("{}", PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64.name());
    println!("{PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64:?}");
    println!("{wide_break}");

    println!("CUDA GPU Integer parameters:\n");
    println!(
        "{}",
        PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64.name()
    );
    println!("{PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64:?}");
    println!("{wide_break}");

    println!("CPU PBS parameters:\n");
    let cpu_pbs_params = [
        PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M64,
        PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64,
        PARAM_MESSAGE_3_CARRY_3_KS_PBS_GAUSSIAN_2M64,
        PARAM_MESSAGE_4_CARRY_4_KS_PBS_GAUSSIAN_2M64,
    ];
    for params in cpu_pbs_params {
        // Usable plaintext precision is log2(message space * carry space).
        let precision_bits = (params.message_modulus.0 * params.carry_modulus.0).ilog2();
        println!("Precision {precision_bits} bits");
        println!("{}", params.name());
        println!("{params:?}\n");
    }
    println!("{narrow_break}");

    println!("CUDA GPU PBS parameters:\n");
    let gpu_pbs_params = [
        PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M64,
        PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64,
        PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_3_CARRY_3_KS_PBS_GAUSSIAN_2M64,
    ];
    for params in gpu_pbs_params {
        let precision_bits = (params.message_modulus.0 * params.carry_modulus.0).ilog2();
        println!("Precision {precision_bits} bits");
        println!("{}", params.name());
        println!("{params:?}\n");
    }
    println!("{narrow_break}");

    println!("ZK POK parameters:\n");
    println!("Compact Public Key parameters (encryption + ZK):");
    println!(
        "{}",
        stringify!(PARAM_PKE_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64)
    );
    println!("{PARAM_PKE_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64:?}\n");
    println!("Corresponding compute FHE parameters:");
    println!(
        "{}",
        stringify!(PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64)
    );
    println!("{PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64:?}\n");
    println!("Keyswitch from encryption + ZK to compute parameters:");
    println!(
        "{}",
        stringify!(PARAM_KEYSWITCH_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64)
    );
    println!("{PARAM_KEYSWITCH_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64:?}");
}

View File

@@ -216,7 +216,8 @@ pub fn keyswitch_lwe_ciphertext_native_mod_compatible<Scalar, KSKCont, InputCont
{
let decomposition_iter = decomposer.decompose(input_mask_element);
// Loop over the levels
for (level_key_ciphertext, decomposed) in keyswitch_key_block.iter().zip(decomposition_iter)
for (level_key_ciphertext, decomposed) in
keyswitch_key_block.iter().rev().zip(decomposition_iter)
{
slice_wrapping_sub_scalar_mul_assign(
output_lwe_ciphertext.as_mut(),
@@ -304,7 +305,8 @@ pub fn keyswitch_lwe_ciphertext_other_mod<Scalar, KSKCont, InputCont, OutputCont
{
let decomposition_iter = decomposer.decompose(input_mask_element);
// Loop over the levels
for (level_key_ciphertext, decomposed) in keyswitch_key_block.iter().zip(decomposition_iter)
for (level_key_ciphertext, decomposed) in
keyswitch_key_block.iter().rev().zip(decomposition_iter)
{
slice_wrapping_sub_scalar_mul_assign_custom_modulus(
output_lwe_ciphertext.as_mut(),
@@ -436,7 +438,8 @@ pub fn keyswitch_lwe_ciphertext_with_scalar_change<
{
let decomposition_iter = input_decomposer.decompose(input_mask_element);
// Loop over the levels
for (level_key_ciphertext, decomposed) in keyswitch_key_block.iter().zip(decomposition_iter)
for (level_key_ciphertext, decomposed) in
keyswitch_key_block.iter().rev().zip(decomposition_iter)
{
slice_wrapping_sub_scalar_mul_assign(
output_lwe_ciphertext.as_mut(),
@@ -799,7 +802,7 @@ pub fn par_keyswitch_lwe_ciphertext_with_thread_count_native_mod_compatible<
let decomposition_iter = decomposer.decompose(input_mask_element);
// Loop over the levels
for (level_key_ciphertext, decomposed) in
keyswitch_key_block.iter().zip(decomposition_iter)
keyswitch_key_block.iter().rev().zip(decomposition_iter)
{
slice_wrapping_sub_scalar_mul_assign(
buffer.as_mut(),
@@ -946,7 +949,7 @@ pub fn par_keyswitch_lwe_ciphertext_with_thread_count_other_mod<
let decomposition_iter = decomposer.decompose(input_mask_element);
// Loop over the levels
for (level_key_ciphertext, decomposed) in
keyswitch_key_block.iter().zip(decomposition_iter)
keyswitch_key_block.iter().rev().zip(decomposition_iter)
{
slice_wrapping_sub_scalar_mul_assign_custom_modulus(
buffer.as_mut(),

View File

@@ -159,7 +159,6 @@ pub fn generate_lwe_keyswitch_key_native_mod_compatible<
{
// We fill the buffer with the powers of the key elements
for (level, message) in (1..=decomp_level_count.0)
.rev()
.map(DecompositionLevel)
.zip(decomposition_plaintexts_buffer.iter_mut())
{
@@ -234,7 +233,6 @@ pub fn generate_lwe_keyswitch_key_other_mod<
{
// We fill the buffer with the powers of the key elements
for (level, message) in (1..=decomp_level_count.0)
.rev()
.map(DecompositionLevel)
.zip(decomposition_plaintexts_buffer.iter_mut())
{
@@ -416,7 +414,6 @@ pub fn generate_seeded_lwe_keyswitch_key<
{
// We fill the buffer with the powers of the key elmements
for (level, message) in (1..=decomp_level_count.0)
.rev()
.map(DecompositionLevel)
.zip(decomposition_plaintexts_buffer.iter_mut())
{

View File

@@ -177,7 +177,7 @@ pub fn keyswitch_lwe_ciphertext_into_glwe_ciphertext<Scalar, KeyCont, InputCont,
// Loop over the number of levels:
// We compute the multiplication of a ciphertext from the private functional
// keyswitching key with a piece of the decomposition and subtract it to the buffer
for (level_key_cipher, decomposed) in keyswitch_key_block.iter().zip(decomp) {
for (level_key_cipher, decomposed) in keyswitch_key_block.iter().rev().zip(decomp) {
slice_wrapping_sub_scalar_mul_assign(
output_glwe_ciphertext.as_mut(),
level_key_cipher.as_ref(),

View File

@@ -134,7 +134,6 @@ pub fn generate_lwe_packing_keyswitch_key<
{
// We fill the buffer with the powers of the key elements
for (level, mut messages) in (1..=decomp_level_count.0)
.rev()
.map(DecompositionLevel)
.zip(decomposition_plaintexts_buffer.chunks_exact_mut(polynomial_size.0))
{
@@ -330,7 +329,6 @@ pub fn generate_seeded_lwe_packing_keyswitch_key<
{
// We fill the buffer with the powers of the key elements
for (level, mut messages) in (1..=decomp_level_count.0)
.rev()
.map(DecompositionLevel)
.zip(decomposition_plaintexts_buffer.chunks_exact_mut(polynomial_size.0))
{

View File

@@ -1,11 +1,57 @@
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::core_crypto::prelude::{Container, LweKeyswitchKey, UnsignedInteger};
use crate::core_crypto::prelude::{
CiphertextModulus, Container, ContainerMut, ContiguousEntityContainerMut, DecompositionBaseLog,
DecompositionLevelCount, LweKeyswitchKey, LweSize, UnsignedInteger,
};
#[derive(Version)]
pub struct LweKeyswitchKeyV0<C: Container>
where
C::Element: UnsignedInteger,
{
data: C,
decomp_base_log: DecompositionBaseLog,
decomp_level_count: DecompositionLevelCount,
output_lwe_size: LweSize,
ciphertext_modulus: CiphertextModulus<C::Element>,
}
impl<Scalar: UnsignedInteger, C: ContainerMut<Element = Scalar>> Upgrade<LweKeyswitchKey<C>>
for LweKeyswitchKeyV0<C>
{
type Error = std::convert::Infallible;
fn upgrade(self) -> Result<LweKeyswitchKey<C>, Self::Error> {
let Self {
data,
decomp_base_log,
decomp_level_count,
output_lwe_size,
ciphertext_modulus,
} = self;
let mut new_ksk = LweKeyswitchKey::from_container(
data,
decomp_base_log,
decomp_level_count,
output_lwe_size,
ciphertext_modulus,
);
// Invert levels
for mut ksk_block in new_ksk.iter_mut() {
ksk_block.reverse();
}
Ok(new_ksk)
}
}
#[derive(VersionsDispatch)]
pub enum LweKeyswitchKeyVersions<C: Container>
where
C::Element: UnsignedInteger,
{
V0(LweKeyswitchKey<C>),
V0(LweKeyswitchKeyV0<C>),
V1(LweKeyswitchKey<C>),
}

View File

@@ -1,11 +1,60 @@
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::core_crypto::prelude::{Container, LwePackingKeyswitchKey, UnsignedInteger};
use crate::core_crypto::prelude::{
CiphertextModulus, Container, ContainerMut, ContiguousEntityContainerMut, DecompositionBaseLog,
DecompositionLevelCount, GlweSize, LwePackingKeyswitchKey, PolynomialSize, UnsignedInteger,
};
#[derive(Version)]
pub struct LwePackingKeyswitchKeyV0<C: Container>
where
C::Element: UnsignedInteger,
{
data: C,
decomp_base_log: DecompositionBaseLog,
decomp_level_count: DecompositionLevelCount,
output_glwe_size: GlweSize,
output_polynomial_size: PolynomialSize,
ciphertext_modulus: CiphertextModulus<C::Element>,
}
impl<Scalar: UnsignedInteger, C: ContainerMut<Element = Scalar>> Upgrade<LwePackingKeyswitchKey<C>>
for LwePackingKeyswitchKeyV0<C>
{
type Error = std::convert::Infallible;
fn upgrade(self) -> Result<LwePackingKeyswitchKey<C>, Self::Error> {
let Self {
data,
decomp_base_log,
decomp_level_count,
output_glwe_size,
output_polynomial_size,
ciphertext_modulus,
} = self;
let mut new_pksk = LwePackingKeyswitchKey::from_container(
data,
decomp_base_log,
decomp_level_count,
output_glwe_size,
output_polynomial_size,
ciphertext_modulus,
);
// Invert levels
for mut pksk_block in new_pksk.iter_mut() {
pksk_block.reverse();
}
Ok(new_pksk)
}
}
#[derive(VersionsDispatch)]
pub enum LwePackingKeyswitchKeyVersions<C: Container>
where
C::Element: UnsignedInteger,
{
V0(LwePackingKeyswitchKey<C>),
V0(LwePackingKeyswitchKeyV0<C>),
V1(LwePackingKeyswitchKey<C>),
}

View File

@@ -1,11 +1,29 @@
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::core_crypto::prelude::{Container, SeededLweKeyswitchKey, UnsignedInteger};
#[derive(Version)]
pub struct UnsupportedSeededLweKeyswitchKeyV0;
impl<Scalar: UnsignedInteger, C: Container<Element = Scalar>> Upgrade<SeededLweKeyswitchKey<C>>
for UnsupportedSeededLweKeyswitchKeyV0
{
type Error = crate::Error;
fn upgrade(self) -> Result<SeededLweKeyswitchKey<C>, Self::Error> {
Err(crate::Error::new(
"Unable to load SeededLweKeyswitchKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum SeededLweKeyswitchKeyVersions<C: Container>
where
C::Element: UnsignedInteger,
{
V0(SeededLweKeyswitchKey<C>),
V0(UnsupportedSeededLweKeyswitchKeyV0),
V1(SeededLweKeyswitchKey<C>),
}

View File

@@ -1,11 +1,29 @@
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::core_crypto::prelude::{Container, SeededLwePackingKeyswitchKey, UnsignedInteger};
#[derive(Version)]
pub struct UnsupportedSeededLwePackingKeyswitchKeyV0;
impl<Scalar: UnsignedInteger, C: Container<Element = Scalar>>
Upgrade<SeededLwePackingKeyswitchKey<C>> for UnsupportedSeededLwePackingKeyswitchKeyV0
{
type Error = crate::Error;
fn upgrade(self) -> Result<SeededLwePackingKeyswitchKey<C>, Self::Error> {
Err(crate::Error::new(
"Unable to load SeededLwePackingKeyswitchKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum SeededLwePackingKeyswitchKeyVersions<C: Container>
where
C::Element: UnsignedInteger,
{
V0(SeededLwePackingKeyswitchKey<C>),
V0(UnsupportedSeededLwePackingKeyswitchKeyV0),
V1(SeededLwePackingKeyswitchKey<C>),
}

View File

@@ -433,6 +433,17 @@ pub trait ContiguousEntityContainerMut: ContiguousEntityContainer + AsMut<[Self:
.map(|(elt, meta)| Self::SelfMutView::<'_>::create_from(elt, meta))
}
fn reverse(&mut self) {
let entity_view_pod_size = self.get_entity_view_pod_size();
let container = self.as_mut();
container.reverse();
for entity_slot in self.as_mut().chunks_exact_mut(entity_view_pod_size) {
entity_slot.reverse();
}
}
fn par_iter_mut<'this>(
&'this mut self,
) -> ParallelChunksExactWrappingLendingIteratorMut<

View File

@@ -77,7 +77,7 @@ use crate::core_crypto::prelude::*;
/// );
/// }
/// ```
#[derive(Clone, serde::Serialize, serde::Deserialize, Versionize)]
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize, Versionize)]
#[versionize(CompressedModulusSwitchedGlweCiphertextVersions)]
pub struct CompressedModulusSwitchedGlweCiphertext<Scalar: UnsignedInteger> {
pub(crate) packed_integers: PackedIntegers<Scalar>,

View File

@@ -9,6 +9,7 @@ use crate::core_crypto::commons::math::random::{RandomGenerable, UniformBinary};
use crate::core_crypto::commons::parameters::*;
use crate::core_crypto::commons::traits::*;
use crate::core_crypto::entities::*;
use crate::named::Named;
/// A [`GLWE secret key`](`GlweSecretKey`)
///
@@ -28,6 +29,10 @@ pub struct GlweSecretKey<C: Container> {
polynomial_size: PolynomialSize,
}
impl<C: Container> Named for GlweSecretKey<C> {
const NAME: &'static str = "core_crypto::GlweSecretKey";
}
impl<T, C: Container<Element = T>> AsRef<[T]> for GlweSecretKey<C> {
fn as_ref(&self) -> &[T] {
self.data.as_ref()

View File

@@ -8,6 +8,7 @@ use crate::core_crypto::commons::generators::SecretRandomGenerator;
use crate::core_crypto::commons::math::random::{RandomGenerable, UniformBinary};
use crate::core_crypto::commons::parameters::LweDimension;
use crate::core_crypto::commons::traits::*;
use crate::named::Named;
/// An [`LWE secret key`](`LweSecretKey`).
///
@@ -25,6 +26,10 @@ pub struct LweSecretKey<C: Container> {
data: C,
}
impl<C: Container> Named for LweSecretKey<C> {
const NAME: &'static str = "core_crypto::LweSecretKey";
}
impl<T, C: Container<Element = T>> AsRef<[T]> for LweSecretKey<C> {
fn as_ref(&self) -> &[T] {
self.data.as_ref()

View File

@@ -4,7 +4,7 @@ use crate::conformance::ParameterSetConformant;
use crate::core_crypto::backward_compatibility::entities::packed_integers::PackedIntegersVersions;
use crate::core_crypto::prelude::*;
#[derive(Clone, serde::Serialize, serde::Deserialize, Versionize)]
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize, Versionize)]
#[versionize(PackedIntegersVersions)]
pub struct PackedIntegers<Scalar: UnsignedInteger> {
pub(crate) packed_coeffs: Vec<Scalar>,

View File

@@ -174,7 +174,8 @@ pub fn shrinking_keyswitch_lwe_ciphertext<Scalar, KSKCont, InputCont, OutputCont
{
let decomposition_iter = decomposer.decompose(input_mask_element);
// Loop over the levels
for (level_key_ciphertext, decomposed) in keyswitch_key_block.iter().zip(decomposition_iter)
for (level_key_ciphertext, decomposed) in
keyswitch_key_block.iter().rev().zip(decomposition_iter)
{
slice_wrapping_sub_scalar_mul_assign(
output_lwe_ciphertext.as_mut(),

View File

@@ -43,9 +43,9 @@ where
for<'a> FheBackendArraySlice<'a, Backend, Id>:
BitAnd<FheBackendArraySlice<'a, Backend, Id>, Output = FheBackendArray<Backend, Id>>,
for<'a> &'a FheBackendArray<Backend, Id>: BitAnd<FheBackendArray<Backend, Id>, Output = FheBackendArray<Backend, Id>>
+ BitAnd<&'a FheBackendArray<Backend, Id>, Output = FheBackendArray<Backend, Id>>,
// for the 2 tested slice/array ops
for<'a> &'a FheBackendArray<Backend, Id>: BitAnd<FheBackendArraySlice<'a, Backend, Id>, Output = FheBackendArray<Backend, Id>>
+ BitAnd<&'a FheBackendArray<Backend, Id>, Output = FheBackendArray<Backend, Id>>
// for the 2 tested slice/array ops
+ BitAnd<FheBackendArraySlice<'a, Backend, Id>, Output = FheBackendArray<Backend, Id>>
+ BitAnd<&'a FheBackendArray<Backend, Id>, Output = FheBackendArray<Backend, Id>>,
for<'a> FheBackendArraySlice<'a, Backend, Id>:
BitAnd<&'a FheBackendArray<Backend, Id>, Output = FheBackendArray<Backend, Id>>,

View File

@@ -1,16 +1,10 @@
#![allow(deprecated)]
use serde::{Deserialize, Serialize};
use tfhe_versionable::{Upgrade, Version, Versionize, VersionsDispatch};
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::high_level_api::booleans::{
InnerBoolean, InnerBooleanVersionOwned, InnerCompressedFheBool,
};
use crate::integer::ciphertext::{CompactCiphertextList, DataKind};
use crate::prelude::CiphertextList;
use crate::{
CompactCiphertextList as HlCompactCiphertextList, CompressedFheBool, Error, FheBool, Tag,
};
use crate::{CompressedFheBool, FheBool, Tag};
use std::convert::Infallible;
// Manual impl
@@ -42,11 +36,6 @@ pub enum FheBoolVersions {
V1(FheBool),
}
#[derive(VersionsDispatch)]
pub enum CompactFheBoolVersions {
V0(CompactFheBool),
}
#[derive(VersionsDispatch)]
pub enum InnerCompressedFheBoolVersions {
V0(InnerCompressedFheBool),
@@ -75,88 +64,3 @@ pub enum CompressedFheBoolVersions {
V0(CompressedFheBoolV0),
V1(CompressedFheBool),
}
#[derive(VersionsDispatch)]
pub enum CompactFheBoolListVersions {
V0(CompactFheBoolList),
}
// Basic support for deprecated compact list, to be able to load them and convert them to something
// else
#[derive(Versionize)]
#[versionize(CompactFheBoolVersions)]
#[deprecated(since = "0.7.0", note = "Use CompactCiphertextList instead")]
pub struct CompactFheBool {
pub(in crate::high_level_api) list: CompactCiphertextList,
}
impl CompactFheBool {
/// Expand to a [FheBool]
///
/// See [CompactFheBool] example.
pub fn expand(mut self) -> Result<FheBool, Error> {
// This compact list might have been loaded from an homogenous compact list without type
// info
self.list
.info
.iter_mut()
.for_each(|info| *info = DataKind::Boolean);
let hl_list = HlCompactCiphertextList {
inner: self.list,
tag: Tag::default(),
};
let list = hl_list.expand()?;
let block = list
.inner
.get::<crate::integer::BooleanBlock>(0)
.map(|b| b.ok_or_else(|| Error::new("Failed to expand compact list".to_string())))??;
let mut ciphertext = FheBool::new(block, Tag::default());
ciphertext.ciphertext.move_to_device_of_server_key_if_set();
Ok(ciphertext)
}
}
#[derive(Versionize)]
#[versionize(CompactFheBoolListVersions)]
#[deprecated(since = "0.7.0", note = "Use CompactCiphertextList instead")]
pub struct CompactFheBoolList {
list: CompactCiphertextList,
}
impl CompactFheBoolList {
/// Expand to a Vec<[FheBool]>
pub fn expand(mut self) -> Result<Vec<FheBool>, Error> {
// This compact list might have been loaded from an homogenous compact list without type
// info
self.list
.info
.iter_mut()
.for_each(|info| *info = DataKind::Boolean);
let hl_list = HlCompactCiphertextList {
inner: self.list,
tag: Tag::default(),
};
let list = hl_list.expand()?;
let len = list.len();
(0..len)
.map(|idx| {
let block = list
.inner
.get::<crate::integer::BooleanBlock>(idx)
.map(|list| {
list.ok_or_else(|| Error::new("Failed to expand compact list".to_string()))
})??;
let mut ciphertext = FheBool::new(block, Tag::default());
ciphertext.ciphertext.move_to_device_of_server_key_if_set();
Ok(ciphertext)
})
.collect::<Result<Vec<_>, _>>()
}
}

View File

@@ -1,9 +1,7 @@
#![allow(deprecated)]
use std::convert::Infallible;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use tfhe_versionable::{Upgrade, Version, Versionize, VersionsDispatch};
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::high_level_api::global_state::with_cpu_internal_keys;
use crate::high_level_api::integers::*;
@@ -12,14 +10,13 @@ use crate::integer::backward_compatibility::ciphertext::{
CompressedModulusSwitchedSignedRadixCiphertextTFHE06,
};
use crate::integer::ciphertext::{
BaseRadixCiphertext, BaseSignedRadixCiphertext, CompactCiphertextList,
BaseRadixCiphertext, BaseSignedRadixCiphertext,
CompressedRadixCiphertext as IntegerCompressedRadixCiphertext,
CompressedSignedRadixCiphertext as IntegerCompressedSignedRadixCiphertext, DataKind,
CompressedSignedRadixCiphertext as IntegerCompressedSignedRadixCiphertext,
};
use crate::prelude::CiphertextList;
use crate::shortint::ciphertext::CompressedModulusSwitchedCiphertext;
use crate::shortint::{Ciphertext, ServerKey};
use crate::{CompactCiphertextList as HlCompactCiphertextList, Error, Tag};
use crate::Tag;
use serde::{Deserialize, Serialize};
use self::signed::RadixCiphertext as SignedRadixCiphertext;
@@ -152,11 +149,6 @@ pub enum FheIntVersions<Id: FheIntId> {
V1(FheInt<Id>),
}
#[derive(VersionsDispatch)]
pub enum CompactFheIntVersions<Id: FheIntId> {
V0(CompactFheInt<Id>),
}
#[derive(Version)]
pub struct CompressedFheIntV0<Id>
where
@@ -184,11 +176,6 @@ pub enum CompressedFheIntVersions<Id: FheIntId> {
V1(CompressedFheInt<Id>),
}
#[derive(VersionsDispatch)]
pub enum CompactFheIntListVersions<Id: FheIntId> {
V0(CompactFheIntList<Id>),
}
#[derive(Version)]
pub struct FheUintV0<Id: FheUintId> {
pub(in crate::high_level_api) ciphertext: UnsignedRadixCiphertext,
@@ -213,11 +200,6 @@ pub enum FheUintVersions<Id: FheUintId> {
V1(FheUint<Id>),
}
#[derive(VersionsDispatch)]
pub enum CompactFheUintVersions<Id: FheUintId> {
V0(CompactFheUint<Id>),
}
#[derive(Version)]
pub struct CompressedFheUintV0<Id>
where
@@ -240,235 +222,3 @@ pub enum CompressedFheUintVersions<Id: FheUintId> {
V0(CompressedFheUintV0<Id>),
V1(CompressedFheUint<Id>),
}
#[derive(VersionsDispatch)]
pub enum CompactFheUintListVersions<Id: FheUintId> {
V0(CompactFheUintList<Id>),
}
// Basic support for deprecated compact list, to be able to load them and convert them to something
// else
#[derive(Clone, Versionize)]
#[versionize(CompactFheIntVersions)]
#[deprecated(since = "0.7.0", note = "Use CompactCiphertextList instead")]
pub struct CompactFheInt<Id: FheIntId> {
list: CompactCiphertextList,
id: Id,
}
impl<Id> CompactFheInt<Id>
where
Id: FheIntId,
{
/// Expand to a [FheInt]
pub fn expand(mut self) -> Result<FheInt<Id>, Error> {
// This compact list might have been loaded from an homogenous compact list without type
// info
self.list
.info
.iter_mut()
.for_each(|info| *info = DataKind::Signed(info.num_blocks()));
let hl_list = HlCompactCiphertextList {
inner: self.list,
tag: Tag::default(),
};
let list = hl_list.expand()?;
let ct = list
.inner
.get::<crate::integer::SignedRadixCiphertext>(0)
.map(|list| {
list.ok_or_else(|| Error::new("Failed to expand compact list".to_string()))
})??;
Ok(FheInt::new(ct, Tag::default()))
}
}
#[derive(Clone, Versionize)]
#[versionize(CompactFheIntListVersions)]
#[deprecated(since = "0.7.0", note = "Use CompactCiphertextList instead")]
pub struct CompactFheIntList<Id: FheIntId> {
list: CompactCiphertextList,
id: Id,
}
impl<Id> CompactFheIntList<Id>
where
Id: FheIntId,
{
/// Expand to a Vec<[FheInt]>
pub fn expand(mut self) -> Result<Vec<FheInt<Id>>, Error> {
// This compact list might have been loaded from an homogenous compact list without type
// info
self.list
.info
.iter_mut()
.for_each(|info| *info = DataKind::Signed(info.num_blocks()));
let hl_list = HlCompactCiphertextList {
inner: self.list,
tag: Tag::default(),
};
let list = hl_list.expand()?;
let len = list.len();
(0..len)
.map(|idx| {
let ct = list
.inner
.get::<crate::integer::SignedRadixCiphertext>(idx)
.map(|list| {
list.ok_or_else(|| Error::new("Failed to expand compact list".to_string()))
})??;
Ok(FheInt::new(ct, Tag::default()))
})
.collect::<Result<Vec<_>, _>>()
}
}
#[derive(Clone, Versionize)]
#[versionize(CompactFheUintVersions)]
#[deprecated(since = "0.7.0", note = "Use CompactCiphertextList instead")]
pub struct CompactFheUint<Id: FheUintId> {
list: CompactCiphertextList,
id: Id,
}
impl<Id> CompactFheUint<Id>
where
Id: FheUintId,
{
/// Expand to a [FheUint]
pub fn expand(mut self) -> Result<FheUint<Id>, Error> {
// This compact list might have been loaded from an homogenous compact list without type
// info
self.list
.info
.iter_mut()
.for_each(|info| *info = DataKind::Unsigned(info.num_blocks()));
let hl_list = HlCompactCiphertextList {
inner: self.list,
tag: Tag::default(),
};
let list = hl_list.expand()?;
let ct = list
.inner
.get::<crate::integer::RadixCiphertext>(0)
.map(|ct| {
ct.ok_or_else(|| Error::new("Failed to expand compact list".to_string()))
})??;
Ok(FheUint::new(ct, Tag::default()))
}
}
#[derive(Clone, Versionize)]
#[versionize(CompactFheUintListVersions)]
#[deprecated(since = "0.7.0", note = "Use CompactCiphertextList instead")]
pub struct CompactFheUintList<Id: FheUintId> {
list: CompactCiphertextList,
id: Id,
}
impl<Id> CompactFheUintList<Id>
where
Id: FheUintId,
{
/// Expand to a Vec<[FheUint]>
pub fn expand(mut self) -> Result<Vec<FheUint<Id>>, Error> {
// This compact list might have been loaded from an homogenous compact list without type
// info
self.list
.info
.iter_mut()
.for_each(|info| *info = DataKind::Unsigned(info.num_blocks()));
let hl_list = HlCompactCiphertextList {
inner: self.list,
tag: Tag::default(),
};
let list = hl_list.expand()?;
let len = list.len();
(0..len)
.map(|idx| {
let ct = list
.inner
.get::<crate::integer::RadixCiphertext>(idx)
.map(|ct| {
ct.ok_or_else(|| Error::new("Failed to expand compact list".to_string()))
})??;
Ok(FheUint::new(ct, Tag::default()))
})
.collect::<Result<Vec<_>, _>>()
}
}
macro_rules! static_int_type {
(num_bits: $num_bits:literal,) => {
::paste::paste! {
pub type [<Compact FheInt $num_bits>] = CompactFheInt<[<FheInt $num_bits Id>]>;
pub type [<Compact FheInt $num_bits List>] = CompactFheIntList<[<FheInt $num_bits Id>]>;
pub type [<Compact FheUint $num_bits>] = CompactFheUint<[<FheUint $num_bits Id>]>;
pub type [<Compact FheUint $num_bits List>] = CompactFheUintList<[<FheUint $num_bits Id>]>;
}
};
}
static_int_type! {
num_bits: 2,
}
static_int_type! {
num_bits: 4,
}
static_int_type! {
num_bits: 6,
}
static_int_type! {
num_bits: 8,
}
static_int_type! {
num_bits: 10,
}
static_int_type! {
num_bits: 12,
}
static_int_type! {
num_bits: 14,
}
static_int_type! {
num_bits: 16,
}
static_int_type! {
num_bits: 32,
}
static_int_type! {
num_bits: 64,
}
static_int_type! {
num_bits: 128,
}
static_int_type! {
num_bits: 160,
}
static_int_type! {
num_bits: 256,
}

View File

@@ -1,5 +1,4 @@
use crate::high_level_api::keys::*;
use crate::shortint::list_compression::{CompressionKey, CompressionPrivateKeys, DecompressionKey};
use crate::Tag;
use std::convert::Infallible;
use std::sync::Arc;
@@ -185,41 +184,32 @@ pub(crate) enum IntegerConfigVersions {
}
#[derive(Version)]
pub(crate) struct IntegerClientKeyV0 {
pub(crate) key: crate::integer::ClientKey,
pub(crate) wopbs_block_parameters: Option<crate::shortint::WopbsParameters>,
}
pub struct UnsupportedIntegerClientKeyV0;
#[derive(Version)]
pub(crate) struct IntegerClientKeyV1 {
pub(crate) key: crate::integer::ClientKey,
pub(crate) wopbs_block_parameters: Option<crate::shortint::WopbsParameters>,
pub(crate) dedicated_compact_private_key: Option<CompactPrivateKey>,
pub(crate) compression_key: Option<CompressionPrivateKeys>,
}
pub struct UnsupportedIntegerClientKeyV1;
impl Upgrade<IntegerClientKeyV1> for IntegerClientKeyV0 {
type Error = Infallible;
impl Upgrade<UnsupportedIntegerClientKeyV1> for UnsupportedIntegerClientKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<IntegerClientKeyV1, Self::Error> {
Ok(IntegerClientKeyV1 {
key: self.key,
wopbs_block_parameters: self.wopbs_block_parameters,
dedicated_compact_private_key: None,
compression_key: None,
})
fn upgrade(self) -> Result<UnsupportedIntegerClientKeyV1, Self::Error> {
Err(crate::Error::new(
"Unable to load IntegerClientKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
impl Upgrade<IntegerClientKeyV2> for IntegerClientKeyV1 {
type Error = Infallible;
impl Upgrade<IntegerClientKeyV2> for UnsupportedIntegerClientKeyV1 {
type Error = crate::Error;
fn upgrade(self) -> Result<IntegerClientKeyV2, Self::Error> {
Ok(IntegerClientKeyV2 {
key: self.key,
dedicated_compact_private_key: self.dedicated_compact_private_key,
compression_key: self.compression_key,
})
Err(crate::Error::new(
"Unable to load IntegerClientKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
@@ -247,52 +237,39 @@ impl Upgrade<IntegerClientKey> for IntegerClientKeyV2 {
#[derive(VersionsDispatch)]
#[allow(unused)]
pub(crate) enum IntegerClientKeyVersions {
V0(IntegerClientKeyV0),
V1(IntegerClientKeyV1),
V0(UnsupportedIntegerClientKeyV0),
V1(UnsupportedIntegerClientKeyV1),
V2(IntegerClientKeyV2),
V3(IntegerClientKey),
}
#[derive(Version)]
pub struct IntegerServerKeyV0 {
pub(crate) key: crate::integer::ServerKey,
pub(crate) wopbs_key: Option<crate::integer::wopbs::WopbsKey>,
}
pub struct UnsupportedIntegerServerKeyV0;
#[derive(Version)]
pub struct IntegerServerKeyV1 {
pub(crate) key: crate::integer::ServerKey,
pub(crate) wopbs_key: Option<crate::integer::wopbs::WopbsKey>,
pub(crate) cpk_key_switching_key_material:
Option<crate::integer::key_switching_key::KeySwitchingKeyMaterial>,
pub(crate) compression_key: Option<CompressionKey>,
pub(crate) decompression_key: Option<DecompressionKey>,
}
pub struct UnsupportedIntegerServerKeyV1;
impl Upgrade<IntegerServerKeyV1> for IntegerServerKeyV0 {
type Error = Infallible;
impl Upgrade<UnsupportedIntegerServerKeyV1> for UnsupportedIntegerServerKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<IntegerServerKeyV1, Self::Error> {
Ok(IntegerServerKeyV1 {
key: self.key,
wopbs_key: self.wopbs_key,
cpk_key_switching_key_material: None,
compression_key: None,
decompression_key: None,
})
fn upgrade(self) -> Result<UnsupportedIntegerServerKeyV1, Self::Error> {
Err(crate::Error::new(
"Unable to load IntegerServerKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
impl Upgrade<IntegerServerKeyV2> for IntegerServerKeyV1 {
type Error = Infallible;
impl Upgrade<IntegerServerKeyV2> for UnsupportedIntegerServerKeyV1 {
type Error = crate::Error;
fn upgrade(self) -> Result<IntegerServerKeyV2, Self::Error> {
Ok(IntegerServerKeyV2 {
key: self.key,
cpk_key_switching_key_material: self.cpk_key_switching_key_material,
compression_key: self.compression_key,
decompression_key: self.decompression_key,
})
Err(crate::Error::new(
"Unable to load IntegerServerKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
@@ -324,33 +301,30 @@ impl Upgrade<IntegerServerKey> for IntegerServerKeyV2 {
#[derive(VersionsDispatch)]
pub enum IntegerServerKeyVersions {
V0(IntegerServerKeyV0),
V1(IntegerServerKeyV1),
V0(UnsupportedIntegerServerKeyV0),
V1(UnsupportedIntegerServerKeyV1),
V2(IntegerServerKeyV2),
V3(IntegerServerKey),
}
#[derive(Version)]
pub struct IntegerCompressedServerKeyV0 {
pub(crate) key: crate::integer::CompressedServerKey,
}
pub struct UnsupportedIntegerCompressedServerKeyV0;
impl Upgrade<IntegerCompressedServerKey> for IntegerCompressedServerKeyV0 {
type Error = Infallible;
impl Upgrade<IntegerCompressedServerKey> for UnsupportedIntegerCompressedServerKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<IntegerCompressedServerKey, Self::Error> {
Ok(IntegerCompressedServerKey {
key: self.key,
cpk_key_switching_key_material: None,
compression_key: None,
decompression_key: None,
})
Err(crate::Error::new(
"Unable to load IntegerCompressedServerKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum IntegerCompressedServerKeyVersions {
V0(IntegerCompressedServerKeyV0),
V0(UnsupportedIntegerCompressedServerKeyV0),
V1(IntegerCompressedServerKey),
}

View File

@@ -14,7 +14,7 @@ use crate::shortint::parameters::{
CompactPublicKeyEncryptionParameters, ShortintKeySwitchingParameters,
};
use crate::shortint::{EncryptionKeyChoice, MessageModulus, PBSParameters};
use crate::Error;
use crate::{Config, Error};
use concrete_csprng::seeders::Seed;
use serde::{Deserialize, Serialize};
use tfhe_versionable::Versionize;
@@ -472,6 +472,16 @@ pub struct IntegerServerKeyConformanceParams {
pub compression_param: Option<CompressionParameters>,
}
impl From<Config> for IntegerServerKeyConformanceParams {
fn from(value: Config) -> Self {
Self {
sk_param: value.inner.block_parameters,
cpk_param: value.inner.dedicated_compact_public_key_parameters,
compression_param: value.inner.compression_parameters,
}
}
}
impl
TryFrom<(
PBSParameters,

View File

@@ -92,7 +92,9 @@ pub use compact_list::ProvenCompactCiphertextList;
pub use compact_list::{
CompactCiphertextList, CompactCiphertextListBuilder, CompactCiphertextListExpander,
};
pub use compressed_ciphertext_list::{CompressedCiphertextList, CompressedCiphertextListBuilder};
pub use compressed_ciphertext_list::{
CompressedCiphertextList, CompressedCiphertextListBuilder, HlCompressible, HlExpandable,
};
pub use tag::Tag;
pub use traits::FheId;

View File

@@ -1,6 +1,7 @@
use crate::prelude::*;
use crate::shortint::parameters::compact_public_key_only::p_fail_2_minus_64::ks_pbs::PARAM_PKE_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use crate::shortint::parameters::key_switching::p_fail_2_minus_64::ks_pbs::PARAM_KEYSWITCH_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use crate::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use crate::shortint::parameters::*;
use crate::shortint::ClassicPBSParameters;
use crate::{
@@ -20,7 +21,7 @@ fn test_tag_propagation_cpu() {
PARAM_PKE_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
PARAM_KEYSWITCH_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
)),
Some(COMP_PARAM_MESSAGE_2_CARRY_2),
Some(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64),
)
}
@@ -139,9 +140,9 @@ fn test_tag_propagation_zk_pok() {
fn test_tag_propagation_gpu() {
test_tag_propagation(
Device::CudaGpu,
PARAM_MESSAGE_2_CARRY_2,
PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
None,
Some(COMP_PARAM_MESSAGE_2_CARRY_2),
Some(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64),
)
}

View File

@@ -1,4 +1,4 @@
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::integer::key_switching_key::{
CompressedKeySwitchingKey, CompressedKeySwitchingKeyMaterial, KeySwitchingKey,
@@ -15,12 +15,44 @@ pub enum KeySwitchingKeyVersions {
V0(KeySwitchingKey),
}
#[derive(Version)]
pub struct UnsupportedCompressedKeySwitchingKeyMaterialV0;
impl Upgrade<CompressedKeySwitchingKeyMaterial> for UnsupportedCompressedKeySwitchingKeyMaterialV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<CompressedKeySwitchingKeyMaterial, Self::Error> {
Err(crate::Error::new(
"Unable to load CompressedKeySwitchingKeyMaterial, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum CompressedKeySwitchingKeyMaterialVersions {
V0(CompressedKeySwitchingKeyMaterial),
V0(UnsupportedCompressedKeySwitchingKeyMaterialV0),
V1(CompressedKeySwitchingKeyMaterial),
}
#[derive(Version)]
pub struct UnsupportedCompressedKeySwitchingKeyV0;
impl Upgrade<CompressedKeySwitchingKey> for UnsupportedCompressedKeySwitchingKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<CompressedKeySwitchingKey, Self::Error> {
Err(crate::Error::new(
"Unable to load CompressedKeySwitchingKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum CompressedKeySwitchingKeyVersions {
V0(CompressedKeySwitchingKey),
V0(UnsupportedCompressedKeySwitchingKeyV0),
V1(CompressedKeySwitchingKey),
}

View File

@@ -2,7 +2,7 @@ use crate::integer::compression_keys::{
CompressedCompressionKey, CompressedDecompressionKey, CompressionKey, CompressionPrivateKeys,
DecompressionKey,
};
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
#[derive(VersionsDispatch)]
pub enum CompressionKeyVersions {
@@ -14,9 +14,25 @@ pub enum DecompressionKeyVersions {
V0(DecompressionKey),
}
#[derive(Version)]
pub struct UnsupportedCompressedCompressionKeyV0;
impl Upgrade<CompressedCompressionKey> for UnsupportedCompressedCompressionKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<CompressedCompressionKey, Self::Error> {
Err(crate::Error::new(
"Unable to load CompressedCompressionKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum CompressedCompressionKeyVersions {
V0(CompressedCompressionKey),
V0(UnsupportedCompressedCompressionKeyV0),
V1(CompressedCompressionKey),
}
#[derive(VersionsDispatch)]

View File

@@ -6,4 +6,3 @@ pub mod key_switching_key;
pub mod list_compression;
pub mod public_key;
pub mod server_key;
pub mod wopbs;

View File

@@ -1,4 +1,4 @@
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::integer::{CompressedServerKey, ServerKey};
@@ -7,7 +7,23 @@ pub enum ServerKeyVersions {
V0(ServerKey),
}
#[derive(Version)]
pub struct UnsupportedCompressedServerKeyV0;
impl Upgrade<CompressedServerKey> for UnsupportedCompressedServerKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<CompressedServerKey, Self::Error> {
Err(crate::Error::new(
"Unable to load CompressedServerKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum CompressedServerKeyVersions {
V0(CompressedServerKey),
V0(UnsupportedCompressedServerKeyV0),
V1(CompressedServerKey),
}

View File

@@ -1,8 +0,0 @@
use tfhe_versionable::VersionsDispatch;
use crate::integer::wopbs::WopbsKey;
#[derive(VersionsDispatch)]
pub enum WopbsKeyVersions {
V0(WopbsKey),
}

View File

@@ -1008,6 +1008,16 @@ impl IntegerProvenCompactCiphertextListConformanceParams {
pub fn from_crs_and_parameters(
value: CompactPublicKeyEncryptionParameters,
crs_params: &CompactPkeCrs,
) -> Self {
Self::from_public_key_encryption_parameters_and_crs_parameters(
value,
crs_params.public_params(),
)
}
pub fn from_public_key_encryption_parameters_and_crs_parameters(
value: CompactPublicKeyEncryptionParameters,
crs_params: &crate::zk::CompactPkePublicParams,
) -> Self {
Self {
encryption_lwe_dimension: value.encryption_lwe_dimension,
@@ -1015,7 +1025,7 @@ impl IntegerProvenCompactCiphertextListConformanceParams {
carry_modulus: value.carry_modulus,
ciphertext_modulus: value.ciphertext_modulus,
expansion_kind: value.expansion_kind,
max_elements_per_compact_list: crs_params.public_params().k,
max_elements_per_compact_list: crs_params.k,
}
}
}

View File

@@ -95,7 +95,7 @@ impl CompressedCiphertextListBuilder {
}
}
#[derive(Clone, Serialize, Deserialize, Versionize)]
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize, Versionize)]
#[versionize(CompressedCiphertextListVersions)]
pub struct CompressedCiphertextList {
pub(crate) packed_list: ShortintCompressedCiphertextList,
@@ -153,13 +153,22 @@ impl CompressedCiphertextList {
#[cfg(test)]
mod tests {
use super::*;
use crate::integer::ClientKey;
use crate::integer::{gen_keys, IntegerKeyKind};
use crate::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use crate::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use itertools::Itertools;
use rand::Rng;
const NB_TESTS: usize = 10;
const NB_OPERATOR_TESTS: usize = 10;
#[test]
fn test_heterogeneous_ciphertext_compression_ci_run_filter() {
let cks = ClientKey::new(PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64);
fn test_ciphertext_compression() {
const NUM_BLOCKS: usize = 32;
let (cks, sks) = gen_keys(
PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
IntegerKeyKind::Radix,
);
let private_compression_key =
cks.new_compression_private_key(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64);
@@ -167,32 +176,170 @@ mod tests {
let (compression_key, decompression_key) =
cks.new_compression_decompression_keys(&private_compression_key);
let ct1 = cks.encrypt_radix(3_u32, 16);
const MAX_NB_MESSAGES: usize = 2 * COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
.lwe_per_glwe
.0
/ NUM_BLOCKS;
let ct2 = cks.encrypt_signed_radix(-2, 16);
let mut rng = rand::thread_rng();
let ct3 = cks.encrypt_bool(true);
let message_modulus: u128 = cks.parameters().message_modulus().0 as u128;
let compressed = CompressedCiphertextListBuilder::new()
.push(ct1)
.push(ct2)
.push(ct3)
.build(&compression_key);
for _ in 0..NB_TESTS {
// Unsigned
let modulus = message_modulus.pow(NUM_BLOCKS as u32);
for _ in 0..NB_OPERATOR_TESTS {
let nb_messages = rng.gen_range(1..=MAX_NB_MESSAGES as u64);
let messages = (0..nb_messages)
.map(|_| rng.gen::<u128>() % modulus)
.collect::<Vec<_>>();
let decompressed1 = compressed.get(0, &decompression_key).unwrap().unwrap();
let cts = messages
.iter()
.map(|message| cks.encrypt_radix(*message, NUM_BLOCKS))
.collect_vec();
let decrypted: u32 = cks.decrypt_radix(&decompressed1);
let mut builder = CompressedCiphertextListBuilder::new();
assert_eq!(decrypted, 3_u32);
for ct in cts {
let and_ct = sks.bitand_parallelized(&ct, &ct);
builder.push(and_ct);
}
let decompressed2 = compressed.get(1, &decompression_key).unwrap().unwrap();
let compressed = builder.build(&compression_key);
let decrypted2: i32 = cks.decrypt_signed_radix(&decompressed2);
for (i, message) in messages.iter().enumerate() {
let decompressed = compressed.get(i, &decompression_key).unwrap().unwrap();
let decrypted: u128 = cks.decrypt_radix(&decompressed);
assert_eq!(decrypted, *message);
}
}
assert_eq!(decrypted2, -2);
// Signed
let modulus = message_modulus.pow((NUM_BLOCKS - 1) as u32) as i128;
for _ in 0..NB_OPERATOR_TESTS {
let nb_messages = rng.gen_range(1..=MAX_NB_MESSAGES as u64);
let messages = (0..nb_messages)
.map(|_| rng.gen::<i128>() % modulus)
.collect::<Vec<_>>();
let decompressed3 = compressed.get(2, &decompression_key).unwrap().unwrap();
let cts = messages
.iter()
.map(|message| cks.encrypt_signed_radix(*message, NUM_BLOCKS))
.collect_vec();
assert!(cks.decrypt_bool(&decompressed3));
let mut builder = CompressedCiphertextListBuilder::new();
for ct in cts {
let and_ct = sks.bitand_parallelized(&ct, &ct);
builder.push(and_ct);
}
let compressed = builder.build(&compression_key);
for (i, message) in messages.iter().enumerate() {
let decompressed = compressed.get(i, &decompression_key).unwrap().unwrap();
let decrypted: i128 = cks.decrypt_signed_radix(&decompressed);
assert_eq!(decrypted, *message);
}
}
// Boolean
for _ in 0..NB_OPERATOR_TESTS {
let nb_messages = rng.gen_range(1..=MAX_NB_MESSAGES as u64);
let messages = (0..nb_messages)
.map(|_| rng.gen::<i64>() % 2 != 0)
.collect::<Vec<_>>();
let cts = messages
.iter()
.map(|message| cks.encrypt_bool(*message))
.collect_vec();
let mut builder = CompressedCiphertextListBuilder::new();
for ct in cts {
let and_ct = sks.boolean_bitand(&ct, &ct);
builder.push(and_ct);
}
let compressed = builder.build(&compression_key);
for (i, message) in messages.iter().enumerate() {
let decompressed = compressed.get(i, &decompression_key).unwrap().unwrap();
let decrypted = cks.decrypt_bool(&decompressed);
assert_eq!(decrypted, *message);
}
}
// Hybrid
enum MessageType {
Unsigned(u128),
Signed(i128),
Boolean(bool),
}
for _ in 0..NB_OPERATOR_TESTS {
let mut builder = CompressedCiphertextListBuilder::new();
let nb_messages = rng.gen_range(1..=MAX_NB_MESSAGES as u64);
let mut messages = vec![];
for _ in 0..nb_messages {
let case_selector = rng.gen_range(0..3);
match case_selector {
0 => {
// Unsigned
let modulus = message_modulus.pow(NUM_BLOCKS as u32);
let message = rng.gen::<u128>() % modulus;
let ct = cks.encrypt_radix(message, NUM_BLOCKS);
let and_ct = sks.bitand_parallelized(&ct, &ct);
builder.push(and_ct);
messages.push(MessageType::Unsigned(message));
}
1 => {
// Signed
let modulus = message_modulus.pow((NUM_BLOCKS - 1) as u32) as i128;
let message = rng.gen::<i128>() % modulus;
let ct = cks.encrypt_signed_radix(message, NUM_BLOCKS);
let and_ct = sks.bitand_parallelized(&ct, &ct);
builder.push(and_ct);
messages.push(MessageType::Signed(message));
}
_ => {
// Boolean
let message = rng.gen::<i64>() % 2 != 0;
let ct = cks.encrypt_bool(message);
let and_ct = sks.boolean_bitand(&ct, &ct);
builder.push(and_ct);
messages.push(MessageType::Boolean(message));
}
}
}
let compressed = builder.build(&compression_key);
for (i, val) in messages.iter().enumerate() {
match val {
MessageType::Unsigned(message) => {
let decompressed =
compressed.get(i, &decompression_key).unwrap().unwrap();
let decrypted: u128 = cks.decrypt_radix(&decompressed);
assert_eq!(decrypted, *message);
}
MessageType::Signed(message) => {
let decompressed =
compressed.get(i, &decompression_key).unwrap().unwrap();
let decrypted: i128 = cks.decrypt_signed_radix(&decompressed);
assert_eq!(decrypted, *message);
}
MessageType::Boolean(message) => {
let decompressed =
compressed.get(i, &decompression_key).unwrap().unwrap();
let decrypted = cks.decrypt_bool(&decompressed);
assert_eq!(decrypted, *message);
}
}
}
}
}
}
}

View File

@@ -85,8 +85,8 @@ impl CudaCompressedCiphertextList {
decomp_key: &CudaDecompressionKey,
streams: &CudaStreams,
) -> Option<(CudaRadixCiphertext, DataKind)> {
let preceding_infos = self.info.get(..index).unwrap();
let current_info = self.info.get(index).copied().unwrap();
let preceding_infos = self.info.get(..index)?;
let current_info = self.info.get(index).copied()?;
let start_block_index: usize = preceding_infos
.iter()
@@ -129,82 +129,87 @@ impl CudaCompressedCiphertextList {
/// use tfhe::integer::gpu::ciphertext::compressed_ciphertext_list::CudaCompressedCiphertextListBuilder;
/// use tfhe::integer::gpu::ciphertext::{CudaSignedRadixCiphertext, CudaUnsignedRadixCiphertext};
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64;
/// use tfhe::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
///
/// let cks = ClientKey::new(PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64);
/// let num_blocks = 32;
/// let streams = CudaStreams::new_multi_gpu();
///
/// let private_compression_key =
/// cks.new_compression_private_key(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64);
/// let (radix_cks, _) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
/// num_blocks,
/// &streams,
/// );
/// let cks = radix_cks.as_ref();
///
/// let streams = CudaStreams::new_multi_gpu();
/// let private_compression_key =
/// cks.new_compression_private_key(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64);
///
/// let num_blocks = 32;
/// let (radix_cks, _) = gen_keys_radix_gpu(
/// PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64,
/// num_blocks,
/// &streams,
/// );
/// let (compressed_compression_key, compressed_decompression_key) =
/// radix_cks.new_compressed_compression_decompression_keys(&private_compression_key);
/// let (cuda_compression_key, cuda_decompression_key) =
/// radix_cks.new_cuda_compression_decompression_keys(&private_compression_key, &streams);
///
/// let cuda_compression_key = compressed_compression_key.decompress_to_cuda(&streams);
/// let private_compression_key =
/// cks.new_compression_private_key(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64);
///
/// let compression_key = compressed_compression_key.decompress();
/// let decompression_key = compressed_decompression_key.decompress();
/// let (compressed_compression_key, compressed_decompression_key) =
/// radix_cks.new_compressed_compression_decompression_keys(&private_compression_key);
///
/// let ct1 = radix_cks.encrypt(3_u32);
/// let ct2 = radix_cks.encrypt_signed(-2);
/// let ct3 = radix_cks.encrypt_bool(true);
/// let cuda_compression_key = compressed_compression_key.decompress_to_cuda(&streams);
///
/// /// Copy to GPU
/// let d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &streams);
/// let d_ct2 = CudaSignedRadixCiphertext::from_signed_radix_ciphertext(&ct2, &streams);
/// let d_ct3 = CudaBooleanBlock::from_boolean_block(&ct3, &streams);
/// let compression_key = compressed_compression_key.decompress();
/// let decompression_key = compressed_decompression_key.decompress();
///
/// let cuda_compressed = CudaCompressedCiphertextListBuilder::new()
/// .push(d_ct1, &streams)
/// .push(d_ct2, &streams)
/// .push(d_ct3, &streams)
/// .build(&cuda_compression_key, &streams);
/// let ct1 = radix_cks.encrypt(3_u32);
/// let ct2 = radix_cks.encrypt_signed(-2);
/// let ct3 = radix_cks.encrypt_bool(true);
///
/// let reference_compressed = CompressedCiphertextListBuilder::new()
/// .push(ct1)
/// .push(ct2)
/// .push(ct3)
/// .build(&compression_key);
/// /// Copy to GPU
/// let d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &streams);
/// let d_ct2 = CudaSignedRadixCiphertext::from_signed_radix_ciphertext(&ct2, &streams);
/// let d_ct3 = CudaBooleanBlock::from_boolean_block(&ct3, &streams);
///
/// let converted_compressed = cuda_compressed.to_compressed_ciphertext_list(&streams);
/// let cuda_compressed = CudaCompressedCiphertextListBuilder::new()
/// .push(d_ct1, &streams)
/// .push(d_ct2, &streams)
/// .push(d_ct3, &streams)
/// .build(&cuda_compression_key, &streams);
///
/// let decompressed1: RadixCiphertext = converted_compressed
/// .get(0, &decompression_key)
/// .unwrap()
/// .unwrap();
/// let reference_decompressed1 = reference_compressed
/// .get(0, &decompression_key)
/// .unwrap()
/// .unwrap();
/// assert_eq!(decompressed1, reference_decompressed1);
/// let reference_compressed = CompressedCiphertextListBuilder::new()
/// .push(ct1)
/// .push(ct2)
/// .push(ct3)
/// .build(&compression_key);
///
/// let decompressed2: SignedRadixCiphertext = converted_compressed
/// .get(1, &decompression_key)
/// .unwrap()
/// .unwrap();
/// let reference_decompressed2 = reference_compressed
/// .get(1, &decompression_key)
/// .unwrap()
/// .unwrap();
/// assert_eq!(decompressed2, reference_decompressed2);
/// let converted_compressed = cuda_compressed.to_compressed_ciphertext_list(&streams);
///
/// let decompressed3: BooleanBlock = converted_compressed
/// .get(2, &decompression_key)
/// .unwrap()
/// .unwrap();
/// let reference_decompressed3 = reference_compressed
/// .get(2, &decompression_key)
/// .unwrap()
/// .unwrap();
/// assert_eq!(decompressed3, reference_decompressed3);
/// let decompressed1: RadixCiphertext = converted_compressed
/// .get(0, &decompression_key)
/// .unwrap()
/// .unwrap();
/// let reference_decompressed1 = reference_compressed
/// .get(0, &decompression_key)
/// .unwrap()
/// .unwrap();
/// assert_eq!(decompressed1, reference_decompressed1);
///
/// let decompressed2: SignedRadixCiphertext = converted_compressed
/// .get(1, &decompression_key)
/// .unwrap()
/// .unwrap();
/// let reference_decompressed2 = reference_compressed
/// .get(1, &decompression_key)
/// .unwrap()
/// .unwrap();
/// assert_eq!(decompressed2, reference_decompressed2);
///
/// let decompressed3: BooleanBlock = converted_compressed
/// .get(2, &decompression_key)
/// .unwrap()
/// .unwrap();
/// let reference_decompressed3 = reference_compressed
/// .get(2, &decompression_key)
/// .unwrap()
/// .unwrap();
/// assert_eq!(decompressed3, reference_decompressed3);
/// ```
pub fn to_compressed_ciphertext_list(&self, streams: &CudaStreams) -> CompressedCiphertextList {
let glwe_list = self
@@ -261,72 +266,74 @@ impl CudaCompressedCiphertextList {
}
impl CompressedCiphertextList {
/// ```rust
/// use tfhe::core_crypto::gpu::CudaStreams;
///```rust
/// use tfhe::core_crypto::gpu::CudaStreams;
/// use tfhe::integer::ciphertext::CompressedCiphertextListBuilder;
/// use tfhe::integer::ClientKey;
/// use tfhe::integer::gpu::ciphertext::{CudaSignedRadixCiphertext, CudaUnsignedRadixCiphertext};
/// use tfhe::integer::gpu::ciphertext::boolean_value::CudaBooleanBlock;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64;
/// use tfhe::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
///
/// let cks = ClientKey::new(PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64);
/// let num_blocks = 32;
/// let streams = CudaStreams::new_multi_gpu();
///
/// let private_compression_key =
/// cks.new_compression_private_key(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64);
/// let (radix_cks, _) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
/// num_blocks,
/// &streams,
/// );
/// let cks = radix_cks.as_ref();
///
/// let streams = CudaStreams::new_multi_gpu();
/// let private_compression_key =
/// cks.new_compression_private_key(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64);
///
/// let num_blocks = 32;
/// let (radix_cks, _) = gen_keys_radix_gpu(
/// PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64,
/// num_blocks,
/// &streams,
/// );
/// let (compressed_compression_key, compressed_decompression_key) =
/// radix_cks.new_compressed_compression_decompression_keys(&private_compression_key);
/// let (compressed_compression_key, compressed_decompression_key) =
/// radix_cks.new_compressed_compression_decompression_keys(&private_compression_key);
///
/// let cuda_decompression_key =
/// compressed_decompression_key.decompress_to_cuda(
/// radix_cks.parameters().glwe_dimension(),
/// radix_cks.parameters().polynomial_size(),
/// radix_cks.parameters().message_modulus(),
/// radix_cks.parameters().carry_modulus(),
/// radix_cks.parameters().ciphertext_modulus(),
/// &streams);
/// let cuda_decompression_key = compressed_decompression_key.decompress_to_cuda(
/// radix_cks.parameters().glwe_dimension(),
/// radix_cks.parameters().polynomial_size(),
/// radix_cks.parameters().message_modulus(),
/// radix_cks.parameters().carry_modulus(),
/// radix_cks.parameters().ciphertext_modulus(),
/// &streams
/// );
///
/// let compression_key = compressed_compression_key.decompress();
/// let compression_key = compressed_compression_key.decompress();
///
/// let ct1 = radix_cks.encrypt(3_u32);
/// let ct2 = radix_cks.encrypt_signed(-2);
/// let ct3 = radix_cks.encrypt_bool(true);
/// let ct1 = radix_cks.encrypt(3_u32);
/// let ct2 = radix_cks.encrypt_signed(-2);
/// let ct3 = radix_cks.encrypt_bool(true);
///
/// let compressed = CompressedCiphertextListBuilder::new()
/// .push(ct1)
/// .push(ct2)
/// .push(ct3)
/// .build(&compression_key);
/// let compressed = CompressedCiphertextListBuilder::new()
/// .push(ct1)
/// .push(ct2)
/// .push(ct3)
/// .build(&compression_key);
///
/// let cuda_compressed = compressed.to_cuda_compressed_ciphertext_list(&streams);
/// let cuda_compressed = compressed.to_cuda_compressed_ciphertext_list(&streams);
/// let recovered_cuda_compressed = cuda_compressed.to_compressed_ciphertext_list(&streams);
///
/// let d_decompressed1: CudaUnsignedRadixCiphertext =
/// cuda_compressed.get(0, &cuda_decompression_key, &streams).unwrap().unwrap();
/// let decompressed1 = d_decompressed1.to_radix_ciphertext(&streams);
/// let decrypted: u32 = radix_cks.decrypt(&decompressed1);
/// assert_eq!(decrypted, 3_u32);
/// assert_eq!(recovered_cuda_compressed, compressed);
///
/// let d_decompressed2: CudaSignedRadixCiphertext =
/// cuda_compressed.get(1, &cuda_decompression_key, &streams).unwrap().unwrap();
/// let decompressed2 = d_decompressed2.to_signed_radix_ciphertext(&streams);
/// let decrypted: i32 = radix_cks.decrypt_signed(&decompressed2);
/// assert_eq!(decrypted, -2);
/// let d_decompressed1: CudaUnsignedRadixCiphertext =
/// cuda_compressed.get(0, &cuda_decompression_key, &streams).unwrap().unwrap();
/// let decompressed1 = d_decompressed1.to_radix_ciphertext(&streams);
/// let decrypted: u32 = radix_cks.decrypt(&decompressed1);
/// assert_eq!(decrypted, 3_u32);
///
/// let d_decompressed3: CudaBooleanBlock =
/// cuda_compressed.get(2, &cuda_decompression_key, &streams).unwrap().unwrap();
/// let decompressed3 = d_decompressed3.to_boolean_block(&streams);
/// let decrypted = radix_cks.decrypt_bool(&decompressed3);
/// assert!(decrypted);
/// let d_decompressed2: CudaSignedRadixCiphertext =
/// cuda_compressed.get(1, &cuda_decompression_key, &streams).unwrap().unwrap();
/// let decompressed2 = d_decompressed2.to_signed_radix_ciphertext(&streams);
/// let decrypted: i32 = radix_cks.decrypt_signed(&decompressed2);
/// assert_eq!(decrypted, -2);
///
/// let d_decompressed3: CudaBooleanBlock =
/// cuda_compressed.get(2, &cuda_decompression_key, &streams).unwrap().unwrap();
/// let decompressed3 = d_decompressed3.to_boolean_block(&streams);
/// let decrypted = radix_cks.decrypt_bool(&decompressed3);
/// assert!(decrypted);
/// ```
pub fn to_cuda_compressed_ciphertext_list(
&self,
@@ -513,9 +520,8 @@ impl<'de> serde::Deserialize<'de> for CudaCompressedCiphertextList {
mod tests {
use super::*;
use crate::integer::gpu::gen_keys_radix_gpu;
use crate::integer::ClientKey;
use crate::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64;
use crate::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64;
use crate::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use crate::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
use rand::Rng;
const NB_TESTS: usize = 10;
@@ -523,31 +529,36 @@ mod tests {
#[test]
fn test_gpu_ciphertext_compression() {
let cks = ClientKey::new(PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64);
let private_compression_key =
cks.new_compression_private_key(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64);
const NUM_BLOCKS: usize = 32;
let streams = CudaStreams::new_multi_gpu();
let num_blocks = 32;
let (radix_cks, _) = gen_keys_radix_gpu(
PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64,
num_blocks,
let (radix_cks, sks) = gen_keys_radix_gpu(
PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
NUM_BLOCKS,
&streams,
);
let cks = radix_cks.as_ref();
let private_compression_key =
cks.new_compression_private_key(COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64);
let (cuda_compression_key, cuda_decompression_key) =
radix_cks.new_cuda_compression_decompression_keys(&private_compression_key, &streams);
const MAX_NB_MESSAGES: usize = 2 * COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
.lwe_per_glwe
.0
/ NUM_BLOCKS;
let mut rng = rand::thread_rng();
let message_modulus: u128 = cks.parameters().message_modulus().0 as u128;
for _ in 0..NB_TESTS {
// Unsigned
let modulus = message_modulus.pow(num_blocks as u32);
let modulus = message_modulus.pow(NUM_BLOCKS as u32);
for _ in 0..NB_OPERATOR_TESTS {
let nb_messages = 1 + (rng.gen::<u64>() % 6);
let nb_messages = rng.gen_range(1..=MAX_NB_MESSAGES as u64);
let messages = (0..nb_messages)
.map(|_| rng.gen::<u128>() % modulus)
.collect::<Vec<_>>();
@@ -563,7 +574,8 @@ mod tests {
let mut builder = CudaCompressedCiphertextListBuilder::new();
for d_ct in d_cts {
builder.push(d_ct, &streams);
let d_and_ct = sks.bitand(&d_ct, &d_ct, &streams);
builder.push(d_and_ct, &streams);
}
let cuda_compressed = builder.build(&cuda_compression_key, &streams);
@@ -580,9 +592,9 @@ mod tests {
}
// Signed
let modulus = message_modulus.pow((num_blocks - 1) as u32) as i128;
let modulus = message_modulus.pow((NUM_BLOCKS - 1) as u32) as i128;
for _ in 0..NB_OPERATOR_TESTS {
let nb_messages = 1 + (rng.gen::<u64>() % 6);
let nb_messages = rng.gen_range(1..=MAX_NB_MESSAGES as u64);
let messages = (0..nb_messages)
.map(|_| rng.gen::<i128>() % modulus)
.collect::<Vec<_>>();
@@ -598,7 +610,8 @@ mod tests {
let mut builder = CudaCompressedCiphertextListBuilder::new();
for d_ct in d_cts {
builder.push(d_ct, &streams);
let d_and_ct = sks.bitand(&d_ct, &d_ct, &streams);
builder.push(d_and_ct, &streams);
}
let cuda_compressed = builder.build(&cuda_compression_key, &streams);
@@ -616,7 +629,7 @@ mod tests {
// Boolean
for _ in 0..NB_OPERATOR_TESTS {
let nb_messages = 1 + (rng.gen::<u64>() % 6);
let nb_messages = rng.gen_range(1..=MAX_NB_MESSAGES as u64);
let messages = (0..nb_messages)
.map(|_| rng.gen::<i64>() % 2 != 0)
.collect::<Vec<_>>();
@@ -631,8 +644,12 @@ mod tests {
let mut builder = CudaCompressedCiphertextListBuilder::new();
for d_ct in d_cts {
builder.push(d_ct, &streams);
for d_boolean_ct in d_cts {
let d_ct = d_boolean_ct.0;
let d_and_ct = sks.bitand(&d_ct, &d_ct, &streams);
let d_and_boolean_ct =
CudaBooleanBlock::from_cuda_radix_ciphertext(d_and_ct.ciphertext);
builder.push(d_and_boolean_ct, &streams);
}
let cuda_compressed = builder.build(&cuda_compression_key, &streams);
@@ -657,38 +674,44 @@ mod tests {
for _ in 0..NB_OPERATOR_TESTS {
let mut builder = CudaCompressedCiphertextListBuilder::new();
let nb_messages = 1 + (rng.gen::<u64>() % 6);
let nb_messages = rng.gen_range(1..=MAX_NB_MESSAGES as u64);
let mut messages = vec![];
for _ in 0..nb_messages {
let case_selector = rng.gen_range(0..3);
match case_selector {
0 => {
// Unsigned
let modulus = message_modulus.pow(num_blocks as u32);
let modulus = message_modulus.pow(NUM_BLOCKS as u32);
let message = rng.gen::<u128>() % modulus;
let ct = radix_cks.encrypt(message);
let d_ct =
CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &streams);
builder.push(d_ct, &streams);
let d_and_ct = sks.bitand(&d_ct, &d_ct, &streams);
builder.push(d_and_ct, &streams);
messages.push(MessageType::Unsigned(message));
}
1 => {
// Signed
let modulus = message_modulus.pow((num_blocks - 1) as u32) as i128;
let modulus = message_modulus.pow((NUM_BLOCKS - 1) as u32) as i128;
let message = rng.gen::<i128>() % modulus;
let ct = radix_cks.encrypt_signed(message);
let d_ct = CudaSignedRadixCiphertext::from_signed_radix_ciphertext(
&ct, &streams,
);
builder.push(d_ct, &streams);
let d_and_ct = sks.bitand(&d_ct, &d_ct, &streams);
builder.push(d_and_ct, &streams);
messages.push(MessageType::Signed(message));
}
_ => {
// Boolean
let message = rng.gen::<i64>() % 2 != 0;
let ct = radix_cks.encrypt_bool(message);
let d_ct = CudaBooleanBlock::from_boolean_block(&ct, &streams);
builder.push(d_ct, &streams);
let d_boolean_ct = CudaBooleanBlock::from_boolean_block(&ct, &streams);
let d_ct = d_boolean_ct.0;
let d_and_ct = sks.bitand(&d_ct, &d_ct, &streams);
let d_and_boolean_ct =
CudaBooleanBlock::from_cuda_radix_ciphertext(d_and_ct.ciphertext);
builder.push(d_and_boolean_ct, &streams);
messages.push(MessageType::Boolean(message));
}
}

View File

@@ -92,32 +92,27 @@ impl CudaCompressionKey {
let lwe_ciphertext_count = LweCiphertextCount(total_num_blocks);
let gpu_index = streams.gpu_indexes[0];
let d_vec = unsafe {
let mut d_vec = CudaVec::new_async(
lwe_dimension.to_lwe_size().0 * lwe_ciphertext_count.0,
streams,
gpu_index,
let mut d_vec = CudaVec::new_async(
lwe_dimension.to_lwe_size().0 * lwe_ciphertext_count.0,
streams,
gpu_index,
);
let mut offset: usize = 0;
for ciphertext in vec_ciphertexts {
let dest_ptr = d_vec
.as_mut_c_ptr(gpu_index)
.add(offset * std::mem::size_of::<u64>());
let size = ciphertext.d_blocks.0.d_vec.len * std::mem::size_of::<u64>();
cuda_memcpy_async_gpu_to_gpu(
dest_ptr,
ciphertext.d_blocks.0.d_vec.as_c_ptr(gpu_index),
size as u64,
streams.ptr[gpu_index as usize],
streams.gpu_indexes[gpu_index as usize],
);
let mut offset: usize = 0;
for ciphertext in vec_ciphertexts {
let dest_ptr = d_vec
.as_mut_c_ptr(gpu_index)
.add(offset * std::mem::size_of::<u64>());
let size = ciphertext.d_blocks.0.d_vec.len * std::mem::size_of::<u64>();
cuda_memcpy_async_gpu_to_gpu(
dest_ptr,
ciphertext.d_blocks.0.d_vec.as_c_ptr(gpu_index),
size as u64,
streams.ptr[gpu_index as usize],
streams.gpu_indexes[gpu_index as usize],
);
offset += ciphertext.d_blocks.0.d_vec.len;
}
streams.synchronize();
d_vec
};
offset += ciphertext.d_blocks.0.d_vec.len;
}
CudaLweCiphertextList::from_cuda_vec(d_vec, lwe_ciphertext_count, ciphertext_modulus)
}

View File

@@ -65,8 +65,6 @@ pub mod public_key;
pub mod server_key;
#[cfg(feature = "experimental")]
pub mod wopbs;
#[cfg(not(feature = "experimental"))]
pub(crate) mod wopbs;
#[cfg(feature = "gpu")]
pub mod gpu;

View File

@@ -6,13 +6,9 @@
#[cfg(all(test, feature = "experimental"))]
mod test;
use super::backward_compatibility::wopbs::WopbsKeyVersions;
use serde::{Deserialize, Serialize};
use tfhe_versionable::Versionize;
#[derive(Clone, Serialize, Deserialize, Versionize)]
#[versionize(WopbsKeyVersions)]
#[derive(Clone, Serialize, Deserialize)]
pub struct WopbsKey {
wopbs_key: crate::shortint::wopbs::WopbsKey,
}

View File

@@ -54,6 +54,32 @@ impl CompactPkePublicParams {
.map_err(into_js_error)
})
}
#[wasm_bindgen]
pub fn safe_serialize(&self, serialized_size_limit: u64) -> Result<Vec<u8>, JsError> {
let mut buffer = vec![];
catch_panic_result(|| {
crate::safe_serialization::SerializationConfig::new(serialized_size_limit)
.serialize_into(&self.0, &mut buffer)
.map_err(into_js_error)
})?;
Ok(buffer)
}
#[wasm_bindgen]
pub fn safe_deserialize(
buffer: &[u8],
serialized_size_limit: u64,
) -> Result<CompactPkePublicParams, JsError> {
catch_panic_result(|| {
crate::safe_serialization::DeserializationConfig::new(serialized_size_limit)
.disable_conformance()
.deserialize_from(buffer)
.map(Self)
.map_err(into_js_error)
})
}
}
// "wasm bindgen is fragile and prefers the actual type vs. Self"

View File

@@ -67,12 +67,6 @@ impl SerializationVersioningMode {
}
}
/// `HEADER_LENGTH_LIMIT` is the maximum `SerializationHeader` size which
/// `DeserializationConfig::deserialize_from` is going to try to read (it returns an error if
/// it's too big).
/// It helps prevent an attacker passing a very long header to exhaust memory.
const HEADER_LENGTH_LIMIT: u64 = 1000;
/// Header with global metadata about the serialized object. This help checking that we are not
/// deserializing data that we can't handle.
#[derive(Serialize, Deserialize)]
@@ -145,18 +139,18 @@ Please use the versioned serialization mode for backward compatibility.",
#[derive(Clone)]
pub struct SerializationConfig {
versioned: SerializationVersioningMode,
serialized_size_limit: u64,
serialized_size_limit: Option<u64>,
}
impl SerializationConfig {
/// Creates a new serialization config. The default configuration will serialize the object
/// with versioning information for backward compatibility.
/// `serialized_size_limit` is the size limit (in number of byte) of the serialized object
/// (excluding the header).
/// (including the header).
pub fn new(serialized_size_limit: u64) -> Self {
Self {
versioned: SerializationVersioningMode::versioned(),
serialized_size_limit,
serialized_size_limit: Some(serialized_size_limit),
}
}
@@ -164,14 +158,14 @@ impl SerializationConfig {
pub fn new_with_unlimited_size() -> Self {
Self {
versioned: SerializationVersioningMode::versioned(),
serialized_size_limit: 0,
serialized_size_limit: None,
}
}
/// Disables the size limit for serialized objects
pub fn disable_size_limit(self) -> Self {
Self {
serialized_size_limit: 0,
serialized_size_limit: None,
..self
}
}
@@ -184,6 +178,14 @@ impl SerializationConfig {
}
}
/// Sets the size limit for this serialization config
pub fn with_size_limit(self, size: u64) -> Self {
Self {
serialized_size_limit: Some(size),
..self
}
}
/// Create a serialization header based on the current config
fn create_header<T: Named>(&self) -> SerializationHeader {
match self.versioned {
@@ -196,13 +198,28 @@ impl SerializationConfig {
}
}
/// Returns the max length of the serialized header
fn header_length_limit(&self) -> u64 {
if self.serialized_size_limit == 0 {
0
} else {
HEADER_LENGTH_LIMIT
}
/// Returns the size the object would take if serialized using the current config
///
/// The size is returned as a u64 to handle the serialization of large buffers under 32b
/// architectures.
pub fn serialized_size<T: Serialize + Versionize + Named>(
&self,
object: &T,
) -> bincode::Result<u64> {
let options = bincode::DefaultOptions::new().with_fixint_encoding();
let header = self.create_header::<T>();
let header_size = options.serialized_size(&header)?;
let data_size = match self.versioned {
SerializationVersioningMode::Versioned { .. } => {
options.serialized_size(&object.versionize())?
}
SerializationVersioningMode::Unversioned { .. } => options.serialized_size(&object)?,
};
Ok(header_size + data_size)
}
/// Serializes an object into a [writer](std::io::Write), based on the current config.
@@ -214,20 +231,39 @@ impl SerializationConfig {
) -> bincode::Result<()> {
let options = bincode::DefaultOptions::new()
.with_fixint_encoding()
.with_limit(0);
.with_limit(0); // Force to explicitly set the limit for each serialization
let header = self.create_header::<T>();
options
.with_limit(self.header_length_limit())
.serialize_into(&mut writer, &header)?;
let header_size = options.with_no_limit().serialized_size(&header)?;
match self.versioned {
SerializationVersioningMode::Versioned { .. } => options
.with_limit(self.serialized_size_limit)
.serialize_into(&mut writer, &object.versionize())?,
SerializationVersioningMode::Unversioned { .. } => options
.with_limit(self.serialized_size_limit)
.serialize_into(&mut writer, &object)?,
if let Some(size_limit) = self.serialized_size_limit {
options
.with_limit(size_limit)
.serialize_into(&mut writer, &header)?;
let options = options.with_limit(size_limit - header_size);
match self.versioned {
SerializationVersioningMode::Versioned { .. } => {
options.serialize_into(&mut writer, &object.versionize())?
}
SerializationVersioningMode::Unversioned { .. } => {
options.serialize_into(&mut writer, &object)?
}
};
} else {
let options = options.with_no_limit();
options.serialize_into(&mut writer, &header)?;
match self.versioned {
SerializationVersioningMode::Versioned { .. } => {
options.serialize_into(&mut writer, &object.versionize())?
}
SerializationVersioningMode::Unversioned { .. } => {
options.serialize_into(&mut writer, &object)?
}
};
};
Ok(())
@@ -238,7 +274,7 @@ impl SerializationConfig {
/// the various sanity checks that will be performed during deserialization.
#[derive(Copy, Clone)]
pub struct DeserializationConfig {
serialized_size_limit: u64,
serialized_size_limit: Option<u64>,
validate_header: bool,
}
@@ -248,11 +284,33 @@ pub struct DeserializationConfig {
/// This type should be created with [`DeserializationConfig::disable_conformance`]
#[derive(Copy, Clone)]
pub struct NonConformantDeserializationConfig {
serialized_size_limit: u64,
serialized_size_limit: Option<u64>,
validate_header: bool,
}
impl NonConformantDeserializationConfig {
/// Deserialize a header using the current config
fn deserialize_header(
&self,
reader: &mut impl std::io::Read,
) -> Result<SerializationHeader, String> {
let options = bincode::DefaultOptions::new()
.with_fixint_encoding()
.with_limit(0);
if let Some(size_limit) = self.serialized_size_limit {
options
.with_limit(size_limit)
.deserialize_from(reader)
.map_err(|err| err.to_string())
} else {
options
.with_no_limit()
.deserialize_from(reader)
.map_err(|err| err.to_string())
}
}
/// Deserializes an object serialized by [`SerializationConfig::serialize_into`] from a
/// [reader](std::io::Read). Performs various sanity checks based on the deserialization config,
/// but skips conformance checks.
@@ -260,39 +318,49 @@ impl NonConformantDeserializationConfig {
self,
mut reader: impl std::io::Read,
) -> Result<T, String> {
if self.serialized_size_limit != 0 && self.serialized_size_limit <= HEADER_LENGTH_LIMIT {
return Err(format!(
"The provided size limit is too small, provide a limit of at least \
{HEADER_LENGTH_LIMIT} bytes"
));
}
let options = bincode::DefaultOptions::new()
.with_fixint_encoding()
.with_limit(0);
.with_limit(0); // Force to explicitly set the limit for each deserialization
let deserialized_header: SerializationHeader = options
.with_limit(self.header_length_limit())
.deserialize_from(&mut reader)
let deserialized_header: SerializationHeader = self.deserialize_header(&mut reader)?;
let header_size = options
.with_no_limit()
.serialized_size(&deserialized_header)
.map_err(|err| err.to_string())?;
if self.validate_header {
deserialized_header.validate::<T>()?;
}
match deserialized_header.versioning_mode {
SerializationVersioningMode::Versioned { .. } => {
let deser_versioned = options
.with_limit(self.serialized_size_limit - self.header_length_limit())
.deserialize_from(&mut reader)
.map_err(|err| err.to_string())?;
if let Some(size_limit) = self.serialized_size_limit {
let options = options.with_limit(size_limit - header_size);
match deserialized_header.versioning_mode {
SerializationVersioningMode::Versioned { .. } => {
let deser_versioned = options
.deserialize_from(&mut reader)
.map_err(|err| err.to_string())?;
T::unversionize(deser_versioned).map_err(|e| e.to_string())
T::unversionize(deser_versioned).map_err(|e| e.to_string())
}
SerializationVersioningMode::Unversioned { .. } => options
.deserialize_from(&mut reader)
.map_err(|err| err.to_string()),
}
} else {
let options = options.with_no_limit();
match deserialized_header.versioning_mode {
SerializationVersioningMode::Versioned { .. } => {
let deser_versioned = options
.deserialize_from(&mut reader)
.map_err(|err| err.to_string())?;
T::unversionize(deser_versioned).map_err(|e| e.to_string())
}
SerializationVersioningMode::Unversioned { .. } => options
.deserialize_from(&mut reader)
.map_err(|err| err.to_string()),
}
SerializationVersioningMode::Unversioned { .. } => options
.with_limit(self.serialized_size_limit - self.header_length_limit())
.deserialize_from(&mut reader)
.map_err(|err| err.to_string()),
}
}
@@ -303,14 +371,6 @@ impl NonConformantDeserializationConfig {
validate_header: self.validate_header,
}
}
fn header_length_limit(&self) -> u64 {
if self.serialized_size_limit == 0 {
0
} else {
HEADER_LENGTH_LIMIT
}
}
}
impl DeserializationConfig {
@@ -319,14 +379,14 @@ impl DeserializationConfig {
/// By default, it will check that the serialization version and the name of the
/// deserialized type are correct.
/// `serialized_size_limit` is the size limit (in number of byte) of the serialized object
/// (excluding version and name serialization).
/// (include the safe serialization header).
///
/// It will also check that the object is conformant with the parameter set given in
/// `conformance_params`. Finally, it will check the compatibility of the loaded data with
/// the current *TFHE-rs* version.
pub fn new(serialized_size_limit: u64) -> Self {
Self {
serialized_size_limit,
serialized_size_limit: Some(serialized_size_limit),
validate_header: true,
}
}
@@ -334,7 +394,7 @@ impl DeserializationConfig {
/// Creates a new config without any size limit for the deserialized objects.
pub fn new_with_unlimited_size() -> Self {
Self {
serialized_size_limit: 0,
serialized_size_limit: None,
validate_header: true,
}
}
@@ -342,7 +402,15 @@ impl DeserializationConfig {
/// Disables the size limit for the serialized objects.
pub fn disable_size_limit(self) -> Self {
Self {
serialized_size_limit: 0,
serialized_size_limit: None,
..self
}
}
/// Sets the size limit for this deserialization config
pub fn with_size_limit(self, size: u64) -> Self {
Self {
serialized_size_limit: Some(size),
..self
}
}
@@ -394,6 +462,11 @@ pub fn safe_serialize<T: Serialize + Versionize + Named>(
SerializationConfig::new(serialized_size_limit).serialize_into(object, writer)
}
/// Return the size the object would take if serialized using [`safe_serialize`]
pub fn safe_serialized_size<T: Serialize + Versionize + Named>(object: &T) -> bincode::Result<u64> {
SerializationConfig::new_with_unlimited_size().serialized_size(object)
}
/// Serialize an object with the default configuration (with size limit, header check and
/// versioning). This is an alias for
/// `DeserializationConfig::new(serialized_size_limit).disable_conformance().deserialize_from`
@@ -428,7 +501,7 @@ mod test_shortint {
use crate::shortint::{gen_keys, Ciphertext};
#[test]
fn safe_deserialization_ct() {
fn safe_deserialization_ct_unversioned() {
let (ck, _sk) = gen_keys(PARAM_MESSAGE_2_CARRY_2_KS_PBS);
let msg = 2_u64;
@@ -437,10 +510,12 @@ mod test_shortint {
let mut buffer = vec![];
SerializationConfig::new(1 << 20)
.disable_versioning()
.serialize_into(&ct, &mut buffer)
.unwrap();
let config = SerializationConfig::new(1 << 20).disable_versioning();
let size = config.serialized_size(&ct).unwrap();
config.serialize_into(&ct, &mut buffer).unwrap();
assert_eq!(size as usize, buffer.len());
assert!(DeserializationConfig::new(1 << 20)
.deserialize_from::<Ciphertext>(
@@ -461,7 +536,7 @@ mod test_shortint {
}
#[test]
fn safe_deserialization_ct_versioned() {
fn safe_deserialization_ct() {
let (ck, _sk) = gen_keys(PARAM_MESSAGE_2_CARRY_2_KS_PBS);
let msg = 2_u64;
@@ -470,9 +545,12 @@ mod test_shortint {
let mut buffer = vec![];
SerializationConfig::new(1 << 20)
.serialize_into(&ct, &mut buffer)
.unwrap();
let config = SerializationConfig::new(1 << 20);
let size = config.serialized_size(&ct).unwrap();
config.serialize_into(&ct, &mut buffer).unwrap();
assert_eq!(size as usize, buffer.len());
assert!(DeserializationConfig::new(1 << 20,)
.deserialize_from::<Ciphertext>(
@@ -491,6 +569,62 @@ mod test_shortint {
let dec = ck.decrypt(&ct2);
assert_eq!(msg, dec);
}
#[test]
fn safe_deserialization_ct_unlimited_size() {
let (ck, _sk) = gen_keys(PARAM_MESSAGE_2_CARRY_2_KS_PBS);
let msg = 2_u64;
let ct = ck.encrypt(msg);
let mut buffer = vec![];
let config = SerializationConfig::new_with_unlimited_size();
let size = config.serialized_size(&ct).unwrap();
config.serialize_into(&ct, &mut buffer).unwrap();
assert_eq!(size as usize, buffer.len());
let ct2 = DeserializationConfig::new_with_unlimited_size()
.deserialize_from::<Ciphertext>(
buffer.as_slice(),
&PARAM_MESSAGE_2_CARRY_2_KS_PBS.to_shortint_conformance_param(),
)
.unwrap();
let dec = ck.decrypt(&ct2);
assert_eq!(msg, dec);
}
#[test]
fn safe_deserialization_size_limit() {
let (ck, _sk) = gen_keys(PARAM_MESSAGE_2_CARRY_2_KS_PBS);
let msg = 2_u64;
let ct = ck.encrypt(msg);
let mut buffer = vec![];
let config = SerializationConfig::new_with_unlimited_size().disable_versioning();
let size = config.serialized_size(&ct).unwrap();
config.serialize_into(&ct, &mut buffer).unwrap();
assert_eq!(size as usize, buffer.len());
let ct2 = DeserializationConfig::new(size)
.deserialize_from::<Ciphertext>(
buffer.as_slice(),
&PARAM_MESSAGE_2_CARRY_2_KS_PBS.to_shortint_conformance_param(),
)
.unwrap();
let dec = ck.decrypt(&ct2);
assert_eq!(msg, dec);
}
}
#[cfg(all(test, feature = "integer"))]
@@ -524,10 +658,12 @@ mod test_integer {
let mut buffer = vec![];
SerializationConfig::new(1 << 20)
.disable_versioning()
.serialize_into(&ct_list, &mut buffer)
.unwrap();
let config = SerializationConfig::new(1 << 20).disable_versioning();
let size = config.serialized_size(&ct_list).unwrap();
config.serialize_into(&ct_list, &mut buffer).unwrap();
assert_eq!(size as usize, buffer.len());
let to_param_set = |list_size_constraint| CompactCiphertextListConformanceParams {
shortint_params: PARAM_MESSAGE_2_CARRY_2_KS_PBS.to_shortint_conformance_param(),
@@ -601,9 +737,12 @@ mod test_integer {
let mut buffer = vec![];
SerializationConfig::new(1 << 20)
.serialize_into(&ct_list, &mut buffer)
.unwrap();
let config = SerializationConfig::new(1 << 20);
let size = config.serialized_size(&ct_list).unwrap();
config.serialize_into(&ct_list, &mut buffer).unwrap();
assert_eq!(size as usize, buffer.len());
let to_param_set = |list_size_constraint| CompactCiphertextListConformanceParams {
shortint_params: PARAM_MESSAGE_2_CARRY_2_KS_PBS.to_shortint_conformance_param(),

View File

@@ -1,4 +1,4 @@
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::shortint::key_switching_key::{
CompressedKeySwitchingKeyMaterial, KeySwitchingKeyMaterial,
@@ -15,12 +15,44 @@ pub enum KeySwitchingKeyVersions {
V0(KeySwitchingKey),
}
#[derive(Version)]
pub struct UnsupportedCompressedKeySwitchingKeyMaterialV0;
impl Upgrade<CompressedKeySwitchingKeyMaterial> for UnsupportedCompressedKeySwitchingKeyMaterialV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<CompressedKeySwitchingKeyMaterial, Self::Error> {
Err(crate::Error::new(
"Unable to load CompressedKeySwitchingKeyMaterial, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum CompressedKeySwitchingKeyMaterialVersions {
V0(CompressedKeySwitchingKeyMaterial),
V0(UnsupportedCompressedKeySwitchingKeyMaterialV0),
V1(CompressedKeySwitchingKeyMaterial),
}
#[derive(Version)]
pub struct UnsupportedCompressedKeySwitchingKeyV0;
impl Upgrade<CompressedKeySwitchingKey> for UnsupportedCompressedKeySwitchingKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<CompressedKeySwitchingKey, Self::Error> {
Err(crate::Error::new(
"Unable to load CompressedKeySwitchingKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum CompressedKeySwitchingKeyVersions {
V0(CompressedKeySwitchingKey),
V0(UnsupportedCompressedKeySwitchingKeyV0),
V1(CompressedKeySwitchingKey),
}

View File

@@ -1,4 +1,4 @@
use tfhe_versionable::VersionsDispatch;
use tfhe_versionable::{Upgrade, Version, VersionsDispatch};
use crate::shortint::list_compression::{
CompressedCompressionKey, CompressedDecompressionKey, CompressionKey, CompressionPrivateKeys,
@@ -15,9 +15,25 @@ pub enum DecompressionKeyVersions {
V0(DecompressionKey),
}
#[derive(Version)]
pub struct UnsupportedCompressedCompressionKeyV0;
impl Upgrade<CompressedCompressionKey> for UnsupportedCompressedCompressionKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<CompressedCompressionKey, Self::Error> {
Err(crate::Error::new(
"Unable to load CompressedCompressionKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum CompressedCompressionKeyVersions {
V0(CompressedCompressionKey),
V0(UnsupportedCompressedCompressionKeyV0),
V1(CompressedCompressionKey),
}
#[derive(VersionsDispatch)]

View File

@@ -7,4 +7,3 @@ pub mod list_compression;
pub mod parameters;
pub mod public_key;
pub mod server_key;
pub mod wopbs;

View File

@@ -1,5 +1,5 @@
use serde::{Deserialize, Serialize};
use tfhe_versionable::{UnversionizeError, VersionsDispatch};
use tfhe_versionable::{UnversionizeError, Upgrade, Version, VersionsDispatch};
use crate::core_crypto::prelude::{Container, IntoContainerOwned};
use crate::shortint::server_key::*;
@@ -58,7 +58,23 @@ pub enum ShortintCompressedBootstrappingKeyVersions {
V0(ShortintCompressedBootstrappingKey),
}
#[derive(Version)]
pub struct UnsupportedCompressedServerKeyV0;
impl Upgrade<CompressedServerKey> for UnsupportedCompressedServerKeyV0 {
type Error = crate::Error;
fn upgrade(self) -> Result<CompressedServerKey, Self::Error> {
Err(crate::Error::new(
"Unable to load CompressedServerKey, \
this format is unsupported by this TFHE-rs version."
.to_string(),
))
}
}
#[derive(VersionsDispatch)]
pub enum CompressedServerKeyVersions {
V0(CompressedServerKey),
V0(UnsupportedCompressedServerKeyV0),
V1(CompressedServerKey),
}

View File

@@ -1,8 +0,0 @@
use tfhe_versionable::VersionsDispatch;
use crate::shortint::wopbs::WopbsKey;
#[derive(VersionsDispatch)]
pub enum WopbsKeyVersions {
V0(WopbsKey),
}

View File

@@ -7,7 +7,7 @@ use crate::shortint::backward_compatibility::ciphertext::CompressedCiphertextLis
use crate::shortint::parameters::CompressedCiphertextConformanceParams;
use crate::shortint::{CarryModulus, MessageModulus};
#[derive(Clone, serde::Serialize, serde::Deserialize, Versionize)]
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize, Versionize)]
#[versionize(CompressedCiphertextListVersions)]
pub struct CompressedCiphertextList {
pub modulus_switched_glwe_ciphertext_list: Vec<CompressedModulusSwitchedGlweCiphertext<u64>>,

View File

@@ -290,15 +290,8 @@ impl NamedParam for ShortintKeySwitchingParameters {
impl NamedParam for CompressionParameters {
fn name(&self) -> String {
named_params_impl!(expose
COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64,
COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
);
named_params_impl!(
{
*self;
Self
} == (COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64)
);
named_params_impl!(
{

View File

@@ -1,6 +1,6 @@
use tfhe_versionable::Versionize;
use crate::core_crypto::prelude::{CiphertextModulusLog, LweCiphertextCount, StandardDev};
use crate::core_crypto::prelude::{CiphertextModulusLog, LweCiphertextCount};
use crate::shortint::backward_compatibility::parameters::list_compression::CompressionParametersVersions;
use crate::shortint::parameters::{
DecompositionBaseLog, DecompositionLevelCount, DynamicDistribution, GlweDimension,
@@ -25,27 +25,12 @@ pub struct CompressionParameters {
pub const COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64: CompressionParameters =
CompressionParameters {
br_level: DecompositionLevelCount(1),
br_base_log: DecompositionBaseLog(25),
packing_ks_level: DecompositionLevelCount(2),
packing_ks_base_log: DecompositionBaseLog(8),
br_base_log: DecompositionBaseLog(23),
packing_ks_level: DecompositionLevelCount(4),
packing_ks_base_log: DecompositionBaseLog(4),
packing_ks_polynomial_size: PolynomialSize(256),
packing_ks_glwe_dimension: GlweDimension(5),
packing_ks_glwe_dimension: GlweDimension(4),
lwe_per_glwe: LweCiphertextCount(256),
storage_log_modulus: CiphertextModulusLog(11),
packing_ks_key_noise_distribution: DynamicDistribution::new_t_uniform(36),
};
pub const COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64: CompressionParameters =
CompressionParameters {
br_level: DecompositionLevelCount(1),
br_base_log: DecompositionBaseLog(25),
packing_ks_level: DecompositionLevelCount(2),
packing_ks_base_log: DecompositionBaseLog(8),
packing_ks_polynomial_size: PolynomialSize(256),
packing_ks_glwe_dimension: GlweDimension(5),
lwe_per_glwe: LweCiphertextCount(256),
storage_log_modulus: CiphertextModulusLog(11),
packing_ks_key_noise_distribution: DynamicDistribution::new_gaussian_from_std_dev(
StandardDev(1.6173527465097522e-09),
),
storage_log_modulus: CiphertextModulusLog(12),
packing_ks_key_noise_distribution: DynamicDistribution::new_t_uniform(42),
};

View File

@@ -42,7 +42,9 @@ pub use crate::shortint::parameters::classic::gaussian::p_fail_2_minus_64::ks_pb
pub use crate::shortint::parameters::classic::gaussian::p_fail_2_minus_64::pbs_ks::*;
pub use crate::shortint::parameters::classic::tuniform::p_fail_2_minus_64::ks_pbs::*;
pub use crate::shortint::parameters::classic::tuniform::p_fail_2_minus_64::pbs_ks::*;
pub use crate::shortint::parameters::list_compression::CompressionParameters;
pub use crate::shortint::parameters::list_compression::{
CompressionParameters, COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
};
pub use compact_public_key_only::{
CastingFunctionsOwned, CastingFunctionsView, CompactCiphertextListExpansionKind,
CompactPublicKeyEncryptionParameters, ShortintCompactCiphertextListCastingMode,
@@ -887,8 +889,3 @@ pub const PARAM_SMALL_MESSAGE_1_CARRY_1: ClassicPBSParameters = PARAM_MESSAGE_1_
pub const PARAM_SMALL_MESSAGE_2_CARRY_2: ClassicPBSParameters = PARAM_MESSAGE_2_CARRY_2_PBS_KS;
pub const PARAM_SMALL_MESSAGE_3_CARRY_3: ClassicPBSParameters = PARAM_MESSAGE_3_CARRY_3_PBS_KS;
pub const PARAM_SMALL_MESSAGE_4_CARRY_4: ClassicPBSParameters = PARAM_MESSAGE_4_CARRY_4_PBS_KS;
pub const COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS: CompressionParameters =
list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M64;
pub const COMP_PARAM_MESSAGE_2_CARRY_2: CompressionParameters = COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS;

View File

@@ -11,16 +11,12 @@ use crate::core_crypto::entities::*;
use crate::shortint::{ServerKey, WopbsParameters};
use serde::{Deserialize, Serialize};
use tfhe_versionable::Versionize;
use super::backward_compatibility::wopbs::WopbsKeyVersions;
#[cfg(all(test, feature = "experimental"))]
mod test;
// Struct for WoPBS based on the private functional packing keyswitch.
#[derive(Clone, Debug, Serialize, Deserialize, Versionize)]
#[versionize(WopbsKeyVersions)]
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct WopbsKey {
//Key for the private functional keyswitch
pub wopbs_server_key: ServerKey,

View File

@@ -1,11 +1,6 @@
#![allow(deprecated)]
use super::shortint::load_params;
use crate::{load_and_unversionize, TestedModule};
use std::path::Path;
use tfhe::backward_compatibility::booleans::{CompactFheBool, CompactFheBoolList};
use tfhe::backward_compatibility::integers::{
CompactFheInt8, CompactFheInt8List, CompactFheUint8, CompactFheUint8List,
};
use tfhe::prelude::{CiphertextList, FheDecrypt, FheEncrypt};
use tfhe::shortint::PBSParameters;
#[cfg(feature = "zk-pok")]
@@ -21,17 +16,12 @@ use tfhe_backward_compat_data::load::{
load_versioned_auxiliary, DataFormat, TestFailure, TestResult, TestSuccess,
};
use tfhe_backward_compat_data::{
DataKind, HlBoolCiphertextListTest, HlBoolCiphertextTest, HlCiphertextListTest,
HlCiphertextTest, HlClientKeyTest, HlHeterogeneousCiphertextListTest, HlPublicKeyTest,
HlServerKeyTest, HlSignedCiphertextListTest, HlSignedCiphertextTest, TestMetadata,
TestParameterSet, TestType, Testcase, ZkPkePublicParamsTest,
DataKind, HlBoolCiphertextTest, HlCiphertextTest, HlClientKeyTest,
HlHeterogeneousCiphertextListTest, HlPublicKeyTest, HlServerKeyTest, HlSignedCiphertextTest,
TestMetadata, TestParameterSet, TestType, Testcase, ZkPkePublicParamsTest,
};
use tfhe_versionable::Unversionize;
use crate::{load_and_unversionize, TestedModule};
use super::shortint::load_params;
fn load_hl_params(test_params: &TestParameterSet) -> PBSParameters {
let pbs_params = load_params(test_params);
@@ -57,9 +47,6 @@ pub fn test_hl_ciphertext(
let ct = if test.compressed {
let compressed: CompressedFheUint8 = load_and_unversionize(dir, test, format)?;
compressed.decompress()
} else if test.compact {
let compact: CompactFheUint8 = load_and_unversionize(dir, test, format)?;
compact.expand().unwrap()
} else {
load_and_unversionize(dir, test, format)?
};
@@ -98,9 +85,6 @@ pub fn test_hl_signed_ciphertext(
let ct = if test.compressed {
let compressed: CompressedFheInt8 = load_and_unversionize(dir, test, format)?;
compressed.decompress()
} else if test.compact {
let compact: CompactFheInt8 = load_and_unversionize(dir, test, format)?;
compact.expand().unwrap()
} else {
load_and_unversionize(dir, test, format)?
};
@@ -139,9 +123,6 @@ pub fn test_hl_bool_ciphertext(
let ct = if test.compressed {
let compressed: CompressedFheBool = load_and_unversionize(dir, test, format)?;
compressed.decompress()
} else if test.compact {
let compact: CompactFheBool = load_and_unversionize(dir, test, format)?;
compact.expand().unwrap()
} else {
load_and_unversionize(dir, test, format)?
};
@@ -161,108 +142,6 @@ pub fn test_hl_bool_ciphertext(
}
}
/// Test HL ciphertext list: loads the ciphertext list and compare the decrypted values to the ones
/// in the metadata.
pub fn test_hl_ciphertext_list(
dir: &Path,
test: &HlCiphertextListTest,
format: DataFormat,
) -> Result<TestSuccess, TestFailure> {
let key_file = dir.join(&*test.key_filename);
let key = ClientKey::unversionize(
load_versioned_auxiliary(key_file).map_err(|e| test.failure(e, format))?,
)
.map_err(|e| test.failure(e, format))?;
let server_key = key.generate_server_key();
set_server_key(server_key);
let compact: CompactFheUint8List = load_and_unversionize(dir, test, format)?;
let ct_list = compact.expand().unwrap();
let clear_list: Vec<u8> = ct_list.into_iter().map(|ct| ct.decrypt(&key)).collect();
let ref_values: Vec<u8> = test.clear_values.iter().map(|v| *v as u8).collect();
if clear_list != ref_values {
Err(test.failure(
format!(
"Invalid {} decrypted cleartext:\n Expected :\n{:?}\nGot:\n{:?}",
format, clear_list, ref_values
),
format,
))
} else {
Ok(test.success(format))
}
}
/// Test HL signed ciphertext list: loads the ciphertext list and compare the decrypted values to
/// the ones in the metadata.
pub fn test_hl_signed_ciphertext_list(
dir: &Path,
test: &HlSignedCiphertextListTest,
format: DataFormat,
) -> Result<TestSuccess, TestFailure> {
let key_file = dir.join(&*test.key_filename);
let key = ClientKey::unversionize(
load_versioned_auxiliary(key_file).map_err(|e| test.failure(e, format))?,
)
.map_err(|e| test.failure(e, format))?;
let server_key = key.generate_server_key();
set_server_key(server_key);
let compact: CompactFheInt8List = load_and_unversionize(dir, test, format)?;
let ct_list = compact.expand().unwrap();
let clear_list: Vec<i8> = ct_list.into_iter().map(|ct| ct.decrypt(&key)).collect();
let ref_values: Vec<i8> = test.clear_values.iter().map(|v| *v as i8).collect();
if clear_list != ref_values {
Err(test.failure(
format!(
"Invalid {} decrypted cleartext:\n Expected :\n{:?}\nGot:\n{:?}",
format, clear_list, ref_values
),
format,
))
} else {
Ok(test.success(format))
}
}
/// Test HL bool ciphertext list: loads the ciphertext list and compare the decrypted values to the
/// ones in the metadata.
pub fn test_hl_bool_ciphertext_list(
dir: &Path,
test: &HlBoolCiphertextListTest,
format: DataFormat,
) -> Result<TestSuccess, TestFailure> {
let key_file = dir.join(&*test.key_filename);
let key = ClientKey::unversionize(
load_versioned_auxiliary(key_file).map_err(|e| test.failure(e, format))?,
)
.map_err(|e| test.failure(e, format))?;
let server_key = key.generate_server_key();
set_server_key(server_key);
let compact: CompactFheBoolList = load_and_unversionize(dir, test, format)?;
let ct_list = compact.expand().unwrap();
let clear_list: Vec<bool> = ct_list.into_iter().map(|ct| ct.decrypt(&key)).collect();
let ref_values: Vec<bool> = test.clear_values.iter().copied().collect();
if clear_list != ref_values {
Err(test.failure(
format!(
"Invalid {} decrypted cleartext:\n Expected :\n{:?}\nGot:\n{:?}",
format, clear_list, ref_values
),
format,
))
} else {
Ok(test.success(format))
}
}
/// Test Zk Public params
pub fn test_zk_params(
dir: &Path,
@@ -520,15 +399,6 @@ impl TestedModule for Hl {
TestMetadata::HlBoolCiphertext(test) => {
test_hl_bool_ciphertext(test_dir.as_ref(), test, format).into()
}
TestMetadata::HlBoolCiphertextList(test) => {
test_hl_bool_ciphertext_list(test_dir.as_ref(), test, format).into()
}
TestMetadata::HlCiphertextList(test) => {
test_hl_ciphertext_list(test_dir.as_ref(), test, format).into()
}
TestMetadata::HlSignedCiphertextList(test) => {
test_hl_signed_ciphertext_list(test_dir.as_ref(), test, format).into()
}
TestMetadata::HlHeterogeneousCiphertextList(test) => {
test_hl_heterogeneous_ciphertext_list(test_dir.as_ref(), test, format).into()
}

View File

@@ -16,6 +16,7 @@ import init, {
FheUint8,
ZkComputeLoad,
CompactPkeCrs,
CompactPkePublicParams,
CompactCiphertextList,
ProvenCompactCiphertextList,
ShortintCompactPublicKeyEncryptionParameters,
@@ -56,7 +57,7 @@ async function compressedPublicKeyTest() {
let compressedPublicKey = TfheCompressedPublicKey.new(clientKey);
console.timeEnd("CompressedPublicKey Gen");
let data = compressedPublicKey.serialize();
let data = compressedPublicKey.safe_serialize(BigInt(10000000));
console.log("CompressedPublicKey size:", data.length);
console.time("CompressedPublicKey Decompression");
@@ -67,7 +68,7 @@ async function compressedPublicKeyTest() {
let encrypted = FheUint8.encrypt_with_public_key(255, publicKey);
console.timeEnd("FheUint8 encrypt with CompressedPublicKey");
let ser = encrypted.serialize();
let ser = encrypted.safe_serialize(BigInt(10000000));
console.log("Ciphertext Size", ser.length);
let decrypted = encrypted.decrypt(clientKey);
@@ -89,7 +90,7 @@ async function publicKeyTest() {
let encrypted = FheUint8.encrypt_with_public_key(255, publicKey);
console.timeEnd("FheUint8 encrypt with PublicKey");
let ser = encrypted.serialize();
let ser = encrypted.safe_serialize(BigInt(10000000));
console.log("Ciphertext Size", ser.length);
let decrypted = encrypted.decrypt(clientKey);
@@ -136,13 +137,13 @@ async function compactPublicKeyBench32BitOnConfig(config) {
console.log("CompactFheUint32List Encrypt bench: ", timing_2, " ms");
bench_results["compact_fheunit32_list_encrypt_mean"] = timing_2;
let serialized_list = compact_list.serialize();
let serialized_list = compact_list.safe_serialize(BigInt(10000000));
console.log("Serialized CompactFheUint32List size: ", serialized_list.length);
// Bench the serialization for bench_loops iterations
start = performance.now();
for (let i = 0; i < bench_loops; i++) {
let _ = compact_list.serialize();
let _ = compact_list.safe_serialize(BigInt(10000000));
}
end = performance.now();
const timing_3 = (end - start) / bench_loops;
@@ -196,7 +197,7 @@ async function compressedCompactPublicKeyTest256BitOnConfig(config) {
let publicKey = TfheCompressedCompactPublicKey.new(clientKey);
console.timeEnd("CompressedCompactPublicKey Gen");
let serialized_pk = publicKey.serialize();
let serialized_pk = publicKey.safe_serialize(BigInt(10000000));
console.log(
"Serialized CompressedCompactPublicKey size: ",
serialized_pk.length,
@@ -378,6 +379,13 @@ async function compactPublicKeyZeroKnowledgeTest() {
console.timeEnd("CRS generation");
let public_params = crs.public_params();
let serialized = public_params.safe_serialize(BigInt(1000000000));
console.log("CompactPkePublicParams size:", serialized.length);
let deserialized = CompactPkePublicParams.safe_deserialize(
serialized,
BigInt(1000000000),
);
// 320 bits is a use case we have, 8 bits per byte
const metadata = new Uint8Array(320 / 8);
crypto.getRandomValues(metadata);
@@ -400,9 +408,12 @@ async function compactPublicKeyZeroKnowledgeTest() {
" ms",
);
let serialized = list.serialize();
let serialized = list.safe_serialize(BigInt(10000000));
console.log("CompactCiphertextList size:", serialized.length);
let deserialized = ProvenCompactCiphertextList.deserialize(serialized);
let deserialized = ProvenCompactCiphertextList.safe_deserialize(
serialized,
BigInt(10000000),
);
let expander = deserialized.verify_and_expand(
public_params,
@@ -529,7 +540,7 @@ async function compactPublicKeyBench256BitOnConfig(config) {
console.log("CompactFheUint256List Encrypt bench: ", timing_2, " ms");
bench_results["compact_fheunit256_list_encrypt_mean"] = timing_2;
let serialized_list = compact_list.serialize();
let serialized_list = compact_list.safe_serialize(BigInt(10000000));
console.log(
"Serialized CompactFheUint256List size: ",
serialized_list.length,
@@ -538,7 +549,7 @@ async function compactPublicKeyBench256BitOnConfig(config) {
// Bench the serialization for bench_loops iterations
start = performance.now();
for (let i = 0; i < bench_loops; i++) {
let _ = compact_list.serialize();
let _ = compact_list.safe_serialize(BigInt(10000000));
}
end = performance.now();
const timing_3 = (end - start) / bench_loops;
@@ -592,13 +603,13 @@ async function compressedServerKeyBenchConfig(config) {
bench_results["compressed_server_key_gen_mean"] = timing_1;
let serverKey = TfheCompressedServerKey.new(clientKey);
let serialized_key = serverKey.serialize();
let serialized_key = serverKey.safe_serialize(BigInt(1000000000));
console.log("Serialized ServerKey size: ", serialized_key.length);
// Bench the serialization for bench_loops iterations
start = performance.now();
for (let i = 0; i < bench_loops; i++) {
let _ = serverKey.serialize();
let _ = serverKey.safe_serialize(BigInt(1000000000));
}
end = performance.now();
const timing_2 = (end - start) / bench_loops;
@@ -704,7 +715,7 @@ async function compactPublicKeyZeroKnowledgeBench() {
const end = performance.now();
console.timeEnd("Loop " + i);
timing += end - start;
serialized_size = list.serialize().length;
serialized_size = list.safe_serialize(BigInt(10000000)).length;
}
const mean = timing / bench_loops;
const common_bench_str =

View File

@@ -8,7 +8,7 @@ documentation = "https://docs.rs/tfhe_versionable"
repository = "https://github.com/zama-ai/tfhe-rs"
license = "BSD-3-Clause-Clear"
description = "tfhe-versionable: Add versioning informations/backward compatibility on rust types used for serialization"
rust-version = "1.76"
rust-version = "1.81"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html