feat(gpu): signed addition

commit 1c209403a6 (parent 347fc9aaa7)
Author: Agnes Leroy
Date: 2024-02-27 11:29:26 +01:00
Committed by: Agnès Leroy
27 changed files with 4046 additions and 3523 deletions
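At a glance, the commit replaces the single `CudaRadixCiphertext` used everywhere with a raw inner type plus two thin wrappers, tied together by a new `CudaIntegerRadixCiphertext` trait. A condensed sketch of the new layout (all names taken from the hunks below):

```rust
// Condensed from the diff below; not a complete listing.
pub struct CudaRadixCiphertext {
    pub d_blocks: CudaLweCiphertextList<u64>, // LWE blocks in GPU memory
    pub info: CudaRadixCiphertextInfo,        // per-block metadata (degree, moduli, ...)
}

// Thin newtypes that record signedness at the type level:
pub struct CudaUnsignedRadixCiphertext { pub ciphertext: CudaRadixCiphertext }
pub struct CudaSignedRadixCiphertext   { pub ciphertext: CudaRadixCiphertext }

// Shared behaviour, implemented by both wrappers:
pub trait CudaIntegerRadixCiphertext: Sized {
    const IS_SIGNED: bool;
    fn as_ref(&self) -> &CudaRadixCiphertext;
    fn as_mut(&mut self) -> &mut CudaRadixCiphertext;
    fn from(ct: CudaRadixCiphertext) -> Self;
}
```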

View File

@@ -1,6 +1,8 @@
use crate::high_level_api::details::MaybeCloned;
#[cfg(feature = "gpu")]
use crate::high_level_api::global_state::{self, with_thread_local_cuda_stream};
#[cfg(feature = "gpu")]
use crate::integer::gpu::ciphertext::CudaIntegerRadixCiphertext;
use crate::integer::BooleanBlock;
use crate::Device;
use serde::{Deserializer, Serializer};
@@ -9,7 +11,7 @@ use serde::{Deserializer, Serializer};
pub(in crate::high_level_api) enum InnerBoolean {
Cpu(BooleanBlock),
#[cfg(feature = "gpu")]
Cuda(crate::integer::gpu::ciphertext::CudaRadixCiphertext),
Cuda(crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext),
}
impl Clone for InnerBoolean {
@@ -54,8 +56,8 @@ impl From<BooleanBlock> for InnerBoolean {
}
#[cfg(feature = "gpu")]
impl From<crate::integer::gpu::ciphertext::CudaRadixCiphertext> for InnerBoolean {
fn from(value: crate::integer::gpu::ciphertext::CudaRadixCiphertext) -> Self {
impl From<crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext> for InnerBoolean {
fn from(value: crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext) -> Self {
Self::Cuda(value)
}
}
@@ -87,12 +89,12 @@ impl InnerBoolean {
#[cfg(feature = "gpu")]
pub(crate) fn on_gpu(
&self,
) -> MaybeCloned<'_, crate::integer::gpu::ciphertext::CudaRadixCiphertext> {
) -> MaybeCloned<'_, crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext> {
match self {
Self::Cpu(ct) => with_thread_local_cuda_stream(|stream| {
let ct_as_radix = crate::integer::RadixCiphertext::from(vec![ct.0.clone()]);
let cuda_ct =
crate::integer::gpu::ciphertext::CudaRadixCiphertext::from_radix_ciphertext(
crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext::from_radix_ciphertext(
&ct_as_radix,
stream,
);
@@ -118,7 +120,7 @@ impl InnerBoolean {
#[track_caller]
pub(crate) fn as_gpu_mut(
&mut self,
) -> &mut crate::integer::gpu::ciphertext::CudaRadixCiphertext {
) -> &mut crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext {
if let Self::Cuda(radix_ct) = self {
radix_ct
} else {
@@ -140,7 +142,7 @@ impl InnerBoolean {
(Self::Cpu(ct), Device::CudaGpu) => {
let ct_as_radix = crate::integer::RadixCiphertext::from(vec![ct.0.clone()]);
let new_inner = with_thread_local_cuda_stream(|stream| {
crate::integer::gpu::ciphertext::CudaRadixCiphertext::from_radix_ciphertext(
crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext::from_radix_ciphertext(
&ct_as_radix,
stream,
)

View File

@@ -1,13 +1,15 @@
use crate::high_level_api::details::MaybeCloned;
#[cfg(feature = "gpu")]
use crate::high_level_api::global_state::{self, with_thread_local_cuda_stream};
#[cfg(feature = "gpu")]
use crate::integer::gpu::ciphertext::CudaIntegerRadixCiphertext;
use crate::Device;
use serde::{Deserializer, Serializer};
pub(crate) enum RadixCiphertext {
Cpu(crate::integer::RadixCiphertext),
#[cfg(feature = "gpu")]
Cuda(crate::integer::gpu::ciphertext::CudaRadixCiphertext),
Cuda(crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext),
}
impl From<crate::integer::RadixCiphertext> for RadixCiphertext {
@@ -17,8 +19,8 @@ impl From<crate::integer::RadixCiphertext> for RadixCiphertext {
}
#[cfg(feature = "gpu")]
impl From<crate::integer::gpu::ciphertext::CudaRadixCiphertext> for RadixCiphertext {
fn from(value: crate::integer::gpu::ciphertext::CudaRadixCiphertext) -> Self {
impl From<crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext> for RadixCiphertext {
fn from(value: crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext) -> Self {
Self::Cuda(value)
}
}
@@ -83,11 +85,11 @@ impl RadixCiphertext {
#[cfg(feature = "gpu")]
pub(crate) fn on_gpu(
&self,
) -> MaybeCloned<'_, crate::integer::gpu::ciphertext::CudaRadixCiphertext> {
) -> MaybeCloned<'_, crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext> {
match self {
Self::Cpu(ct) => with_thread_local_cuda_stream(|stream| {
let ct =
crate::integer::gpu::ciphertext::CudaRadixCiphertext::from_radix_ciphertext(
crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext::from_radix_ciphertext(
ct, stream,
);
MaybeCloned::Cloned(ct)
@@ -111,7 +113,7 @@ impl RadixCiphertext {
#[cfg(feature = "gpu")]
pub(crate) fn as_gpu_mut(
&mut self,
) -> &mut crate::integer::gpu::ciphertext::CudaRadixCiphertext {
) -> &mut crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext {
if let Self::Cuda(radix_ct) = self {
radix_ct
} else {
@@ -142,7 +144,7 @@ impl RadixCiphertext {
#[cfg(feature = "gpu")]
(Self::Cpu(ct), Device::CudaGpu) => {
let new_inner = with_thread_local_cuda_stream(|stream| {
crate::integer::gpu::ciphertext::CudaRadixCiphertext::from_radix_ciphertext(
crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext::from_radix_ciphertext(
ct, stream,
)
});

View File

@@ -13,6 +13,8 @@ use crate::high_level_api::traits::{
DivRem, FheBootstrap, FheEq, FheMax, FheMin, FheOrd, RotateLeft, RotateLeftAssign, RotateRight,
RotateRightAssign,
};
#[cfg(feature = "gpu")]
use crate::integer::gpu::ciphertext::CudaIntegerRadixCiphertext;
use crate::{FheBool, FheUint};
use std::ops::{
Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
@@ -734,7 +736,7 @@ generic_integer_impl_operation!(
InternalServerKey::Cuda(cuda_key) => {
with_thread_local_cuda_stream(|stream| {
let inner_result = cuda_key.key
.add(&lhs.ciphertext.on_gpu(), &rhs.ciphertext.on_gpu(), stream);
.add(&*lhs.ciphertext.on_gpu(), &*rhs.ciphertext.on_gpu(), stream);
FheUint::new(inner_result)
})
}
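The extra `&*` is needed because `on_gpu()` returns a `MaybeCloned` guard rather than the ciphertext itself. Previously, deref coercion silently turned `&MaybeCloned<CudaRadixCiphertext>` into the concrete `&CudaRadixCiphertext` parameter; now that `add` takes a generic `&T`, coercion cannot guide type inference, so the guard is dereferenced explicitly. A minimal sketch of the guard type, assuming the shape suggested by its uses in this diff (`MaybeCloned::Cloned(ct)`, `MaybeCloned<'_, T>`):

```rust
use std::ops::Deref;

// Assumed shape, inferred from the hunks in this commit.
pub enum MaybeCloned<'a, T> {
    Borrowed(&'a T),
    Cloned(T),
}

impl<T> Deref for MaybeCloned<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        match self {
            Self::Borrowed(t) => t,
            Self::Cloned(t) => t,
        }
    }
}
```

With that, `&*lhs.ciphertext.on_gpu()` reborrows the guard as `&CudaUnsignedRadixCiphertext`, which satisfies the generic `&T` parameter.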

View File

@@ -1021,7 +1021,7 @@ generic_integer_impl_scalar_left_operation!(
#[cfg(feature = "gpu")]
InternalServerKey::Cuda(cuda_key) => {
with_thread_local_cuda_stream(|stream| {
let mut result = cuda_key.key.create_trivial_radix(lhs, rhs.ciphertext.on_gpu().info.blocks.len(), stream);
let mut result = cuda_key.key.create_trivial_radix(lhs, rhs.ciphertext.on_gpu().ciphertext.info.blocks.len(), stream);
cuda_key.key.sub_assign(&mut result, &rhs.ciphertext.on_gpu(), stream);
RadixCiphertext::Cuda(result)
})
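In the hunk above, `.on_gpu().info` becomes `.on_gpu().ciphertext.info`: the GPU path now yields the wrapper, which does not expose `info` directly. Block metadata is reached through the public `ciphertext` field or through the new trait; a small helper sketch, assuming the types as defined elsewhere in this commit:

```rust
use tfhe::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};

/// Number of radix blocks of a GPU ciphertext; both access paths are equivalent.
fn num_blocks(ct: &CudaUnsignedRadixCiphertext) -> usize {
    debug_assert_eq!(ct.ciphertext.info.blocks.len(), ct.as_ref().info.blocks.len());
    ct.as_ref().info.blocks.len()
}
```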

View File

@@ -5,17 +5,90 @@ use crate::core_crypto::gpu::vec::CudaVec;
use crate::core_crypto::gpu::CudaStream;
use crate::core_crypto::prelude::{LweCiphertextList, LweCiphertextOwned};
use crate::integer::gpu::ciphertext::info::{CudaBlockInfo, CudaRadixCiphertextInfo};
use crate::integer::RadixCiphertext;
use crate::integer::{RadixCiphertext, SignedRadixCiphertext};
use crate::shortint::Ciphertext;
pub trait CudaIntegerRadixCiphertext: Sized {
const IS_SIGNED: bool;
fn as_ref(&self) -> &CudaRadixCiphertext;
fn as_mut(&mut self) -> &mut CudaRadixCiphertext;
fn from(ct: CudaRadixCiphertext) -> Self;
fn duplicate(&self, stream: &CudaStream) -> Self {
Self::from(self.as_ref().duplicate(stream))
}
/// # Safety
///
/// - `stream` __must__ be synchronized to guarantee computation has finished, and inputs must
/// not be dropped until stream is synchronized
unsafe fn duplicate_async(&self, stream: &CudaStream) -> Self {
Self::from(self.as_ref().duplicate_async(stream))
}
fn block_carries_are_empty(&self) -> bool {
self.as_ref()
.info
.blocks
.iter()
.all(CudaBlockInfo::carry_is_empty)
}
fn is_equal(&self, other: &Self, stream: &CudaStream) -> bool {
self.as_ref().is_equal(other.as_ref(), stream)
}
}
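Every default method of this trait works for both signedness variants, so downstream code can be written once against the trait. An illustrative helper (not part of this commit):

```rust
use tfhe::core_crypto::gpu::CudaStream;
use tfhe::integer::gpu::ciphertext::CudaIntegerRadixCiphertext;

/// Duplicates a ciphertext on the GPU and checks the copy block-for-block,
/// for signed and unsigned ciphertexts alike.
fn checked_duplicate<T: CudaIntegerRadixCiphertext>(ct: &T, stream: &CudaStream) -> T {
    let copy = ct.duplicate(stream); // default method: duplicate_async + synchronize
    assert!(ct.is_equal(&copy, stream), "device copy must match its source");
    copy
}
```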
pub struct CudaRadixCiphertext {
pub d_blocks: CudaLweCiphertextList<u64>,
pub info: CudaRadixCiphertextInfo,
}
impl CudaRadixCiphertext {
pub struct CudaUnsignedRadixCiphertext {
pub ciphertext: CudaRadixCiphertext,
}
pub struct CudaSignedRadixCiphertext {
pub ciphertext: CudaRadixCiphertext,
}
impl CudaIntegerRadixCiphertext for CudaUnsignedRadixCiphertext {
const IS_SIGNED: bool = false;
fn as_ref(&self) -> &CudaRadixCiphertext {
&self.ciphertext
}
fn as_mut(&mut self) -> &mut CudaRadixCiphertext {
&mut self.ciphertext
}
fn from(ct: CudaRadixCiphertext) -> Self {
Self { ciphertext: ct }
}
}
impl CudaIntegerRadixCiphertext for CudaSignedRadixCiphertext {
const IS_SIGNED: bool = true;
fn as_ref(&self) -> &CudaRadixCiphertext {
&self.ciphertext
}
fn as_mut(&mut self) -> &mut CudaRadixCiphertext {
&mut self.ciphertext
}
fn from(ct: CudaRadixCiphertext) -> Self {
Self { ciphertext: ct }
}
}
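Since both wrappers are plain newtypes over the same inner data, converting between signedness interpretations is just a move of the inner value. An illustrative sketch (not an API added by this commit):

```rust
use tfhe::integer::gpu::ciphertext::{
    CudaIntegerRadixCiphertext, CudaSignedRadixCiphertext, CudaUnsignedRadixCiphertext,
};

/// Reinterpret the same GPU blocks as an unsigned radix ciphertext.
fn reinterpret_as_unsigned(ct: CudaSignedRadixCiphertext) -> CudaUnsignedRadixCiphertext {
    // The trait's `from` wraps a raw CudaRadixCiphertext without copying device data.
    <CudaUnsignedRadixCiphertext as CudaIntegerRadixCiphertext>::from(ct.ciphertext)
}
```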
impl CudaUnsignedRadixCiphertext {
pub fn new(d_blocks: CudaLweCiphertextList<u64>, info: CudaRadixCiphertextInfo) -> Self {
Self { d_blocks, info }
Self {
ciphertext: CudaRadixCiphertext { d_blocks, info },
}
}
/// Copies a RadixCiphertext to GPU memory
///
@@ -23,7 +96,7 @@ impl CudaRadixCiphertext {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
/// let size = 4;
@@ -40,7 +113,7 @@ impl CudaRadixCiphertext {
/// // Encrypt one message
/// let ctxt = cks.encrypt(clear);
///
/// let mut d_ctxt = CudaRadixCiphertext::from_radix_ciphertext(&ctxt, &mut stream);
/// let mut d_ctxt = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt, &mut stream);
/// let mut h_ctxt = d_ctxt.to_radix_ciphertext(&mut stream);
///
/// assert_eq!(h_ctxt, ctxt);
@@ -76,7 +149,9 @@ impl CudaRadixCiphertext {
.collect(),
};
Self { d_blocks, info }
Self {
ciphertext: CudaRadixCiphertext { d_blocks, info },
}
}
pub fn copy_from_radix_ciphertext(&mut self, radix: &RadixCiphertext, stream: &CudaStream) {
@@ -87,14 +162,15 @@ impl CudaRadixCiphertext {
.collect::<Vec<_>>();
unsafe {
self.d_blocks
self.ciphertext
.d_blocks
.0
.d_vec
.copy_from_cpu_async(h_radix_ciphertext.as_mut_slice(), stream);
}
stream.synchronize();
self.info = CudaRadixCiphertextInfo {
self.ciphertext.info = CudaRadixCiphertextInfo {
blocks: radix
.blocks
.iter()
@@ -111,7 +187,7 @@ impl CudaRadixCiphertext {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -127,21 +203,21 @@ impl CudaRadixCiphertext {
/// let ct1 = cks.encrypt(msg1);
///
/// // Copy to GPU
/// let d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let ct2 = d_ct1.to_radix_ciphertext(&mut stream);
/// let msg2 = cks.decrypt(&ct2);
///
/// assert_eq!(msg1, msg2);
/// ```
pub fn to_radix_ciphertext(&self, stream: &CudaStream) -> RadixCiphertext {
let h_lwe_ciphertext_list = self.d_blocks.to_lwe_ciphertext_list(stream);
let h_lwe_ciphertext_list = self.ciphertext.d_blocks.to_lwe_ciphertext_list(stream);
let ciphertext_modulus = h_lwe_ciphertext_list.ciphertext_modulus();
let lwe_size = h_lwe_ciphertext_list.lwe_size().0;
let h_blocks: Vec<Ciphertext> = h_lwe_ciphertext_list
.into_container()
.chunks(lwe_size)
.zip(&self.info.blocks)
.zip(&self.ciphertext.info.blocks)
.map(|(data, i)| Ciphertext {
ct: LweCiphertextOwned::from_container(data.to_vec(), ciphertext_modulus),
degree: i.degree,
@@ -154,12 +230,203 @@ impl CudaRadixCiphertext {
RadixCiphertext::from(h_blocks)
}
}
impl CudaSignedRadixCiphertext {
pub fn new(d_blocks: CudaLweCiphertextList<u64>, info: CudaRadixCiphertextInfo) -> Self {
Self {
ciphertext: CudaRadixCiphertext { d_blocks, info },
}
}
/// Copies a SignedRadixCiphertext to GPU memory
///
/// # Example
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaSignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
/// let size = 4;
///
/// let gpu_index = 0;
/// let device = CudaDevice::new(gpu_index);
/// let mut stream = CudaStream::new_unchecked(device);
///
/// // Generate the client key and the server key:
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, size, &mut stream);
///
/// let clear: i64 = 255;
///
/// // Encrypt one message
/// let ctxt = cks.encrypt_signed(clear);
///
/// let mut d_ctxt = CudaSignedRadixCiphertext::from_signed_radix_ciphertext(&ctxt, &mut stream);
/// let mut h_ctxt = d_ctxt.to_signed_radix_ciphertext(&mut stream);
///
/// assert_eq!(h_ctxt, ctxt);
/// ```
pub fn from_signed_radix_ciphertext(
radix: &SignedRadixCiphertext,
stream: &CudaStream,
) -> Self {
let mut h_radix_ciphertext = radix
.blocks
.iter()
.flat_map(|block| block.ct.clone().into_container())
.collect::<Vec<_>>();
let lwe_size = radix.blocks.first().unwrap().ct.lwe_size();
let ciphertext_modulus = radix.blocks.first().unwrap().ct.ciphertext_modulus();
let h_ct = LweCiphertextList::from_container(
h_radix_ciphertext.as_mut_slice(),
lwe_size,
ciphertext_modulus,
);
let d_blocks = CudaLweCiphertextList::from_lwe_ciphertext_list(&h_ct, stream);
let info = CudaRadixCiphertextInfo {
blocks: radix
.blocks
.iter()
.map(|block| CudaBlockInfo {
degree: block.degree,
message_modulus: block.message_modulus,
carry_modulus: block.carry_modulus,
pbs_order: block.pbs_order,
noise_level: block.noise_level(),
})
.collect(),
};
Self {
ciphertext: CudaRadixCiphertext { d_blocks, info },
}
}
pub fn copy_from_signed_radix_ciphertext(
&mut self,
radix: &SignedRadixCiphertext,
stream: &CudaStream,
) {
let mut h_radix_ciphertext = radix
.blocks
.iter()
.flat_map(|block| block.ct.clone().into_container())
.collect::<Vec<_>>();
unsafe {
self.ciphertext
.d_blocks
.0
.d_vec
.copy_from_cpu_async(h_radix_ciphertext.as_mut_slice(), stream);
}
stream.synchronize();
self.ciphertext.info = CudaRadixCiphertextInfo {
blocks: radix
.blocks
.iter()
.map(|block| CudaBlockInfo {
degree: block.degree,
message_modulus: block.message_modulus,
carry_modulus: block.carry_modulus,
pbs_order: block.pbs_order,
noise_level: block.noise_level(),
})
.collect(),
};
}
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaSignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
/// let gpu_index = 0;
/// let device = CudaDevice::new(gpu_index);
/// let mut stream = CudaStream::new_unchecked(device);
///
/// // Generate the client key and the server key:
/// let num_blocks = 4;
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let msg1 = 10i32;
/// let ct1 = cks.encrypt_signed(msg1);
///
/// // Copy to GPU
/// let d_ct1 = CudaSignedRadixCiphertext::from_signed_radix_ciphertext(&ct1, &mut stream);
/// let ct2 = d_ct1.to_signed_radix_ciphertext(&mut stream);
/// let msg2 = cks.decrypt_signed(&ct2);
///
/// assert_eq!(msg1, msg2);
/// ```
pub fn to_signed_radix_ciphertext(&self, stream: &CudaStream) -> SignedRadixCiphertext {
let h_lwe_ciphertext_list = self.ciphertext.d_blocks.to_lwe_ciphertext_list(stream);
let ciphertext_modulus = h_lwe_ciphertext_list.ciphertext_modulus();
let lwe_size = h_lwe_ciphertext_list.lwe_size().0;
let h_blocks: Vec<Ciphertext> = h_lwe_ciphertext_list
.into_container()
.chunks(lwe_size)
.zip(&self.ciphertext.info.blocks)
.map(|(data, i)| Ciphertext {
ct: LweCiphertextOwned::from_container(data.to_vec(), ciphertext_modulus),
degree: i.degree,
noise_level: i.noise_level,
message_modulus: i.message_modulus,
carry_modulus: i.carry_modulus,
pbs_order: i.pbs_order,
})
.collect();
SignedRadixCiphertext::from(h_blocks)
}
}
impl CudaRadixCiphertext {
pub fn new(d_blocks: CudaLweCiphertextList<u64>, info: CudaRadixCiphertextInfo) -> Self {
Self { d_blocks, info }
}
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaSignedRadixCiphertext};
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
/// let gpu_index = 0;
/// let device = CudaDevice::new(gpu_index);
/// let mut stream = CudaStream::new_unchecked(device);
///
/// // Generate the client key and the server key:
/// let num_blocks = 4;
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let msg = 10i32;
/// let ct = cks.encrypt_signed(msg);
///
/// // Copy to GPU
/// let d_ct = CudaSignedRadixCiphertext::from_signed_radix_ciphertext(&ct, &mut stream);
/// let d_ct_copied = d_ct.duplicate(&mut stream);
///
/// let ct_copied = d_ct_copied.to_signed_radix_ciphertext(&mut stream);
/// let msg_copied = cks.decrypt_signed(&ct_copied);
///
/// assert_eq!(msg, msg_copied);
/// ```
fn duplicate(&self, stream: &CudaStream) -> Self {
let ct = unsafe { self.duplicate_async(stream) };
stream.synchronize();
ct
}
/// # Safety
///
/// - `stream` __must__ be synchronized to guarantee computation has finished, and inputs must
/// not be dropped until stream is synchronized
pub unsafe fn duplicate_async(&self, stream: &CudaStream) -> Self {
unsafe fn duplicate_async(&self, stream: &CudaStream) -> Self {
let lwe_ciphertext_count = self.d_blocks.lwe_ciphertext_count();
let ciphertext_modulus = self.d_blocks.ciphertext_modulus();
@@ -175,39 +442,7 @@ impl CudaRadixCiphertext {
}
}
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
/// let gpu_index = 0;
/// let device = CudaDevice::new(gpu_index);
/// let mut stream = CudaStream::new_unchecked(device);
///
/// // Generate the client key and the server key:
/// let num_blocks = 4;
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let msg = 10u32;
/// let ct = cks.encrypt(msg);
///
/// // Copy to GPU
/// let d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let d_ct_copied = d_ct.duplicate(&mut stream);
///
/// let ct_copied = d_ct_copied.to_radix_ciphertext(&mut stream);
/// let msg_copied = cks.decrypt(&ct_copied);
///
/// assert_eq!(msg, msg_copied);
/// ```
pub fn duplicate(&self, stream: &CudaStream) -> Self {
let ct = unsafe { self.duplicate_async(stream) };
stream.synchronize();
ct
}
pub fn is_equal(&self, other: &Self, stream: &CudaStream) -> bool {
fn is_equal(&self, other: &Self, stream: &CudaStream) -> bool {
let self_size = self.d_blocks.0.d_vec.len();
let other_size = other.d_blocks.0.d_vec.len();
let mut self_container: Vec<u64> = vec![0; self_size];
@@ -228,8 +463,4 @@ impl CudaRadixCiphertext {
self_container == other_container
}
pub(crate) fn block_carries_are_empty(&self) -> bool {
self.info.blocks.iter().all(CudaBlockInfo::carry_is_empty)
}
}
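In the final hunks of this file, `duplicate`, `duplicate_async`, and `is_equal` lose their `pub` on the raw type, and `block_carries_are_empty` moves onto the trait, so existing callers switch to the typed wrappers with the trait in scope. A small sketch:

```rust
use tfhe::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaSignedRadixCiphertext};

/// `block_carries_are_empty` is now trait-provided, so it is callable on both
/// wrappers (here: the signed one).
fn needs_carry_propagation(ct: &CudaSignedRadixCiphertext) -> bool {
    !ct.block_carries_are_empty()
}
```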

View File

@@ -1,5 +1,5 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::CudaIntegerRadixCiphertext;
use crate::integer::gpu::server_key::CudaServerKey;
impl CudaServerKey {
@@ -22,7 +22,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -41,8 +41,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically an addition:
/// let d_ct_res = sks.add(&d_ct1, &d_ct2, &mut stream);
@@ -53,12 +53,12 @@ impl CudaServerKey {
/// let dec_result: u64 = cks.decrypt(&ct_res);
/// assert_eq!(dec_result, msg1 + msg2);
/// ```
pub fn add(
pub fn add<T: CudaIntegerRadixCiphertext>(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &T,
ct_right: &T,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> T {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.add_assign(&mut result, ct_right, stream);
result
@@ -68,10 +68,10 @@ impl CudaServerKey {
///
/// - `stream` __must__ be synchronized to guarantee computation has finished, and inputs must
/// not be dropped until stream is synchronized
pub unsafe fn add_assign_async(
pub unsafe fn add_assign_async<T: CudaIntegerRadixCiphertext>(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut T,
ct_right: &T,
stream: &CudaStream,
) {
let mut tmp_rhs;
@@ -102,10 +102,10 @@ impl CudaServerKey {
self.propagate_single_carry_assign_async(lhs, stream);
}
pub fn add_assign(
pub fn add_assign<T: CudaIntegerRadixCiphertext>(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut T,
ct_right: &T,
stream: &CudaStream,
) {
unsafe {
@@ -116,7 +116,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -135,8 +135,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically an addition:
/// let d_ct_res = sks.unchecked_add(&d_ct1, &d_ct2, &mut stream);
@@ -147,12 +147,12 @@ impl CudaServerKey {
/// let dec_result: u64 = cks.decrypt(&ct_res);
/// assert_eq!(dec_result, msg1 + msg2);
/// ```
pub fn unchecked_add(
pub fn unchecked_add<T: CudaIntegerRadixCiphertext>(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &T,
ct_right: &T,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> T {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.unchecked_add_assign(&mut result, ct_right, stream);
result
@@ -162,45 +162,47 @@ impl CudaServerKey {
///
/// - `stream` __must__ be synchronized to guarantee computation has finished, and inputs must
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_add_assign_async(
pub unsafe fn unchecked_add_assign_async<T: CudaIntegerRadixCiphertext>(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut T,
ct_right: &T,
stream: &CudaStream,
) {
let ciphertext_left = ct_left.as_mut();
let ciphertext_right = ct_right.as_ref();
assert_eq!(
ct_left.d_blocks.lwe_dimension(),
ct_right.d_blocks.lwe_dimension(),
ciphertext_left.d_blocks.lwe_dimension(),
ciphertext_right.d_blocks.lwe_dimension(),
"Mismatched lwe dimension between ct_left ({:?}) and ct_right ({:?})",
ct_left.d_blocks.lwe_dimension(),
ct_right.d_blocks.lwe_dimension()
ciphertext_left.d_blocks.lwe_dimension(),
ciphertext_right.d_blocks.lwe_dimension()
);
assert_eq!(
ct_left.d_blocks.ciphertext_modulus(),
ct_right.d_blocks.ciphertext_modulus(),
ciphertext_left.d_blocks.ciphertext_modulus(),
ciphertext_right.d_blocks.ciphertext_modulus(),
"Mismatched moduli between ct_left ({:?}) and ct_right ({:?})",
ct_left.d_blocks.ciphertext_modulus(),
ct_right.d_blocks.ciphertext_modulus()
ciphertext_left.d_blocks.ciphertext_modulus(),
ciphertext_right.d_blocks.ciphertext_modulus()
);
let lwe_dimension = ct_left.d_blocks.lwe_dimension();
let lwe_ciphertext_count = ct_left.d_blocks.lwe_ciphertext_count();
let lwe_dimension = ciphertext_left.d_blocks.lwe_dimension();
let lwe_ciphertext_count = ciphertext_left.d_blocks.lwe_ciphertext_count();
stream.unchecked_add_integer_radix_assign_async(
&mut ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut ciphertext_left.d_blocks.0.d_vec,
&ciphertext_right.d_blocks.0.d_vec,
lwe_dimension,
lwe_ciphertext_count.0 as u32,
);
ct_left.info = ct_left.info.after_add(&ct_right.info);
ciphertext_left.info = ciphertext_left.info.after_add(&ciphertext_right.info);
}
pub fn unchecked_add_assign(
pub fn unchecked_add_assign<T: CudaIntegerRadixCiphertext>(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut T,
ct_right: &T,
stream: &CudaStream,
) {
unsafe {

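Taken together, the generic `add` plus the new signed wrapper give GPU signed addition end to end; a sketch assembled from the doc-tests in this diff (requires a CUDA-enabled build):

```rust
use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
use tfhe::integer::gpu::ciphertext::CudaSignedRadixCiphertext;
use tfhe::integer::gpu::gen_keys_radix_gpu;
use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;

let gpu_index = 0;
let device = CudaDevice::new(gpu_index);
let mut stream = CudaStream::new_unchecked(device);

// Generate the client key and the server key:
let num_blocks = 4;
let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);

let msg1 = 10i64;
let msg2 = -7i64;
let ct1 = cks.encrypt_signed(msg1);
let ct2 = cks.encrypt_signed(msg2);

// Copy to GPU
let d_ct1 = CudaSignedRadixCiphertext::from_signed_radix_ciphertext(&ct1, &mut stream);
let d_ct2 = CudaSignedRadixCiphertext::from_signed_radix_ciphertext(&ct2, &mut stream);

// `add` is generic over CudaIntegerRadixCiphertext, so the same entry point
// now accepts signed operands:
let d_ct_res = sks.add(&d_ct1, &d_ct2, &mut stream);

let ct_res = d_ct_res.to_signed_radix_ciphertext(&mut stream);
let dec_result: i64 = cks.decrypt_signed(&ct_res);
assert_eq!(dec_result, msg1 + msg2);
```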
View File

@@ -1,5 +1,5 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaBootstrappingKey;
use crate::integer::gpu::{BitOpType, CudaServerKey};
@@ -17,7 +17,7 @@ impl CudaServerKey {
/// use std::ops::Not;
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -34,7 +34,7 @@ impl CudaServerKey {
/// let ct = cks.encrypt(msg);
///
/// // Copy to GPU
/// let d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let d_ct = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
///
/// // Compute homomorphically a bitwise not:
/// let d_ct_res = sks.unchecked_bitnot(&d_ct, &mut stream);
@@ -48,9 +48,9 @@ impl CudaServerKey {
/// ```
pub fn unchecked_bitnot(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct.duplicate_async(stream) };
self.unchecked_bitnot_assign(&mut result, stream);
result
@@ -62,15 +62,15 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_bitnot_assign_async(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let lwe_ciphertext_count = ct.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_bitnot_integer_radix_classic_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -92,7 +92,7 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_bitnot_integer_radix_multibit_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -116,7 +116,11 @@ impl CudaServerKey {
}
}
pub fn unchecked_bitnot_assign(&self, ct: &mut CudaRadixCiphertext, stream: &CudaStream) {
pub fn unchecked_bitnot_assign(
&self,
ct: &mut CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
self.unchecked_bitnot_assign_async(ct, stream);
}
@@ -135,7 +139,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -154,8 +158,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically a bitwise and:
/// let d_ct_res = sks.unchecked_bitand(&d_ct1, &d_ct2, &mut stream);
@@ -169,10 +173,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_bitand(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.unchecked_bitand_assign(&mut result, ct_right, stream);
result
@@ -184,27 +188,27 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_bitop_assign_async(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
op: BitOpType,
stream: &CudaStream,
) {
assert_eq!(
ct_left.d_blocks.lwe_dimension(),
ct_right.d_blocks.lwe_dimension()
ct_left.as_ref().d_blocks.lwe_dimension(),
ct_right.as_ref().d_blocks.lwe_dimension()
);
assert_eq!(
ct_left.d_blocks.lwe_ciphertext_count(),
ct_right.d_blocks.lwe_ciphertext_count()
ct_left.as_ref().d_blocks.lwe_ciphertext_count(),
ct_right.as_ref().d_blocks.lwe_ciphertext_count()
);
let lwe_ciphertext_count = ct_left.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct_left.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_bitop_integer_radix_classic_kb_assign_async(
&mut ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut ct_left.as_mut().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -227,8 +231,8 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_bitop_integer_radix_multibit_kb_assign_async(
&mut ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut ct_left.as_mut().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -255,13 +259,13 @@ impl CudaServerKey {
pub fn unchecked_bitand_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
self.unchecked_bitop_assign_async(ct_left, ct_right, BitOpType::And, stream);
ct_left.info = ct_left.info.after_bitand(&ct_right.info);
ct_left.as_mut().info = ct_left.as_ref().info.after_bitand(&ct_right.as_ref().info);
}
stream.synchronize();
}
@@ -278,7 +282,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -297,8 +301,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically a bitwise or:
/// let d_ct_res = sks.unchecked_bitor(&d_ct1, &d_ct2, &mut stream);
@@ -312,10 +316,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_bitor(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.unchecked_bitor_assign(&mut result, ct_right, stream);
result
@@ -323,13 +327,13 @@ impl CudaServerKey {
pub fn unchecked_bitor_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
self.unchecked_bitop_assign_async(ct_left, ct_right, BitOpType::Or, stream);
ct_left.info = ct_left.info.after_bitor(&ct_right.info);
ct_left.as_mut().info = ct_left.as_ref().info.after_bitor(&ct_right.as_ref().info);
}
stream.synchronize();
}
@@ -346,7 +350,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -365,8 +369,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically a bitwise xor:
/// let d_ct_res = sks.unchecked_bitxor(&d_ct1, &d_ct2, &mut stream);
@@ -380,10 +384,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_bitxor(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.unchecked_bitxor_assign(&mut result, ct_right, stream);
result
@@ -391,13 +395,13 @@ impl CudaServerKey {
pub fn unchecked_bitxor_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
self.unchecked_bitop_assign_async(ct_left, ct_right, BitOpType::Xor, stream);
ct_left.info = ct_left.info.after_bitxor(&ct_right.info);
ct_left.as_mut().info = ct_left.as_ref().info.after_bitxor(&ct_right.as_ref().info);
}
stream.synchronize();
}
@@ -414,7 +418,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -433,8 +437,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically a bitwise and:
/// let d_ct_res = sks.bitand(&d_ct1, &d_ct2, &mut stream);
@@ -448,10 +452,10 @@ impl CudaServerKey {
/// ```
pub fn bitand(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.bitand_assign(&mut result, ct_right, stream);
result
@@ -463,8 +467,8 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn bitand_assign_async(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let mut tmp_rhs;
@@ -498,8 +502,8 @@ impl CudaServerKey {
pub fn bitand_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
@@ -520,7 +524,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -539,8 +543,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically a bitwise or:
/// let d_ct_res = sks.bitor(&d_ct1, &d_ct2, &mut stream);
@@ -554,10 +558,10 @@ impl CudaServerKey {
/// ```
pub fn bitor(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.bitor_assign(&mut result, ct_right, stream);
result
@@ -569,8 +573,8 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn bitor_assign_async(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let mut tmp_rhs;
@@ -603,8 +607,8 @@ impl CudaServerKey {
pub fn bitor_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
@@ -625,7 +629,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -644,8 +648,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically a bitwise xor:
/// let d_ct_res = sks.bitxor(&d_ct1, &d_ct2, &mut stream);
@@ -659,10 +663,10 @@ impl CudaServerKey {
/// ```
pub fn bitxor(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.bitxor_assign(&mut result, ct_right, stream);
result
@@ -674,8 +678,8 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn bitxor_assign_async(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let mut tmp_rhs;
@@ -708,8 +712,8 @@ impl CudaServerKey {
pub fn bitxor_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
@@ -731,7 +735,7 @@ impl CudaServerKey {
/// use std::ops::Not;
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -748,7 +752,7 @@ impl CudaServerKey {
/// let ct = cks.encrypt(msg);
///
/// // Copy to GPU
/// let d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let d_ct = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
///
/// // Compute homomorphically a bitwise not:
/// let d_ct_res = sks.bitnot(&d_ct, &mut stream);
@@ -760,7 +764,11 @@ impl CudaServerKey {
/// let dec: u64 = cks.decrypt(&ct_res);
/// assert_eq!(dec, !msg % 256);
/// ```
pub fn bitnot(&self, ct: &CudaRadixCiphertext, stream: &CudaStream) -> CudaRadixCiphertext {
pub fn bitnot(
&self,
ct: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct.duplicate_async(stream) };
self.bitnot_assign(&mut result, stream);
result
@@ -770,7 +778,11 @@ impl CudaServerKey {
///
/// - `stream` __must__ be synchronized to guarantee computation has finished, and inputs must
/// not be dropped until stream is synchronized
pub unsafe fn bitnot_assign_async(&self, ct: &mut CudaRadixCiphertext, stream: &CudaStream) {
pub unsafe fn bitnot_assign_async(
&self,
ct: &mut CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
if !ct.block_carries_are_empty() {
self.full_propagate_assign_async(ct, stream);
}
@@ -778,7 +790,7 @@ impl CudaServerKey {
self.unchecked_bitnot_assign_async(ct, stream);
}
pub fn bitnot_assign(&self, ct: &mut CudaRadixCiphertext, stream: &CudaStream) {
pub fn bitnot_assign(&self, ct: &mut CudaUnsignedRadixCiphertext, stream: &CudaStream) {
unsafe {
self.bitnot_assign_async(ct, stream);
}
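One pattern worth noting across this file: unlike `add`, the bitwise entry points are ported to the wrapper type but remain unsigned-only in this commit. The two migration styles, with signatures as they appear in the hunks above:

```rust
// Generic over signedness (integer addition):
//   pub fn add<T: CudaIntegerRadixCiphertext>(
//       &self, ct_left: &T, ct_right: &T, stream: &CudaStream,
//   ) -> T
//
// Still concretely unsigned (bitwise operations):
//   pub fn bitand(
//       &self,
//       ct_left: &CudaUnsignedRadixCiphertext,
//       ct_right: &CudaUnsignedRadixCiphertext,
//       stream: &CudaStream,
//   ) -> CudaUnsignedRadixCiphertext
```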

View File

@@ -1,5 +1,5 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaBootstrappingKey;
use crate::integer::gpu::CudaServerKey;
@@ -10,22 +10,22 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_if_then_else_async(
&self,
condition: &CudaRadixCiphertext,
true_ct: &CudaRadixCiphertext,
false_ct: &CudaRadixCiphertext,
condition: &CudaUnsignedRadixCiphertext,
true_ct: &CudaUnsignedRadixCiphertext,
false_ct: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
let lwe_ciphertext_count = true_ct.d_blocks.lwe_ciphertext_count();
let mut result =
self.create_trivial_zero_radix(true_ct.d_blocks.lwe_ciphertext_count().0, stream);
) -> CudaUnsignedRadixCiphertext {
let lwe_ciphertext_count = true_ct.as_ref().d_blocks.lwe_ciphertext_count();
let mut result = self
.create_trivial_zero_radix(true_ct.as_ref().d_blocks.lwe_ciphertext_count().0, stream);
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_cmux_integer_radix_classic_kb_async(
&mut result.d_blocks.0.d_vec,
&condition.d_blocks.0.d_vec,
&true_ct.d_blocks.0.d_vec,
&false_ct.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&condition.as_ref().d_blocks.0.d_vec,
&true_ct.as_ref().d_blocks.0.d_vec,
&false_ct.as_ref().d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -47,10 +47,10 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_cmux_integer_radix_multibit_kb_async(
&mut result.d_blocks.0.d_vec,
&condition.d_blocks.0.d_vec,
&true_ct.d_blocks.0.d_vec,
&false_ct.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&condition.as_ref().d_blocks.0.d_vec,
&true_ct.as_ref().d_blocks.0.d_vec,
&false_ct.as_ref().d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -77,11 +77,11 @@ impl CudaServerKey {
}
pub fn unchecked_if_then_else(
&self,
condition: &CudaRadixCiphertext,
true_ct: &CudaRadixCiphertext,
false_ct: &CudaRadixCiphertext,
condition: &CudaUnsignedRadixCiphertext,
true_ct: &CudaUnsignedRadixCiphertext,
false_ct: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result =
unsafe { self.unchecked_if_then_else_async(condition, true_ct, false_ct, stream) };
stream.synchronize();
@@ -90,11 +90,11 @@ impl CudaServerKey {
pub fn if_then_else(
&self,
condition: &CudaRadixCiphertext,
true_ct: &CudaRadixCiphertext,
false_ct: &CudaRadixCiphertext,
condition: &CudaUnsignedRadixCiphertext,
true_ct: &CudaUnsignedRadixCiphertext,
false_ct: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_condition;
let mut tmp_true_ct;
let mut tmp_false_ct;

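The cmux entry points likewise stay on the unsigned wrapper for now. A usage sketch; the exact encoding expected for `condition` is an assumption here (an encryption of 0 or 1):

```rust
use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
use tfhe::integer::gpu::gen_keys_radix_gpu;
use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;

let device = CudaDevice::new(0);
let mut stream = CudaStream::new_unchecked(device);
let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, 4, &mut stream);

// The condition is itself a radix ciphertext in this commit (no dedicated
// boolean type on this path yet); 1u64 is assumed to select the true branch.
let d_cond = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&cks.encrypt(1u64), &mut stream);
let d_true = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&cks.encrypt(42u64), &mut stream);
let d_false = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&cks.encrypt(7u64), &mut stream);

let d_res = sks.if_then_else(&d_cond, &d_true, &d_false, &mut stream);
let res: u64 = cks.decrypt(&d_res.to_radix_ciphertext(&mut stream));
// Expected: 42 under the assumed condition encoding.
```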
View File

@@ -1,5 +1,5 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaBootstrappingKey;
use crate::integer::gpu::{ComparisonType, CudaServerKey};
@@ -10,30 +10,30 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_comparison_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
op: ComparisonType,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
assert_eq!(
ct_left.d_blocks.lwe_dimension(),
ct_right.d_blocks.lwe_dimension()
ct_left.as_ref().d_blocks.lwe_dimension(),
ct_right.as_ref().d_blocks.lwe_dimension()
);
assert_eq!(
ct_left.d_blocks.lwe_ciphertext_count(),
ct_right.d_blocks.lwe_ciphertext_count()
ct_left.as_ref().d_blocks.lwe_ciphertext_count(),
ct_right.as_ref().d_blocks.lwe_ciphertext_count()
);
let mut result = ct_left.duplicate_async(stream);
let lwe_ciphertext_count = ct_left.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct_left.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_comparison_integer_radix_classic_kb_async(
&mut result.d_blocks.0.d_vec,
&ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&ct_left.as_ref().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -56,9 +56,9 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_comparison_integer_radix_multibit_kb_async(
&mut result.d_blocks.0.d_vec,
&ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&ct_left.as_ref().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -91,13 +91,13 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_eq_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result =
self.unchecked_comparison_async(ct_left, ct_right, ComparisonType::EQ, stream);
result.info = result.info.after_eq();
result.as_mut().info = result.as_ref().info.after_eq();
result
}
@@ -111,7 +111,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -130,8 +130,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// let d_ct_res = sks.unchecked_eq(&d_ct1, &d_ct2, &mut stream);
///
@@ -144,10 +144,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_eq(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_eq_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -159,13 +159,13 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_ne_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result =
self.unchecked_comparison_async(ct_left, ct_right, ComparisonType::NE, stream);
result.info = result.info.after_ne();
result.as_mut().info = result.as_ref().info.after_ne();
result
}
@@ -179,7 +179,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -198,8 +198,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// let d_ct_res = sks.unchecked_ne(&d_ct1, &d_ct2, &mut stream);
///
@@ -212,10 +212,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_ne(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_ne_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -227,10 +227,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn eq_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_lhs;
let mut tmp_rhs;
@@ -272,7 +272,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -291,8 +291,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// let d_ct_res = sks.eq(&d_ct1, &d_ct2, &mut stream);
///
@@ -305,10 +305,10 @@ impl CudaServerKey {
/// ```
pub fn eq(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.eq_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -320,10 +320,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn ne_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_lhs;
let mut tmp_rhs;
@@ -365,7 +365,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -384,8 +384,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// let d_ct_res = sks.ne(&d_ct1, &d_ct2, &mut stream);
///
@@ -398,10 +398,10 @@ impl CudaServerKey {
/// ```
pub fn ne(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.ne_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -413,10 +413,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_gt_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
self.unchecked_comparison_async(ct_left, ct_right, ComparisonType::GT, stream)
}
@@ -428,7 +428,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -447,8 +447,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// let d_ct_res = sks.unchecked_gt(&d_ct1, &d_ct2, &mut stream);
///
@@ -461,10 +461,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_gt(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_gt_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -476,10 +476,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn unchecked_ge_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
self.unchecked_comparison_async(ct_left, ct_right, ComparisonType::GE, stream)
}
@@ -489,10 +489,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronized
pub unsafe fn gt_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_lhs;
let mut tmp_rhs;
@@ -526,10 +526,10 @@ impl CudaServerKey {
pub fn gt(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.gt_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -545,7 +545,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -564,8 +564,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// let d_ct_res = sks.unchecked_ge(&d_ct1, &d_ct2, &mut stream);
///
@@ -578,10 +578,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_ge(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_ge_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -593,10 +593,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn ge_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_lhs;
let mut tmp_rhs;
@@ -630,10 +630,10 @@ impl CudaServerKey {
pub fn ge(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.ge_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -645,10 +645,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_lt_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
self.unchecked_comparison_async(ct_left, ct_right, ComparisonType::LT, stream)
}
@@ -662,7 +662,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -681,8 +681,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// let d_ct_res = sks.unchecked_lt(&d_ct1, &d_ct2, &mut stream);
///
@@ -695,10 +695,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_lt(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_lt_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -710,10 +710,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn lt_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_lhs;
let mut tmp_rhs;
@@ -747,10 +747,10 @@ impl CudaServerKey {
pub fn lt(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.lt_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -762,10 +762,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_le_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
self.unchecked_comparison_async(ct_left, ct_right, ComparisonType::LE, stream)
}
@@ -779,7 +779,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -798,8 +798,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// let d_ct_res = sks.unchecked_le(&d_ct1, &d_ct2, &mut stream);
///
@@ -812,10 +812,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_le(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_le_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -827,10 +827,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn le_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_lhs;
let mut tmp_rhs;
@@ -864,10 +864,10 @@ impl CudaServerKey {
pub fn le(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.le_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -879,29 +879,29 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_max_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
assert_eq!(
ct_left.d_blocks.lwe_dimension(),
ct_right.d_blocks.lwe_dimension()
ct_left.as_ref().d_blocks.lwe_dimension(),
ct_right.as_ref().d_blocks.lwe_dimension()
);
assert_eq!(
ct_left.d_blocks.lwe_ciphertext_count(),
ct_right.d_blocks.lwe_ciphertext_count()
ct_left.as_ref().d_blocks.lwe_ciphertext_count(),
ct_right.as_ref().d_blocks.lwe_ciphertext_count()
);
let mut result = ct_left.duplicate_async(stream);
let lwe_ciphertext_count = ct_left.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct_left.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_comparison_integer_radix_classic_kb_async(
&mut result.d_blocks.0.d_vec,
&ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&ct_left.as_ref().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -924,9 +924,9 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_comparison_integer_radix_multibit_kb_async(
&mut result.d_blocks.0.d_vec,
&ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&ct_left.as_ref().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -955,10 +955,10 @@ impl CudaServerKey {
pub fn unchecked_max(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_max_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -970,29 +970,29 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_min_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
assert_eq!(
ct_left.d_blocks.lwe_dimension(),
ct_right.d_blocks.lwe_dimension()
ct_left.as_ref().d_blocks.lwe_dimension(),
ct_right.as_ref().d_blocks.lwe_dimension()
);
assert_eq!(
ct_left.d_blocks.lwe_ciphertext_count(),
ct_right.d_blocks.lwe_ciphertext_count()
ct_left.as_ref().d_blocks.lwe_ciphertext_count(),
ct_right.as_ref().d_blocks.lwe_ciphertext_count()
);
let mut result = ct_left.duplicate_async(stream);
let lwe_ciphertext_count = ct_left.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct_left.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_comparison_integer_radix_classic_kb_async(
&mut result.d_blocks.0.d_vec,
&ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&ct_left.as_ref().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -1015,9 +1015,9 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_comparison_integer_radix_multibit_kb_async(
&mut result.d_blocks.0.d_vec,
&ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&ct_left.as_ref().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -1046,10 +1046,10 @@ impl CudaServerKey {
pub fn unchecked_min(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_min_async(ct_left, ct_right, stream) };
stream.synchronize();
result
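
Like the comparisons above, `unchecked_max` and `unchecked_min` assume the carry buffers of both inputs are empty, which holds for freshly encrypted values. A sketch under that assumption, where `cks`, `sks` and `stream` are taken as already created as in the doc examples in this file:

```rust
// Freshly encrypted inputs have empty carries, so the unchecked
// variants are safe to call directly here.
let msg1 = 97u64;
let msg2 = 123u64;
let ct1 = cks.encrypt(msg1);
let ct2 = cks.encrypt(msg2);

let d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);

let d_max = sks.unchecked_max(&d_ct1, &d_ct2, &mut stream);
let d_min = sks.unchecked_min(&d_ct1, &d_ct2, &mut stream);

let max: u64 = cks.decrypt(&d_max.to_radix_ciphertext(&mut stream));
let min: u64 = cks.decrypt(&d_min.to_radix_ciphertext(&mut stream));
assert_eq!((max, min), (msg2, msg1));
```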
@@ -1061,10 +1061,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn max_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_lhs;
let mut tmp_rhs;
@@ -1097,10 +1097,10 @@ impl CudaServerKey {
pub fn max(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.max_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -1112,10 +1112,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn min_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut tmp_lhs;
let mut tmp_rhs;
@@ -1148,10 +1148,10 @@ impl CudaServerKey {
pub fn min(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.min_async(ct_left, ct_right, stream) };
stream.synchronize();
result

View File

@@ -5,7 +5,9 @@ use crate::core_crypto::gpu::CudaStream;
use crate::core_crypto::prelude::{ContiguousEntityContainerMut, LweCiphertextCount};
use crate::integer::block_decomposition::{BlockDecomposer, DecomposableInto};
use crate::integer::gpu::ciphertext::info::{CudaBlockInfo, CudaRadixCiphertextInfo};
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{
CudaIntegerRadixCiphertext, CudaRadixCiphertext, CudaUnsignedRadixCiphertext,
};
use crate::integer::gpu::server_key::CudaBootstrappingKey;
use crate::integer::gpu::CudaServerKey;
use crate::shortint::ciphertext::{Degree, NoiseLevel};
@@ -28,14 +30,18 @@ mod sub;
mod scalar_rotate;
#[cfg(test)]
mod tests;
#[cfg(test)]
mod tests_signed;
#[cfg(test)]
mod tests_unsigned;
impl CudaServerKey {
/// Create a trivial ciphertext filled with zeros
/// Create a trivial ciphertext filled with zeros on the GPU.
///
/// # Example
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::integer::{gen_keys_radix, RadixCiphertext};
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
@@ -49,7 +55,7 @@ impl CudaServerKey {
/// // Generate the client key and the server key:
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let d_ctxt: CudaRadixCiphertext = sks.create_trivial_zero_radix(num_blocks, &mut stream);
/// let d_ctxt = sks.create_trivial_zero_radix(num_blocks, &mut stream);
/// let ctxt = d_ctxt.to_radix_ciphertext(&mut stream);
///
/// // Decrypt:
@@ -60,17 +66,16 @@ impl CudaServerKey {
&self,
num_blocks: usize,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
self.create_trivial_radix(0, num_blocks, stream)
}
/// Create a trivial ciphertext
/// Create a trivial ciphertext on the GPU
///
/// # Example
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::integer::{gen_keys_radix, RadixCiphertext};
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
@@ -84,21 +89,21 @@ impl CudaServerKey {
/// // Generate the client key and the server key:
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let d_ctxt: CudaRadixCiphertext = sks.create_trivial_radix(212u64, num_blocks, &mut stream);
/// let d_ctxt = sks.create_trivial_radix(212u64, num_blocks, &mut stream);
/// let ctxt = d_ctxt.to_radix_ciphertext(&mut stream);
///
/// // Decrypt:
/// let dec: u64 = cks.decrypt(&ctxt);
/// assert_eq!(212, dec);
/// ```
pub fn create_trivial_radix<T>(
pub fn create_trivial_radix<Scalar>(
&self,
scalar: T,
scalar: Scalar,
num_blocks: usize,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
Scalar: DecomposableInto<u64>,
{
let lwe_size = match self.pbs_order {
PBSOrder::KeyswitchBootstrap => self.key_switching_key.input_key_lwe_size(),
@@ -131,9 +136,11 @@ impl CudaServerKey {
let d_blocks = CudaLweCiphertextList::from_lwe_ciphertext_list(&cpu_lwe_list, stream);
CudaRadixCiphertext {
d_blocks,
info: CudaRadixCiphertextInfo { blocks: info },
CudaUnsignedRadixCiphertext {
ciphertext: CudaRadixCiphertext {
d_blocks,
info: CudaRadixCiphertextInfo { blocks: info },
},
}
}
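
On the host side the scalar is first split into radix blocks (least-significant block first) before those values are turned into trivial LWE ciphertexts and uploaded. For the doc example above (`212u64` with 2-bit message blocks) the split works out as in this clear-text sketch; the `decompose` helper is illustrative, not part of the API:

```rust
// Clear-text model of the block decomposition behind create_trivial_radix.
fn decompose(mut scalar: u64, message_modulus: u64, num_blocks: usize) -> Vec<u64> {
    (0..num_blocks)
        .map(|_| {
            let block = scalar % message_modulus; // low block first
            scalar /= message_modulus;
            block
        })
        .collect()
}

assert_eq!(decompose(212, 4, 4), vec![0, 1, 1, 3]); // 0 + 1*4 + 1*16 + 3*64 = 212
```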
@@ -141,16 +148,19 @@ impl CudaServerKey {
///
/// - `stream` __must__ be synchronized to guarantee computation has finished, and inputs must
/// not be dropped until stream is synchronized
pub(crate) unsafe fn propagate_single_carry_assign_async(
pub(crate) unsafe fn propagate_single_carry_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut T,
stream: &CudaStream,
) {
let num_blocks = ct.d_blocks.lwe_ciphertext_count().0 as u32;
) where
T: CudaIntegerRadixCiphertext,
{
let ciphertext = ct.as_mut();
let num_blocks = ciphertext.d_blocks.lwe_ciphertext_count().0 as u32;
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.propagate_single_carry_classic_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ciphertext.d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
d_bsk.input_lwe_dimension(),
@@ -161,13 +171,13 @@ impl CudaServerKey {
d_bsk.decomp_level_count(),
d_bsk.decomp_base_log(),
num_blocks,
ct.info.blocks.first().unwrap().message_modulus,
ct.info.blocks.first().unwrap().carry_modulus,
ciphertext.info.blocks.first().unwrap().message_modulus,
ciphertext.info.blocks.first().unwrap().carry_modulus,
);
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.propagate_single_carry_multibit_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ciphertext.d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
d_multibit_bsk.input_lwe_dimension(),
@@ -179,12 +189,12 @@ impl CudaServerKey {
d_multibit_bsk.decomp_base_log(),
d_multibit_bsk.grouping_factor,
num_blocks,
ct.info.blocks.first().unwrap().message_modulus,
ct.info.blocks.first().unwrap().carry_modulus,
ciphertext.info.blocks.first().unwrap().message_modulus,
ciphertext.info.blocks.first().unwrap().carry_modulus,
);
}
};
ct.info.blocks.iter_mut().for_each(|b| {
ciphertext.info.blocks.iter_mut().for_each(|b| {
b.degree = Degree::new(b.message_modulus.0 - 1);
b.noise_level = NoiseLevel::NOMINAL;
});
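
What the propagation kernels compute can be modelled in the clear: each block keeps its value modulo the message modulus and forwards its overflow to the next block, which is why the degrees can be reset to `message_modulus - 1` afterwards. A sketch (the `propagate_carries` helper is illustrative; the single-carry variant additionally assumes at most one carry per block, while `full_propagate` handles arbitrary carries):

```rust
// Clear-text model of carry propagation over radix blocks
// (least-significant block first, message_modulus = 4 here).
fn propagate_carries(blocks: &mut [u64], message_modulus: u64) {
    let mut carry = 0u64;
    for b in blocks.iter_mut() {
        let v = *b + carry;
        *b = v % message_modulus;    // value kept in the block
        carry = v / message_modulus; // overflow forwarded
    }
    // a carry out of the last block is lost: the arithmetic is modular
}

let mut blocks = vec![5, 7, 2, 0]; // encodes 5 + 7*4 + 2*16 = 65
propagate_carries(&mut blocks, 4);
assert_eq!(blocks, vec![1, 0, 0, 1]); // 1 + 0 + 0 + 1*64 = 65
```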
@@ -194,16 +204,17 @@ impl CudaServerKey {
///
/// - `stream` __must__ be synchronized to guarantee computation has finished, and inputs must
/// not be dropped until stream is synchronized
pub(crate) unsafe fn full_propagate_assign_async(
pub(crate) unsafe fn full_propagate_assign_async<T: CudaIntegerRadixCiphertext>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut T,
stream: &CudaStream,
) {
let num_blocks = ct.d_blocks.lwe_ciphertext_count().0 as u32;
let ciphertext = ct.as_mut();
let num_blocks = ciphertext.d_blocks.lwe_ciphertext_count().0 as u32;
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.full_propagate_classic_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ciphertext.d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
d_bsk.input_lwe_dimension(),
@@ -214,13 +225,13 @@ impl CudaServerKey {
d_bsk.decomp_level_count(),
d_bsk.decomp_base_log(),
num_blocks,
ct.info.blocks.first().unwrap().message_modulus,
ct.info.blocks.first().unwrap().carry_modulus,
ciphertext.info.blocks.first().unwrap().message_modulus,
ciphertext.info.blocks.first().unwrap().carry_modulus,
);
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.full_propagate_multibit_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ciphertext.d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
d_multibit_bsk.input_lwe_dimension(),
@@ -232,19 +243,21 @@ impl CudaServerKey {
d_multibit_bsk.decomp_base_log(),
d_multibit_bsk.grouping_factor,
num_blocks,
ct.info.blocks.first().unwrap().message_modulus,
ct.info.blocks.first().unwrap().carry_modulus,
ciphertext.info.blocks.first().unwrap().message_modulus,
ciphertext.info.blocks.first().unwrap().carry_modulus,
);
}
};
ct.info
ciphertext
.info
.blocks
.iter_mut()
.for_each(|b| b.degree = Degree::new(b.message_modulus.0 - 1));
}
/// Prepend trivial zero LSB blocks to an existing [`CudaRadixCiphertext`] and returns the
/// result as a new [`CudaRadixCiphertext`]. This can be useful for casting operations.
/// Prepend trivial zero LSB blocks to an existing [`CudaUnsignedRadixCiphertext`] or
/// [`CudaSignedRadixCiphertext`] and returns the result as a new ciphertext on GPU.
/// This can be useful for casting operations.
///
/// # Example
///
@@ -264,13 +277,12 @@ impl CudaServerKey {
/// // Generate the client key and the server key:
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let mut d_ct1: CudaRadixCiphertext = sks.create_trivial_radix(7u64, num_blocks, &mut stream);
/// let mut d_ct1 = sks.create_trivial_radix(7u64, num_blocks, &mut stream);
/// let ct1 = d_ct1.to_radix_ciphertext(&mut stream);
/// assert_eq!(ct1.blocks().len(), 4);
///
/// let added_blocks = 2;
/// let d_ct_res =
/// sks.extend_radix_with_trivial_zero_blocks_lsb(&mut d_ct1, added_blocks, &mut stream);
/// let d_ct_res = sks.extend_radix_with_trivial_zero_blocks_lsb(&d_ct1, added_blocks, &mut stream);
/// let ct_res = d_ct_res.to_radix_ciphertext(&mut stream);
/// assert_eq!(ct_res.blocks().len(), 6);
///
@@ -281,22 +293,26 @@ impl CudaServerKey {
/// res
/// );
/// ```
pub fn extend_radix_with_trivial_zero_blocks_lsb(
pub fn extend_radix_with_trivial_zero_blocks_lsb<T: CudaIntegerRadixCiphertext>(
&self,
ct: &CudaRadixCiphertext,
ct: &T,
num_blocks: usize,
stream: &CudaStream,
) -> CudaRadixCiphertext {
let new_num_blocks = ct.d_blocks.lwe_ciphertext_count().0 + num_blocks;
let ciphertext_modulus = ct.d_blocks.ciphertext_modulus();
let lwe_size = ct.d_blocks.lwe_dimension().to_lwe_size();
) -> T {
let new_num_blocks = ct.as_ref().d_blocks.lwe_ciphertext_count().0 + num_blocks;
let ciphertext_modulus = ct.as_ref().d_blocks.ciphertext_modulus();
let lwe_size = ct.as_ref().d_blocks.lwe_dimension().to_lwe_size();
let shift = num_blocks * lwe_size.0;
let mut extended_ct_vec =
unsafe { CudaVec::new_async(new_num_blocks * lwe_size.0, stream) };
unsafe {
extended_ct_vec.memset_async(0u64, stream);
extended_ct_vec.copy_self_range_gpu_to_gpu_async(shift.., &ct.d_blocks.0.d_vec, stream);
extended_ct_vec.copy_self_range_gpu_to_gpu_async(
shift..,
&ct.as_ref().d_blocks.0.d_vec,
stream,
);
}
stream.synchronize();
let extended_ct_list = CudaLweCiphertextList::from_cuda_vec(
@@ -306,19 +322,20 @@ impl CudaServerKey {
);
let extended_ct_info = ct
.as_ref()
.info
.after_extend_radix_with_trivial_zero_blocks_lsb(num_blocks);
CudaRadixCiphertext::new(extended_ct_list, extended_ct_info)
T::from(CudaRadixCiphertext::new(extended_ct_list, extended_ct_info))
}
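
The device-side copy is plain offset arithmetic over one flat `u64` buffer: every block occupies `lwe_size` words, so prepending `num_blocks` trivial zeros amounts to zero-filling the new buffer and copying the old one in at `shift = num_blocks * lwe_size`. In the clear, with deliberately tiny illustrative sizes:

```rust
// Clear-text model of extend_radix_with_trivial_zero_blocks_lsb:
// prepend zero blocks by copying the old buffer at an offset.
let lwe_size = 3usize; // illustrative; real LWE sizes are much larger
let old_blocks: Vec<u64> = vec![11, 12, 13, 21, 22, 23]; // 2 blocks
let num_added = 2;

let shift = num_added * lwe_size;
let mut extended = vec![0u64; old_blocks.len() + num_added * lwe_size];
extended[shift..].copy_from_slice(&old_blocks);

assert_eq!(extended, vec![0, 0, 0, 0, 0, 0, 11, 12, 13, 21, 22, 23]);
```

The MSB variant below is the mirror image: the old buffer is copied at offset 0 and the trivial zero blocks land at the end.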
/// Append trivial zero MSB blocks to an existing [`CudaRadixCiphertext`] and returns the result
/// as a new [`CudaRadixCiphertext`]. This can be useful for casting operations.
/// Append trivial zero MSB blocks to an existing [`CudaUnsignedRadixCiphertext`] or
/// [`CudaSignedRadixCiphertext`] and returns the result as a new ciphertext on GPU.
/// This can be useful for casting operations.
///
/// # Example
///
///```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::integer::IntegerCiphertext;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
@@ -332,7 +349,7 @@ impl CudaServerKey {
/// // Generate the client key and the server key:
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let mut d_ct1: CudaRadixCiphertext = sks.create_trivial_radix(7u64, num_blocks, &mut stream);
/// let mut d_ct1 = sks.create_trivial_radix(7u64, num_blocks, &mut stream);
/// let ct1 = d_ct1.to_radix_ciphertext(&mut stream);
/// assert_eq!(ct1.blocks().len(), 4);
///
@@ -344,21 +361,21 @@ impl CudaServerKey {
/// let res: u64 = cks.decrypt(&ct_res);
/// assert_eq!(7, res);
/// ```
pub fn extend_radix_with_trivial_zero_blocks_msb(
pub fn extend_radix_with_trivial_zero_blocks_msb<T: CudaIntegerRadixCiphertext>(
&self,
ct: &CudaRadixCiphertext,
ct: &T,
num_blocks: usize,
stream: &CudaStream,
) -> CudaRadixCiphertext {
let new_num_blocks = ct.d_blocks.lwe_ciphertext_count().0 + num_blocks;
let ciphertext_modulus = ct.d_blocks.ciphertext_modulus();
let lwe_size = ct.d_blocks.lwe_dimension().to_lwe_size();
) -> T {
let new_num_blocks = ct.as_ref().d_blocks.lwe_ciphertext_count().0 + num_blocks;
let ciphertext_modulus = ct.as_ref().d_blocks.ciphertext_modulus();
let lwe_size = ct.as_ref().d_blocks.lwe_dimension().to_lwe_size();
let mut extended_ct_vec =
unsafe { CudaVec::new_async(new_num_blocks * lwe_size.0, stream) };
unsafe {
extended_ct_vec.memset_async(0u64, stream);
extended_ct_vec.copy_from_gpu_async(&ct.d_blocks.0.d_vec, stream);
extended_ct_vec.copy_from_gpu_async(&ct.as_ref().d_blocks.0.d_vec, stream);
}
stream.synchronize();
let extended_ct_list = CudaLweCiphertextList::from_cuda_vec(
@@ -368,19 +385,20 @@ impl CudaServerKey {
);
let extended_ct_info = ct
.as_ref()
.info
.after_extend_radix_with_trivial_zero_blocks_msb(num_blocks);
CudaRadixCiphertext::new(extended_ct_list, extended_ct_info)
T::from(CudaRadixCiphertext::new(extended_ct_list, extended_ct_info))
}
/// Remove LSB blocks from an existing [`CudaRadixCiphertext`] and returns the result as a new
/// [`CudaRadixCiphertext`]. This can be useful for casting operations.
/// Remove LSB blocks from an existing [`CudaUnsignedRadixCiphertext`] or
/// [`CudaSignedRadixCiphertext`] and returns the result as a new ciphertext on GPU.
/// This can be useful for casting operations.
///
/// # Example
///
///```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::integer::IntegerCiphertext;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
@@ -394,7 +412,7 @@ impl CudaServerKey {
/// // Generate the client key and the server key:
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let mut d_ct1: CudaRadixCiphertext = sks.create_trivial_radix(119u64, num_blocks, &mut stream);
/// let mut d_ct1 = sks.create_trivial_radix(119u64, num_blocks, &mut stream);
/// let ct1 = d_ct1.to_radix_ciphertext(&mut stream);
/// assert_eq!(ct1.blocks().len(), 4);
///
@@ -406,20 +424,24 @@ impl CudaServerKey {
/// let res: u64 = cks.decrypt(&ct_res);
/// assert_eq!(7, res);
/// ```
pub fn trim_radix_blocks_lsb(
pub fn trim_radix_blocks_lsb<T: CudaIntegerRadixCiphertext>(
&self,
ct: &CudaRadixCiphertext,
ct: &T,
num_blocks: usize,
stream: &CudaStream,
) -> CudaRadixCiphertext {
let new_num_blocks = ct.d_blocks.lwe_ciphertext_count().0 - num_blocks;
let ciphertext_modulus = ct.d_blocks.ciphertext_modulus();
let lwe_size = ct.d_blocks.lwe_dimension().to_lwe_size();
) -> T {
let new_num_blocks = ct.as_ref().d_blocks.lwe_ciphertext_count().0 - num_blocks;
let ciphertext_modulus = ct.as_ref().d_blocks.ciphertext_modulus();
let lwe_size = ct.as_ref().d_blocks.lwe_dimension().to_lwe_size();
let shift = num_blocks * lwe_size.0;
let mut trimmed_ct_vec = unsafe { CudaVec::new_async(new_num_blocks * lwe_size.0, stream) };
unsafe {
trimmed_ct_vec.copy_src_range_gpu_to_gpu_async(shift.., &ct.d_blocks.0.d_vec, stream);
trimmed_ct_vec.copy_src_range_gpu_to_gpu_async(
shift..,
&ct.as_ref().d_blocks.0.d_vec,
stream,
);
}
stream.synchronize();
let trimmed_ct_list = CudaLweCiphertextList::from_cuda_vec(
@@ -428,18 +450,18 @@ impl CudaServerKey {
ciphertext_modulus,
);
let trimmed_ct_info = ct.info.after_trim_radix_blocks_lsb(num_blocks);
CudaRadixCiphertext::new(trimmed_ct_list, trimmed_ct_info)
let trimmed_ct_info = ct.as_ref().info.after_trim_radix_blocks_lsb(num_blocks);
T::from(CudaRadixCiphertext::new(trimmed_ct_list, trimmed_ct_info))
}
/// Remove MSB blocks from an existing [`CudaRadixCiphertext`] and returns the result as a new
/// [`CudaRadixCiphertext`]. This can be useful for casting operations.
/// Remove MSB blocks from an existing [`CudaUnsignedRadixCiphertext`] or
/// [`CudaSignedRadixCiphertext`] and returns the result as a new ciphertext on GPU.
/// This can be useful for casting operations.
///
/// # Example
///
///```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::integer::IntegerCiphertext;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
@@ -453,7 +475,7 @@ impl CudaServerKey {
/// // Generate the client key and the server key:
/// let (cks, sks) = gen_keys_radix_gpu(PARAM_MESSAGE_2_CARRY_2_KS_PBS, num_blocks, &mut stream);
///
/// let mut d_ct1: CudaRadixCiphertext = sks.create_trivial_radix(119u64, num_blocks, &mut stream);
/// let mut d_ct1 = sks.create_trivial_radix(119u64, num_blocks, &mut stream);
/// let ct1 = d_ct1.to_radix_ciphertext(&mut stream);
/// assert_eq!(ct1.blocks().len(), 4);
///
@@ -465,20 +487,24 @@ impl CudaServerKey {
/// let res: u64 = cks.decrypt(&ct_res);
/// assert_eq!(7, res);
/// ```
pub fn trim_radix_blocks_msb(
pub fn trim_radix_blocks_msb<T: CudaIntegerRadixCiphertext>(
&self,
ct: &CudaRadixCiphertext,
ct: &T,
num_blocks: usize,
stream: &CudaStream,
) -> CudaRadixCiphertext {
let new_num_blocks = ct.d_blocks.lwe_ciphertext_count().0 - num_blocks;
let ciphertext_modulus = ct.d_blocks.ciphertext_modulus();
let lwe_size = ct.d_blocks.lwe_dimension().to_lwe_size();
) -> T {
let new_num_blocks = ct.as_ref().d_blocks.lwe_ciphertext_count().0 - num_blocks;
let ciphertext_modulus = ct.as_ref().d_blocks.ciphertext_modulus();
let lwe_size = ct.as_ref().d_blocks.lwe_dimension().to_lwe_size();
let shift = new_num_blocks * lwe_size.0;
let mut trimmed_ct_vec = unsafe { CudaVec::new_async(new_num_blocks * lwe_size.0, stream) };
unsafe {
trimmed_ct_vec.copy_src_range_gpu_to_gpu_async(0..shift, &ct.d_blocks.0.d_vec, stream);
trimmed_ct_vec.copy_src_range_gpu_to_gpu_async(
0..shift,
&ct.as_ref().d_blocks.0.d_vec,
stream,
);
}
stream.synchronize();
let trimmed_ct_list = CudaLweCiphertextList::from_cuda_vec(
@@ -487,18 +513,17 @@ impl CudaServerKey {
ciphertext_modulus,
);
let trimmed_ct_info = ct.info.after_trim_radix_blocks_msb(num_blocks);
CudaRadixCiphertext::new(trimmed_ct_list, trimmed_ct_info)
let trimmed_ct_info = ct.as_ref().info.after_trim_radix_blocks_msb(num_blocks);
T::from(CudaRadixCiphertext::new(trimmed_ct_list, trimmed_ct_info))
}
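
Both trims reuse the same layout arithmetic in the opposite direction: `trim_radix_blocks_lsb` copies the source range `shift..` (dropping the low blocks), while `trim_radix_blocks_msb` copies `0..new_num_blocks * lwe_size` (dropping the high ones). A clear-text sketch of the two ranges, again with illustrative sizes:

```rust
// Clear-text model of the two trim operations on the flat block buffer.
let lwe_size = 3usize; // illustrative
let blocks: Vec<u64> = (0..12).collect(); // 4 blocks of lwe_size words each
let num_trimmed = 2;

// trim LSB: keep everything from `shift` onwards
let shift = num_trimmed * lwe_size;
assert_eq!(blocks[shift..].to_vec(), vec![6, 7, 8, 9, 10, 11]);

// trim MSB: keep only the first `new_num_blocks * lwe_size` words
let new_num_blocks = blocks.len() / lwe_size - num_trimmed;
assert_eq!(blocks[..new_num_blocks * lwe_size].to_vec(), vec![0, 1, 2, 3, 4, 5]);
```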
/// Cast a CudaRadixCiphertext to a CudaRadixCiphertext
/// Cast a [`CudaUnsignedRadixCiphertext`] to a [`CudaUnsignedRadixCiphertext`]
/// with a possibly different number of blocks
///
/// # Example
///
///```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::integer::IntegerCiphertext;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
@@ -513,7 +538,7 @@ impl CudaServerKey {
///
/// let msg = 2u8;
///
/// let mut d_ct1: CudaRadixCiphertext = sks.create_trivial_radix(msg, num_blocks, &mut stream);
/// let mut d_ct1 = sks.create_trivial_radix(msg, num_blocks, &mut stream);
/// let ct1 = d_ct1.to_radix_ciphertext(&mut stream);
/// assert_eq!(ct1.blocks().len(), 4);
///
@@ -527,17 +552,17 @@ impl CudaServerKey {
/// ```
pub fn cast_to_unsigned(
&self,
mut source: CudaRadixCiphertext,
mut source: CudaUnsignedRadixCiphertext,
target_num_blocks: usize,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
if !source.block_carries_are_empty() {
unsafe {
self.full_propagate_assign_async(&mut source, stream);
}
stream.synchronize();
}
let current_num_blocks = source.info.blocks.len();
let current_num_blocks = source.ciphertext.info.blocks.len();
// Casting from unsigned to unsigned is just a matter of trimming/extending with zeros
if target_num_blocks > current_num_blocks {
let num_blocks_to_add = target_num_blocks - current_num_blocks;

View File

@@ -1,5 +1,5 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::{CudaBootstrappingKey, CudaServerKey};
impl CudaServerKey {
@@ -13,7 +13,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -37,8 +37,8 @@ impl CudaServerKey {
/// let ctxt_1 = cks.encrypt_radix(clear_1, number_of_blocks);
/// let ctxt_2 = cks.encrypt_radix(clear_2, number_of_blocks);
///
/// let mut d_ctxt_1 = CudaRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
/// let d_ctxt_2 = CudaRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);
/// let mut d_ctxt_1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
/// let d_ctxt_2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);
///
/// // Compute homomorphically a multiplication
/// let mut d_ct_res = sks.unchecked_mul(&mut d_ctxt_1, &d_ctxt_2, &mut stream);
@@ -50,10 +50,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_mul(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.unchecked_mul_assign(&mut result, ct_right, stream);
result
@@ -65,17 +65,17 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_mul_assign_async(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let num_blocks = ct_left.d_blocks.lwe_ciphertext_count().0 as u32;
let num_blocks = ct_left.as_ref().d_blocks.lwe_ciphertext_count().0 as u32;
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_mul_integer_radix_classic_kb_assign_async(
&mut ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut ct_left.as_mut().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -92,8 +92,8 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_mul_integer_radix_multibit_kb_assign_async(
&mut ct_left.d_blocks.0.d_vec,
&ct_right.d_blocks.0.d_vec,
&mut ct_left.as_mut().d_blocks.0.d_vec,
&ct_right.as_ref().d_blocks.0.d_vec,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
self.message_modulus,
@@ -111,13 +111,13 @@ impl CudaServerKey {
}
};
ct_left.info = ct_left.info.after_mul();
ct_left.as_mut().info = ct_left.as_ref().info.after_mul();
}
pub fn unchecked_mul_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
@@ -136,7 +136,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -160,8 +160,8 @@ impl CudaServerKey {
/// let ctxt_1 = cks.encrypt_radix(clear_1, number_of_blocks);
/// let ctxt_2 = cks.encrypt_radix(clear_2, number_of_blocks);
///
/// let mut d_ctxt_1 = CudaRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
/// let d_ctxt_2 = CudaRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);
/// let mut d_ctxt_1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
/// let d_ctxt_2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);
///
/// // Compute homomorphically a multiplication
/// let mut d_ct_res = sks.mul(&mut d_ctxt_1, &d_ctxt_2, &mut stream);
@@ -173,10 +173,10 @@ impl CudaServerKey {
/// ```
pub fn mul(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct_left.duplicate_async(stream) };
self.mul_assign(&mut result, ct_right, stream);
result
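
Unlike `unchecked_mul`, `mul` (via `mul_assign_async`) first propagates any pending carries on its operands, so its result can be fed straight into further operations. A sketch of such chaining, assuming `cks`, `sks` and `stream` were created as in the doc example above and that `decrypt_radix` is available as the counterpart of the `encrypt_radix` used there:

```rust
let number_of_blocks = 4;
let clear_1 = 7u64;
let clear_2 = 3u64;
let ctxt_1 = cks.encrypt_radix(clear_1, number_of_blocks);
let ctxt_2 = cks.encrypt_radix(clear_2, number_of_blocks);

let d_ctxt_1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
let d_ctxt_2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);

// `mul` handles carry propagation, so chaining its outputs is fine:
let d_prod = sks.mul(&d_ctxt_1, &d_ctxt_2, &mut stream); // 21
let d_prod2 = sks.mul(&d_prod, &d_ctxt_2, &mut stream); // 63

let res: u64 = cks.decrypt_radix(&d_prod2.to_radix_ciphertext(&mut stream));
assert_eq!(res, clear_1 * clear_2 * clear_2); // 63 fits in 4 blocks of 2 bits
```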
@@ -188,8 +188,8 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn mul_assign_async(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let mut tmp_rhs;
@@ -223,8 +223,8 @@ impl CudaServerKey {
pub fn mul_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {

View File

@@ -1,5 +1,5 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaServerKey;
impl CudaServerKey {
@@ -15,7 +15,7 @@ impl CudaServerKey {
/// ```rust
/// // Encrypt two messages:
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -32,7 +32,7 @@ impl CudaServerKey {
///
/// // Encrypt a message
/// let mut ctxt = cks.encrypt(msg);
/// let mut d_ctxt = CudaRadixCiphertext::from_radix_ciphertext(&ctxt, &mut stream);
/// let mut d_ctxt = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt, &mut stream);
///
/// // Compute homomorphically a negation
/// let d_res = sks.unchecked_neg(&mut d_ctxt, &mut stream);
@@ -44,9 +44,9 @@ impl CudaServerKey {
/// ```
pub fn unchecked_neg(
&self,
ctxt: &CudaRadixCiphertext,
ctxt: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_neg_async(ctxt, stream) };
stream.synchronize();
result
@@ -58,9 +58,9 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_neg_async(
&self,
ctxt: &CudaRadixCiphertext,
ctxt: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = ctxt.duplicate_async(stream);
self.unchecked_neg_assign_async(&mut result, stream);
result
@@ -72,26 +72,31 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_neg_assign_async(
&self,
ctxt: &mut CudaRadixCiphertext,
ctxt: &mut CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let lwe_dimension = ctxt.d_blocks.lwe_dimension();
let lwe_ciphertext_count = ctxt.d_blocks.lwe_ciphertext_count();
let ciphertext = ctxt.as_mut();
let lwe_dimension = ciphertext.d_blocks.lwe_dimension();
let lwe_ciphertext_count = ciphertext.d_blocks.lwe_ciphertext_count();
let info = ctxt.info.blocks.first().unwrap();
let info = ciphertext.info.blocks.first().unwrap();
stream.negate_integer_radix_assign_async(
&mut ctxt.d_blocks.0.d_vec,
&mut ciphertext.d_blocks.0.d_vec,
lwe_dimension,
lwe_ciphertext_count.0 as u32,
info.message_modulus.0 as u32,
info.carry_modulus.0 as u32,
);
ctxt.info = ctxt.info.after_neg();
ciphertext.info = ciphertext.info.after_neg();
}
pub fn unchecked_neg_assign(&self, ctxt: &mut CudaRadixCiphertext, stream: &CudaStream) {
pub fn unchecked_neg_assign(
&self,
ctxt: &mut CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
self.unchecked_neg_assign_async(ctxt, stream);
}
@@ -110,7 +115,7 @@ impl CudaServerKey {
/// ```rust
/// // Encrypt two messages:
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -127,7 +132,7 @@ impl CudaServerKey {
///
/// // Encrypt a message
/// let mut ctxt = cks.encrypt(msg);
/// let mut d_ctxt = CudaRadixCiphertext::from_radix_ciphertext(&ctxt, &mut stream);
/// let mut d_ctxt = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt, &mut stream);
///
/// // Compute homomorphically a negation
/// let d_res = sks.neg(&mut d_ctxt, &mut stream);
@@ -137,7 +142,11 @@ impl CudaServerKey {
/// let dec: u64 = cks.decrypt(&res);
/// assert_eq!(modulus - msg, dec);
/// ```
pub fn neg(&self, ctxt: &CudaRadixCiphertext, stream: &CudaStream) -> CudaRadixCiphertext {
pub fn neg(
&self,
ctxt: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ctxt.duplicate_async(stream) };
self.neg_assign(&mut result, stream);
result
@@ -147,7 +156,11 @@ impl CudaServerKey {
///
/// - `stream` __must__ be synchronized to guarantee computation has finished, and inputs must
/// not be dropped until stream is synchronised
pub unsafe fn neg_assign_async(&self, ctxt: &mut CudaRadixCiphertext, stream: &CudaStream) {
pub unsafe fn neg_assign_async(
&self,
ctxt: &mut CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let mut tmp_ctxt;
let ct = if ctxt.block_carries_are_empty() {
@@ -162,7 +175,7 @@ impl CudaServerKey {
self.propagate_single_carry_assign_async(ct, stream);
}
pub fn neg_assign(&self, ctxt: &mut CudaRadixCiphertext, stream: &CudaStream) {
pub fn neg_assign(&self, ctxt: &mut CudaUnsignedRadixCiphertext, stream: &CudaStream) {
unsafe {
self.neg_assign_async(ctxt, stream);
}

View File

@@ -1,7 +1,7 @@
use crate::core_crypto::gpu::vec::CudaVec;
use crate::core_crypto::gpu::CudaStream;
use crate::integer::block_decomposition::{BlockDecomposer, DecomposableInto};
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaServerKey;
use itertools::Itertools;
@@ -17,7 +17,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -33,7 +33,7 @@ impl CudaServerKey {
/// let scalar = 40;
///
/// let ct = cks.encrypt(msg);
/// let mut d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let mut d_ct = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
///
/// // Compute homomorphically an addition:
/// let d_ct_res = sks.unchecked_scalar_add(&d_ct, scalar, &mut stream);
@@ -45,10 +45,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_scalar_add<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u8>,
{
@@ -63,7 +63,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_add_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) where
@@ -75,7 +75,7 @@ impl CudaServerKey {
BlockDecomposer::with_early_stop_at_zero(scalar, bits_in_message).iter_as::<u8>();
let mut d_decomposed_scalar =
CudaVec::<u64>::new_async(ct.d_blocks.lwe_ciphertext_count().0, stream);
CudaVec::<u64>::new_async(ct.as_ref().d_blocks.lwe_ciphertext_count().0, stream);
let scalar64 = decomposer
.collect_vec()
.iter()
@@ -84,11 +84,11 @@ impl CudaServerKey {
.collect_vec();
d_decomposed_scalar.copy_from_cpu_async(scalar64.as_slice(), stream);
let lwe_dimension = ct.d_blocks.lwe_dimension();
let lwe_dimension = ct.as_ref().d_blocks.lwe_dimension();
// If the scalar is decomposed using fewer blocks than our ciphertext
// has, we simply don't touch the ciphertext's last blocks
stream.scalar_addition_integer_radix_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
&d_decomposed_scalar,
lwe_dimension,
scalar64.len() as u32,
@@ -97,12 +97,12 @@ impl CudaServerKey {
);
}
ct.info = ct.info.after_scalar_add(scalar);
ct.as_mut().info = ct.as_ref().info.after_scalar_add(scalar);
}
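
The host-side decomposition mirrors the one in `create_trivial_radix`, except that `with_early_stop_at_zero` stops emitting blocks once the remaining scalar is zero; any ciphertext blocks beyond that point are left untouched, as the comment above notes. In the clear (the helper name is illustrative):

```rust
// Clear-text model of the early-stopping scalar decomposition used by
// unchecked_scalar_add_assign_async (2-bit message blocks here).
fn decompose_early_stop(mut scalar: u64, message_modulus: u64) -> Vec<u64> {
    let mut blocks = Vec::new();
    while scalar != 0 {
        blocks.push(scalar % message_modulus);
        scalar /= message_modulus;
    }
    blocks
}

// 40 = 0 + 2*4 + 2*16 -> three blocks, even if the ciphertext has more.
assert_eq!(decompose_early_stop(40, 4), vec![0, 2, 2]);
```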
pub fn unchecked_scalar_add_assign<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) where
@@ -125,7 +125,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -141,7 +141,7 @@ impl CudaServerKey {
/// let scalar = 40;
///
/// let ct = cks.encrypt(msg);
/// let mut d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let mut d_ct = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
///
/// // Compute homomorphically an addition:
/// let d_ct_res = sks.scalar_add(&d_ct, scalar, &mut stream);
@@ -153,10 +153,10 @@ impl CudaServerKey {
/// ```
pub fn scalar_add<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u8>,
{
@@ -171,7 +171,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_add_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) where
@@ -185,8 +185,12 @@ impl CudaServerKey {
self.full_propagate_assign_async(ct, stream);
}
pub fn scalar_add_assign<T>(&self, ct: &mut CudaRadixCiphertext, scalar: T, stream: &CudaStream)
where
pub fn scalar_add_assign<T>(
&self,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) where
T: DecomposableInto<u8>,
{
unsafe {

View File

@@ -1,7 +1,7 @@
use crate::core_crypto::gpu::vec::CudaVec;
use crate::core_crypto::gpu::CudaStream;
use crate::integer::block_decomposition::{BlockDecomposer, DecomposableInto};
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaBootstrappingKey;
use crate::integer::gpu::{BitOpType, CudaServerKey};
@@ -12,14 +12,14 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_bitop_assign_async<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
op: BitOpType,
stream: &CudaStream,
) where
Scalar: DecomposableInto<u8>,
{
let lwe_ciphertext_count = ct.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct.as_ref().d_blocks.lwe_ciphertext_count();
let message_modulus = self.message_modulus.0;
let h_clear_blocks = BlockDecomposer::with_early_stop_at_zero(rhs, message_modulus.ilog2())
@@ -32,7 +32,7 @@ impl CudaServerKey {
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_scalar_bitop_integer_radix_classic_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
&clear_blocks,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -56,7 +56,7 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_scalar_bitop_integer_radix_multibit_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
&clear_blocks,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -84,10 +84,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_bitand<Scalar>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
Scalar: DecomposableInto<u8>,
{
@@ -98,7 +98,7 @@ impl CudaServerKey {
pub fn unchecked_scalar_bitand_assign<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -106,17 +106,17 @@ impl CudaServerKey {
{
unsafe {
self.unchecked_scalar_bitop_assign_async(ct, rhs, BitOpType::ScalarAnd, stream);
ct.info = ct.info.after_scalar_bitand(rhs);
ct.as_mut().info = ct.as_ref().info.after_scalar_bitand(rhs);
}
stream.synchronize();
}
pub fn unchecked_scalar_bitor<Scalar>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
Scalar: DecomposableInto<u8>,
{
@@ -127,7 +127,7 @@ impl CudaServerKey {
pub fn unchecked_scalar_bitor_assign<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -135,17 +135,17 @@ impl CudaServerKey {
{
unsafe {
self.unchecked_scalar_bitop_assign_async(ct, rhs, BitOpType::ScalarOr, stream);
ct.info = ct.info.after_scalar_bitor(rhs);
ct.as_mut().info = ct.as_ref().info.after_scalar_bitor(rhs);
}
stream.synchronize();
}
pub fn unchecked_scalar_bitxor<Scalar>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
Scalar: DecomposableInto<u8>,
{
@@ -156,7 +156,7 @@ impl CudaServerKey {
pub fn unchecked_scalar_bitxor_assign<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -164,7 +164,7 @@ impl CudaServerKey {
{
unsafe {
self.unchecked_scalar_bitop_assign_async(ct, rhs, BitOpType::ScalarXor, stream);
ct.info = ct.info.after_scalar_bitxor(rhs);
ct.as_mut().info = ct.as_ref().info.after_scalar_bitxor(rhs);
}
stream.synchronize();
}
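
All three scalar bit operations share the same shape: decompose `rhs` into clear message-sized blocks, upload them, and apply the operation block against block, with ciphertext blocks beyond the decomposition behaving as if combined with a zero block (the identity for OR/XOR, an eraser for AND). The clear-text semantics, with an illustrative helper:

```rust
// Clear-text model of the blockwise scalar bit operations (AND shown;
// OR and XOR follow the same blockwise pattern).
fn scalar_bitand_blocks(ct_blocks: &[u64], scalar_blocks: &[u64]) -> Vec<u64> {
    ct_blocks
        .iter()
        .enumerate()
        .map(|(i, &b)| b & scalar_blocks.get(i).copied().unwrap_or(0))
        .collect()
}

// message_modulus = 4, so each block holds 2 bits.
assert_eq!(scalar_bitand_blocks(&[3, 2, 1], &[1, 3]), vec![1, 2, 0]);
```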
@@ -175,7 +175,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_bitand_assign_async<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -185,12 +185,12 @@ impl CudaServerKey {
self.full_propagate_assign_async(ct, stream);
}
self.unchecked_scalar_bitop_assign_async(ct, rhs, BitOpType::ScalarAnd, stream);
ct.info = ct.info.after_scalar_bitand(rhs);
ct.as_mut().info = ct.as_ref().info.after_scalar_bitand(rhs);
}
pub fn scalar_bitand_assign<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -204,10 +204,10 @@ impl CudaServerKey {
pub fn scalar_bitand<Scalar>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
Scalar: DecomposableInto<u8>,
{
@@ -222,7 +222,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_bitor_assign_async<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -232,12 +232,12 @@ impl CudaServerKey {
self.full_propagate_assign_async(ct, stream);
}
self.unchecked_scalar_bitop_assign_async(ct, rhs, BitOpType::ScalarOr, stream);
ct.info = ct.info.after_scalar_bitor(rhs);
ct.as_mut().info = ct.as_ref().info.after_scalar_bitor(rhs);
}
pub fn scalar_bitor_assign<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -251,10 +251,10 @@ impl CudaServerKey {
pub fn scalar_bitor<Scalar>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
Scalar: DecomposableInto<u8>,
{
@@ -269,7 +269,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_bitxor_assign_async<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -279,12 +279,12 @@ impl CudaServerKey {
self.full_propagate_assign_async(ct, stream);
}
self.unchecked_scalar_bitop_assign_async(ct, rhs, BitOpType::ScalarXor, stream);
ct.info = ct.info.after_scalar_bitxor(rhs);
ct.as_mut().info = ct.as_ref().info.after_scalar_bitxor(rhs);
}
pub fn scalar_bitxor_assign<Scalar>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) where
@@ -298,10 +298,10 @@ impl CudaServerKey {
pub fn scalar_bitxor<Scalar>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
rhs: Scalar,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
Scalar: DecomposableInto<u8>,
{

View File

@@ -1,7 +1,7 @@
use crate::core_crypto::gpu::vec::CudaVec;
use crate::core_crypto::gpu::CudaStream;
use crate::integer::block_decomposition::{BlockDecomposer, DecomposableInto};
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::{CudaBootstrappingKey, CudaServerKey};
use crate::integer::gpu::ComparisonType;
use crate::integer::server_key::comparator::Comparator;
@@ -13,11 +13,11 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_comparison_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
op: ComparisonType,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -25,7 +25,7 @@ impl CudaServerKey {
// ct represents an unsigned value (always >= 0)
return self.create_trivial_radix(
Comparator::IS_SUPERIOR,
ct.d_blocks.lwe_ciphertext_count().0,
ct.as_ref().d_blocks.lwe_ciphertext_count().0,
stream,
);
}
@@ -40,12 +40,12 @@ impl CudaServerKey {
// scalar is obviously bigger if it has non-zero
// blocks after lhs's last block
let is_scalar_obviously_bigger = scalar_blocks
.get(ct.d_blocks.lwe_ciphertext_count().0..)
.get(ct.as_ref().d_blocks.lwe_ciphertext_count().0..)
.is_some_and(|sub_slice| sub_slice.iter().any(|&scalar_block| scalar_block != 0));
if is_scalar_obviously_bigger {
return self.create_trivial_radix(
Comparator::IS_INFERIOR,
ct.d_blocks.lwe_ciphertext_count().0,
ct.as_ref().d_blocks.lwe_ciphertext_count().0,
stream,
);
}
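
Both early exits cost nothing on the GPU: a negative scalar can never exceed an unsigned ciphertext, and a scalar with a non-zero block above the ciphertext's top block is bigger no matter what the ciphertext holds, so a trivial encryption of the comparison outcome is returned directly. The second check, modelled in the clear (the helper name is illustrative):

```rust
// Clear-text model of the "scalar obviously bigger" shortcut: any
// non-zero scalar block above the ciphertext's last block decides it.
fn is_scalar_obviously_bigger(scalar_blocks: &[u64], num_ct_blocks: usize) -> bool {
    scalar_blocks
        .get(num_ct_blocks..)
        .is_some_and(|high| high.iter().any(|&b| b != 0))
}

assert!(is_scalar_obviously_bigger(&[0, 0, 0, 0, 1], 4));
assert!(!is_scalar_obviously_bigger(&[3, 3, 3, 3], 4));
```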
@@ -53,19 +53,19 @@ impl CudaServerKey {
// If we are still here, the scalar_blocks above
// num_blocks are all 0s; we can remove them
// as we will handle them separately.
scalar_blocks.truncate(ct.d_blocks.lwe_ciphertext_count().0);
scalar_blocks.truncate(ct.as_ref().d_blocks.lwe_ciphertext_count().0);
let d_scalar_blocks: CudaVec<u64> = CudaVec::from_cpu_async(&scalar_blocks, stream);
let lwe_ciphertext_count = ct.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct.as_ref().d_blocks.lwe_ciphertext_count();
let mut result = ct.duplicate_async(stream);
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_scalar_comparison_integer_radix_classic_kb_async(
&mut result.d_blocks.0.d_vec,
&ct.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&ct.as_ref().d_blocks.0.d_vec,
&d_scalar_blocks,
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -90,8 +90,8 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_scalar_comparison_integer_radix_multibit_kb_async(
&mut result.d_blocks.0.d_vec,
&ct.d_blocks.0.d_vec,
&mut result.as_mut().d_blocks.0.d_vec,
&ct.as_ref().d_blocks.0.d_vec,
&d_scalar_blocks,
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -126,10 +126,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_eq_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -138,10 +138,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_eq<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -156,10 +156,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_eq_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -185,7 +185,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::integer::{gen_keys_radix, RadixCiphertext};
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
@@ -205,7 +205,7 @@ impl CudaServerKey {
/// let ct1 = cks.encrypt(msg1);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &stream);
///
/// let d_ct_res = sks.scalar_eq(&d_ct1, msg2, &stream);
///
@@ -218,10 +218,10 @@ impl CudaServerKey {
/// ```
pub fn scalar_eq<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -236,10 +236,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_ne_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -265,7 +265,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::integer::{gen_keys_radix, RadixCiphertext};
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
@@ -285,7 +285,7 @@ impl CudaServerKey {
/// let ct1 = cks.encrypt(msg1);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &stream);
///
/// let d_ct_res = sks.scalar_ne(&d_ct1, msg2, &stream);
///
@@ -298,10 +298,10 @@ impl CudaServerKey {
/// ```
pub fn scalar_ne<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -316,10 +316,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_ne_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -328,10 +328,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_ne<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -346,10 +346,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_gt_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -358,10 +358,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_gt<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -376,10 +376,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_ge_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -388,10 +388,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_ge<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -406,10 +406,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_lt_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -418,10 +418,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_lt<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -436,10 +436,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_le_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -448,10 +448,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_le<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -465,10 +465,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_gt_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -486,10 +486,10 @@ impl CudaServerKey {
pub fn scalar_gt<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -504,10 +504,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_ge_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -525,10 +525,10 @@ impl CudaServerKey {
pub fn scalar_ge<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -543,10 +543,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_lt_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -564,10 +564,10 @@ impl CudaServerKey {
pub fn scalar_lt<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -581,10 +581,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_le_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -602,10 +602,10 @@ impl CudaServerKey {
pub fn scalar_le<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -620,10 +620,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_max_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -632,10 +632,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_max<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -650,10 +650,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_min_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -662,10 +662,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_min<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -680,10 +680,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_max_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -701,10 +701,10 @@ impl CudaServerKey {
pub fn scalar_max<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -719,10 +719,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_min_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{
@@ -740,10 +740,10 @@ impl CudaServerKey {
pub fn scalar_min<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u64>,
{

View File

@@ -1,5 +1,5 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaServerKey;
impl CudaServerKey {
@@ -15,7 +15,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -31,7 +31,7 @@ impl CudaServerKey {
/// let scalar = 3;
///
/// let ct = cks.encrypt(msg);
/// let mut d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let mut d_ct = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
///
/// // Compute homomorphically a scalar multiplication:
/// let d_ct_res = sks.unchecked_small_scalar_mul(&d_ct, scalar, &mut stream);
@@ -42,10 +42,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_small_scalar_mul(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: u64,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct.duplicate_async(stream) };
self.unchecked_small_scalar_mul_assign(&mut result, scalar, stream);
result
@@ -57,23 +57,23 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_small_scalar_mul_assign_async(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: u64,
stream: &CudaStream,
) {
match scalar {
0 => {
ct.d_blocks.0.d_vec.memset_async(0, stream);
ct.as_mut().d_blocks.0.d_vec.memset_async(0, stream);
}
1 => {
// Multiplication by one is the identity
}
_ => {
let lwe_dimension = ct.d_blocks.lwe_dimension();
let lwe_ciphertext_count = ct.d_blocks.lwe_ciphertext_count();
let lwe_dimension = ct.as_ref().d_blocks.lwe_dimension();
let lwe_ciphertext_count = ct.as_ref().d_blocks.lwe_ciphertext_count();
stream.small_scalar_mult_integer_radix_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
scalar,
lwe_dimension,
lwe_ciphertext_count.0 as u32,
@@ -81,12 +81,12 @@ impl CudaServerKey {
}
}
ct.info = ct.info.after_small_scalar_mul(scalar as u8);
ct.as_mut().info = ct.as_ref().info.after_small_scalar_mul(scalar as u8);
}
pub fn unchecked_small_scalar_mul_assign(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: u64,
stream: &CudaStream,
) {
@@ -108,7 +108,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -124,7 +124,7 @@ impl CudaServerKey {
/// let scalar = 3;
///
/// let ct = cks.encrypt(msg);
/// let mut d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let mut d_ct = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
///
/// // Compute homomorphically a scalar multiplication:
/// let d_ct_res = sks.small_scalar_mul(&d_ct, scalar, &mut stream);
@@ -135,10 +135,10 @@ impl CudaServerKey {
/// ```
pub fn small_scalar_mul(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: u64,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = unsafe { ct.duplicate_async(stream) };
self.small_scalar_mul_assign(&mut result, scalar, stream);
result
@@ -150,7 +150,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn small_scalar_mul_assign_async(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: u64,
stream: &CudaStream,
) {
@@ -164,7 +164,7 @@ impl CudaServerKey {
pub fn small_scalar_mul_assign(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: u64,
stream: &CudaStream,
) {

View File

@@ -1,6 +1,6 @@
use crate::core_crypto::gpu::CudaStream;
use crate::core_crypto::prelude::CastFrom;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaBootstrappingKey;
use crate::integer::gpu::CudaServerKey;
@@ -11,10 +11,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_rotate_left_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
n: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -30,18 +30,18 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_rotate_left_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
n: T,
stream: &CudaStream,
) where
T: CastFrom<u32>,
u32: CastFrom<T>,
{
let lwe_ciphertext_count = ct.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_scalar_rotate_left_integer_radix_classic_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
u32::cast_from(n),
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -64,7 +64,7 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_scalar_rotate_left_integer_radix_multibit_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
u32::cast_from(n),
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -91,10 +91,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_left_rotate<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
n: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -110,10 +110,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_rotate_right_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
n: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -129,18 +129,18 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_rotate_right_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
n: T,
stream: &CudaStream,
) where
T: CastFrom<u32>,
u32: CastFrom<T>,
{
let lwe_ciphertext_count = ct.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_scalar_rotate_right_integer_radix_classic_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
u32::cast_from(n),
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -163,7 +163,7 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_scalar_rotate_right_integer_radix_multibit_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
u32::cast_from(n),
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -190,10 +190,10 @@ impl CudaServerKey {
pub fn unchecked_scalar_right_rotate<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
n: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -205,7 +205,7 @@ impl CudaServerKey {
pub fn scalar_rotate_left_assign<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
n: T,
stream: &CudaStream,
) where
@@ -224,7 +224,7 @@ impl CudaServerKey {
pub fn scalar_rotate_right_assign<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
n: T,
stream: &CudaStream,
) where
@@ -243,10 +243,10 @@ impl CudaServerKey {
pub fn scalar_rotate_left<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -258,10 +258,10 @@ impl CudaServerKey {
pub fn scalar_rotate_right<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,

View File

@@ -1,7 +1,7 @@
use crate::core_crypto::gpu::CudaStream;
use crate::core_crypto::prelude::UnsignedNumeric;
use crate::integer::block_decomposition::DecomposableInto;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaServerKey;
use crate::integer::server_key::TwosComplementNegation;
@@ -17,7 +17,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -33,7 +33,7 @@ impl CudaServerKey {
/// let scalar = 3;
///
/// let ct = cks.encrypt(msg);
/// let mut d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let mut d_ct = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
///
/// // Compute homomorphically an addition:
/// let d_ct_res = sks.unchecked_scalar_sub(&d_ct, scalar, &mut stream);
@@ -45,10 +45,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_scalar_sub<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u8> + UnsignedNumeric + TwosComplementNegation,
{
@@ -63,7 +63,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_sub_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) where
@@ -71,12 +71,12 @@ impl CudaServerKey {
{
let negated_scalar = scalar.twos_complement_negation();
self.unchecked_scalar_add_assign_async(ct, negated_scalar, stream);
ct.info = ct.info.after_scalar_sub(scalar);
ct.as_mut().info = ct.as_ref().info.after_scalar_sub(scalar);
}
pub fn unchecked_scalar_sub_assign<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) where
@@ -99,7 +99,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -115,7 +115,7 @@ impl CudaServerKey {
/// let scalar = 3;
///
/// let ct = cks.encrypt(msg);
/// let mut d_ct = CudaRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
/// let mut d_ct = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct, &mut stream);
///
/// // Compute homomorphically an addition:
/// let d_ct_res = sks.scalar_sub(&d_ct, scalar, &mut stream);
@@ -127,10 +127,10 @@ impl CudaServerKey {
/// ```
pub fn scalar_sub<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: DecomposableInto<u8> + UnsignedNumeric + TwosComplementNegation,
{
@@ -145,7 +145,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_sub_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) where
@@ -159,8 +159,12 @@ impl CudaServerKey {
self.full_propagate_assign_async(ct, stream);
}
pub fn scalar_sub_assign<T>(&self, ct: &mut CudaRadixCiphertext, scalar: T, stream: &CudaStream)
where
pub fn scalar_sub_assign<T>(
&self,
ct: &mut CudaUnsignedRadixCiphertext,
scalar: T,
stream: &CudaStream,
) where
T: DecomposableInto<u8> + UnsignedNumeric + TwosComplementNegation,
{
unsafe {

View File

@@ -1,6 +1,6 @@
use crate::core_crypto::gpu::CudaStream;
use crate::core_crypto::prelude::CastFrom;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaBootstrappingKey;
use crate::integer::gpu::CudaServerKey;
@@ -11,10 +11,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_left_shift_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -30,19 +30,19 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_left_shift_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) where
T: CastFrom<u32>,
u32: CastFrom<T>,
{
let lwe_ciphertext_count = ct.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_scalar_shift_left_integer_radix_classic_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
u32::cast_from(shift),
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -65,7 +65,7 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_scalar_shift_left_integer_radix_multibit_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
u32::cast_from(shift),
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -98,7 +98,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -115,7 +115,7 @@ impl CudaServerKey {
///
/// let ct1 = cks.encrypt(msg);
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
///
/// let d_ct_res = sks.unchecked_scalar_left_shift(&d_ct1, shift, &mut stream);
///
@@ -128,10 +128,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_scalar_left_shift<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -147,10 +147,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_right_shift_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -166,19 +166,19 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_scalar_right_shift_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) where
T: CastFrom<u32>,
u32: CastFrom<T>,
{
let lwe_ciphertext_count = ct.d_blocks.lwe_ciphertext_count();
let lwe_ciphertext_count = ct.as_ref().d_blocks.lwe_ciphertext_count();
match &self.bootstrapping_key {
CudaBootstrappingKey::Classic(d_bsk) => {
stream.unchecked_scalar_shift_right_integer_radix_classic_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
u32::cast_from(shift),
&d_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -201,7 +201,7 @@ impl CudaServerKey {
}
CudaBootstrappingKey::MultiBit(d_multibit_bsk) => {
stream.unchecked_scalar_shift_right_integer_radix_multibit_kb_assign_async(
&mut ct.d_blocks.0.d_vec,
&mut ct.as_mut().d_blocks.0.d_vec,
u32::cast_from(shift),
&d_multibit_bsk.d_vec,
&self.key_switching_key.d_vec,
@@ -234,7 +234,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -251,7 +251,7 @@ impl CudaServerKey {
///
/// let ct1 = cks.encrypt(msg);
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
///
/// let d_ct_res = sks.unchecked_scalar_right_shift(&d_ct1, shift, &mut stream);
///
@@ -264,10 +264,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_scalar_right_shift<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -283,7 +283,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_right_shift_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) where
@@ -303,10 +303,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_right_shift_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -324,7 +324,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -341,7 +341,7 @@ impl CudaServerKey {
///
/// let ct1 = cks.encrypt(msg);
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
///
/// let d_ct_res = sks.scalar_right_shift(&d_ct1, shift, &mut stream);
///
@@ -354,10 +354,10 @@ impl CudaServerKey {
/// ```
pub fn scalar_right_shift<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -373,7 +373,7 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_left_shift_assign_async<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) where
@@ -393,10 +393,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn scalar_left_shift_async<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -414,7 +414,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -431,7 +431,7 @@ impl CudaServerKey {
///
/// let ct1 = cks.encrypt(msg);
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
///
/// let d_ct_res = sks.scalar_left_shift(&d_ct1, shift, &mut stream);
///
@@ -444,10 +444,10 @@ impl CudaServerKey {
/// ```
pub fn scalar_left_shift<T>(
&self,
ct: &CudaRadixCiphertext,
ct: &CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) -> CudaRadixCiphertext
) -> CudaUnsignedRadixCiphertext
where
T: CastFrom<u32>,
u32: CastFrom<T>,
@@ -459,7 +459,7 @@ impl CudaServerKey {
pub fn scalar_left_shift_assign<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) where
@@ -478,7 +478,7 @@ impl CudaServerKey {
pub fn scalar_right_shift_assign<T>(
&self,
ct: &mut CudaRadixCiphertext,
ct: &mut CudaUnsignedRadixCiphertext,
shift: T,
stream: &CudaStream,
) where

View File

@@ -1,5 +1,5 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
use crate::integer::gpu::ciphertext::{CudaIntegerRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::integer::gpu::server_key::CudaServerKey;
impl CudaServerKey {
@@ -15,7 +15,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -34,8 +34,8 @@ impl CudaServerKey {
/// let ctxt_2 = cks.encrypt(msg_2);
///
/// // Copy to GPU
/// let d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);
/// let d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);
///
/// // Compute homomorphically an addition:
/// let d_ct_res = sks.unchecked_sub(&d_ct1, &d_ct2, &mut stream);
@@ -48,10 +48,10 @@ impl CudaServerKey {
/// ```
pub fn unchecked_sub(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.unchecked_sub_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -63,10 +63,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_sub_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = ct_left.duplicate_async(stream);
self.unchecked_sub_assign_async(&mut result, ct_right, stream);
result
@@ -78,8 +78,8 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn unchecked_sub_assign_async(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let neg = self.unchecked_neg_async(ct_right, stream);
@@ -98,7 +98,7 @@ impl CudaServerKey {
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -118,8 +118,8 @@ impl CudaServerKey {
/// let ctxt_2 = cks.encrypt(msg_2);
///
/// // Copy to GPU
/// let mut d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);
/// let mut d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ctxt_2, &mut stream);
///
/// // Compute homomorphically an addition:
/// sks.unchecked_sub_assign(&mut d_ct1, &d_ct2, &mut stream);
@@ -132,8 +132,8 @@ impl CudaServerKey {
/// ```
pub fn unchecked_sub_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
@@ -157,7 +157,7 @@ impl CudaServerKey {
///
/// ```rust
/// use tfhe::core_crypto::gpu::{CudaDevice, CudaStream};
/// use tfhe::integer::gpu::ciphertext::CudaRadixCiphertext;
/// use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
/// use tfhe::integer::gpu::gen_keys_radix_gpu;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
///
@@ -177,8 +177,8 @@ impl CudaServerKey {
/// let ct2 = cks.encrypt(msg_2 as u64);
///
/// // Copy to GPU
/// let d_ct1 = CudaRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
/// let d_ct1 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct1, &mut stream);
/// let d_ct2 = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct2, &mut stream);
///
/// // Compute homomorphically an addition:
/// let d_ct_res = sks.sub(&d_ct1, &d_ct2, &mut stream);
@@ -191,10 +191,10 @@ impl CudaServerKey {
/// ```
pub fn sub(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let result = unsafe { self.sub_async(ct_left, ct_right, stream) };
stream.synchronize();
result
@@ -206,10 +206,10 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn sub_async(
&self,
ct_left: &CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) -> CudaRadixCiphertext {
) -> CudaUnsignedRadixCiphertext {
let mut result = ct_left.duplicate_async(stream);
self.sub_assign_async(&mut result, ct_right, stream);
result
@@ -217,8 +217,8 @@ impl CudaServerKey {
pub fn sub_assign(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
unsafe {
@@ -233,8 +233,8 @@ impl CudaServerKey {
/// not be dropped until stream is synchronised
pub unsafe fn sub_assign_async(
&self,
ct_left: &mut CudaRadixCiphertext,
ct_right: &CudaRadixCiphertext,
ct_left: &mut CudaUnsignedRadixCiphertext,
ct_right: &CudaUnsignedRadixCiphertext,
stream: &CudaStream,
) {
let mut tmp_rhs;

File diff suppressed because it is too large

View File

@@ -0,0 +1,148 @@
use crate::core_crypto::gpu::CudaStream;
use crate::integer::gpu::ciphertext::CudaSignedRadixCiphertext;
use crate::integer::gpu::server_key::radix::tests::GpuFunctionExecutor;
use crate::integer::gpu::CudaServerKey;
use crate::integer::server_key::radix_parallel::tests_cases_signed::*;
use crate::integer::server_key::radix_parallel::tests_cases_unsigned::FunctionExecutor;
use crate::integer::{RadixClientKey, ServerKey, SignedRadixCiphertext};
use crate::shortint::parameters::*;
use std::sync::Arc;
// Macro to generate tests for all parameter sets
macro_rules! create_gpu_parametrized_test{
($name:ident { $($param:ident),* $(,)? }) => {
::paste::paste! {
$(
#[test]
fn [<test_gpu_ $name _ $param:lower>]() {
$name($param)
}
)*
}
};
($name:ident)=> {
create_gpu_parametrized_test!($name
{
// PARAM_MESSAGE_1_CARRY_1_KS_PBS,
PARAM_MESSAGE_2_CARRY_2_KS_PBS,
// PARAM_MESSAGE_3_CARRY_3_KS_PBS,
// PARAM_MESSAGE_4_CARRY_4_KS_PBS,
PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_3_KS_PBS
});
};
}
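// As a sketch of what the single-parameter form expands to (given the
// parameter list above), `create_gpu_parametrized_test!(integer_add)`
// produces one `#[test]` per parameter set, e.g.:
//
// #[test]
// fn test_gpu_integer_add_param_message_2_carry_2_ks_pbs() {
//     integer_add(PARAM_MESSAGE_2_CARRY_2_KS_PBS)
// }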
// Unchecked operations
create_gpu_parametrized_test!(integer_unchecked_add);
// Default operations
create_gpu_parametrized_test!(integer_add);
/// For default/unchecked binary functions
impl<'a, F>
FunctionExecutor<(&'a SignedRadixCiphertext, &'a SignedRadixCiphertext), SignedRadixCiphertext>
for GpuFunctionExecutor<F>
where
F: Fn(
&CudaServerKey,
&CudaSignedRadixCiphertext,
&CudaSignedRadixCiphertext,
&CudaStream,
) -> CudaSignedRadixCiphertext,
{
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
self.setup_from_keys(cks, &sks);
}
fn execute(
&mut self,
input: (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
) -> SignedRadixCiphertext {
let context = self
.context
.as_ref()
.expect("setup was not properly called");
let d_ctxt_1 =
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.stream);
let d_ctxt_2 =
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.1, &context.stream);
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, &d_ctxt_2, &context.stream);
gpu_result.to_signed_radix_ciphertext(&context.stream)
}
}
/// For unchecked/default assign binary functions
impl<'a, F> FunctionExecutor<(&'a mut SignedRadixCiphertext, &'a SignedRadixCiphertext), ()>
for GpuFunctionExecutor<F>
where
F: Fn(&CudaServerKey, &mut CudaSignedRadixCiphertext, &CudaSignedRadixCiphertext, &CudaStream),
{
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
self.setup_from_keys(cks, &sks);
}
fn execute(&mut self, input: (&'a mut SignedRadixCiphertext, &'a SignedRadixCiphertext)) {
let context = self
.context
.as_ref()
.expect("setup was not properly called");
let mut d_ctxt_1 =
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.stream);
let d_ctxt_2 =
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.1, &context.stream);
(self.func)(&context.sks, &mut d_ctxt_1, &d_ctxt_2, &context.stream);
*input.0 = d_ctxt_1.to_signed_radix_ciphertext(&context.stream);
}
}
/// For unchecked/default binary functions with one scalar input
impl<'a, F> FunctionExecutor<(&'a SignedRadixCiphertext, u64), SignedRadixCiphertext>
for GpuFunctionExecutor<F>
where
F: Fn(
&CudaServerKey,
&CudaSignedRadixCiphertext,
u64,
&CudaStream,
) -> CudaSignedRadixCiphertext,
{
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
self.setup_from_keys(cks, &sks);
}
fn execute(&mut self, input: (&'a SignedRadixCiphertext, u64)) -> SignedRadixCiphertext {
let context = self
.context
.as_ref()
.expect("setup was not properly called");
let d_ctxt_1 =
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.stream);
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.stream);
gpu_result.to_signed_radix_ciphertext(&context.stream)
}
}
fn integer_unchecked_add<P>(param: P)
where
P: Into<PBSParameters>,
{
let executor = GpuFunctionExecutor::new(&CudaServerKey::unchecked_add);
signed_unchecked_add_test(param, executor);
}
fn integer_add<P>(param: P)
where
P: Into<PBSParameters>,
{
let executor = GpuFunctionExecutor::new(&CudaServerKey::add);
signed_default_add_test(param, executor);
}

File diff suppressed because it is too large

View File

@@ -1110,9 +1110,10 @@ pub(crate) mod tests_unsigned {
pub(crate) mod tests_signed {
use super::*;
use crate::integer::keycache::KEY_CACHE;
use crate::integer::server_key::radix_parallel::tests_signed::{
random_non_zero_value, signed_add_under_modulus, NB_CTXT, NB_TESTS_SMALLER,
use crate::integer::server_key::radix_parallel::tests_cases_signed::{
random_non_zero_value, signed_add_under_modulus,
};
use crate::integer::server_key::radix_parallel::tests_signed::{NB_CTXT, NB_TESTS_SMALLER};
use crate::integer::{IntegerKeyKind, RadixClientKey, SignedRadixCiphertext};
use crate::shortint::PBSParameters;
use rand::Rng;

View File

@@ -23,12 +23,13 @@ mod ilog2;
#[cfg(test)]
pub(crate) mod tests_cases_comparisons;
#[cfg(test)]
pub(crate) mod tests_cases_signed;
#[cfg(test)]
pub(crate) mod tests_cases_unsigned;
#[cfg(test)]
mod tests_signed;
#[cfg(test)]
pub(crate) mod tests_unsigned;
use crate::integer::ciphertext::IntegerRadixCiphertext;
use super::ServerKey;

View File

@@ -0,0 +1,424 @@
use crate::integer::keycache::KEY_CACHE;
use crate::integer::server_key::radix_parallel::tests_cases_unsigned::FunctionExecutor;
use crate::integer::server_key::radix_parallel::tests_signed::{NB_CTXT, NB_TESTS_SMALLER};
use crate::integer::{IntegerKeyKind, RadixClientKey, SignedRadixCiphertext};
use crate::shortint::PBSParameters;
use itertools::izip;
use rand::prelude::ThreadRng;
use rand::Rng;
use std::sync::Arc;
//================================================================================
// Helper functions
//================================================================================
pub(crate) fn signed_add_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
signed_overflowing_add_under_modulus(lhs, rhs, modulus).0
}
// Adds two signed numbers modulo the given modulus
//
// This is to 'simulate' i8, i16, ixy using i64 integers
//
// lhs and rhs must be in [-modulus..modulus[
pub(crate) fn signed_overflowing_add_under_modulus(
lhs: i64,
rhs: i64,
modulus: i64,
) -> (i64, bool) {
assert!(modulus > 0);
assert!((-modulus..modulus).contains(&lhs));
// The code below requires rhs and lhs to be in range -modulus..modulus
// in scalar tests, rhs may exceed modulus
// so we truncate it (as the fhe ops do)
let (mut res, mut overflowed) = if (-modulus..modulus).contains(&rhs) {
(lhs + rhs, false)
} else {
// 2*modulus to get all the bits
(lhs + (rhs % (2 * modulus)), true)
};
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
overflowed = true;
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
overflowed = true;
}
(res, overflowed)
}
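// Worked examples of the wrapping behaviour, assuming modulus = 128
// (i.e. simulating i8 with i64):
//
// signed_overflowing_add_under_modulus(127, 1, 128) == (-128, true)
// signed_overflowing_add_under_modulus(-128, -1, 128) == (127, true)
// signed_overflowing_add_under_modulus(-3, 5, 128) == (2, false)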
pub(crate) fn signed_neg_under_modulus(lhs: i64, modulus: i64) -> i64 {
assert!(modulus > 0);
let mut res = -lhs;
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
}
res
}
// Subtracts two signed numbers modulo the given modulus
//
// This is to 'simulate' i8, i16, ixy using i64 integers
//
// lhs and rhs must be in [-modulus..modulus[
pub(crate) fn signed_sub_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
signed_overflowing_sub_under_modulus(lhs, rhs, modulus).0
}
pub(crate) fn signed_overflowing_sub_under_modulus(
lhs: i64,
rhs: i64,
modulus: i64,
) -> (i64, bool) {
// Technically we should be able to call overflowing_add_under_modulus(lhs, -rhs, ...)
// but due to -rhs being a 'special case' when rhs == -modulus, we have to
// do the impl here
assert!(modulus > 0);
assert!((-modulus..modulus).contains(&lhs));
// The code below requires rhs and lhs to be in range -modulus..modulus
// in scalar tests, rhs may exceed modulus
// so we truncate it (as the fhe ops do)
let (mut res, mut overflowed) = if (-modulus..modulus).contains(&rhs) {
(lhs - rhs, false)
} else {
// 2*modulus to get all the bits
(lhs - (rhs % (2 * modulus)), true)
};
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
overflowed = true;
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
overflowed = true;
}
(res, overflowed)
}
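// Worked example, assuming modulus = 128: subtracting past the minimum
// wraps around, e.g. signed_overflowing_sub_under_modulus(-128, 1, 128) == (127, true).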
pub(crate) fn signed_mul_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
assert!(modulus > 0);
overflowing_mul_under_modulus(lhs, rhs, modulus).0
}
pub(crate) fn overflowing_mul_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> (i64, bool) {
let (mut res, mut overflowed) = lhs.overflowing_mul(rhs);
overflowed |= res < -modulus || res >= modulus;
res %= modulus * 2;
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
}
(res, overflowed)
}
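// Worked example, assuming modulus = 128:
// overflowing_mul_under_modulus(64, 2, 128) == (-128, true), since
// 64 * 2 = 128 does not fit in [-128..128[ and wraps around to -128.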
pub(crate) fn absolute_value_under_modulus(lhs: i64, modulus: i64) -> i64 {
if lhs < 0 {
signed_neg_under_modulus(lhs, modulus)
} else {
lhs
}
}
pub(crate) fn signed_left_shift_under_modulus(lhs: i64, rhs: u32, modulus: i64) -> i64 {
signed_mul_under_modulus(lhs, 1 << rhs, modulus)
}
pub(crate) fn signed_right_shift_under_modulus(lhs: i64, rhs: u32, _modulus: i64) -> i64 {
lhs >> rhs
}
pub(crate) fn signed_div_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
// in signed integers, -modulus can be represented, but +modulus cannot
// thus, when dividing: -128 / -1 = 128, the result overflows to -128
assert!(modulus > 0);
let mut res = lhs / rhs;
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
}
res
}
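// Worked example of the overflowing case described above, assuming
// modulus = 128: signed_div_under_modulus(-128, -1, 128) == -128,
// i.e. the quotient 128 wraps back to -128 as it would for i8.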
pub(crate) fn signed_rem_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
assert!(modulus > 0);
let q = signed_div_under_modulus(lhs, rhs, modulus);
let q_times_rhs = signed_mul_under_modulus(q, rhs, modulus);
signed_sub_under_modulus(lhs, q_times_rhs, modulus)
}
pub(crate) fn signed_div_rem_floor_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> (i64, i64) {
let mut q = signed_div_under_modulus(lhs, rhs, modulus);
let mut r = signed_rem_under_modulus(lhs, rhs, modulus);
if (r != 0) && ((r < 0) != (rhs < 0)) {
q = signed_sub_under_modulus(q, 1, modulus);
r = signed_add_under_modulus(r, rhs, modulus);
}
(q, r)
}
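// Worked example of the floor adjustment: with lhs = -7, rhs = 2 and
// modulus = 128, truncating division gives q = -3, r = -1; r and rhs have
// opposite signs, so the pair is corrected to q = -4, r = 1.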
/// Helper function to do a rotate left when the type used to store
/// the value is bigger than the actual intended bit size
pub(crate) fn rotate_left_helper(value: i64, n: u32, actual_bit_size: u32) -> i64 {
// We start with:
// [0000000000000|xxxx]
// 64 b 0
//
// rotated will be
// [0000000000xx|xx00]
// 64 b 0
let n = n % actual_bit_size;
let mask = 1i64.wrapping_shl(actual_bit_size) - 1;
let shifted_mask = mask.wrapping_shl(n) & !mask;
// The value may be negative and so have its msb
// set to one, so use the mask to keep only the part
// that interests us
let rotated = (value & mask).rotate_left(n);
let tmp = (rotated & mask) | ((rotated & shifted_mask) >> actual_bit_size);
// If the sign bit after rotation is one,
// then all bits above it need to be one
let new_sign_bit = (tmp >> (actual_bit_size - 1)) & 1;
let mut pad = -new_sign_bit;
pad <<= actual_bit_size; // only bits above actual_bit_size should be set
pad | tmp
}
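// Worked example, assuming actual_bit_size = 4:
// rotate_left_helper(0b0110, 2, 4) rotates 0110 into 1001; the new sign
// bit is set, so the result is sign-extended to -7 as an i64.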
/// Helper function to do a rotate right when the type used to store
/// the value is bigger than the actual intended bit size
pub(crate) fn rotate_right_helper(value: i64, n: u32, actual_bit_size: u32) -> i64 {
// We start with:
// [yyyyyyyyyyyy|xxxx]
// 64 b 0
// where xs are bits that we are interested in
// and ys are either 0 or 1 depending on if value is positive
//
// mask: [yyyyyyyyyyyy|mmmm]
// shifted_ mask: [mmyyyyyyyyyy|0000]
//
// rotated will be
// [xxyyyyyyyyyy|00xx]
// 64 b 0
//
// To get the 'cycled' bits where they should be,
// we get them using a mask then shift
let n = n % actual_bit_size;
let mask = 1i64.wrapping_shl(actual_bit_size) - 1;
// shifted mask only needs the bits that cycled
let shifted_mask = mask.rotate_right(n) & !mask;
// The value may be negative and so have its msb
// set to one, so use the mask to keep only the part
// that interests us
let rotated = (value & mask).rotate_right(n);
let tmp = (rotated & mask) | ((rotated & shifted_mask) >> (u64::BITS - actual_bit_size));
// If the sign bit after rotation is one,
// then all bits above it need to be one
let new_sign_bit = (tmp >> (actual_bit_size - 1)) & 1;
let mut pad = -new_sign_bit;
pad <<= actual_bit_size; // only bits above actual_bit_size should be set
pad | tmp
}
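// Worked example, assuming actual_bit_size = 4:
// rotate_right_helper(0b0011, 1, 4) cycles the low bit to the top,
// giving 1001, which sign-extends to -7 as an i64.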
/// Returns an array filled with random values such that:
/// - the first half contains values in [0..modulus[
/// - the second half contains values in [-modulus..0]
pub(crate) fn random_signed_value_under_modulus<const N: usize>(
rng: &mut rand::prelude::ThreadRng,
modulus: i64,
) -> [i64; N] {
assert!(modulus > 0);
let mut values = [0i64; N];
for value in &mut values[..N / 2] {
*value = rng.gen_range(0..modulus);
}
for value in &mut values[N / 2..] {
*value = rng.gen_range(-modulus..=0);
}
values
}
/// Returns an array filled with random values such that:
/// - the first half contains values in ]0..modulus[
/// - the second half contains values in [-modulus..0[
pub(crate) fn random_non_zero_signed_value_under_modulus<const N: usize>(
rng: &mut rand::prelude::ThreadRng,
modulus: i64,
) -> [i64; N] {
assert!(modulus > 0);
let mut values = [0i64; N];
for value in &mut values[..N / 2] {
*value = rng.gen_range(1..modulus);
}
for value in &mut values[N / 2..] {
*value = rng.gen_range(-modulus..0);
}
values
}
/// Returns an iterator that yields pairs of i64 values in range `-modulus..modulus`
/// such that there is at least one pair each of (P, P), (P, N), (N, N) and (N, P)
/// where P means value >= 0 and N means value <= 0
pub(crate) fn create_iterator_of_signed_random_pairs<const N: usize>(
rng: &mut rand::prelude::ThreadRng,
modulus: i64,
) -> impl Iterator<Item = (i64, i64)> {
assert!(N >= 4, "N must be at least 4 to uphold the guarantee");
let mut lhs_values = [0i64; N];
let mut rhs_values = [0i64; N];
lhs_values[0] = rng.gen_range(0..modulus);
rhs_values[0] = rng.gen_range(0..modulus);
lhs_values[1] = rng.gen_range(0..modulus);
rhs_values[1] = rng.gen_range(-modulus..=0);
lhs_values[2] = rng.gen_range(-modulus..=0);
rhs_values[2] = rng.gen_range(-modulus..=0);
lhs_values[3] = rng.gen_range(-modulus..=0);
rhs_values[3] = rng.gen_range(0..modulus);
for i in 4..N {
lhs_values[i] = rng.gen_range(-modulus..modulus);
rhs_values[i] = rng.gen_range(-modulus..modulus);
}
izip!(lhs_values, rhs_values)
}
pub(crate) fn random_non_zero_value(rng: &mut ThreadRng, modulus: i64) -> i64 {
loop {
let value = rng.gen::<i64>() % modulus;
if value != 0 {
break value;
}
}
}
// Signed tests
pub(crate) fn signed_unchecked_add_test<P, T>(param: P, mut executor: T)
where
P: Into<PBSParameters>,
T: for<'a> FunctionExecutor<
(&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
SignedRadixCiphertext,
>,
{
let (cks, sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);
let sks = Arc::new(sks);
let cks = RadixClientKey::from((cks, NB_CTXT));
let mut rng = rand::thread_rng();
let modulus = (cks.parameters().message_modulus().0.pow(NB_CTXT as u32) / 2) as i64;
executor.setup(&cks, sks);
// check some overflow behaviour
let overflowing_values = [
(-modulus, -1, modulus - 1),
(modulus - 1, 1, -modulus),
(-modulus, -2, modulus - 2),
(modulus - 2, 2, -modulus),
];
for (clear_0, clear_1, expected_clear) in overflowing_values {
let ctxt_0 = cks.encrypt_signed(clear_0);
let ctxt_1 = cks.encrypt_signed(clear_1);
let ct_res = executor.execute((&ctxt_0, &ctxt_1));
let dec_res: i64 = cks.decrypt_signed(&ct_res);
let clear_res = signed_add_under_modulus(clear_0, clear_1, modulus);
assert_eq!(clear_res, dec_res);
assert_eq!(clear_res, expected_clear);
}
for (clear_0, clear_1) in create_iterator_of_signed_random_pairs::<
{ crate::integer::server_key::radix_parallel::tests_signed::NB_TESTS_UNCHECKED },
>(&mut rng, modulus)
{
let ctxt_0 = cks.encrypt_signed(clear_0);
let ctxt_1 = cks.encrypt_signed(clear_1);
let ct_res = executor.execute((&ctxt_0, &ctxt_1));
let dec_res: i64 = cks.decrypt_signed(&ct_res);
let clear_res = signed_add_under_modulus(clear_0, clear_1, modulus);
assert_eq!(clear_res, dec_res);
}
}
pub(crate) fn signed_default_add_test<P, T>(param: P, mut executor: T)
where
P: Into<PBSParameters>,
T: for<'a> FunctionExecutor<
(&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
SignedRadixCiphertext,
>,
{
let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);
sks.set_deterministic_pbs_execution(true);
let sks = Arc::new(sks);
let cks = RadixClientKey::from((cks, NB_CTXT));
let mut rng = rand::thread_rng();
let modulus = (cks.parameters().message_modulus().0.pow(NB_CTXT as u32) / 2) as i64;
executor.setup(&cks, sks);
let mut clear;
for _ in 0..NB_TESTS_SMALLER {
let clear_0 = rng.gen::<i64>() % modulus;
let clear_1 = rng.gen::<i64>() % modulus;
let ctxt_0 = cks.encrypt_signed(clear_0);
let ctxt_1 = cks.encrypt_signed(clear_1);
let mut ct_res = executor.execute((&ctxt_0, &ctxt_1));
let tmp_ct = executor.execute((&ctxt_0, &ctxt_1));
assert!(ct_res.block_carries_are_empty());
assert_eq!(ct_res, tmp_ct);
clear = signed_add_under_modulus(clear_0, clear_1, modulus);
// println!("clear_0 = {}, clear_1 = {}", clear_0, clear_1);
// add multiple times to raise the degree
for _ in 0..NB_TESTS_SMALLER {
ct_res = executor.execute((&ct_res, &ctxt_0));
assert!(ct_res.block_carries_are_empty());
clear = signed_add_under_modulus(clear, clear_0, modulus);
let dec_res: i64 = cks.decrypt_signed(&ct_res);
// println!("clear = {}, dec_res = {}", clear, dec_res);
assert_eq!(clear, dec_res);
}
}
}

View File

@@ -1,5 +1,7 @@
use crate::integer::keycache::KEY_CACHE;
use crate::integer::server_key::radix_parallel::sub::SignedOperation;
use crate::integer::server_key::radix_parallel::tests_cases_signed::*;
use crate::integer::server_key::radix_parallel::tests_unsigned::CpuFunctionExecutor;
use crate::integer::{
BooleanBlock, IntegerKeyKind, RadixClientKey, ServerKey, SignedRadixCiphertext,
};
@@ -7,9 +9,8 @@ use crate::shortint::ciphertext::NoiseLevel;
#[cfg(tarpaulin)]
use crate::shortint::parameters::coverage_parameters::*;
use crate::shortint::parameters::*;
use itertools::{iproduct, izip};
use itertools::iproduct;
use paste::paste;
use rand::rngs::ThreadRng;
use rand::Rng;
/// Number of loop iterations within randomized tests
@@ -105,313 +106,7 @@ macro_rules! create_parametrized_test{
}
//================================================================================
// Helper functions
//================================================================================
pub(crate) fn signed_add_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
signed_overflowing_add_under_modulus(lhs, rhs, modulus).0
}
// Adds two signed numbers modulo the given modulus
//
// This is to 'simulate' i8, i16, ixy using i64 integers
//
// lhs and rhs must be in [-modulus..modulus[
fn signed_overflowing_add_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> (i64, bool) {
assert!(modulus > 0);
assert!((-modulus..modulus).contains(&lhs));
// The code below requires lhs and rhs to be in the range -modulus..modulus
// in scalar tests, rhs may exceed modulus,
// so we truncate it (as the fhe ops do)
let (mut res, mut overflowed) = if (-modulus..modulus).contains(&rhs) {
(lhs + rhs, false)
} else {
// 2*modulus to get all the bits
(lhs + (rhs % (2 * modulus)), true)
};
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
overflowed = true;
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
overflowed = true;
}
(res, overflowed)
}
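// A minimal illustrative check (sketch; the test name is hypothetical), assuming
// modulus = 8, i.e. simulating i4 arithmetic with an i64 carrier:
#[test]
fn illustrate_signed_overflowing_add_under_modulus() {
    // -8 + -1 wraps around to 7, like two's complement i4
    assert_eq!(signed_overflowing_add_under_modulus(-8, -1, 8), (7, true));
    // 7 + 1 wraps around to -8
    assert_eq!(signed_overflowing_add_under_modulus(7, 1, 8), (-8, true));
    // in-range additions do not overflow
    assert_eq!(signed_overflowing_add_under_modulus(3, 2, 8), (5, false));
}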
fn signed_neg_under_modulus(lhs: i64, modulus: i64) -> i64 {
assert!(modulus > 0);
let mut res = -lhs;
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
}
res
}
// Subtracts two signed numbers modulo the given modulus
//
// This is to 'simulate' i8, i16, ixy using i64 integers
//
// lhs and rhs must be in [-modulus..modulus[
fn signed_sub_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
signed_overflowing_sub_under_modulus(lhs, rhs, modulus).0
}
fn signed_overflowing_sub_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> (i64, bool) {
// Technically we should be able to call signed_overflowing_add_under_modulus(lhs, -rhs, ...)
// but since -rhs is a 'special case' when rhs == -modulus, we have to
// redo the impl here
assert!(modulus > 0);
assert!((-modulus..modulus).contains(&lhs));
// The code below requires lhs and rhs to be in the range -modulus..modulus
// in scalar tests, rhs may exceed modulus,
// so we truncate it (as the fhe ops do)
let (mut res, mut overflowed) = if (-modulus..modulus).contains(&rhs) {
(lhs - rhs, false)
} else {
// 2*modulus to get all the bits
(lhs - (rhs % (2 * modulus)), true)
};
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
overflowed = true;
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
overflowed = true;
}
(res, overflowed)
}
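// A similar illustrative sketch (hypothetical test name) for subtraction with
// modulus = 8, including the rhs == -modulus special case mentioned above:
#[test]
fn illustrate_signed_overflowing_sub_under_modulus() {
    // 0 - (-8) = 8 cannot be represented in i4, so it overflows to -8
    assert_eq!(signed_overflowing_sub_under_modulus(0, -8, 8), (-8, true));
    // -8 - 1 wraps around to 7
    assert_eq!(signed_overflowing_sub_under_modulus(-8, 1, 8), (7, true));
    // in-range subtraction does not overflow
    assert_eq!(signed_overflowing_sub_under_modulus(5, 3, 8), (2, false));
}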
fn signed_mul_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
assert!(modulus > 0);
overflowing_mul_under_modulus(lhs, rhs, modulus).0
}
fn overflowing_mul_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> (i64, bool) {
let (mut res, mut overflowed) = lhs.overflowing_mul(rhs);
overflowed |= res < -modulus || res >= modulus;
res %= modulus * 2;
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
}
(res, overflowed)
}
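// Illustrative sketch for multiplication with modulus = 8 (hypothetical test name):
#[test]
fn illustrate_overflowing_mul_under_modulus() {
    // 4 * 4 = 16 truncates to the low 4 bits: 0b1_0000 -> 0
    assert_eq!(overflowing_mul_under_modulus(4, 4, 8), (0, true));
    // -4 * 2 = -8 still fits in i4
    assert_eq!(overflowing_mul_under_modulus(-4, 2, 8), (-8, false));
    // 3 * -3 = -9 wraps around to 7
    assert_eq!(overflowing_mul_under_modulus(3, -3, 8), (7, true));
}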
fn absolute_value_under_modulus(lhs: i64, modulus: i64) -> i64 {
if lhs < 0 {
signed_neg_under_modulus(lhs, modulus)
} else {
lhs
}
}
fn signed_left_shift_under_modulus(lhs: i64, rhs: u32, modulus: i64) -> i64 {
signed_mul_under_modulus(lhs, 1 << rhs, modulus)
}
fn signed_right_shift_under_modulus(lhs: i64, rhs: u32, _modulus: i64) -> i64 {
lhs >> rhs
}
fn signed_div_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
// in signed integers, -modulus can be represented, but +modulus cannot
// thus, when dividing: -128 / -1 = 128, the result overflows to -128
assert!(modulus > 0);
let mut res = lhs / rhs;
if res < -modulus {
// rem_euclid(modulus) would also work
res = modulus + (res - -modulus);
} else if res > modulus - 1 {
res = -modulus + (res - modulus);
}
res
}
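// Illustrative sketch for division with modulus = 8 (hypothetical test name),
// covering the -modulus / -1 overflow described above:
#[test]
fn illustrate_signed_div_under_modulus() {
    // -8 / -1 = 8 cannot be represented, so it overflows to -8 (like i8's -128 / -1)
    assert_eq!(signed_div_under_modulus(-8, -1, 8), -8);
    // division truncates towards zero, as in Rust
    assert_eq!(signed_div_under_modulus(7, 2, 8), 3);
    assert_eq!(signed_div_under_modulus(-7, 2, 8), -3);
}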
fn signed_rem_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> i64 {
assert!(modulus > 0);
let q = signed_div_under_modulus(lhs, rhs, modulus);
let q_times_rhs = signed_mul_under_modulus(q, rhs, modulus);
signed_sub_under_modulus(lhs, q_times_rhs, modulus)
}
fn signed_div_rem_floor_under_modulus(lhs: i64, rhs: i64, modulus: i64) -> (i64, i64) {
let mut q = signed_div_under_modulus(lhs, rhs, modulus);
let mut r = signed_rem_under_modulus(lhs, rhs, modulus);
if (r != 0) && ((r < 0) != (rhs < 0)) {
q = signed_sub_under_modulus(q, 1, modulus);
r = signed_add_under_modulus(r, rhs, modulus);
}
(q, r)
}
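// Illustrative sketch (hypothetical test name) contrasting floor division/remainder
// with the truncating versions above, again with modulus = 8:
#[test]
fn illustrate_signed_div_rem_floor_under_modulus() {
    // truncating: -7 / 2 = -3 rem -1; flooring: -7 div 2 = -4 rem 1
    assert_eq!(signed_div_rem_floor_under_modulus(-7, 2, 8), (-4, 1));
    // when both operands have the same sign, floor and truncation coincide
    assert_eq!(signed_div_rem_floor_under_modulus(7, 2, 8), (3, 1));
}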
/// helper function to do a rotate left when the type used to store
/// the value is bigger than the actual intended bit size
fn rotate_left_helper(value: i64, n: u32, actual_bit_size: u32) -> i64 {
// We start with:
// [0000000000000|xxxx]
// 64 b 0
//
// rotated will be
// [0000000000xx|xx00]
// 64 b 0
let n = n % actual_bit_size;
let mask = 1i64.wrapping_shl(actual_bit_size) - 1;
let shifted_mask = mask.wrapping_shl(n) & !mask;
// The value may be negative and thus have its msb
// set to one, so use the mask to keep only the part
// that interests us
let rotated = (value & mask).rotate_left(n);
let tmp = (rotated & mask) | ((rotated & shifted_mask) >> actual_bit_size);
// If the sign bit after rotation is one,
// then all bits above it need to be one
let new_sign_bit = (tmp >> (actual_bit_size - 1)) & 1;
let mut pad = -new_sign_bit;
pad <<= actual_bit_size; // only bits above actual_bit_size should be set
pad | tmp
}
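// Illustrative sketch (hypothetical test name): rotating within 4 bits while the
// value lives in an i64, including the sign extension of the result:
#[test]
fn illustrate_rotate_left_helper() {
    // i4: rotate_left(0b1000, 1) = 0b0001
    assert_eq!(rotate_left_helper(-8, 1, 4), 1);
    // i4: rotate_left(0b0001, 3) = 0b1000, sign-extended to -8
    assert_eq!(rotate_left_helper(1, 3, 4), -8);
    // the rotation amount is taken modulo the bit size
    assert_eq!(rotate_left_helper(-8, 4, 4), -8);
}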
/// helper function to do a rotate right when the type used to store
/// the value is bigger than the actual intended bit size
fn rotate_right_helper(value: i64, n: u32, actual_bit_size: u32) -> i64 {
// We start with:
// [yyyyyyyyyyyy|xxxx]
// 64 b 0
// where xs are bits that we are interested in
// and ys are either 0 or 1 depending on if value is positive
//
// mask: [yyyyyyyyyyyy|mmmm]
// shifted_mask: [mmyyyyyyyyyy|0000]
//
// rotated will be
// [xxyyyyyyyyyy|00xx]
// 64 b 0
//
// To get the 'cycled' bits where they should be,
// we get them using a mask then shift
let n = n % actual_bit_size;
let mask = 1i64.wrapping_shl(actual_bit_size) - 1;
// shifted mask only needs the bits that cycled
let shifted_mask = mask.rotate_right(n) & !mask;
// The value may be negative and thus have its msb
// set to one, so use the mask to keep only the part
// that interests us
let rotated = (value & mask).rotate_right(n);
let tmp = (rotated & mask) | ((rotated & shifted_mask) >> (u64::BITS - actual_bit_size));
// If the sign bit after rotation is one,
// then all bits above it need to be one
let new_sign_bit = (tmp >> (actual_bit_size - 1)) & 1;
let mut pad = -new_sign_bit;
pad <<= actual_bit_size; // only bits above actual_bit_size should be set
pad | tmp
}
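// Illustrative sketch (hypothetical test name), mirroring the rotate-left example:
#[test]
fn illustrate_rotate_right_helper() {
    // i4: rotate_right(0b0001, 1) = 0b1000, sign-extended to -8
    assert_eq!(rotate_right_helper(1, 1, 4), -8);
    // i4: rotate_right(0b1000, 3) = 0b0001
    assert_eq!(rotate_right_helper(-8, 3, 4), 1);
}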
/// Returns an array filled with random values such that:
/// - the first half contains values in [0..modulus[
/// - the second half contains values in [-modulus..0]
fn random_signed_value_under_modulus<const N: usize>(
rng: &mut rand::prelude::ThreadRng,
modulus: i64,
) -> [i64; N] {
assert!(modulus > 0);
let mut values = [0i64; N];
for value in &mut values[..N / 2] {
*value = rng.gen_range(0..modulus);
}
for value in &mut values[N / 2..] {
*value = rng.gen_range(-modulus..=0);
}
values
}
/// Returns an array filled with random values such that:
/// - the first half contains values in ]0..modulus[
/// - the second half contains values in [-modulus..0[
fn random_non_zero_signed_value_under_modulus<const N: usize>(
rng: &mut rand::prelude::ThreadRng,
modulus: i64,
) -> [i64; N] {
assert!(modulus > 0);
let mut values = [0i64; N];
for value in &mut values[..N / 2] {
*value = rng.gen_range(1..modulus);
}
for value in &mut values[N / 2..] {
*value = rng.gen_range(-modulus..0);
}
values
}
/// Returns an iterator that yields pairs of i64 values in range `-modulus..modulus`
/// such that there is at least one pair for each of (P, P), (P, N), (N, N), (N, P)
/// where P means value >= 0 and N means value <= 0
fn create_iterator_of_signed_random_pairs<const N: usize>(
rng: &mut rand::prelude::ThreadRng,
modulus: i64,
) -> impl Iterator<Item = (i64, i64)> {
assert!(N >= 4, "N must be at least 4 to uphold the guarantee");
let mut lhs_values = [0i64; N];
let mut rhs_values = [0i64; N];
lhs_values[0] = rng.gen_range(0..modulus);
rhs_values[0] = rng.gen_range(0..modulus);
lhs_values[1] = rng.gen_range(0..modulus);
rhs_values[1] = rng.gen_range(-modulus..=0);
lhs_values[2] = rng.gen_range(-modulus..=0);
rhs_values[2] = rng.gen_range(-modulus..=0);
lhs_values[3] = rng.gen_range(-modulus..=0);
rhs_values[3] = rng.gen_range(0..modulus);
for i in 4..N {
lhs_values[i] = rng.gen_range(-modulus..modulus);
rhs_values[i] = rng.gen_range(-modulus..modulus);
}
izip!(lhs_values, rhs_values)
}
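// A minimal usage sketch: the first four pairs are guaranteed to cover the four
// sign combinations, and the remaining N - 4 pairs are uniform in -modulus..modulus:
//
//     let mut rng = rand::thread_rng();
//     for (lhs, rhs) in create_iterator_of_signed_random_pairs::<8>(&mut rng, 128) {
//         // exercise the operation under test on (lhs, rhs)
//     }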
pub(crate) fn random_non_zero_value(rng: &mut ThreadRng, modulus: i64) -> i64 {
loop {
let value = rng.gen::<i64>() % modulus;
if value != 0 {
break value;
}
}
}
//================================================================================
// Encrypt/Derypt Tests
// Encrypt/Decrypt Tests
//================================================================================
create_parametrized_test!(integer_signed_encrypt_decrypt);
@@ -635,41 +330,12 @@ create_parametrized_test!(
);
create_parametrized_test!(integer_signed_unchecked_absolute_value);
fn integer_signed_unchecked_add(param: impl Into<PBSParameters>) {
let (cks, sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);
let mut rng = rand::thread_rng();
let modulus = (cks.parameters().message_modulus().0.pow(NB_CTXT as u32) / 2) as i64;
// check some overflow behaviour
let overflowing_values = [
(-modulus, -1, modulus - 1),
(modulus - 1, 1, -modulus),
(-modulus, -2, modulus - 2),
(modulus - 2, 2, -modulus),
];
for (clear_0, clear_1, expected_clear) in overflowing_values {
let ctxt_0 = cks.encrypt_signed_radix(clear_0, NB_CTXT);
let ctxt_1 = cks.encrypt_signed_radix(clear_1, NB_CTXT);
let ct_res = sks.unchecked_add_parallelized(&ctxt_0, &ctxt_1);
let dec_res: i64 = cks.decrypt_signed_radix(&ct_res);
let clear_res = signed_add_under_modulus(clear_0, clear_1, modulus);
assert_eq!(clear_res, dec_res);
assert_eq!(clear_res, expected_clear);
}
for (clear_0, clear_1) in
create_iterator_of_signed_random_pairs::<NB_TESTS_UNCHECKED>(&mut rng, modulus)
{
let ctxt_0 = cks.encrypt_signed_radix(clear_0, NB_CTXT);
let ctxt_1 = cks.encrypt_signed_radix(clear_1, NB_CTXT);
let ct_res = sks.unchecked_add_parallelized(&ctxt_0, &ctxt_1);
let dec_res: i64 = cks.decrypt_signed_radix(&ct_res);
let clear_res = signed_add_under_modulus(clear_0, clear_1, modulus);
assert_eq!(clear_res, dec_res);
}
fn integer_signed_unchecked_add<P>(param: P)
where
P: Into<PBSParameters>,
{
let executor = CpuFunctionExecutor::new(&ServerKey::unchecked_add_parallelized);
signed_unchecked_add_test(param, executor);
}
fn signed_unchecked_overflowing_add_test_case<P, F>(param: P, signed_overflowing_add: F)
@@ -1727,47 +1393,9 @@ fn integer_signed_default_add<P>(param: P)
where
P: Into<PBSParameters>,
{
let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);
let cks = RadixClientKey::from((cks, NB_CTXT));
sks.set_deterministic_pbs_execution(true);
let mut rng = rand::thread_rng();
// message_modulus^vec_length
let modulus = (cks.parameters().message_modulus().0.pow(NB_CTXT as u32) / 2) as i64;
let mut clear;
for _ in 0..NB_TESTS_SMALLER {
let clear_0 = rng.gen::<i64>() % modulus;
let clear_1 = rng.gen::<i64>() % modulus;
let ctxt_0 = cks.encrypt_signed(clear_0);
let ctxt_1 = cks.encrypt_signed(clear_1);
let mut ct_res = sks.add_parallelized(&ctxt_0, &ctxt_1);
let tmp_ct = sks.add_parallelized(&ctxt_0, &ctxt_1);
assert!(ct_res.block_carries_are_empty());
assert_eq!(ct_res, tmp_ct);
clear = signed_add_under_modulus(clear_0, clear_1, modulus);
// println!("clear_0 = {}, clear_1 = {}", clear_0, clear_1);
// add multiple times to raise the degree
for _ in 0..NB_TESTS_SMALLER {
ct_res = sks.add_parallelized(&ct_res, &ctxt_0);
assert!(ct_res.block_carries_are_empty());
clear = signed_add_under_modulus(clear, clear_0, modulus);
let dec_res: i64 = cks.decrypt_signed(&ct_res);
// println!("clear = {}, dec_res = {}", clear, dec_res);
assert_eq!(clear, dec_res);
}
}
let executor = CpuFunctionExecutor::new(&ServerKey::add_parallelized);
signed_default_add_test(param, executor);
}
fn integer_signed_default_overflowing_add<P>(param: P)
where
P: Into<PBSParameters>,