feat(gpu): AES 256

This commit is contained in:
Enzo Di Maria
2025-11-03 15:52:43 +01:00
committed by Agnès Leroy
parent f970031d33
commit 4ff95e3a42
19 changed files with 1740 additions and 103 deletions

View File

@@ -1,8 +1,8 @@
#[cfg(feature = "gpu")]
pub mod cuda {
use benchmark::params_aliases::BENCH_PARAM_GPU_MULTI_BIT_GROUP_4_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M128;
use benchmark::utilities::{get_bench_type, write_to_json, BenchmarkType, OperatorType};
use criterion::{black_box, Criterion, Throughput};
use benchmark::utilities::{write_to_json, OperatorType};
use criterion::{black_box, Criterion};
use tfhe::core_crypto::gpu::CudaStreams;
use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
use tfhe::integer::gpu::CudaServerKey;
@@ -29,114 +29,109 @@ pub mod cuda {
let param_name = param.name();
match get_bench_type() {
BenchmarkType::Latency => {
let streams = CudaStreams::new_multi_gpu();
let (cpu_cks, _) = KEY_CACHE.get_from_params(atomic_param, IntegerKeyKind::Radix);
let sks = CudaServerKey::new(&cpu_cks, &streams);
let cks = RadixClientKey::from((cpu_cks, 1));
let streams = CudaStreams::new_multi_gpu();
let (cpu_cks, _) = KEY_CACHE.get_from_params(atomic_param, IntegerKeyKind::Radix);
let sks = CudaServerKey::new(&cpu_cks, &streams);
let cks = RadixClientKey::from((cpu_cks, 1));
let ct_key = cks.encrypt_u128_for_aes_ctr(key);
let ct_iv = cks.encrypt_u128_for_aes_ctr(iv);
let ct_key = cks.encrypt_u128_for_aes_ctr(key);
let ct_iv = cks.encrypt_u128_for_aes_ctr(iv);
let d_key = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_key, &streams);
let d_iv = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_iv, &streams);
let d_key = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_key, &streams);
let d_iv = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_iv, &streams);
{
const NUM_AES_INPUTS: usize = 1;
const SBOX_PARALLELISM: usize = 16;
let bench_id = format!("{param_name}::{NUM_AES_INPUTS}_input_encryption");
{
const NUM_AES_INPUTS: usize = 1;
const SBOX_PARALLELISM: usize = 16;
let bench_id = format!("{param_name}::{NUM_AES_INPUTS}_input_encryption");
let round_keys = sks.key_expansion(&d_key, &streams);
let round_keys = sks.key_expansion(&d_key, &streams);
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
black_box(sks.aes_encrypt(
&d_iv,
&round_keys,
0,
NUM_AES_INPUTS,
SBOX_PARALLELISM,
&streams,
));
})
});
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
black_box(sks.aes_encrypt(
&d_iv,
&round_keys,
0,
NUM_AES_INPUTS,
SBOX_PARALLELISM,
&streams,
));
})
});
write_to_json::<u64, _>(
&bench_id,
atomic_param,
param.name(),
"aes_encryption",
&OperatorType::Atomic,
aes_op_bit_size,
vec![atomic_param.message_modulus().0.ilog2(); aes_op_bit_size as usize],
);
}
write_to_json::<u64, _>(
&bench_id,
atomic_param,
param.name(),
"aes_encryption",
&OperatorType::Atomic,
aes_op_bit_size,
vec![atomic_param.message_modulus().0.ilog2(); aes_op_bit_size as usize],
);
}
{
let bench_id = format!("{param_name}::key_expansion");
{
let bench_id = format!("{param_name}::key_expansion");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
black_box(sks.key_expansion(&d_key, &streams));
})
});
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
black_box(sks.key_expansion(&d_key, &streams));
})
});
write_to_json::<u64, _>(
&bench_id,
atomic_param,
param.name(),
"aes_key_expansion",
&OperatorType::Atomic,
aes_op_bit_size,
vec![atomic_param.message_modulus().0.ilog2(); aes_op_bit_size as usize],
);
}
}
BenchmarkType::Throughput => {
const NUM_AES_INPUTS: usize = 192;
const SBOX_PARALLELISM: usize = 16;
let bench_id = format!("throughput::{param_name}::{NUM_AES_INPUTS}_inputs");
write_to_json::<u64, _>(
&bench_id,
atomic_param,
param.name(),
"aes_key_expansion",
&OperatorType::Atomic,
aes_op_bit_size,
vec![atomic_param.message_modulus().0.ilog2(); aes_op_bit_size as usize],
);
}
let streams = CudaStreams::new_multi_gpu();
let (cpu_cks, _) = KEY_CACHE.get_from_params(atomic_param, IntegerKeyKind::Radix);
let sks = CudaServerKey::new(&cpu_cks, &streams);
let cks = RadixClientKey::from((cpu_cks, 1));
{
const NUM_AES_INPUTS: usize = 192;
const SBOX_PARALLELISM: usize = 16;
let bench_id = format!("{param_name}::{NUM_AES_INPUTS}_inputs_encryption");
bench_group.throughput(Throughput::Elements(NUM_AES_INPUTS as u64));
let streams = CudaStreams::new_multi_gpu();
let (cpu_cks, _) = KEY_CACHE.get_from_params(atomic_param, IntegerKeyKind::Radix);
let sks = CudaServerKey::new(&cpu_cks, &streams);
let cks = RadixClientKey::from((cpu_cks, 1));
let ct_key = cks.encrypt_u128_for_aes_ctr(key);
let ct_iv = cks.encrypt_u128_for_aes_ctr(iv);
let ct_key = cks.encrypt_u128_for_aes_ctr(key);
let ct_iv = cks.encrypt_u128_for_aes_ctr(iv);
let d_key = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_key, &streams);
let d_iv = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_iv, &streams);
let d_key = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_key, &streams);
let d_iv = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_iv, &streams);
let round_keys = sks.key_expansion(&d_key, &streams);
let round_keys = sks.key_expansion(&d_key, &streams);
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
black_box(sks.aes_encrypt(
&d_iv,
&round_keys,
0,
NUM_AES_INPUTS,
SBOX_PARALLELISM,
&streams,
));
})
});
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
black_box(sks.aes_encrypt(
&d_iv,
&round_keys,
0,
NUM_AES_INPUTS,
SBOX_PARALLELISM,
&streams,
));
})
});
write_to_json::<u64, _>(
&bench_id,
atomic_param,
param.name(),
"aes_encryption",
&OperatorType::Atomic,
aes_op_bit_size,
vec![atomic_param.message_modulus().0.ilog2(); aes_op_bit_size as usize],
);
}
};
write_to_json::<u64, _>(
&bench_id,
atomic_param,
param.name(),
"aes_encryption",
&OperatorType::Atomic,
aes_op_bit_size,
vec![atomic_param.message_modulus().0.ilog2(); aes_op_bit_size as usize],
);
}
bench_group.finish();
}

View File

@@ -0,0 +1,143 @@
#[cfg(feature = "gpu")]
pub mod cuda {
    use benchmark::params_aliases::BENCH_PARAM_GPU_MULTI_BIT_GROUP_4_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M128;
    use benchmark::utilities::{write_to_json, OperatorType};
    use criterion::{black_box, Criterion};
    use tfhe::core_crypto::gpu::CudaStreams;
    use tfhe::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
    use tfhe::integer::gpu::CudaServerKey;
    use tfhe::integer::keycache::KEY_CACHE;
    use tfhe::integer::{IntegerKeyKind, RadixClientKey};
    use tfhe::keycache::NamedParam;
    use tfhe::shortint::AtomicPatternParameters;

    /// Benchmarks homomorphic AES-256 (CTR mode) on GPU.
    ///
    /// Three measurements are registered in one criterion group:
    /// 1. single-block encryption,
    /// 2. key expansion alone,
    /// 3. 192-block encryption.
    ///
    /// Results are also appended to the JSON report via `write_to_json`.
    pub fn cuda_aes_256(c: &mut Criterion) {
        let bench_name = "integer::cuda::aes_256";
        let mut bench_group = c.benchmark_group(bench_name);
        bench_group
            .sample_size(15)
            .measurement_time(std::time::Duration::from_secs(60))
            .warm_up_time(std::time::Duration::from_secs(60));

        let param = BENCH_PARAM_GPU_MULTI_BIT_GROUP_4_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M128;
        let atomic_param: AtomicPatternParameters = param.into();

        // Key halves and initial counter match the NIST SP 800-38A AES-256
        // CTR test vector, so plaintext results are externally checkable.
        let key_hi: u128 = 0x603deb1015ca71be2b73aef0857d7781;
        let key_lo: u128 = 0x1f352c073b6108d72d9810a30914dff4;
        let iv: u128 = 0xf0f1f2f3f4f5f6f7f8f9fafbfcfdfeff;

        // Bit widths reported to the JSON results: an AES block is 128 bits,
        // an AES-256 key is 256 bits.
        let aes_block_op_bit_size = 128;
        let aes_key_op_bit_size = 256;
        let param_name = param.name();

        // Shared setup for all three measurements: GPU streams, server and
        // client keys, and the encrypted AES key / IV uploaded to the GPU.
        // The previous version rebuilt all of this a second time inside the
        // 192-input block with identical inputs; that redundant setup has
        // been removed — the measured closures are unchanged.
        let streams = CudaStreams::new_multi_gpu();
        let (cpu_cks, _) = KEY_CACHE.get_from_params(atomic_param, IntegerKeyKind::Radix);
        let sks = CudaServerKey::new(&cpu_cks, &streams);
        let cks = RadixClientKey::from((cpu_cks, 1));
        let ct_key = cks.encrypt_2u128_for_aes_ctr_256(key_hi, key_lo);
        let ct_iv = cks.encrypt_u128_for_aes_ctr(iv);
        let d_key = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_key, &streams);
        let d_iv = CudaUnsignedRadixCiphertext::from_radix_ciphertext(&ct_iv, &streams);

        // 1. Single-block encryption latency.
        {
            const NUM_AES_INPUTS: usize = 1;
            const SBOX_PARALLELISM: usize = 16;
            let bench_id = format!("{param_name}::{NUM_AES_INPUTS}_input_encryption");
            // Key expansion happens once, outside the measured closure, so
            // only the encryption itself is timed.
            let round_keys = sks.key_expansion_256(&d_key, &streams);
            bench_group.bench_function(&bench_id, |b| {
                b.iter(|| {
                    black_box(sks.aes_256_encrypt(
                        &d_iv,
                        &round_keys,
                        0,
                        NUM_AES_INPUTS,
                        SBOX_PARALLELISM,
                        &streams,
                    ));
                })
            });
            write_to_json::<u64, _>(
                &bench_id,
                atomic_param,
                param.name(),
                "aes_256_encryption",
                &OperatorType::Atomic,
                aes_block_op_bit_size,
                vec![atomic_param.message_modulus().0.ilog2(); aes_block_op_bit_size as usize],
            );
        }

        // 2. Key-expansion latency.
        {
            let bench_id = format!("{param_name}::key_expansion");
            bench_group.bench_function(&bench_id, |b| {
                b.iter(|| {
                    black_box(sks.key_expansion_256(&d_key, &streams));
                })
            });
            write_to_json::<u64, _>(
                &bench_id,
                atomic_param,
                param.name(),
                "aes_256_key_expansion",
                &OperatorType::Atomic,
                aes_key_op_bit_size,
                vec![atomic_param.message_modulus().0.ilog2(); aes_key_op_bit_size as usize],
            );
        }

        // 3. Multi-block (192 inputs) encryption, reusing the shared keys,
        // streams and ciphertexts prepared above.
        {
            const NUM_AES_INPUTS: usize = 192;
            const SBOX_PARALLELISM: usize = 16;
            let bench_id = format!("{param_name}::{NUM_AES_INPUTS}_inputs_encryption");
            let round_keys = sks.key_expansion_256(&d_key, &streams);
            bench_group.bench_function(&bench_id, |b| {
                b.iter(|| {
                    black_box(sks.aes_256_encrypt(
                        &d_iv,
                        &round_keys,
                        0,
                        NUM_AES_INPUTS,
                        SBOX_PARALLELISM,
                        &streams,
                    ));
                })
            });
            write_to_json::<u64, _>(
                &bench_id,
                atomic_param,
                param.name(),
                "aes_256_encryption",
                &OperatorType::Atomic,
                aes_block_op_bit_size,
                vec![atomic_param.message_modulus().0.ilog2(); aes_block_op_bit_size as usize],
            );
        }

        bench_group.finish();
    }
}

View File

@@ -1,6 +1,7 @@
#![allow(dead_code)]
mod aes;
mod aes256;
mod oprf;
mod rerand;
@@ -2799,6 +2800,7 @@ mod cuda {
cuda_ilog2,
oprf::cuda::cuda_unsigned_oprf,
aes::cuda::cuda_aes,
aes256::cuda::cuda_aes_256,
);
criterion_group!(
@@ -2828,6 +2830,7 @@ mod cuda {
cuda_scalar_rem,
oprf::cuda::cuda_unsigned_oprf,
aes::cuda::cuda_aes,
aes256::cuda::cuda_aes_256,
);
criterion_group!(