chore(gpu): fix warnings detection
committed by Andrei Stoian
parent 2d7e1b2293
commit 7bf2ec6ff2
Makefile
@@ -697,8 +697,9 @@ test_high_level_api_gpu_debug: install_rs_build_toolchain install_cargo_nextest
 test_integer_hl_test_gpu_check_warnings: install_rs_build_toolchain
 	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build \
 		--features=integer,internal-keycache,gpu-debug,zk-pok -vv -p $(TFHE_SPEC) &> /tmp/gpu_compile_output
-	WARNINGS=$$(cat /tmp/gpu_compile_output | grep ": warning:" | grep "\[tfhe-cuda-backend" | grep -v "inline function" || true) && \
+	WARNINGS=$$(cat /tmp/gpu_compile_output | grep ": warning #" | grep "\[tfhe-cuda-backend" | grep -v "inline qualifier" || true) && \
 	if [[ "$${WARNINGS}" != "" ]]; then \
 		echo "FAILING BECAUSE CUDA COMPILATION WARNINGS WERE DETECTED: " && \
 		echo "$${WARNINGS}" && exit 1; \
 	fi
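Editor's note: the target builds the GPU feature set verbosely, captures the compiler output, and fails the run if any warning line tagged with [tfhe-cuda-backend remains (other than the excluded inline-related one). Switching the pattern from ": warning:" to ": warning #" (and the exclusion from "inline function" to "inline qualifier") presumably narrows the match to nvcc's numbered diagnostics rather than host-compiler-style messages. A minimal sketch, with a hypothetical file and names not taken from the repository, of the kind of code such a diagnostic points at and that the hunks below remove:

// illustrative_unused_local.cu (hypothetical, not part of the repository).
// When compiled by nvcc, a local that is declared but never referenced is
// typically reported as a numbered diagnostic of the form
// "warning #<n>-D: variable "big_size" was declared but never referenced",
// which is the shape the new ": warning #" grep pattern matches.
#include <cstdint>

struct radix_params {
  std::uint32_t big_lwe_dimension;
};

void allocate_buffers(const radix_params &params) {
  // Unused local: the same pattern the code hunks below delete.
  std::uint64_t big_size =
      (params.big_lwe_dimension + 1) * sizeof(std::uint64_t);
}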
@@ -3769,8 +3769,6 @@ template <typename Torus> struct int_tree_sign_reduction_buffer {
     gpu_memory_allocated = allocate_gpu_memory;
     this->params = params;
 
-    uint64_t big_size = (params.big_lwe_dimension + 1) * sizeof(Torus);
-
     block_selector_f = [](Torus msb, Torus lsb) -> Torus {
       if (msb == IS_EQUAL) // EQUAL
         return lsb;
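Editor's note: only the unused big_size local goes away here; the tree-reduction selector itself is untouched. For readers unfamiliar with it, a minimal sketch of how a selector with this shape combines per-block comparison results, written as a plain linear fold on the host (the buffer above drives a tree reduction on the GPU). Only IS_EQUAL and the msb/lsb convention come from the code above; the other constant and the driver loop are illustrative:

#include <cstdint>
#include <vector>

using Torus = std::uint64_t;

// Illustrative values only; the backend defines its own comparison constants.
constexpr Torus IS_EQUAL = 1;
constexpr Torus IS_GREATER = 2;

// Same shape as block_selector_f above: if the more significant block
// compared equal, the less significant block decides; otherwise the msb
// result already stands.
Torus block_selector(Torus msb, Torus lsb) {
  return (msb == IS_EQUAL) ? lsb : msb;
}

int main() {
  // Fold per-block results from most to least significant block.
  std::vector<Torus> blocks = {IS_EQUAL, IS_GREATER, IS_EQUAL};
  Torus acc = blocks.front();
  for (std::size_t i = 1; i < blocks.size(); ++i)
    acc = block_selector(acc, blocks[i]);
  return acc == IS_GREATER ? 0 : 1; // the first non-equal block decides
}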
@@ -3866,8 +3864,6 @@ template <typename Torus> struct int_comparison_diff_buffer {
       }
     };
 
-    uint64_t big_size = (params.big_lwe_dimension + 1) * sizeof(Torus);
-
     tmp_packed = new CudaRadixCiphertextFFI;
     create_zero_radix_ciphertext_async<Torus>(
         streams[0], gpu_indexes[0], tmp_packed, num_radix_blocks,
@@ -100,7 +100,6 @@ __host__ void host_glwe_wrapping_polynomial_mul_one_to_many(
     cudaStream_t stream, uint32_t gpu_index, Torus *result,
     const Torus *glwe_lhs, int8_t *circulant, const Torus *poly_rhs,
     uint32_t polynomial_size, uint32_t glwe_dimension, uint32_t n_rhs) {
-  uint64_t const *glwe_lhs_t = static_cast<uint64_t const *>(glwe_lhs);
 
   for (unsigned i = 0; i < glwe_dimension + 1; ++i) {
     host_wrapping_polynomial_mul_one_to_many<uint64_t, ulonglong4>(
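Editor's note: the removed line is an unused pointer cast; the multiplication itself is unchanged, with each of the glwe_dimension + 1 polynomials of the GLWE input multiplied against the n_rhs right-hand-side polynomials. For intuition only, a naive host-side sketch of a "one-to-many" wrapping polynomial product, assuming the usual GLWE ring Z_q[X]/(X^N + 1) with coefficients wrapping modulo 2^64; the CUDA version above additionally takes a circulant buffer and runs per GLWE component on the device rather than using this schoolbook double loop:

#include <cstddef>
#include <cstdint>
#include <vector>

using Torus = std::uint64_t;

// result[j] = lhs * rhs[j] for j in [0, n_rhs), schoolbook negacyclic product
// in Z_{2^64}[X] / (X^N + 1); unsigned arithmetic wraps modulo 2^64 on its own.
void wrapping_poly_mul_one_to_many(std::vector<Torus> &result_flat,
                                   const std::vector<Torus> &lhs,
                                   const std::vector<Torus> &rhs_flat,
                                   std::size_t polynomial_size,
                                   std::size_t n_rhs) {
  result_flat.assign(n_rhs * polynomial_size, 0);
  for (std::size_t j = 0; j < n_rhs; ++j) {
    const Torus *rhs = &rhs_flat[j * polynomial_size];
    Torus *out = &result_flat[j * polynomial_size];
    for (std::size_t a = 0; a < polynomial_size; ++a) {
      for (std::size_t b = 0; b < polynomial_size; ++b) {
        const Torus term = lhs[a] * rhs[b];
        const std::size_t k = a + b;
        if (k < polynomial_size)
          out[k] += term;                   // degree stays below N
        else
          out[k - polynomial_size] -= term; // X^N == -1: negacyclic wrap
      }
    }
  }
}

Under these assumptions, calling the sketch once per GLWE component, as the loop over glwe_dimension + 1 above does, reproduces the one-to-many product for a full GLWE ciphertext.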