Mirror of https://github.com/zama-ai/tfhe-rs.git, synced 2026-01-08 22:28:01 -05:00
chore(ci): add randomized long run tests on CPU and GPU
.github/workflows/gpu_integer_long_run_tests.yml (vendored): 13 changed lines
@@ -1,4 +1,4 @@
-name: AWS Long Run Tests on GPU
+name: Long Run Tests on GPU

 env:
   CARGO_TERM_COLOR: always
@@ -15,8 +15,8 @@ on:
   # Allows you to run this workflow manually from the Actions tab as an alternative.
   workflow_dispatch:
   schedule:
-    # Weekly tests will be triggered each Friday at 1a.m.
-    - cron: '0 1 * * FRI'
+    # Weekly tests will be triggered each Friday at 9p.m.
+    - cron: "0 21 * * 5"

 jobs:
   setup-instance:
@@ -36,10 +36,10 @@ jobs:
       slab-url: ${{ secrets.SLAB_BASE_URL }}
       job-secret: ${{ secrets.JOB_SECRET }}
       backend: hyperstack
-      profile: 2-h100
+      profile: multi-gpu-test

   cuda-tests:
-    name: Long run GPU H100 tests
+    name: Long run GPU tests
     needs: [ setup-instance ]
     concurrency:
       group: ${{ github.workflow }}_${{github.event_name}}_${{ github.ref }}
@@ -53,6 +53,7 @@ jobs:
          - os: ubuntu-22.04
            cuda: "12.2"
            gcc: 11
+    timeout-minutes: 4320 # 72 hours
     steps:
       - name: Checkout tfhe-rs
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
@@ -87,7 +88,7 @@ jobs:
         uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
         env:
           SLACK_COLOR: ${{ needs.cuda-tests.result }}
-          SLACK_MESSAGE: "Integer GPU H100 long run tests finished with status: ${{ needs.cuda-tests.result }}. (${{ env.ACTION_RUN_URL }})"
+          SLACK_MESSAGE: "Integer GPU long run tests finished with status: ${{ needs.cuda-tests.result }}. (${{ env.ACTION_RUN_URL }})"

   teardown-instance:
     name: Teardown instance (gpu-tests)
.github/workflows/integer_long_run_tests.yml (vendored): 5 changed lines
@@ -15,8 +15,8 @@ on:
   # Allows you to run this workflow manually from the Actions tab as an alternative.
   workflow_dispatch:
   schedule:
-    # Weekly tests will be triggered each Friday at 1a.m.
-    - cron: '0 1 * * FRI'
+    # Weekly tests will be triggered each Friday at 9p.m.
+    - cron: "0 21 * * 5"

 jobs:
   setup-instance:
@@ -45,6 +45,7 @@ jobs:
       group: ${{ github.workflow }}_${{github.event_name}}_${{ github.ref }}
       cancel-in-progress: true
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
+    timeout-minutes: 4320 # 72 hours
     steps:
       - name: Checkout tfhe-rs
         uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
Makefile: 23 changed lines
@@ -583,10 +583,13 @@ test_integer_gpu: install_rs_build_toolchain
 	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --doc --profile $(CARGO_PROFILE) \
 		--features=integer,gpu -p $(TFHE_SPEC) -- integer::gpu::server_key::

-.PHONY: test_integer_long_run_gpu # Run the tests of the integer module including experimental on the gpu backend
-test_integer_long_run_gpu: install_rs_build_toolchain
-	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
-		--features=integer,gpu,__long_run_tests -p $(TFHE_SPEC) -- integer::gpu::server_key::radix::tests_long_run --test-threads=6
+.PHONY: test_integer_long_run_gpu # Run the long run integer tests on the gpu backend
+test_integer_long_run_gpu: install_rs_check_toolchain install_cargo_nextest
+	BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
+	LONG_TESTS=TRUE \
+	./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
+		--cargo-profile "$(CARGO_PROFILE)" --avx512-support "$(AVX512_SUPPORT)" \
+		--tfhe-package "$(TFHE_SPEC)" --backend "gpu"

 .PHONY: test_integer_compression
 test_integer_compression: install_rs_build_toolchain
@@ -768,11 +771,13 @@ test_signed_integer_multi_bit_ci: install_rs_check_toolchain install_cargo_nexte
 		--cargo-profile "$(CARGO_PROFILE)" --multi-bit --avx512-support "$(AVX512_SUPPORT)" \
 		--signed-only --tfhe-package "$(TFHE_SPEC)"

-.PHONY: test_integer_long_run # Run the long run tests for integer
-test_integer_long_run: install_rs_build_toolchain
-	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
-		--features=integer,internal-keycache,__long_run_tests -p $(TFHE_SPEC) -- integer::server_key::radix_parallel::tests_long_run
-
+.PHONY: test_integer_long_run # Run the long run integer tests
+test_integer_long_run: install_rs_check_toolchain install_cargo_nextest
+	BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
+	LONG_TESTS=TRUE \
+	./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
+		--cargo-profile "$(CARGO_PROFILE)" --avx512-support "$(AVX512_SUPPORT)" \
+		--tfhe-package "$(TFHE_SPEC)"

 .PHONY: test_safe_serialization # Run the tests for safe serialization
 test_safe_serialization: install_rs_build_toolchain install_cargo_nextest
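Usage note: a minimal sketch of how the reworked targets would be driven locally, assuming a repository checkout with this Makefile; BIG_TESTS_INSTANCE, AVX512_SUPPORT and the cargo profile fall back to whatever defaults the Makefile defines.

# Long run integer tests on CPU: the target now delegates to scripts/integer-tests.sh
# with LONG_TESTS=TRUE instead of calling cargo test with the removed __long_run_tests feature.
make test_integer_long_run

# Same entry point for the GPU backend; the script is additionally passed --backend "gpu".
make test_integer_long_run_gpu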
scripts/integer-tests.sh
@@ -10,6 +10,9 @@ function usage() {
     echo "--multi-bit Run multi-bit tests only: default off"
     echo "--unsigned-only Run only unsigned integer tests, by default both signed and unsigned tests are run"
     echo "--signed-only Run only signed integer tests, by default both signed and unsigned tests are run"
+    echo "--nightly-tests Run integer tests configured for nightly runs (3_3 params)"
+    echo "--fast-tests Run integer set but skip a subset of longer tests"
+    echo "--long-tests Run only long run integer tests"
     echo "--cargo-profile The cargo profile used to build tests"
     echo "--backend Backend to use with tfhe-rs"
     echo "--avx512-support Set to ON to enable avx512"
@@ -21,6 +24,7 @@ RUST_TOOLCHAIN="+stable"
 multi_bit_argument=
 sign_argument=
 fast_tests_argument=
+long_tests_argument=
 nightly_tests_argument=
 no_big_params_argument=
 cargo_profile="release"
@@ -91,6 +95,10 @@ if [[ "${FAST_TESTS}" == TRUE ]]; then
     fast_tests_argument=--fast-tests
 fi

+if [[ "${LONG_TESTS}" == TRUE ]]; then
+    long_tests_argument=--long-tests
+fi
+
 if [[ "${NIGHTLY_TESTS}" == TRUE ]]; then
     nightly_tests_argument=--nightly-tests
 fi
@@ -137,18 +145,24 @@ if [[ "${backend}" == "gpu" ]]; then
     fi
 fi

-filter_expression=$(/usr/bin/python3 scripts/test_filtering.py --layer integer --backend "${backend}" ${fast_tests_argument} ${nightly_tests_argument} ${multi_bit_argument} ${sign_argument} ${no_big_params_argument})
+filter_expression=$(/usr/bin/python3 scripts/test_filtering.py --layer integer --backend "${backend}" ${fast_tests_argument} ${long_tests_argument} ${nightly_tests_argument} ${multi_bit_argument} ${sign_argument} ${no_big_params_argument})

 if [[ "${FAST_TESTS}" == "TRUE" ]]; then
     echo "Running 'fast' test set"
-else
+elif [[ "${LONG_TESTS}" == "FALSE" ]]; then
     echo "Running 'slow' test set"
 fi

+if [[ "${LONG_TESTS}" == "TRUE" ]]; then
+    echo "Running 'long run' test set"
+fi
+
 if [[ "${NIGHTLY_TESTS}" == "TRUE" ]]; then
     echo "Running 'nightly' test set"
 fi

 echo "${filter_expression}"

 cargo "${RUST_TOOLCHAIN}" nextest run \
     --tests \
     --cargo-profile "${cargo_profile}" \
@@ -158,7 +172,7 @@ cargo "${RUST_TOOLCHAIN}" nextest run \
     --test-threads "${test_threads}" \
     -E "$filter_expression"

-if [[ -z ${multi_bit_argument} ]]; then
+if [[ -z ${multi_bit_argument} && -z ${long_tests_argument} ]]; then
     cargo "${RUST_TOOLCHAIN}" test \
         --profile "${cargo_profile}" \
         --package "${tfhe_package}" \
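To make the control flow concrete, here is a hedged sketch of what the script ends up doing when driven by the new Makefile targets; the toolchain, profile and backend values are illustrative placeholders, and the trailing dots stand for the remaining flags shown in the hunks above.

# Roughly what "make test_integer_long_run" now runs (values are illustrative):
LONG_TESTS=TRUE ./scripts/integer-tests.sh --rust-toolchain stable --cargo-profile release ...

# Inside the script this expands to approximately:
#   long_tests_argument=--long-tests
#   filter_expression=$(/usr/bin/python3 scripts/test_filtering.py --layer integer \
#       --backend "${backend}" --long-tests ...)
#   cargo "${RUST_TOOLCHAIN}" nextest run --tests --cargo-profile release ... -E "$filter_expression"
# The follow-up plain "cargo test" invocation at the end of the script is skipped,
# because long_tests_argument is non-empty.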
scripts/test_filtering.py
@@ -26,6 +26,12 @@ parser.add_argument(
     action="store_true",
     help="Run only a small subset of test suite",
 )
+parser.add_argument(
+    "--long-tests",
+    dest="long_tests",
+    action="store_true",
+    help="Run only the long tests suite",
+)
 parser.add_argument(
     "--nightly-tests",
     dest="nightly_tests",
@@ -80,6 +86,7 @@ EXCLUDED_INTEGER_TESTS = [
     "/.*test_wopbs_bivariate_crt_wopbs_param_message_[34]_carry_[34]_ks_pbs_gaussian_2m64$/",
     "/.*test_integer_smart_mul_param_message_4_carry_4_ks_pbs_gaussian_2m64$/",
     "/.*test_integer_default_add_sequence_multi_thread_param_message_4_carry_4_ks_pbs_gaussian_2m64$/",
+    "/.*::tests_long_run::.*/",
 ]

 # skip default_div, default_rem which are covered by default_div_rem
@@ -94,55 +101,61 @@ EXCLUDED_BIG_PARAMETERS = [
     "/.*_param_message_4_carry_4_ks_pbs_gaussian_2m64$/",
 ]


 def filter_integer_tests(input_args):
     (multi_bit_filter, group_filter) = (
         ("_multi_bit", "_group_[0-9]") if input_args.multi_bit else ("", "")
     )
     backend_filter = ""
-    if input_args.backend == "gpu":
-        backend_filter = "gpu::"
-        if multi_bit_filter:
-            # For now, GPU only has specific parameters set for multi-bit
-            multi_bit_filter = "_gpu_multi_bit"
+    if not input_args.long_tests:
+        if input_args.backend == "gpu":
+            backend_filter = "gpu::"
+            if multi_bit_filter:
+                # For now, GPU only has specific parameters set for multi-bit
+                multi_bit_filter = "_gpu_multi_bit"

-    filter_expression = [f"test(/^integer::{backend_filter}.*/)"]
+        filter_expression = [f"test(/^integer::{backend_filter}.*/)"]

-    if input_args.multi_bit:
-        filter_expression.append("test(~_multi_bit)")
-    else:
-        filter_expression.append("not test(~_multi_bit)")
+        if input_args.multi_bit:
+            filter_expression.append("test(~_multi_bit)")
+        else:
+            filter_expression.append("not test(~_multi_bit)")

-    if input_args.signed_only:
-        filter_expression.append("test(~_signed)")
-    if input_args.unsigned_only:
-        filter_expression.append("not test(~_signed)")
+        if input_args.signed_only:
+            filter_expression.append("test(~_signed)")
+        if input_args.unsigned_only:
+            filter_expression.append("not test(~_signed)")

-    if input_args.no_big_params:
-        for pattern in EXCLUDED_BIG_PARAMETERS:
+        if input_args.no_big_params:
+            for pattern in EXCLUDED_BIG_PARAMETERS:
                filter_expression.append(f"not test({pattern})")

-    if input_args.fast_tests and input_args.nightly_tests:
-        filter_expression.append(
-            f"test(/.*_default_.*?_param{multi_bit_filter}{group_filter}_message_[2-3]_carry_[2-3]_.*/)"
-        )
-    elif input_args.fast_tests:
-        # Test only fast default operations with only one set of parameters
-        filter_expression.append(
-            f"test(/.*_default_.*?_param{multi_bit_filter}{group_filter}_message_2_carry_2_.*/)"
-        )
-    elif input_args.nightly_tests:
-        # Test only fast default operations with only one set of parameters
-        # This subset would run slower than fast_tests hence the use of nightly_tests
-        filter_expression.append(
-            f"test(/.*_default_.*?_param{multi_bit_filter}{group_filter}_message_3_carry_3_.*/)"
-        )
-    excluded_tests = (
-        EXCLUDED_INTEGER_FAST_TESTS if input_args.fast_tests else EXCLUDED_INTEGER_TESTS
-    )
-    for pattern in excluded_tests:
-        filter_expression.append(f"not test({pattern})")
+        if input_args.fast_tests and input_args.nightly_tests:
+            filter_expression.append(
+                f"test(/.*_default_.*?_param{multi_bit_filter}{group_filter}_message_[2-3]_carry_[2-3]_.*/)"
+            )
+        elif input_args.fast_tests:
+            # Test only fast default operations with only one set of parameters
+            filter_expression.append(
+                f"test(/.*_default_.*?_param{multi_bit_filter}{group_filter}_message_2_carry_2_.*/)"
+            )
+        elif input_args.nightly_tests:
+            # Test only fast default operations with only one set of parameters
+            # This subset would run slower than fast_tests hence the use of nightly_tests
+            filter_expression.append(
+                f"test(/.*_default_.*?_param{multi_bit_filter}{group_filter}_message_3_carry_3_.*/)"
+            )
+    else:
+        if input_args.backend == "gpu":
+            filter_expression = [f"test(/^integer::gpu::server_key::radix::tests_long_run.*/)"]
+        elif input_args.backend == "cpu":
+            filter_expression = [f"test(/^integer::server_key::radix_parallel::tests_long_run.*/)"]
+
+    excluded_tests = (
+        EXCLUDED_INTEGER_FAST_TESTS if input_args.fast_tests else EXCLUDED_INTEGER_TESTS
+    )
+    for pattern in excluded_tests:
+        filter_expression.append(f"not test({pattern})")

     return " and ".join(filter_expression)
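For illustration, a hedged sketch of what the new long-tests branch would emit; the exact output also depends on the exclusion lists joined at the end by " and ".

# Invoked the same way integer-tests.sh does it:
python3 scripts/test_filtering.py --layer integer --backend cpu --long-tests
# -> an expression starting with: test(/^integer::server_key::radix_parallel::tests_long_run.*/) ...

python3 scripts/test_filtering.py --layer integer --backend gpu --long-tests
# -> an expression starting with: test(/^integer::gpu::server_key::radix::tests_long_run.*/) ...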
@@ -128,7 +128,6 @@ nightly-avx512 = ["tfhe-fft/nightly", "tfhe-ntt/nightly", "pulp/nightly"]

 # Private features
 __profiling = []
-__long_run_tests = []

 software-prng = ["tfhe-csprng/software-prng"]
@@ -49,7 +49,7 @@ mod shift;
 mod sub;
 mod vector_find;

-#[cfg(all(test, feature = "__long_run_tests"))]
+#[cfg(test)]
 mod tests_long_run;
 #[cfg(test)]
 mod tests_signed;
@@ -1,15 +1,22 @@
 use crate::core_crypto::gpu::vec::GpuIndex;
 use crate::core_crypto::gpu::CudaStreams;
+use crate::integer::gpu::ciphertext::boolean_value::CudaBooleanBlock;
-use crate::integer::gpu::ciphertext::CudaUnsignedRadixCiphertext;
+use crate::integer::gpu::ciphertext::{CudaSignedRadixCiphertext, CudaUnsignedRadixCiphertext};
 use crate::integer::gpu::server_key::radix::tests_unsigned::GpuContext;
 use crate::integer::gpu::CudaServerKey;
 use crate::integer::server_key::radix_parallel::tests_cases_unsigned::FunctionExecutor;
-use crate::integer::{BooleanBlock, RadixCiphertext, RadixClientKey, ServerKey, U256};
+use crate::integer::{
+    BooleanBlock, RadixCiphertext, RadixClientKey, ServerKey, SignedRadixCiphertext, U256,
+};
+use rand::Rng;
 use std::sync::Arc;
 use tfhe_cuda_backend::cuda_bind::cuda_get_number_of_gpus;

 pub(crate) mod test_erc20;
+pub(crate) mod test_random_op_sequence;
+pub(crate) mod test_signed_erc20;
+pub(crate) mod test_signed_random_op_sequence;

 pub(crate) struct GpuMultiDeviceFunctionExecutor<F> {
     pub(crate) context: Option<GpuContext>,
     pub(crate) func: F,
@@ -27,7 +34,8 @@ impl<F> GpuMultiDeviceFunctionExecutor<F> {
 impl<F> GpuMultiDeviceFunctionExecutor<F> {
     pub(crate) fn setup_from_keys(&mut self, cks: &RadixClientKey, _sks: &Arc<ServerKey>) {
         let num_gpus = unsafe { cuda_get_number_of_gpus() } as u32;
-        let streams = CudaStreams::new_single_gpu(GpuIndex(num_gpus - 1));
+        let gpu_index = GpuIndex(rand::thread_rng().gen_range(0..num_gpus));
+        let streams = CudaStreams::new_single_gpu(gpu_index);

         let sks = CudaServerKey::new(cks.as_ref(), &streams);
         streams.synchronize();
@@ -562,3 +570,645 @@ where
|
||||
d_res.to_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
/// For default/unchecked binary signed functions
|
||||
impl<'a, F>
|
||||
FunctionExecutor<(&'a SignedRadixCiphertext, &'a SignedRadixCiphertext), SignedRadixCiphertext>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaStreams,
|
||||
) -> CudaSignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
|
||||
) -> SignedRadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
let d_ctxt_2 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.1, &context.streams);
|
||||
|
||||
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, &d_ctxt_2, &context.streams);
|
||||
|
||||
gpu_result.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F>
|
||||
FunctionExecutor<(&'a SignedRadixCiphertext, &'a RadixCiphertext), SignedRadixCiphertext>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaUnsignedRadixCiphertext,
|
||||
&CudaStreams,
|
||||
) -> CudaSignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: (&'a SignedRadixCiphertext, &'a RadixCiphertext),
|
||||
) -> SignedRadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
let d_ctxt_2 =
|
||||
CudaUnsignedRadixCiphertext::from_radix_ciphertext(input.1, &context.streams);
|
||||
|
||||
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, &d_ctxt_2, &context.streams);
|
||||
|
||||
gpu_result.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
/// For unchecked/default assign binary functions
|
||||
impl<'a, F> FunctionExecutor<(&'a mut SignedRadixCiphertext, &'a SignedRadixCiphertext), ()>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(&CudaServerKey, &mut CudaSignedRadixCiphertext, &CudaSignedRadixCiphertext, &CudaStreams),
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: (&'a mut SignedRadixCiphertext, &'a SignedRadixCiphertext)) {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let mut d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
let d_ctxt_2 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.1, &context.streams);
|
||||
|
||||
(self.func)(&context.sks, &mut d_ctxt_1, &d_ctxt_2, &context.streams);
|
||||
|
||||
*input.0 = d_ctxt_1.to_signed_radix_ciphertext(&context.streams);
|
||||
}
|
||||
}
|
||||
|
||||
/// For unchecked/default binary functions with one scalar input
|
||||
impl<'a, F> FunctionExecutor<(&'a SignedRadixCiphertext, i64), SignedRadixCiphertext>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
i64,
|
||||
&CudaStreams,
|
||||
) -> CudaSignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: (&'a SignedRadixCiphertext, i64)) -> SignedRadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
|
||||
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.streams);
|
||||
|
||||
gpu_result.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
/// For unchecked/default binary functions with one scalar input
|
||||
impl<'a, F> FunctionExecutor<(&'a SignedRadixCiphertext, u64), SignedRadixCiphertext>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
u64,
|
||||
&CudaStreams,
|
||||
) -> CudaSignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: (&'a SignedRadixCiphertext, u64)) -> SignedRadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
|
||||
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.streams);
|
||||
|
||||
gpu_result.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
/// For unchecked/default binary functions with one scalar input
|
||||
impl<F> FunctionExecutor<(SignedRadixCiphertext, i64), SignedRadixCiphertext>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
i64,
|
||||
&CudaStreams,
|
||||
) -> CudaSignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: (SignedRadixCiphertext, i64)) -> SignedRadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(&input.0, &context.streams);
|
||||
|
||||
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.streams);
|
||||
|
||||
gpu_result.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
// Unary Function
|
||||
impl<'a, F> FunctionExecutor<&'a SignedRadixCiphertext, SignedRadixCiphertext>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(&CudaServerKey, &CudaSignedRadixCiphertext, &CudaStreams) -> CudaSignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: &'a SignedRadixCiphertext) -> SignedRadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input, &context.streams);
|
||||
|
||||
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, &context.streams);
|
||||
|
||||
gpu_result.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<&'a SignedRadixCiphertext, RadixCiphertext>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(&CudaServerKey, &CudaSignedRadixCiphertext, &CudaStreams) -> CudaUnsignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: &'a SignedRadixCiphertext) -> RadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input, &context.streams);
|
||||
|
||||
let gpu_result = (self.func)(&context.sks, &d_ctxt_1, &context.streams);
|
||||
|
||||
gpu_result.to_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
// Unary assign Function
|
||||
impl<'a, F> FunctionExecutor<&'a mut SignedRadixCiphertext, ()>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(&CudaServerKey, &mut CudaSignedRadixCiphertext, &CudaStreams),
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: &'a mut SignedRadixCiphertext) {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let mut d_ctxt_1 =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input, &context.streams);
|
||||
|
||||
(self.func)(&context.sks, &mut d_ctxt_1, &context.streams);
|
||||
|
||||
*input = d_ctxt_1.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<&'a Vec<SignedRadixCiphertext>, Option<SignedRadixCiphertext>>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(&CudaServerKey, Vec<CudaSignedRadixCiphertext>) -> Option<CudaSignedRadixCiphertext>,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: &'a Vec<SignedRadixCiphertext>) -> Option<SignedRadixCiphertext> {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: Vec<CudaSignedRadixCiphertext> = input
|
||||
.iter()
|
||||
.map(|ct| CudaSignedRadixCiphertext::from_signed_radix_ciphertext(ct, &context.streams))
|
||||
.collect();
|
||||
|
||||
let d_res = (self.func)(&context.sks, d_ctxt_1);
|
||||
|
||||
Some(d_res.unwrap().to_signed_radix_ciphertext(&context.streams))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F>
|
||||
FunctionExecutor<
|
||||
(&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
|
||||
(SignedRadixCiphertext, BooleanBlock),
|
||||
> for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaStreams,
|
||||
) -> (CudaSignedRadixCiphertext, CudaBooleanBlock),
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
|
||||
) -> (SignedRadixCiphertext, BooleanBlock) {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
let d_ctxt_2: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.1, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, &d_ctxt_2, &context.streams);
|
||||
|
||||
(
|
||||
d_res.0.to_signed_radix_ciphertext(&context.streams),
|
||||
d_res.1.to_boolean_block(&context.streams),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// For unchecked/default unsigned overflowing scalar operations
|
||||
impl<'a, F>
|
||||
FunctionExecutor<(&'a SignedRadixCiphertext, i64), (SignedRadixCiphertext, BooleanBlock)>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
i64,
|
||||
&CudaStreams,
|
||||
) -> (CudaSignedRadixCiphertext, CudaBooleanBlock),
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: (&'a SignedRadixCiphertext, i64),
|
||||
) -> (SignedRadixCiphertext, BooleanBlock) {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.streams);
|
||||
|
||||
(
|
||||
d_res.0.to_signed_radix_ciphertext(&context.streams),
|
||||
d_res.1.to_boolean_block(&context.streams),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<&'a SignedRadixCiphertext, (SignedRadixCiphertext, BooleanBlock)>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaStreams,
|
||||
) -> (CudaSignedRadixCiphertext, CudaBooleanBlock),
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: &'a SignedRadixCiphertext,
|
||||
) -> (SignedRadixCiphertext, BooleanBlock) {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, &context.streams);
|
||||
|
||||
(
|
||||
d_res.0.to_signed_radix_ciphertext(&context.streams),
|
||||
d_res.1.to_boolean_block(&context.streams),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F>
|
||||
FunctionExecutor<
|
||||
(&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
|
||||
(SignedRadixCiphertext, SignedRadixCiphertext),
|
||||
> for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaStreams,
|
||||
) -> (CudaSignedRadixCiphertext, CudaSignedRadixCiphertext),
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
|
||||
) -> (SignedRadixCiphertext, SignedRadixCiphertext) {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
let d_ctxt_2: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.1, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, &d_ctxt_2, &context.streams);
|
||||
|
||||
(
|
||||
d_res.0.to_signed_radix_ciphertext(&context.streams),
|
||||
d_res.1.to_signed_radix_ciphertext(&context.streams),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F>
|
||||
FunctionExecutor<
|
||||
(&'a SignedRadixCiphertext, i64),
|
||||
(SignedRadixCiphertext, SignedRadixCiphertext),
|
||||
> for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
i64,
|
||||
&CudaStreams,
|
||||
) -> (CudaSignedRadixCiphertext, CudaSignedRadixCiphertext),
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: (&'a SignedRadixCiphertext, i64),
|
||||
) -> (SignedRadixCiphertext, SignedRadixCiphertext) {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.streams);
|
||||
|
||||
(
|
||||
d_res.0.to_signed_radix_ciphertext(&context.streams),
|
||||
d_res.1.to_signed_radix_ciphertext(&context.streams),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<(&'a SignedRadixCiphertext, &'a SignedRadixCiphertext), BooleanBlock>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaStreams,
|
||||
) -> CudaBooleanBlock,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
|
||||
) -> BooleanBlock {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
let d_ctxt_2: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.1, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, &d_ctxt_2, &context.streams);
|
||||
|
||||
d_res.to_boolean_block(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<(&'a SignedRadixCiphertext, i64), BooleanBlock>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(&CudaServerKey, &CudaSignedRadixCiphertext, i64, &CudaStreams) -> CudaBooleanBlock,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: (&'a SignedRadixCiphertext, i64)) -> BooleanBlock {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.streams);
|
||||
|
||||
d_res.to_boolean_block(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<(&'a SignedRadixCiphertext, U256), BooleanBlock>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(&CudaServerKey, &CudaSignedRadixCiphertext, U256, &CudaStreams) -> CudaBooleanBlock,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: (&'a SignedRadixCiphertext, U256)) -> BooleanBlock {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.streams);
|
||||
|
||||
d_res.to_boolean_block(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<(&'a SignedRadixCiphertext, U256), SignedRadixCiphertext>
|
||||
for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
U256,
|
||||
&CudaStreams,
|
||||
) -> CudaSignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(&mut self, input: (&'a SignedRadixCiphertext, U256)) -> SignedRadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.0, &context.streams);
|
||||
|
||||
let d_res = (self.func)(&context.sks, &d_ctxt_1, input.1, &context.streams);
|
||||
|
||||
d_res.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F>
|
||||
FunctionExecutor<
|
||||
(
|
||||
&'a BooleanBlock,
|
||||
&'a SignedRadixCiphertext,
|
||||
&'a SignedRadixCiphertext,
|
||||
),
|
||||
SignedRadixCiphertext,
|
||||
> for GpuMultiDeviceFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaBooleanBlock,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaStreams,
|
||||
) -> CudaSignedRadixCiphertext,
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: (
|
||||
&'a BooleanBlock,
|
||||
&'a SignedRadixCiphertext,
|
||||
&'a SignedRadixCiphertext,
|
||||
),
|
||||
) -> SignedRadixCiphertext {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt_1: CudaBooleanBlock =
|
||||
CudaBooleanBlock::from_boolean_block(input.0, &context.streams);
|
||||
let d_ctxt_2: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.1, &context.streams);
|
||||
let d_ctxt_3: CudaSignedRadixCiphertext =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input.2, &context.streams);
|
||||
|
||||
let d_res = (self.func)(
|
||||
&context.sks,
|
||||
&d_ctxt_1,
|
||||
&d_ctxt_2,
|
||||
&d_ctxt_3,
|
||||
&context.streams,
|
||||
);
|
||||
|
||||
d_res.to_signed_radix_ciphertext(&context.streams)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,397 @@
|
||||
use crate::integer::gpu::server_key::radix::tests_long_run::GpuMultiDeviceFunctionExecutor;
|
||||
use crate::integer::gpu::server_key::radix::tests_unsigned::create_gpu_parameterized_test;
|
||||
use crate::integer::gpu::CudaServerKey;
|
||||
use crate::integer::server_key::radix_parallel::tests_long_run::test_random_op_sequence::{
|
||||
random_op_sequence_test, BinaryOpExecutor, ComparisonOpExecutor, DivRemOpExecutor,
|
||||
Log2OpExecutor, OverflowingOpExecutor, ScalarBinaryOpExecutor, ScalarComparisonOpExecutor,
|
||||
ScalarDivRemOpExecutor, ScalarOverflowingOpExecutor, SelectOpExecutor, UnaryOpExecutor,
|
||||
};
|
||||
use crate::shortint::parameters::*;
|
||||
use std::cmp::{max, min};
|
||||
|
||||
create_gpu_parameterized_test!(random_op_sequence {
|
||||
PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
|
||||
});
|
||||
fn random_op_sequence<P>(param: P)
|
||||
where
|
||||
P: Into<PBSParameters> + Clone,
|
||||
{
|
||||
// Binary Ops Executors
|
||||
let add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::add);
|
||||
let sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::sub);
|
||||
let bitwise_and_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitand);
|
||||
let bitwise_or_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitor);
|
||||
let bitwise_xor_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitxor);
|
||||
let mul_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::mul);
|
||||
let rotate_left_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_left);
|
||||
let left_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::left_shift);
|
||||
let rotate_right_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_right);
|
||||
let right_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::right_shift);
|
||||
let max_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::max);
|
||||
let min_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::min);
|
||||
|
||||
// Binary Ops Clear functions
|
||||
let clear_add = |x, y| x + y;
|
||||
let clear_sub = |x, y| x - y;
|
||||
let clear_bitwise_and = |x, y| x & y;
|
||||
let clear_bitwise_or = |x, y| x | y;
|
||||
let clear_bitwise_xor = |x, y| x ^ y;
|
||||
let clear_mul = |x, y| x * y;
|
||||
// Warning this rotate definition only works with 64-bit ciphertexts
|
||||
let clear_rotate_left = |x: u64, y: u64| x.rotate_left(y as u32);
|
||||
let clear_left_shift = |x, y| x << y;
|
||||
// Warning this rotate definition only works with 64-bit ciphertexts
|
||||
let clear_rotate_right = |x: u64, y: u64| x.rotate_right(y as u32);
|
||||
let clear_right_shift = |x, y| x >> y;
|
||||
let clear_max = |x: u64, y: u64| max(x, y);
|
||||
let clear_min = |x: u64, y: u64| min(x, y);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut binary_ops: Vec<(BinaryOpExecutor, &dyn Fn(u64, u64) -> u64, String)> = vec![
|
||||
(Box::new(add_executor), &clear_add, "add".to_string()),
|
||||
(Box::new(sub_executor), &clear_sub, "sub".to_string()),
|
||||
(
|
||||
Box::new(bitwise_and_executor),
|
||||
&clear_bitwise_and,
|
||||
"bitand".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(bitwise_or_executor),
|
||||
&clear_bitwise_or,
|
||||
"bitor".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(bitwise_xor_executor),
|
||||
&clear_bitwise_xor,
|
||||
"bitxor".to_string(),
|
||||
),
|
||||
(Box::new(mul_executor), &clear_mul, "mul".to_string()),
|
||||
(
|
||||
Box::new(rotate_left_executor),
|
||||
&clear_rotate_left,
|
||||
"rotate left".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(left_shift_executor),
|
||||
&clear_left_shift,
|
||||
"left shift".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(rotate_right_executor),
|
||||
&clear_rotate_right,
|
||||
"rotate right".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(right_shift_executor),
|
||||
&clear_right_shift,
|
||||
"right shift".to_string(),
|
||||
),
|
||||
(Box::new(max_executor), &clear_max, "max".to_string()),
|
||||
(Box::new(min_executor), &clear_min, "min".to_string()),
|
||||
];
|
||||
|
||||
// Unary Ops Executors
|
||||
let neg_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::neg);
|
||||
let bitnot_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitnot);
|
||||
//let reverse_bits_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::reverse_bits); Unary Ops Clear
|
||||
// functions
|
||||
let clear_neg = |x: u64| x.wrapping_neg();
|
||||
let clear_bitnot = |x: u64| !x;
|
||||
//let clear_reverse_bits = |x: u64| x.reverse_bits();
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut unary_ops: Vec<(UnaryOpExecutor, &dyn Fn(u64) -> u64, String)> = vec![
|
||||
(Box::new(neg_executor), &clear_neg, "neg".to_string()),
|
||||
(
|
||||
Box::new(bitnot_executor),
|
||||
&clear_bitnot,
|
||||
"bitnot".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(reverse_bits_executor),
|
||||
// &clear_reverse_bits,
|
||||
// "reverse bits".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
// Scalar binary Ops Executors
|
||||
let scalar_add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_add);
|
||||
let scalar_sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_sub);
|
||||
let scalar_bitwise_and_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitand);
|
||||
let scalar_bitwise_or_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitor);
|
||||
let scalar_bitwise_xor_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitxor);
|
||||
let scalar_mul_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_mul);
|
||||
let scalar_rotate_left_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_left);
|
||||
let scalar_left_shift_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_left_shift);
|
||||
let scalar_rotate_right_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_right);
|
||||
let scalar_right_shift_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_right_shift);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut scalar_binary_ops: Vec<(ScalarBinaryOpExecutor, &dyn Fn(u64, u64) -> u64, String)> = vec![
|
||||
(
|
||||
Box::new(scalar_add_executor),
|
||||
&clear_add,
|
||||
"scalar add".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_sub_executor),
|
||||
&clear_sub,
|
||||
"scalar sub".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_bitwise_and_executor),
|
||||
&clear_bitwise_and,
|
||||
"scalar bitand".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_bitwise_or_executor),
|
||||
&clear_bitwise_or,
|
||||
"scalar bitor".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_bitwise_xor_executor),
|
||||
&clear_bitwise_xor,
|
||||
"scalar bitxor".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_mul_executor),
|
||||
&clear_mul,
|
||||
"scalar mul".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_rotate_left_executor),
|
||||
&clear_rotate_left,
|
||||
"scalar rotate left".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_left_shift_executor),
|
||||
&clear_left_shift,
|
||||
"scalar left shift".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_rotate_right_executor),
|
||||
&clear_rotate_right,
|
||||
"scalar rotate right".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_right_shift_executor),
|
||||
&clear_right_shift,
|
||||
"scalar right shift".to_string(),
|
||||
),
|
||||
];
|
||||
|
||||
// Overflowing Ops Executors
|
||||
let overflowing_add_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::unsigned_overflowing_add);
|
||||
let overflowing_sub_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::unsigned_overflowing_sub);
|
||||
//let overflowing_mul_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::unsigned_overflowing_mul);
|
||||
|
||||
// Overflowing Ops Clear functions
|
||||
let clear_overflowing_add = |x: u64, y: u64| -> (u64, bool) { x.overflowing_add(y) };
|
||||
let clear_overflowing_sub = |x: u64, y: u64| -> (u64, bool) { x.overflowing_sub(y) };
|
||||
//let clear_overflowing_mul = |x: u64, y: u64| -> (u64, bool) { x.overflowing_mul(y) };
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut overflowing_ops: Vec<(
|
||||
OverflowingOpExecutor,
|
||||
&dyn Fn(u64, u64) -> (u64, bool),
|
||||
String,
|
||||
)> = vec![
|
||||
(
|
||||
Box::new(overflowing_add_executor),
|
||||
&clear_overflowing_add,
|
||||
"overflowing add".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(overflowing_sub_executor),
|
||||
&clear_overflowing_sub,
|
||||
"overflowing sub".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(overflowing_mul_executor),
|
||||
// &clear_overflowing_mul,
|
||||
// "overflowing mul".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
// Scalar Overflowing Ops Executors
|
||||
let overflowing_scalar_add_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::unsigned_overflowing_scalar_add);
|
||||
// let overflowing_scalar_sub_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::unsigned_overflowing_scalar_sub);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut scalar_overflowing_ops: Vec<(
|
||||
ScalarOverflowingOpExecutor,
|
||||
&dyn Fn(u64, u64) -> (u64, bool),
|
||||
String,
|
||||
)> = vec![
|
||||
(
|
||||
Box::new(overflowing_scalar_add_executor),
|
||||
&clear_overflowing_add,
|
||||
"overflowing scalar add".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(overflowing_scalar_sub_executor),
|
||||
// &clear_overflowing_sub,
|
||||
// "overflowing scalar sub".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
// Comparison Ops Executors
|
||||
let gt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::gt);
|
||||
let ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ge);
|
||||
let lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::lt);
|
||||
let le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::le);
|
||||
let eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::eq);
|
||||
let ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ne);
|
||||
|
||||
// Comparison Ops Clear functions
|
||||
let clear_gt = |x: u64, y: u64| -> bool { x > y };
|
||||
let clear_ge = |x: u64, y: u64| -> bool { x >= y };
|
||||
let clear_lt = |x: u64, y: u64| -> bool { x < y };
|
||||
let clear_le = |x: u64, y: u64| -> bool { x <= y };
|
||||
let clear_eq = |x: u64, y: u64| -> bool { x == y };
|
||||
let clear_ne = |x: u64, y: u64| -> bool { x != y };
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut comparison_ops: Vec<(ComparisonOpExecutor, &dyn Fn(u64, u64) -> bool, String)> = vec![
|
||||
(Box::new(gt_executor), &clear_gt, "gt".to_string()),
|
||||
(Box::new(ge_executor), &clear_ge, "ge".to_string()),
|
||||
(Box::new(lt_executor), &clear_lt, "lt".to_string()),
|
||||
(Box::new(le_executor), &clear_le, "le".to_string()),
|
||||
(Box::new(eq_executor), &clear_eq, "eq".to_string()),
|
||||
(Box::new(ne_executor), &clear_ne, "ne".to_string()),
|
||||
];
|
||||
|
||||
// Scalar Comparison Ops Executors
|
||||
let scalar_gt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_gt);
|
||||
let scalar_ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ge);
|
||||
let scalar_lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_lt);
|
||||
let scalar_le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_le);
|
||||
let scalar_eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_eq);
|
||||
let scalar_ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ne);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut scalar_comparison_ops: Vec<(
|
||||
ScalarComparisonOpExecutor,
|
||||
&dyn Fn(u64, u64) -> bool,
|
||||
String,
|
||||
)> = vec![
|
||||
(
|
||||
Box::new(scalar_gt_executor),
|
||||
&clear_gt,
|
||||
"scalar gt".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_ge_executor),
|
||||
&clear_ge,
|
||||
"scalar ge".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_lt_executor),
|
||||
&clear_lt,
|
||||
"scalar lt".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_le_executor),
|
||||
&clear_le,
|
||||
"scalar le".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_eq_executor),
|
||||
&clear_eq,
|
||||
"scalar eq".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_ne_executor),
|
||||
&clear_ne,
|
||||
"scalar ne".to_string(),
|
||||
),
|
||||
];
|
||||
|
||||
// Select Executor
|
||||
let select_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::if_then_else);
|
||||
|
||||
// Select
|
||||
let clear_select = |b: bool, x: u64, y: u64| if b { x } else { y };
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut select_op: Vec<(SelectOpExecutor, &dyn Fn(bool, u64, u64) -> u64, String)> = vec![(
|
||||
Box::new(select_executor),
|
||||
&clear_select,
|
||||
"select".to_string(),
|
||||
)];
|
||||
|
||||
// Div executor
|
||||
let div_rem_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::div_rem);
|
||||
// Div Rem Clear functions
|
||||
let clear_div_rem = |x: u64, y: u64| -> (u64, u64) { (x / y, x % y) };
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut div_rem_op: Vec<(DivRemOpExecutor, &dyn Fn(u64, u64) -> (u64, u64), String)> = vec![(
|
||||
Box::new(div_rem_executor),
|
||||
&clear_div_rem,
|
||||
"div rem".to_string(),
|
||||
)];
|
||||
|
||||
// Scalar Div executor
|
||||
let scalar_div_rem_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_div_rem);
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut scalar_div_rem_op: Vec<(
|
||||
ScalarDivRemOpExecutor,
|
||||
&dyn Fn(u64, u64) -> (u64, u64),
|
||||
String,
|
||||
)> = vec![(
|
||||
Box::new(scalar_div_rem_executor),
|
||||
&clear_div_rem,
|
||||
"scalar div rem".to_string(),
|
||||
)];
|
||||
|
||||
// Log2/Hamming weight ops
|
||||
let ilog2_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ilog2);
|
||||
//let count_zeros_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::count_zeros);
|
||||
//let count_ones_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::count_ones);
|
||||
let clear_ilog2 = |x: u64| x.ilog2() as u64;
|
||||
//let clear_count_zeros = |x: u64| x.count_zeros() as u64;
|
||||
//let clear_count_ones = |x: u64| x.count_ones() as u64;
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut log2_ops: Vec<(Log2OpExecutor, &dyn Fn(u64) -> u64, String)> = vec![
|
||||
(Box::new(ilog2_executor), &clear_ilog2, "ilog2".to_string()),
|
||||
//(
|
||||
// Box::new(count_zeros_executor),
|
||||
// &clear_count_zeros,
|
||||
// "count zeros".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(count_ones_executor),
|
||||
// &clear_count_ones,
|
||||
// "count ones".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
random_op_sequence_test(
|
||||
param,
|
||||
&mut binary_ops,
|
||||
&mut unary_ops,
|
||||
&mut scalar_binary_ops,
|
||||
&mut overflowing_ops,
|
||||
&mut scalar_overflowing_ops,
|
||||
&mut comparison_ops,
|
||||
&mut scalar_comparison_ops,
|
||||
&mut select_op,
|
||||
&mut div_rem_op,
|
||||
&mut scalar_div_rem_op,
|
||||
&mut log2_ops,
|
||||
);
|
||||
}
|
||||
New file: test_signed_erc20 (GPU tests_long_run module)
@@ -0,0 +1,42 @@
+use crate::integer::gpu::server_key::radix::tests_long_run::GpuMultiDeviceFunctionExecutor;
+use crate::integer::gpu::server_key::radix::tests_unsigned::create_gpu_parameterized_test;
+use crate::integer::gpu::CudaServerKey;
+use crate::integer::server_key::radix_parallel::tests_long_run::test_signed_erc20::{
+    signed_no_cmux_erc20_test, signed_whitepaper_erc20_test,
+};
+use crate::shortint::parameters::*;
+
+create_gpu_parameterized_test!(signed_whitepaper_erc20 {
+    PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
+});
+create_gpu_parameterized_test!(signed_no_cmux_erc20 {
+    PARAM_GPU_MULTI_BIT_GROUP_3_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
+});
+
+fn signed_whitepaper_erc20<P>(param: P)
+where
+    P: Into<PBSParameters>,
+{
+    let ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ge);
+    let add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::add);
+    let if_then_else_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::if_then_else);
+    let sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::sub);
+    signed_whitepaper_erc20_test(
+        param,
+        ge_executor,
+        add_executor,
+        if_then_else_executor,
+        sub_executor,
+    );
+}
+
+fn signed_no_cmux_erc20<P>(param: P)
+where
+    P: Into<PBSParameters>,
+{
+    let ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ge);
+    let mul_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::mul);
+    let add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::add);
+    let sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::sub);
+    signed_no_cmux_erc20_test(param, ge_executor, mul_executor, add_executor, sub_executor);
+}
@@ -0,0 +1,433 @@
use crate::integer::gpu::server_key::radix::tests_long_run::GpuMultiDeviceFunctionExecutor;
use crate::integer::gpu::server_key::radix::tests_unsigned::create_gpu_parameterized_test;
use crate::integer::gpu::CudaServerKey;
use crate::integer::server_key::radix_parallel::tests_long_run::test_signed_random_op_sequence::{
    signed_random_op_sequence_test, SignedBinaryOpExecutor, SignedComparisonOpExecutor,
    SignedDivRemOpExecutor, SignedLog2OpExecutor, SignedOverflowingOpExecutor,
    SignedScalarBinaryOpExecutor, SignedScalarComparisonOpExecutor, SignedScalarDivRemOpExecutor,
    SignedScalarOverflowingOpExecutor, SignedScalarShiftRotateExecutor, SignedSelectOpExecutor,
    SignedShiftRotateExecutor, SignedUnaryOpExecutor,
};
use crate::shortint::parameters::*;
use std::cmp::{max, min};

create_gpu_parameterized_test!(signed_random_op_sequence {
    PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
});
fn signed_random_op_sequence<P>(param: P)
where
    P: Into<PBSParameters> + Clone,
{
    // Binary Ops Executors
    let add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::add);
    let sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::sub);
    let bitwise_and_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitand);
    let bitwise_or_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitor);
    let bitwise_xor_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitxor);
    let mul_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::mul);
    let max_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::max);
    let min_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::min);

    // Binary Ops Clear functions
    let clear_add = |x, y| x + y;
    let clear_sub = |x, y| x - y;
    let clear_bitwise_and = |x, y| x & y;
    let clear_bitwise_or = |x, y| x | y;
    let clear_bitwise_xor = |x, y| x ^ y;
    let clear_mul = |x, y| x * y;
    let clear_max = |x: i64, y: i64| max(x, y);
    let clear_min = |x: i64, y: i64| min(x, y);

    #[allow(clippy::type_complexity)]
    let mut binary_ops: Vec<(SignedBinaryOpExecutor, &dyn Fn(i64, i64) -> i64, String)> = vec![
        (Box::new(add_executor), &clear_add, "add".to_string()),
        (Box::new(sub_executor), &clear_sub, "sub".to_string()),
        (
            Box::new(bitwise_and_executor),
            &clear_bitwise_and,
            "bitand".to_string(),
        ),
        (
            Box::new(bitwise_or_executor),
            &clear_bitwise_or,
            "bitor".to_string(),
        ),
        (
            Box::new(bitwise_xor_executor),
            &clear_bitwise_xor,
            "bitxor".to_string(),
        ),
        (Box::new(mul_executor), &clear_mul, "mul".to_string()),
        (Box::new(max_executor), &clear_max, "max".to_string()),
        (Box::new(min_executor), &clear_min, "min".to_string()),
    ];

    let rotate_left_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_left);
    let left_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::left_shift);
    let rotate_right_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_right);
    let right_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::right_shift);
    // Warning this rotate definition only works with 64-bit ciphertexts
    let clear_rotate_left = |x: i64, y: u64| x.rotate_left(y as u32);
    let clear_left_shift = |x: i64, y: u64| x << y;
    // Warning this rotate definition only works with 64-bit ciphertexts
    let clear_rotate_right = |x: i64, y: u64| x.rotate_right(y as u32);
    let clear_right_shift = |x: i64, y: u64| x >> y;
    #[allow(clippy::type_complexity)]
    let mut shift_rotate_ops: Vec<(
        SignedShiftRotateExecutor,
        &dyn Fn(i64, u64) -> i64,
        String,
    )> = vec![
        (
            Box::new(rotate_left_executor),
            &clear_rotate_left,
            "rotate left".to_string(),
        ),
        (
            Box::new(left_shift_executor),
            &clear_left_shift,
            "left shift".to_string(),
        ),
        (
            Box::new(rotate_right_executor),
            &clear_rotate_right,
            "rotate right".to_string(),
        ),
        (
            Box::new(right_shift_executor),
            &clear_right_shift,
            "right shift".to_string(),
        ),
    ];

    // Unary Ops Executors
    let neg_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::neg);
    let bitnot_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitnot);
    //let reverse_bits_executor =
    //    GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::reverse_bits);
    // Unary Ops Clear functions
    let clear_neg = |x: i64| x.wrapping_neg();
    let clear_bitnot = |x: i64| !x;
    //let clear_reverse_bits = |x: i64| x.reverse_bits();
    #[allow(clippy::type_complexity)]
    let mut unary_ops: Vec<(SignedUnaryOpExecutor, &dyn Fn(i64) -> i64, String)> = vec![
        (Box::new(neg_executor), &clear_neg, "neg".to_string()),
        (
            Box::new(bitnot_executor),
            &clear_bitnot,
            "bitnot".to_string(),
        ),
        //(
        //    Box::new(reverse_bits_executor),
        //    &clear_reverse_bits,
        //    "reverse bits".to_string(),
        //),
    ];

    // Scalar binary Ops Executors
    let scalar_add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_add);
    let scalar_sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_sub);
    let scalar_bitwise_and_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitand);
    let scalar_bitwise_or_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitor);
    let scalar_bitwise_xor_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitxor);
    let scalar_mul_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_mul);

    #[allow(clippy::type_complexity)]
    let mut scalar_binary_ops: Vec<(
        SignedScalarBinaryOpExecutor,
        &dyn Fn(i64, i64) -> i64,
        String,
    )> = vec![
        (
            Box::new(scalar_add_executor),
            &clear_add,
            "scalar add".to_string(),
        ),
        (
            Box::new(scalar_sub_executor),
            &clear_sub,
            "scalar sub".to_string(),
        ),
        (
            Box::new(scalar_bitwise_and_executor),
            &clear_bitwise_and,
            "scalar bitand".to_string(),
        ),
        (
            Box::new(scalar_bitwise_or_executor),
            &clear_bitwise_or,
            "scalar bitor".to_string(),
        ),
        (
            Box::new(scalar_bitwise_xor_executor),
            &clear_bitwise_xor,
            "scalar bitxor".to_string(),
        ),
        (
            Box::new(scalar_mul_executor),
            &clear_mul,
            "scalar mul".to_string(),
        ),
    ];

    let scalar_rotate_left_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_left);
    let scalar_left_shift_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_left_shift);
    let scalar_rotate_right_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_right);
    let scalar_right_shift_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_right_shift);
    #[allow(clippy::type_complexity)]
    let mut scalar_shift_rotate_ops: Vec<(
        SignedScalarShiftRotateExecutor,
        &dyn Fn(i64, u64) -> i64,
        String,
    )> = vec![
        (
            Box::new(scalar_rotate_left_executor),
            &clear_rotate_left,
            "scalar rotate left".to_string(),
        ),
        (
            Box::new(scalar_left_shift_executor),
            &clear_left_shift,
            "scalar left shift".to_string(),
        ),
        (
            Box::new(scalar_rotate_right_executor),
            &clear_rotate_right,
            "scalar rotate right".to_string(),
        ),
        (
            Box::new(scalar_right_shift_executor),
            &clear_right_shift,
            "scalar right shift".to_string(),
        ),
    ];

    // Overflowing Ops Executors
    let overflowing_add_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::signed_overflowing_add);
    let overflowing_sub_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::signed_overflowing_sub);
    //let overflowing_mul_executor =
    //    GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::signed_overflowing_mul);

    // Overflowing Ops Clear functions
    let clear_overflowing_add = |x: i64, y: i64| -> (i64, bool) { x.overflowing_add(y) };
    let clear_overflowing_sub = |x: i64, y: i64| -> (i64, bool) { x.overflowing_sub(y) };
    //let clear_overflowing_mul = |x: i64, y: i64| -> (i64, bool) { x.overflowing_mul(y) };

    #[allow(clippy::type_complexity)]
    let mut overflowing_ops: Vec<(
        SignedOverflowingOpExecutor,
        &dyn Fn(i64, i64) -> (i64, bool),
        String,
    )> = vec![
        (
            Box::new(overflowing_add_executor),
            &clear_overflowing_add,
            "overflowing add".to_string(),
        ),
        (
            Box::new(overflowing_sub_executor),
            &clear_overflowing_sub,
            "overflowing sub".to_string(),
        ),
        //(
        //    Box::new(overflowing_mul_executor),
        //    &clear_overflowing_mul,
        //    "overflowing mul".to_string(),
        //),
    ];

    // Scalar Overflowing Ops Executors
    let overflowing_scalar_add_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::signed_overflowing_scalar_add);
    // let overflowing_scalar_sub_executor =
    //     GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::signed_overflowing_scalar_sub);

    #[allow(clippy::type_complexity)]
    let mut scalar_overflowing_ops: Vec<(
        SignedScalarOverflowingOpExecutor,
        &dyn Fn(i64, i64) -> (i64, bool),
        String,
    )> = vec![
        (
            Box::new(overflowing_scalar_add_executor),
            &clear_overflowing_add,
            "overflowing scalar add".to_string(),
        ),
        //(
        //    Box::new(overflowing_scalar_sub_executor),
        //    &clear_overflowing_sub,
        //    "overflowing scalar sub".to_string(),
        //),
    ];

    // Comparison Ops Executors
    let gt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::gt);
    let ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ge);
    let lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::lt);
    let le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::le);
    let eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::eq);
    let ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ne);

    // Comparison Ops Clear functions
    let clear_gt = |x: i64, y: i64| -> bool { x > y };
    let clear_ge = |x: i64, y: i64| -> bool { x >= y };
    let clear_lt = |x: i64, y: i64| -> bool { x < y };
    let clear_le = |x: i64, y: i64| -> bool { x <= y };
    let clear_eq = |x: i64, y: i64| -> bool { x == y };
    let clear_ne = |x: i64, y: i64| -> bool { x != y };

    #[allow(clippy::type_complexity)]
    let mut comparison_ops: Vec<(
        SignedComparisonOpExecutor,
        &dyn Fn(i64, i64) -> bool,
        String,
    )> = vec![
        (Box::new(gt_executor), &clear_gt, "gt".to_string()),
        (Box::new(ge_executor), &clear_ge, "ge".to_string()),
        (Box::new(lt_executor), &clear_lt, "lt".to_string()),
        (Box::new(le_executor), &clear_le, "le".to_string()),
        (Box::new(eq_executor), &clear_eq, "eq".to_string()),
        (Box::new(ne_executor), &clear_ne, "ne".to_string()),
    ];

    // Scalar Comparison Ops Executors
    let scalar_gt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_gt);
    let scalar_ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ge);
    let scalar_lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_lt);
    let scalar_le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_le);
    let scalar_eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_eq);
    let scalar_ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ne);

    #[allow(clippy::type_complexity)]
    let mut scalar_comparison_ops: Vec<(
        SignedScalarComparisonOpExecutor,
        &dyn Fn(i64, i64) -> bool,
        String,
    )> = vec![
        (
            Box::new(scalar_gt_executor),
            &clear_gt,
            "scalar gt".to_string(),
        ),
        (
            Box::new(scalar_ge_executor),
            &clear_ge,
            "scalar ge".to_string(),
        ),
        (
            Box::new(scalar_lt_executor),
            &clear_lt,
            "scalar lt".to_string(),
        ),
        (
            Box::new(scalar_le_executor),
            &clear_le,
            "scalar le".to_string(),
        ),
        (
            Box::new(scalar_eq_executor),
            &clear_eq,
            "scalar eq".to_string(),
        ),
        (
            Box::new(scalar_ne_executor),
            &clear_ne,
            "scalar ne".to_string(),
        ),
    ];

    // Select Executor
    let select_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::if_then_else);

    // Select
    let clear_select = |b: bool, x: i64, y: i64| if b { x } else { y };

    #[allow(clippy::type_complexity)]
    let mut select_op: Vec<(
        SignedSelectOpExecutor,
        &dyn Fn(bool, i64, i64) -> i64,
        String,
    )> = vec![(
        Box::new(select_executor),
        &clear_select,
        "select".to_string(),
    )];

    // Div executor
    let div_rem_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::div_rem);
    // Div Rem Clear functions
    let clear_div_rem = |x: i64, y: i64| -> (i64, i64) { (x / y, x % y) };
    #[allow(clippy::type_complexity)]
    let mut div_rem_op: Vec<(
        SignedDivRemOpExecutor,
        &dyn Fn(i64, i64) -> (i64, i64),
        String,
    )> = vec![(
        Box::new(div_rem_executor),
        &clear_div_rem,
        "div rem".to_string(),
    )];

    // Scalar Div executor
    let scalar_div_rem_executor =
        GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::signed_scalar_div_rem);
    #[allow(clippy::type_complexity)]
    let mut scalar_div_rem_op: Vec<(
        SignedScalarDivRemOpExecutor,
        &dyn Fn(i64, i64) -> (i64, i64),
        String,
    )> = vec![(
        Box::new(scalar_div_rem_executor),
        &clear_div_rem,
        "scalar div rem".to_string(),
    )];

    // Log2/Hamming weight ops
    let ilog2_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ilog2);
    //let count_zeros_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::count_zeros);
    //let count_ones_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::count_ones);
    let clear_ilog2 = |x: i64| x.ilog2() as u64;
    //let clear_count_zeros = |x: i64| x.count_zeros() as i64;
    //let clear_count_ones = |x: i64| x.count_ones() as i64;

    #[allow(clippy::type_complexity)]
    let mut log2_ops: Vec<(SignedLog2OpExecutor, &dyn Fn(i64) -> u64, String)> = vec![
        (Box::new(ilog2_executor), &clear_ilog2, "ilog2".to_string()),
        //(
        //    Box::new(count_zeros_executor),
        //    &clear_count_zeros,
        //    "count zeros".to_string(),
        //),
        //(
        //    Box::new(count_ones_executor),
        //    &clear_count_ones,
        //    "count ones".to_string(),
        //),
    ];

    signed_random_op_sequence_test(
        param,
        &mut binary_ops,
        &mut unary_ops,
        &mut scalar_binary_ops,
        &mut overflowing_ops,
        &mut scalar_overflowing_ops,
        &mut comparison_ops,
        &mut scalar_comparison_ops,
        &mut select_op,
        &mut div_rem_op,
        &mut scalar_div_rem_op,
        &mut log2_ops,
        &mut shift_rotate_ops,
        &mut scalar_shift_rotate_ops,
    );
}
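A note on the "only works with 64-bit ciphertexts" warnings next to the clear rotate closures above: a rotation wraps at the integer's bit width, so i64::rotate_left/rotate_right are valid clear references only while the radix ciphertexts are 64 bits wide. A small standalone illustration (not part of the test code):

fn main() {
    let v: u64 = 0x8000_0000; // bit 31 set
    // Rotating within 64 bits just moves bit 31 up to bit 32...
    assert_eq!(v.rotate_left(1), 0x1_0000_0000);
    // ...while the same value rotated within 32 bits wraps around to bit 0,
    // so the clear reference must match the ciphertext width.
    assert_eq!((v as u32).rotate_left(1), 1);
}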
@@ -28,7 +28,7 @@ mod reverse_bits;
mod slice;
#[cfg(test)]
pub(crate) mod tests_cases_unsigned;
#[cfg(all(test, feature = "__long_run_tests"))]
#[cfg(test)]
pub(crate) mod tests_long_run;
#[cfg(test)]
pub(crate) mod tests_signed;

@@ -1,3 +1,6 @@
pub(crate) mod test_erc20;
pub(crate) mod test_random_op_sequence;
pub(crate) mod test_signed_erc20;
pub(crate) mod test_signed_random_op_sequence;
pub(crate) const NB_CTXT_LONG_RUN: usize = 32;
pub(crate) const NB_TESTS_LONG_RUN: usize = 1000;
pub(crate) const NB_TESTS_LONG_RUN: usize = 20000;
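As a reminder of how these sizes relate to the clear reference functions: with 2 message bits per block (an assumption drawn from the MESSAGE_2_CARRY_2 parameter naming, not from this diff), 32 radix blocks hold 64-bit values, which is why the clear closures in the long run tests operate on i64/u64. A minimal sanity sketch under that assumption:

// Minimal sanity sketch; the 2-bits-per-block figure is assumed from the
// parameter names, not stated in this diff.
const MESSAGE_BITS_PER_BLOCK: usize = 2;
const NB_CTXT_LONG_RUN: usize = 32;

fn main() {
    assert_eq!(MESSAGE_BITS_PER_BLOCK * NB_CTXT_LONG_RUN, 64);
}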
File diff suppressed because it is too large
@@ -0,0 +1,251 @@
use crate::integer::keycache::KEY_CACHE;
use crate::integer::server_key::radix_parallel::tests_cases_unsigned::FunctionExecutor;
use crate::integer::server_key::radix_parallel::tests_long_run::{
    NB_CTXT_LONG_RUN, NB_TESTS_LONG_RUN,
};
use crate::integer::server_key::radix_parallel::tests_unsigned::CpuFunctionExecutor;
use crate::integer::tests::create_parameterized_test;
use crate::integer::{
    BooleanBlock, IntegerCiphertext, IntegerKeyKind, RadixClientKey, ServerKey,
    SignedRadixCiphertext,
};
use crate::shortint::parameters::*;
use rand::Rng;
use std::sync::Arc;

create_parameterized_test!(whitepaper_erc20 {
    PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
});
create_parameterized_test!(no_cmux_erc20 {
    PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64
});

fn whitepaper_erc20<P>(param: P)
where
    P: Into<PBSParameters>,
{
    let ge_executor = CpuFunctionExecutor::new(&ServerKey::ge_parallelized);
    let add_executor = CpuFunctionExecutor::new(&ServerKey::add_parallelized);
    let if_then_else_executor = CpuFunctionExecutor::new(&ServerKey::cmux_parallelized);
    let sub_executor = CpuFunctionExecutor::new(&ServerKey::sub_parallelized);
    signed_whitepaper_erc20_test(
        param,
        ge_executor,
        add_executor,
        if_then_else_executor,
        sub_executor,
    );
}

fn no_cmux_erc20<P>(param: P)
where
    P: Into<PBSParameters>,
{
    let ge_executor = CpuFunctionExecutor::new(&ServerKey::ge_parallelized);
    let mul_executor = CpuFunctionExecutor::new(&ServerKey::mul_parallelized);
    let add_executor = CpuFunctionExecutor::new(&ServerKey::add_parallelized);
    let sub_executor = CpuFunctionExecutor::new(&ServerKey::sub_parallelized);
    signed_no_cmux_erc20_test(param, ge_executor, mul_executor, add_executor, sub_executor);
}

pub(crate) fn signed_whitepaper_erc20_test<P, T1, T2, T3, T4>(
    param: P,
    mut ge_executor: T1,
    mut add_executor: T2,
    mut if_then_else_executor: T3,
    mut sub_executor: T4,
) where
    P: Into<PBSParameters>,
    T1: for<'a> FunctionExecutor<
        (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
        BooleanBlock,
    >,
    T2: for<'a> FunctionExecutor<
        (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
        SignedRadixCiphertext,
    >,
    T3: for<'a> FunctionExecutor<
        (
            &'a BooleanBlock,
            &'a SignedRadixCiphertext,
            &'a SignedRadixCiphertext,
        ),
        SignedRadixCiphertext,
    >,
    T4: for<'a> FunctionExecutor<
        (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
        SignedRadixCiphertext,
    >,
{
    let param = param.into();
    let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);

    sks.set_deterministic_pbs_execution(true);
    let sks = Arc::new(sks);
    let cks = RadixClientKey::from((cks, NB_CTXT_LONG_RUN));

    let mut rng = rand::thread_rng();

    ge_executor.setup(&cks, sks.clone());
    add_executor.setup(&cks, sks.clone());
    if_then_else_executor.setup(&cks, sks.clone());
    sub_executor.setup(&cks, sks);

    for _ in 0..NB_TESTS_LONG_RUN {
        let clear_from_amount = rng.gen::<i64>();
        let clear_to_amount = rng.gen::<i64>();
        let clear_amount = rng.gen::<i64>();

        let from_amount = cks.encrypt_signed(clear_from_amount);
        let to_amount = cks.encrypt_signed(clear_to_amount);
        let amount = cks.encrypt_signed(clear_amount);

        let has_enough_funds = ge_executor.execute((&from_amount, &amount));

        let mut new_to_amount = add_executor.execute((&to_amount, &amount));
        new_to_amount =
            if_then_else_executor.execute((&has_enough_funds, &new_to_amount, &to_amount));

        let mut new_from_amount = sub_executor.execute((&from_amount, &amount));
        new_from_amount =
            if_then_else_executor.execute((&has_enough_funds, &new_from_amount, &from_amount));

        let decrypt_signed_new_from_amount: i64 = cks.decrypt_signed(&new_from_amount);
        let decrypt_signed_new_to_amount: i64 = cks.decrypt_signed(&new_to_amount);

        let expected_new_from_amount = if clear_from_amount >= clear_amount {
            clear_from_amount - clear_amount
        } else {
            clear_from_amount
        };
        let expected_new_to_amount = if clear_from_amount >= clear_amount {
            clear_to_amount + clear_amount
        } else {
            clear_to_amount
        };

        assert_eq!(
            decrypt_signed_new_from_amount, expected_new_from_amount,
            "Invalid erc20 result on from amount: original from amount: {clear_from_amount}, amount: {clear_amount}, to amount: {clear_to_amount}, expected new from amount: {expected_new_from_amount}."
        );
        assert_eq!(
            decrypt_signed_new_to_amount, expected_new_to_amount,
            "Invalid erc20 result on to amount."
        );

        // Determinism check
        let has_enough_funds_1 = ge_executor.execute((&from_amount, &amount));

        let mut new_to_amount_1 = add_executor.execute((&to_amount, &amount));
        new_to_amount_1 =
            if_then_else_executor.execute((&has_enough_funds_1, &new_to_amount_1, &to_amount));

        let mut new_from_amount_1 = sub_executor.execute((&from_amount, &amount));
        new_from_amount_1 =
            if_then_else_executor.execute((&has_enough_funds_1, &new_from_amount_1, &from_amount));

        assert_eq!(
            new_from_amount, new_from_amount_1,
            "Determinism check failed on erc20 from amount"
        );
        assert_eq!(
            new_to_amount, new_to_amount_1,
            "Determinism check failed on erc20 to amount"
        );
    }
}

pub(crate) fn signed_no_cmux_erc20_test<P, T1, T2, T3, T4>(
    param: P,
    mut ge_executor: T1,
    mut mul_executor: T2,
    mut add_executor: T3,
    mut sub_executor: T4,
) where
    P: Into<PBSParameters>,
    T1: for<'a> FunctionExecutor<
        (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
        BooleanBlock,
    >,
    T2: for<'a> FunctionExecutor<
        (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
        SignedRadixCiphertext,
    >,
    T3: for<'a> FunctionExecutor<
        (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
        SignedRadixCiphertext,
    >,
    T4: for<'a> FunctionExecutor<
        (&'a SignedRadixCiphertext, &'a SignedRadixCiphertext),
        SignedRadixCiphertext,
    >,
{
    let param = param.into();
    let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);

    sks.set_deterministic_pbs_execution(true);
    let sks = Arc::new(sks);
    let cks = RadixClientKey::from((cks, NB_CTXT_LONG_RUN));

    let mut rng = rand::thread_rng();

    ge_executor.setup(&cks, sks.clone());
    mul_executor.setup(&cks, sks.clone());
    add_executor.setup(&cks, sks.clone());
    sub_executor.setup(&cks, sks);

    for _ in 0..NB_TESTS_LONG_RUN {
        let clear_from_amount = rng.gen::<i64>();
        let clear_to_amount = rng.gen::<i64>();
        let clear_amount = rng.gen::<i64>();

        let from_amount = cks.encrypt_signed(clear_from_amount);
        let to_amount = cks.encrypt_signed(clear_to_amount);
        let amount = cks.encrypt_signed(clear_amount);

        let has_enough_funds = ge_executor.execute((&from_amount, &amount));
        let has_enough_funds_ct = SignedRadixCiphertext::from_blocks(vec![has_enough_funds.0]);
        let new_amount = mul_executor.execute((&amount, &has_enough_funds_ct));
        let new_to_amount = add_executor.execute((&to_amount, &new_amount));
        let new_from_amount = sub_executor.execute((&from_amount, &new_amount));

        let decrypt_signed_new_from_amount: i64 = cks.decrypt_signed(&new_from_amount);
        let decrypt_signed_new_to_amount: i64 = cks.decrypt_signed(&new_to_amount);

        let expected_new_from_amount = if clear_from_amount >= clear_amount {
            clear_from_amount - clear_amount
        } else {
            clear_from_amount
        };
        let expected_new_to_amount = if clear_from_amount >= clear_amount {
            clear_to_amount + clear_amount
        } else {
            clear_to_amount
        };

        assert_eq!(
            decrypt_signed_new_from_amount, expected_new_from_amount,
            "Invalid erc20 result on from amount: original from amount: {clear_from_amount}, amount: {clear_amount}, to amount: {clear_to_amount}, expected new from amount: {expected_new_from_amount}."
        );
        assert_eq!(
            decrypt_signed_new_to_amount, expected_new_to_amount,
            "Invalid erc20 result on to amount."
        );

        // Determinism check
        let has_enough_funds_1 = ge_executor.execute((&from_amount, &amount));
        let has_enough_funds_ct_1 = SignedRadixCiphertext::from_blocks(vec![has_enough_funds_1.0]);
        let new_amount_1 = mul_executor.execute((&amount, &has_enough_funds_ct_1));
        let new_to_amount_1 = add_executor.execute((&to_amount, &new_amount_1));
        let new_from_amount_1 = sub_executor.execute((&from_amount, &new_amount_1));

        assert_eq!(
            new_from_amount, new_from_amount_1,
            "Determinism check failed on no cmux erc20 from amount"
        );
        assert_eq!(
            new_to_amount, new_to_amount_1,
            "Determinism check failed on no cmux erc20 to amount"
        );
    }
}
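The no-cmux variant above replaces the if_then_else selection with a multiplication by the 0/1 comparison result; in the clear the two formulations agree, as this small sketch (illustration only, not part of the test code) shows:

fn main() {
    let (from, to, amount) = (100i64, 5i64, 30i64);

    // Whitepaper variant: select the updated balances with a conditional.
    let cond = from >= amount;
    let whitepaper = (
        if cond { from - amount } else { from },
        if cond { to + amount } else { to },
    );

    // No-cmux variant: scale the amount by the comparison bit instead.
    let effective = amount * (cond as i64);
    let no_cmux = (from - effective, to + effective);

    assert_eq!(whitepaper, no_cmux);
}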
File diff suppressed because it is too large
@@ -555,6 +555,8 @@ impl<T> NotTuple for &mut crate::integer::ciphertext::BaseSignedRadixCiphertext<

impl<T> NotTuple for &Vec<T> {}

impl NotTuple for &crate::integer::ciphertext::BooleanBlock {}

/// For unary operations
///
/// Note, we need the `NotTuple` constraint to avoid conflicts with binary or ternary operations
@@ -654,22 +656,6 @@
    unchecked_rotate_right_test(param, executor);
}

//=============================================================================
// Unchecked Scalar Tests
//=============================================================================

//=============================================================================
// Smart Tests
//=============================================================================

//=============================================================================
// Smart Scalar Tests
//=============================================================================

//=============================================================================
// Default Tests
//=============================================================================

#[test]
#[cfg(not(tarpaulin))]
fn test_non_regression_clone_from() {