Mirror of https://github.com/zama-ai/tfhe-rs.git (synced 2026-01-11 07:38:08 -05:00)

Compare commits: bb/fix/sum ... al/debug_l (18 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 592e2fcc89 | |
| | 611315255e | |
| | 44177c98f0 | |
| | a1fde5bf18 | |
| | c8e878fabe | |
| | 38d1842596 | |
| | 740d4697e7 | |
| | 11df6c69ee | |
| | b76f4dbfe0 | |
| | be21c15c80 | |
| | aa51b25313 | |
| | 300c95fe3d | |
| | 524adda8f6 | |
| | dedcf205b4 | |
| | 2c8d4c0fb0 | |
| | 3370fb5b7e | |
| | cd77eac42b | |
| | 40f20b4ecb | |
4  .github/actions/gpu_setup/action.yml  (vendored)

@@ -33,7 +33,9 @@ runs:
    if: inputs.github-instance == 'true'
    shell: bash
    run: |
-     TOOLKIT_VERSION="$(echo ${CUDA_VERSION} | sed 's/\(.*\)\.\(.*\)/\1-\2/')"
+     # Use Sed to extract a value from a string, this cannot be done with the ${variable//search/replace} pattern.
+     # shellcheck disable=SC2001
+     TOOLKIT_VERSION="$(echo "${CUDA_VERSION}" | sed 's/\(.*\)\.\(.*\)/\1-\2/')"
      wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/${env.CUDA_KEYRING_PACKAGE}
      echo "${CUDA_KEYRING_SHA} ${CUDA_KEYRING_PACKAGE}" > checksum
      sha256sum -c checksum
@@ -126,9 +126,10 @@ jobs:
  - name: Set pull-request URL
    if: ${{ failure() && github.event_name == 'pull_request' }}
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
3  .github/workflows/aws_tfhe_fast_tests.yml  (vendored)

@@ -272,9 +272,10 @@ jobs:
  - name: Set pull-request URL
    if: ${{ failure() && github.event_name == 'pull_request' }}
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Slack Notification
    if: ${{ failure() && env.SECRETS_AVAILABLE == 'true' }}
3  .github/workflows/aws_tfhe_integer_tests.yml  (vendored)

@@ -142,9 +142,10 @@ jobs:
  - name: Set pull-request URL
    if: ${{ failure() && github.event_name == 'pull_request' }}
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}

@@ -147,9 +147,10 @@ jobs:
  - name: Set pull-request URL
    if: ${{ failure() && github.event_name == 'pull_request' }}
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
3  .github/workflows/aws_tfhe_tests.yml  (vendored)

@@ -254,9 +254,10 @@ jobs:
  - name: Set pull-request URL
    if: ${{ failure() && github.event_name == 'pull_request' }}
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
3  .github/workflows/aws_tfhe_wasm_tests.yml  (vendored)

@@ -123,9 +123,10 @@ jobs:
  - name: Set pull-request URL
    if: ${{ failure() && github.event_name == 'pull_request' }}
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
12  .github/workflows/benchmark_boolean.yml  (vendored)

@@ -58,11 +58,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -114,8 +117,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
12  .github/workflows/benchmark_core_crypto.yml  (vendored)

@@ -58,11 +58,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -107,8 +110,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
12  .github/workflows/benchmark_dex.yml  (vendored)

@@ -58,11 +58,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -116,8 +119,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
12  .github/workflows/benchmark_erc20.yml  (vendored)

@@ -59,11 +59,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -111,8 +114,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
26  .github/workflows/benchmark_gpu_4090.yml  (vendored)

@@ -46,12 +46,15 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
+       echo "FAST_BENCH=TRUE";
      } >> "${GITHUB_ENV}"
-     echo "FAST_BENCH=TRUE" >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -93,8 +96,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}

@@ -124,11 +130,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -170,8 +179,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
40  .github/workflows/benchmark_gpu_common.yml  (vendored)

@@ -120,26 +120,33 @@ jobs:
    env:
      INPUTS_PARAMS_TYPE: ${{ inputs.params_type }}

  - name: Set command output
    id: set_command
    run: |
-     echo "command=${{ toJSON(env.COMMAND) }}" >> "${GITHUB_OUTPUT}"
+     echo "command=${COMMAND_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     COMMAND_OUTPUT: ${{ toJSON(env.COMMAND) }}

  - name: Set operation flavor output
    id: set_op_flavor
    run: |
-     echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"
+     echo "op_flavor=${OP_FLAVOR_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     OP_FLAVOR_OUTPUT: ${{ toJSON(env.OP_FLAVOR) }}

  - name: Set benchmark types output
    id: set_bench_type
    run: |
-     echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
+     echo "bench_type=${BENCH_TYPE_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     BENCH_TYPE_OUTPUT: ${{ toJSON(env.BENCH_TYPE) }}

  - name: Set parameters types output
    id: set_params_type
    run: |
-     echo "params_type=${{ toJSON(env.PARAMS_TYPE) }}" >> "${GITHUB_OUTPUT}"
+     echo "params_type=${PARAMS_TYPE_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     PARAMS_TYPE_OUTPUT: ${{ toJSON(env.PARAMS_TYPE) }}

  setup-instance:
    name: Setup instance (cuda-${{ inputs.profile }}-benchmarks)

@@ -227,6 +234,8 @@ jobs:
      include:
        - cuda: "12.2"
          gcc: 11
+   env:
+     CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
    steps:
      - name: Checkout tfhe-rs repo with tags
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683

@@ -237,18 +246,20 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  # Re-export environment variables as dependencies setup perform this task in the previous job.
  # Local env variables are cleaned at the end of each job.
  - name: Export CUDA variables
    shell: bash
    run: |
-     CUDA_PATH=/usr/local/cuda-${{ matrix.cuda }}
      echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
      echo "PATH=$PATH:$CUDA_PATH/bin" >> "${GITHUB_PATH}"
      echo "LD_LIBRARY_PATH=$CUDA_PATH/lib64:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"

@@ -258,10 +269,12 @@ jobs:
    shell: bash
    run: |
      {
-       echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
-       echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
-       echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
+       echo "CC=/usr/bin/gcc-${GCC_VERSION}";
+       echo "CXX=/usr/bin/g++-${GCC_VERSION}";
+       echo "CUDAHOSTCXX=/usr/bin/g++-${GCC_VERSION}";
      } >> "${GITHUB_ENV}"
+   env:
+     GCC_VERSION: ${{ matrix.gcc }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -317,8 +330,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  slack-notify:
    name: Slack Notification
12  .github/workflows/benchmark_gpu_dex_common.yml  (vendored)

@@ -119,11 +119,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -167,8 +170,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  slack-notify:
    name: Slack Notification
12  .github/workflows/benchmark_gpu_erc20_common.yml  (vendored)

@@ -120,11 +120,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -168,8 +171,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  slack-notify:
    name: Slack Notification
12  .github/workflows/benchmark_hpu_integer.yml  (vendored)

@@ -37,11 +37,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -84,5 +87,8 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}
20  .github/workflows/benchmark_integer.yml  (vendored)

@@ -79,12 +79,16 @@ jobs:
  - name: Set operation flavor output
    id: set_op_flavor
    run: |
-     echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"
+     echo "op_flavor=${OP_FLAVOR_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     OP_FLAVOR_OUTPUT: ${{ toJSON(env.OP_FLAVOR) }}

  - name: Set benchmark types output
    id: set_bench_type
    run: |
-     echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
+     echo "bench_type=${BENCH_TYPE_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     BENCH_TYPE_OUTPUT: ${{ toJSON(env.BENCH_TYPE) }}

  setup-instance:
    name: Setup instance (integer-benchmarks)

@@ -128,11 +132,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -193,8 +200,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
16  .github/workflows/benchmark_shortint.yml  (vendored)

@@ -48,7 +48,9 @@ jobs:
  - name: Set operation flavor output
    id: set_op_flavor
    run: |
-     echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"
+     echo "op_flavor=${OP_FLAVOR_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     OP_FLAVOR_OUTPUT: ${{ toJSON(env.OP_FLAVOR) }}

  setup-instance:
    name: Setup instance (shortint-benchmarks)

@@ -89,11 +91,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -150,8 +155,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
20  .github/workflows/benchmark_signed_integer.yml  (vendored)

@@ -79,12 +79,16 @@ jobs:
  - name: Set operation flavor output
    id: set_op_flavor
    run: |
-     echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"
+     echo "op_flavor=${OP_FLAVOR_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     OP_FLAVOR_OUTPUT: ${{ toJSON(env.OP_FLAVOR) }}

  - name: Set benchmark types output
    id: set_bench_type
    run: |
-     echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
+     echo "bench_type=${BENCH_TYPE_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     BENCH_TYPE_OUTPUT: ${{ toJSON(env.BENCH_TYPE) }}

  setup-instance:
    name: Setup instance (signed-integer-benchmarks)

@@ -128,11 +132,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -185,8 +192,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
12  .github/workflows/benchmark_tfhe_fft.yml  (vendored)

@@ -61,11 +61,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af

@@ -107,8 +110,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
12  .github/workflows/benchmark_tfhe_ntt.yml  (vendored)

@@ -61,11 +61,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af

@@ -107,8 +110,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
12  .github/workflows/benchmark_tfhe_zk_pok.yml  (vendored)

@@ -98,11 +98,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -155,8 +158,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
22  .github/workflows/benchmark_wasm_client.yml  (vendored)

@@ -96,11 +96,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -136,12 +139,16 @@ jobs:
  - name: Install web resources
    run: |
-     make install_${{ matrix.browser }}_browser
-     make install_${{ matrix.browser }}_web_driver
+     make install_"${BROWSER}"_browser
+     make install_"${BROWSER}"_web_driver
+   env:
+     BROWSER: ${{ matrix.browser }}

  - name: Run benchmarks
    run: |
-     make bench_web_js_api_parallel_${{ matrix.browser }}_ci
+     make bench_web_js_api_parallel_"${BROWSER}"_ci
+   env:
+     BROWSER: ${{ matrix.browser }}

  - name: Parse results
    run: |

@@ -188,8 +195,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
16  .github/workflows/benchmark_zk_pke.yml  (vendored)

@@ -93,7 +93,9 @@ jobs:
  - name: Set benchmark types output
    id: set_bench_type
    run: |
-     echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
+     echo "bench_type=${BENCH_TYPE_OUTPUT}" >> "${GITHUB_OUTPUT}"
+   env:
+     BENCH_TYPE_OUTPUT: ${{ toJSON(env.BENCH_TYPE) }}

  setup-instance:
    name: Setup instance (pke-zk-benchmarks)

@@ -140,11 +142,14 @@ jobs:
  - name: Get benchmark details
    run: |
+     COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
      {
        echo "BENCH_DATE=$(date --iso-8601=seconds)";
-       echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
+       echo "COMMIT_DATE=${COMMIT_DATE}";
        echo "COMMIT_HASH=$(git describe --tags --dirty)";
      } >> "${GITHUB_ENV}"
+   env:
+     SHA: ${{ github.sha }}

  - name: Install rust
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -205,8 +210,11 @@ jobs:
  - name: Send data to Slab
    shell: bash
    run: |
-     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${{ secrets.JOB_SECRET }}" \
-       --slab-url "${{ secrets.SLAB_URL }}"
+     python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+       --slab-url "${SLAB_URL}"
+   env:
+     JOB_SECRET: ${{ secrets.JOB_SECRET }}
+     SLAB_URL: ${{ secrets.SLAB_URL }}

  - name: Slack Notification
    if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
6  .github/workflows/ci_lint.yml  (vendored)

@@ -25,10 +25,10 @@ jobs:
  - name: Get actionlint
    run: |
-     wget "https://github.com/rhysd/actionlint/releases/download/v${{ env.ACTIONLINT_VERSION }}/actionlint_${{ env.ACTIONLINT_VERSION }}_linux_amd64.tar.gz"
-     echo "${{ env.ACTIONLINT_CHECKSUM }} actionlint_${{ env.ACTIONLINT_VERSION }}_linux_amd64.tar.gz" > checksum
+     wget "https://github.com/rhysd/actionlint/releases/download/v${ACTIONLINT_VERSION}/actionlint_${ACTIONLINT_VERSION}_linux_amd64.tar.gz"
+     echo "${ACTIONLINT_CHECKSUM} actionlint_${ACTIONLINT_VERSION}_linux_amd64.tar.gz" > checksum
      sha256sum -c checksum
-     tar -xf actionlint_${{ env.ACTIONLINT_VERSION }}_linux_amd64.tar.gz actionlint
+     tar -xf actionlint_"${ACTIONLINT_VERSION}"_linux_amd64.tar.gz actionlint
      ln -s "$(pwd)/actionlint" /usr/local/bin/

  - name: Lint workflows
13  .github/workflows/data_pr_close.yml  (vendored)

@@ -59,7 +59,7 @@ jobs:
        echo 'GH_API_RES<<EOF'
        curl --fail-with-body --no-progress-meter -L -X POST \
          -H "Accept: application/vnd.github+json" \
-         -H "Authorization: Bearer ${{ secrets.FHE_ACTIONS_TOKEN }}" \
+         -H "Authorization: Bearer ${TOKEN}" \
          -H "X-GitHub-Api-Version: 2022-11-28" \
          "${COMMENTS_URL}" \
          -d "${BODY}"

@@ -71,6 +71,7 @@ jobs:
      REPO: ${{ github.repository }}
      EVENT_NUMBER: ${{ github.event.number }}
      COMMENTS_URL: ${{ fromJson(env.TARGET_REPO_PR).comments_url }}
+     TOKEN: ${{ secrets.FHE_ACTIONS_TOKEN }}

  - name: Merge the Pull Request in the data repo
    if: ${{ github.event.pull_request.merged }}

@@ -81,7 +82,7 @@ jobs:
        echo 'GH_API_RES<<EOF'
        curl --fail-with-body --no-progress-meter -L -X PUT \
          -H "Accept: application/vnd.github+json" \
-         -H "Authorization: Bearer ${{ secrets.FHE_ACTIONS_TOKEN }}" \
+         -H "Authorization: Bearer ${TOKEN}" \
          -H "X-GitHub-Api-Version: 2022-11-28" \
          "${TARGET_REPO_PR_URL}"/merge \
          -d '{ "merge_method": "rebase" }'

@@ -91,6 +92,7 @@ jobs:
      exit $RES
    env:
      TARGET_REPO_PR_URL: ${{ fromJson(env.TARGET_REPO_PR).url }}
+     TOKEN: ${{ secrets.FHE_ACTIONS_TOKEN }}

  - name: Close the Pull Request in the data repo
    if: ${{ !github.event.pull_request.merged }}

@@ -101,7 +103,7 @@ jobs:
        echo 'GH_API_RES<<EOF'
        curl --fail-with-body --no-progress-meter -L -X PATCH \
          -H "Accept: application/vnd.github+json" \
-         -H "Authorization: Bearer ${{ secrets.FHE_ACTIONS_TOKEN }}" \
+         -H "Authorization: Bearer ${TOKEN}" \
          -H "X-GitHub-Api-Version: 2022-11-28" \
          "${TARGET_REPO_PR_URL}" \
          -d '{ "state": "closed" }'

@@ -111,6 +113,7 @@ jobs:
      exit $RES
    env:
      TARGET_REPO_PR_URL: ${{ fromJson(env.TARGET_REPO_PR).url }}
+     TOKEN: ${{ secrets.FHE_ACTIONS_TOKEN }}

  - name: Delete the associated branch in the data repo
    run: |

@@ -120,13 +123,15 @@ jobs:
        echo 'GH_API_RES<<EOF'
        curl --fail-with-body --no-progress-meter -L -X DELETE \
          -H "Accept: application/vnd.github+json" \
-         -H "Authorization: Bearer ${{ secrets.FHE_ACTIONS_TOKEN }}" \
+         -H "Authorization: Bearer ${TOKEN}" \
          -H "X-GitHub-Api-Version: 2022-11-28" \
          "${TARGET_REPO_API_URL}"/git/refs/heads/"${PR_BRANCH}"
        RES="$?"
        echo EOF
      } >> "${GITHUB_ENV}"
      exit $RES
+   env:
+     TOKEN: ${{ secrets.FHE_ACTIONS_TOKEN }}

  - name: Slack Notification
    if: ${{ always() && job.status == 'failure' }}
3  .github/workflows/gpu_fast_h100_tests.yml  (vendored)

@@ -172,9 +172,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'
3  .github/workflows/gpu_fast_tests.yml  (vendored)

@@ -156,9 +156,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'

@@ -161,9 +161,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'
21  .github/workflows/gpu_pcc.yml  (vendored)

@@ -81,13 +81,17 @@ jobs:
    if: env.SECRETS_AVAILABLE == 'false'
    shell: bash
    run: |
-     TOOLKIT_VERSION="$(echo ${{ matrix.cuda }} | sed 's/\(.*\)\.\(.*\)/\1-\2/')"
+     # Use Sed to extract a value from a string, this cannot be done with the ${variable//search/replace} pattern.
+     # shellcheck disable=SC2001
+     TOOLKIT_VERSION="$(echo "${CUDA_VERSION}" | sed 's/\(.*\)\.\(.*\)/\1-\2/')"
      wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/"${CUDA_KEYRING_PACKAGE}"
      echo "${CUDA_KEYRING_SHA} ${CUDA_KEYRING_PACKAGE}" > checksum
      sha256sum -c checksum
      sudo dpkg -i "${CUDA_KEYRING_PACKAGE}"
      sudo apt update
      sudo apt -y install "cuda-toolkit-${TOOLKIT_VERSION}" cmake-format
+   env:
+     CUDA_VERSION: ${{ matrix.cuda }}

  - name: Install latest stable
    uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1

@@ -100,17 +104,21 @@ jobs:
      echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
      echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
      echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
-     echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
+     echo "CUDACXX=/usr/local/cuda-${CUDA_VERSION}/bin/nvcc" >> "${GITHUB_ENV}"
+   env:
+     CUDA_VERSION: ${{ matrix.cuda }}

  # Specify the correct host compilers
  - name: Export gcc and g++ variables
    if: ${{ !cancelled() }}
    run: |
      {
-       echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
-       echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
-       echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
+       echo "CC=/usr/bin/gcc-${GCC_VERSION}";
+       echo "CXX=/usr/bin/g++-${GCC_VERSION}";
+       echo "CUDAHOSTCXX=/usr/bin/g++-${GCC_VERSION}";
      } >> "${GITHUB_ENV}"
+   env:
+     GCC_VERSION: ${{ matrix.gcc }}

  - name: Run fmt checks
    run: |

@@ -127,9 +135,10 @@ jobs:
  - name: Set pull-request URL
    if: ${{ failure() && github.event_name == 'pull_request' }}
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Slack Notification
    if: ${{ failure() && env.SECRETS_AVAILABLE == 'true' }}

@@ -144,9 +144,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'

@@ -158,9 +158,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'

@@ -156,9 +156,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'

@@ -144,9 +144,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'

@@ -158,9 +158,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'

@@ -156,9 +156,10 @@ jobs:
  - name: Set pull-request URL
    if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
    run: |
-     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
+     echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
    env:
      PR_BASE_URL: ${{ vars.PR_BASE_URL }}
+     PR_NUMBER: ${{ github.event.pull_request.number }}

  - name: Send message
    if: env.SECRETS_AVAILABLE == 'true'
2  .github/workflows/hpu_hlapi_tests.yml  (vendored)

@@ -70,4 +70,4 @@ jobs:
      source setup_hpu.sh
      just -f mockups/tfhe-hpu-mockup/Justfile BUILD_PROFILE=release mockup &
      make HPU_CONFIG=sim test_high_level_api_hpu
-
+     make HPU_CONFIG=sim test_user_doc_hpu
22  .github/workflows/make_release_cuda.yml  (vendored)

@@ -78,17 +78,19 @@ jobs:
      {
        echo "CUDA_PATH=$CUDA_PATH";
        echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
-       echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
+       echo "CUDACXX=/usr/local/cuda-${CUDA_VERSION}/bin/nvcc";
      } >> "${GITHUB_ENV}"
+   env:
+     CUDA_VERSION: ${{ matrix.cuda }}

  # Specify the correct host compilers
  - name: Export gcc and g++ variables
    if: ${{ !cancelled() }}
    run: |
      {
-       echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
-       echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
-       echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
+       echo "CC=/usr/bin/gcc-${GCC_VERSION}";
+       echo "CXX=/usr/bin/g++-${GCC_VERSION}";
+       echo "CUDAHOSTCXX=/usr/bin/g++-${GCC_VERSION}";
        echo "HOME=/home/ubuntu";
      } >> "${GITHUB_ENV}"

  - name: Prepare package

@@ -140,19 +142,23 @@ jobs:
      {
        echo "CUDA_PATH=$CUDA_PATH";
        echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
-       echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
+       echo "CUDACXX=/usr/local/cuda-${CUDA_VERSION}/bin/nvcc";
      } >> "${GITHUB_ENV}"
+   env:
+     CUDA_VERSION: ${{ matrix.cuda }}

  # Specify the correct host compilers
  - name: Export gcc and g++ variables
    if: ${{ !cancelled() }}
    run: |
      {
-       echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
-       echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
-       echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
+       echo "CC=/usr/bin/gcc-${GCC_VERSION}";
+       echo "CXX=/usr/bin/g++-${GCC_VERSION}";
+       echo "CUDAHOSTCXX=/usr/bin/g++-${GCC_VERSION}";
        echo "HOME=/home/ubuntu";
      } >> "${GITHUB_ENV}"
+   env:
+     GCC_VERSION: ${{ matrix.gcc }}

  - name: Publish crate.io package
    env:
14  Makefile

@@ -935,9 +935,21 @@ test_user_doc: install_rs_build_toolchain
.PHONY: test_user_doc_gpu # Run tests for GPU from the .md documentation
test_user_doc_gpu: install_rs_build_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) --doc \
-		--features=boolean,shortint,integer,internal-keycache,gpu,zk-pok -p $(TFHE_SPEC) \
+		--features=internal-keycache,integer,zk-pok,gpu -p $(TFHE_SPEC) \
		-- test_user_docs::

+.PHONY: test_user_doc_hpu # Run tests for HPU from the .md documentation
+test_user_doc_hpu: install_rs_build_toolchain
+ifeq ($(HPU_CONFIG), v80)
+	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) --doc \
+		--features=internal-keycache,integer,hpu,hpu-v80 -p $(TFHE_SPEC) \
+		-- test_user_docs::
+else
+	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) --doc \
+		--features=internal-keycache,integer,hpu -p $(TFHE_SPEC) \
+		-- test_user_docs::
+endif
+
.PHONY: test_regex_engine # Run tests for regex_engine example
@@ -538,5 +538,8 @@ void cleanup_cuda_integer_is_at_least_one_comparisons_block_true(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    int8_t **mem_ptr_void);

+void extend_radix_with_trivial_zero_blocks_msb_64(
+    CudaRadixCiphertextFFI *output, CudaRadixCiphertextFFI const *input,
+    void *const *streams, uint32_t const *gpu_indexes);
} // extern C
#endif // CUDA_INTEGER_H
@@ -3918,7 +3918,8 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
      zero_out_if_overflow_did_not_happen[0]->get_degree(0),
      zero_out_if_overflow_did_not_happen[0]->get_max_degree(0),
      params.glwe_dimension, params.polynomial_size, params.message_modulus,
-     params.carry_modulus, cur_lut_f, 2, gpu_memory_allocated);
+     params.carry_modulus, cur_lut_f, params.message_modulus - 2,
+     gpu_memory_allocated);
  zero_out_if_overflow_did_not_happen[0]->broadcast_lut(streams, gpu_indexes,
                                                        0);
  generate_device_accumulator_bivariate_with_factor<Torus>(

@@ -3927,7 +3928,8 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
      zero_out_if_overflow_did_not_happen[1]->get_degree(0),
      zero_out_if_overflow_did_not_happen[1]->get_max_degree(0),
      params.glwe_dimension, params.polynomial_size, params.message_modulus,
-     params.carry_modulus, cur_lut_f, 3, gpu_memory_allocated);
+     params.carry_modulus, cur_lut_f, params.message_modulus - 1,
+     gpu_memory_allocated);
  zero_out_if_overflow_did_not_happen[1]->broadcast_lut(streams, gpu_indexes,
                                                        0);

@@ -3954,7 +3956,8 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
      zero_out_if_overflow_happened[0]->get_degree(0),
      zero_out_if_overflow_happened[0]->get_max_degree(0),
      params.glwe_dimension, params.polynomial_size, params.message_modulus,
-     params.carry_modulus, overflow_happened_f, 2, gpu_memory_allocated);
+     params.carry_modulus, overflow_happened_f, params.message_modulus - 2,
+     gpu_memory_allocated);
  zero_out_if_overflow_happened[0]->broadcast_lut(streams, gpu_indexes, 0);
  generate_device_accumulator_bivariate_with_factor<Torus>(
      streams[0], gpu_indexes[0],

@@ -3962,7 +3965,8 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
      zero_out_if_overflow_happened[1]->get_degree(0),
      zero_out_if_overflow_happened[1]->get_max_degree(0),
      params.glwe_dimension, params.polynomial_size, params.message_modulus,
-     params.carry_modulus, overflow_happened_f, 3, gpu_memory_allocated);
+     params.carry_modulus, overflow_happened_f, params.message_modulus - 1,
+     gpu_memory_allocated);
  zero_out_if_overflow_happened[1]->broadcast_lut(streams, gpu_indexes, 0);

  // merge_overflow_flags_luts
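The four hunks above replace the hard-coded bivariate-LUT factors 2 and 3 with `params.message_modulus - 2` and `params.message_modulus - 1`. A minimal standalone sketch of the sanity check behind that substitution, assuming parameters where `message_modulus` is 4 so the generic expressions reduce to the old constants; the `factor_for` helper is purely illustrative and is not part of the CUDA backend:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative only: mirrors the factor selection from the diff, expressed in
// terms of message_modulus instead of the previous constants 2 and 3.
static uint64_t factor_for(bool overflow_branch, uint64_t message_modulus) {
  return overflow_branch ? message_modulus - 1 : message_modulus - 2;
}

int main() {
  // Assumption: the common shortint configuration uses message_modulus == 4.
  const uint64_t message_modulus = 4;
  assert(factor_for(false, message_modulus) == 2); // old hard-coded value
  assert(factor_for(true, message_modulus) == 3);  // old hard-coded value
  // For a larger message modulus the factor now scales instead of staying fixed.
  assert(factor_for(true, 8) == 7);
  return 0;
}
```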
@@ -62,7 +62,7 @@ void update_degrees_after_bitor(uint64_t *output_degrees,
    auto min = std::min(lwe_array_1_degrees[i], lwe_array_2_degrees[i]);
    auto result = max;

-   for (uint j = 0; j < min + 1; j++) {
+   for (uint64_t j = 0; j < min + 1; j++) {
      if (max | j > result) {
        result = max | j;
      }

@@ -79,13 +79,16 @@ void update_degrees_after_bitxor(uint64_t *output_degrees,
    auto max = std::max(lwe_array_1_degrees[i], lwe_array_2_degrees[i]);
    auto min = std::min(lwe_array_1_degrees[i], lwe_array_2_degrees[i]);
    auto result = max;
+   printf("max %lu, min %lu, result %d\n", max, min, result);

    // Try every possibility to find the worst case
-   for (uint j = 0; j < min + 1; j++) {
-     if (max ^ j > result) {
+   for (uint64_t j = 0; j < min + 1; j++) {
+     printf("j %lu, max ^ j %lu \n", j, max ^ j);
+     if ((max ^ j) > result) {
        result = max ^ j;
      }
    }
    output_degrees[i] = result;
+   printf("output degree %lu\n", result);
  }
}
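The bitxor hunk changes `max ^ j > result` to `(max ^ j) > result`. A small standalone illustration of why the parentheses matter in C++, where relational operators bind tighter than `^`; the values below are arbitrary and the snippet is not taken from the backend:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t max = 3, j = 1, result = 3;
  // Without parentheses, `>` is evaluated first: max ^ (j > result) is max ^ 0 == 3,
  // which is non-zero, even though the intended comparison (max ^ j) > result is false.
  bool unparenthesized = max ^ j > result; // parsed as max ^ (j > result)
  bool parenthesized = (max ^ j) > result; // the comparison the fix intends
  std::printf("unparenthesized=%d parenthesized=%d\n", unparenthesized, parenthesized);
  return 0;
}
```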
@@ -36,7 +36,7 @@ __host__ void host_integer_radix_bitop_kb(
    update_degrees_after_bitor(degrees, lwe_array_1->degrees,
                               lwe_array_2->degrees,
                               lwe_array_1->num_radix_blocks);
- } else if (mem_ptr->op == BITXOR) {
+ } else if (mem_ptr->op == BITOP_TYPE::BITXOR) {
    update_degrees_after_bitxor(degrees, lwe_array_1->degrees,
                                lwe_array_2->degrees,
                                lwe_array_1->num_radix_blocks);
9  backends/tfhe-cuda-backend/cuda/src/integer/cast.cu  (Normal file)

@@ -0,0 +1,9 @@
+#include "cast.cuh"
+
+void extend_radix_with_trivial_zero_blocks_msb_64(
+    CudaRadixCiphertextFFI *output, CudaRadixCiphertextFFI const *input,
+    void *const *streams, uint32_t const *gpu_indexes) {
+
+  host_extend_radix_with_trivial_zero_blocks_msb<uint64_t>(
+      output, input, (cudaStream_t *)streams, gpu_indexes);
+}
18  backends/tfhe-cuda-backend/cuda/src/integer/cast.cuh  (Normal file)

@@ -0,0 +1,18 @@
+#ifndef CAST_CUH
+#define CAST_CUH
+
+#include "device.h"
+#include "integer.cuh"
+#include "integer/integer_utilities.h"
+
+template <typename Torus>
+__host__ void host_extend_radix_with_trivial_zero_blocks_msb(
+    CudaRadixCiphertextFFI *output, CudaRadixCiphertextFFI const *input,
+    cudaStream_t const *streams, uint32_t const *gpu_indexes) {
+
+  copy_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0], output,
+                                           0, input->num_radix_blocks, input, 0,
+                                           input->num_radix_blocks);
+}
+
+#endif
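`host_extend_radix_with_trivial_zero_blocks_msb` above copies the input's blocks into the least-significant positions of a larger output, leaving the most-significant positions as trivial (zero) blocks. A plain-CPU analogue of that layout, assuming the output buffer is pre-allocated and zero-initialized; `Block` is a stand-in type, not the backend's `CudaRadixCiphertextFFI`:

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in for one radix block; the real data lives in GPU memory.
using Block = uint64_t;

// Copy `input` into the low positions of `output`; the remaining
// most-significant positions keep their zero-initialized ("trivial") value.
void extend_radix_with_trivial_zero_blocks_msb(std::vector<Block> &output,
                                               const std::vector<Block> &input) {
  assert(output.size() >= input.size());
  std::copy(input.begin(), input.end(), output.begin());
}

int main() {
  std::vector<Block> input = {7, 1, 2}; // three existing blocks, LSB first
  std::vector<Block> output(5, 0);      // extended to five blocks, MSBs trivial
  extend_radix_with_trivial_zero_blocks_msb(output, input);
  assert(output[2] == 2 && output[3] == 0 && output[4] == 0);
  return 0;
}
```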
@@ -456,7 +456,7 @@ __host__ void tree_sign_reduction(
  auto inner_tree_leaf = tree_buffer->tree_inner_leaf_lut;
  while (partial_block_count > 2) {
    pack_blocks<Torus>(streams[0], gpu_indexes[0], y, x, partial_block_count,
-                      4);
+                      message_modulus);

    integer_radix_apply_univariate_lookup_table_kb<Torus>(
        streams, gpu_indexes, gpu_count, x, y, bsks, ksks,

@@ -477,16 +477,17 @@ __host__ void tree_sign_reduction(
  auto last_lut = tree_buffer->tree_last_leaf_lut;
  auto block_selector_f = tree_buffer->block_selector_f;
  std::function<Torus(Torus)> f;

+ auto num_bits_in_message = log2_int(params.message_modulus);
  if (partial_block_count == 2) {
    pack_blocks<Torus>(streams[0], gpu_indexes[0], y, x, partial_block_count,
-                      4);
+                      message_modulus);

-   f = [block_selector_f, sign_handler_f](Torus x) -> Torus {
-     int msb = (x >> 2) & 3;
-     int lsb = x & 3;
+   f = [block_selector_f, sign_handler_f, num_bits_in_message,
+        message_modulus](Torus x) -> Torus {
+     Torus msb = (x >> num_bits_in_message) & (message_modulus - 1);
+     Torus lsb = x & (message_modulus - 1);

-     int final_sign = block_selector_f(msb, lsb);
+     Torus final_sign = block_selector_f(msb, lsb);
      return sign_handler_f(final_sign);
    };
  } else {
@@ -386,8 +386,9 @@ __host__ void host_unsigned_integer_div_rem_kb(
subtraction_overflowed,
at_least_one_upper_block_is_non_zero, 1);

int factor = (i) ? 3 : 2;
int factor_lut_id = factor - 2;
auto message_modulus = radix_params.message_modulus;
int factor = (i) ? message_modulus - 1 : message_modulus - 2;
int factor_lut_id = (i) ? 1 : 0;
for (size_t k = 0;
k < cleaned_merged_interesting_remainder->num_radix_blocks; k++) {
copy_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0],

@@ -1616,10 +1616,12 @@ __host__ void reduce_signs(
auto message_modulus = params.message_modulus;
auto carry_modulus = params.carry_modulus;

auto num_bits_in_message = log2_int(message_modulus);
std::function<Torus(Torus)> reduce_two_orderings_function =
[diff_buffer, sign_handler_f](Torus x) -> Torus {
int msb = (x >> 2) & 3;
int lsb = x & 3;
[diff_buffer, sign_handler_f, num_bits_in_message,
message_modulus](Torus x) -> Torus {
Torus msb = (x >> num_bits_in_message) & (message_modulus - 1);
Torus lsb = x & (message_modulus - 1);

return diff_buffer->tree_buffer->block_selector_f(msb, lsb);
};
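Both closures above generalize the same packing convention: two shortint blocks are packed as `msb * message_modulus + lsb`, then split back with a shift by `log2(message_modulus)` and a mask. A small plain-integer check of that round trip, assuming a power-of-two message modulus as in the hunks:

```rust
fn main() {
    let message_modulus: u64 = 4; // 2-bit messages
    let num_bits_in_message = message_modulus.ilog2();

    let (msb_block, lsb_block) = (3u64, 1u64);
    // Packing as done by pack_blocks: the high block is scaled by the message modulus.
    let packed = msb_block * message_modulus + lsb_block;

    // Unpacking as done inside the generalized lookup-table closures.
    let msb = (packed >> num_bits_in_message) & (message_modulus - 1);
    let lsb = packed & (message_modulus - 1);

    assert_eq!((msb, lsb), (msb_block, lsb_block));
}
```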
@@ -1640,7 +1642,7 @@ __host__ void reduce_signs(

while (num_sign_blocks > 2) {
pack_blocks<Torus>(streams[0], gpu_indexes[0], signs_b, signs_a,
num_sign_blocks, 4);
num_sign_blocks, message_modulus);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, signs_a, signs_b, bsks, ksks,
ms_noise_reduction_key, lut, num_sign_blocks / 2);
@@ -1669,7 +1671,8 @@ __host__ void reduce_signs(
message_modulus, carry_modulus, final_lut_f, true);
lut->broadcast_lut(streams, gpu_indexes, 0);

pack_blocks<Torus>(streams[0], gpu_indexes[0], signs_b, signs_a, 2, 4);
pack_blocks<Torus>(streams[0], gpu_indexes[0], signs_b, signs_a,
num_sign_blocks, message_modulus);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, signs_array_out, signs_b, bsks, ksks,
ms_noise_reduction_key, lut, 1);
@@ -1677,8 +1680,8 @@ __host__ void reduce_signs(
} else {

std::function<Torus(Torus)> final_lut_f =
[mem_ptr, sign_handler_f](Torus x) -> Torus {
return sign_handler_f(x & 3);
[mem_ptr, sign_handler_f, message_modulus](Torus x) -> Torus {
return sign_handler_f(x & (message_modulus - 1));
};

auto lut = mem_ptr->diff_buffer->reduce_signs_lut;

@@ -1316,6 +1316,14 @@ unsafe extern "C" {
mem_ptr_void: *mut *mut i8,
);
}
unsafe extern "C" {
pub fn extend_radix_with_trivial_zero_blocks_msb_64(
output: *mut CudaRadixCiphertextFFI,
input: *const CudaRadixCiphertextFFI,
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
);
}
pub const KS_TYPE_BIG_TO_SMALL: KS_TYPE = 0;
pub const KS_TYPE_SMALL_TO_BIG: KS_TYPE = 1;
pub type KS_TYPE = ffi::c_uint;

@@ -43,9 +43,9 @@ Comparing to the [CPU example](../../getting_started/quick_start.md), HPU set up
Here is a full example (combining the client and server parts):

```rust
use tfhe::{ConfigBuilder, set_server_key, FheUint8, ClientKey, CompressedServerKey};
use tfhe::{Config, set_server_key, FheUint8, ClientKey, CompressedServerKey};
use tfhe::prelude::*;
use tfhe_hpu_backend::prelude::*;
use tfhe::tfhe_hpu_backend::prelude::*;

fn main() {

@@ -53,7 +53,7 @@ fn main() {
// HPU configuration knobs are retrieved from a TOML configuration file. Prebuilt configurations could be find in `backends/tfhe-hpu-backend/config_store`
// For ease of use a setup_hpu.sh script is available in repository root folder and it handle the required environment variables setup and driver initialisation
// More details are available in `backends/tfhe-hpu-backend/README.md`
let hpu_device = HpuDevice::from_config(ShellString::new("${HPU_BACKEND_DIR}/config_store/${HPU_CONFIG}/hpu_config.toml".to_string()));
let hpu_device = HpuDevice::from_config(ShellString::new("${HPU_BACKEND_DIR}/config_store/${HPU_CONFIG}/hpu_config.toml".to_string()).expand().as_str());

// Generate keys ----------------------------------------------------------
let config = Config::from_hpu_device(&hpu_device);
@@ -106,7 +106,7 @@ The server first needs to set up its keys with `set_server_key((hpu_device, comp

Then, homomorphic computations are performed using the same approach as the [CPU operations](../../fhe-computation/operations/README.md).

``` rust
``` Rust
// Server-side
let result = a + b;

@@ -260,16 +260,19 @@ pub fn main() {
let roi_start = Instant::now();

let res_hpu = (0..args.iter)
.map(|_i| {
.filter_map(|i| {
let res = HpuRadixCiphertext::exec(&proto, iop.opcode(), &srcs_enc, &imms);
std::hint::black_box(&res);
res
if i == (args.iter - 1) {
Some(res)
} else {
None
}
})
.next_back()
.expect("Iteration must be greater than 0");
.collect::<Vec<_>>();

// let res_fhe = $fhe_type::from(res_hpu);
let res_fhe = res_hpu
.last()
.expect("Iteration must be greater than 0")
.iter()
.map(|x| x.to_radix_ciphertext())
.collect::<Vec<_>>();
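The benchmark change above runs the IOp `args.iter` times but keeps only the ciphertexts from the final pass. A self-contained sketch of the same keep-only-the-last-iteration pattern, with plain arithmetic standing in for the HPU execution:

```rust
fn main() {
    let iters = 4usize;

    // Run the body `iters` times; only the last pass's result is collected.
    let last_only: Vec<u64> = (0..iters)
        .filter_map(|i| {
            let res = (i as u64) * 10; // stand-in for the encrypted computation
            std::hint::black_box(&res);
            if i == iters - 1 {
                Some(res)
            } else {
                None
            }
        })
        .collect();

    assert_eq!(last_only, vec![30]);
}
```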
|
||||
|
||||
@@ -3,7 +3,7 @@ use crate::high_level_api::global_state;
|
||||
use crate::high_level_api::integers::FheIntId;
|
||||
use crate::high_level_api::keys::InternalServerKey;
|
||||
use crate::integer::block_decomposition::DecomposableInto;
|
||||
use crate::prelude::{OverflowingAdd, OverflowingMul, OverflowingSub};
|
||||
use crate::prelude::{OverflowingAdd, OverflowingMul, OverflowingNeg, OverflowingSub};
|
||||
use crate::{FheBool, FheInt};
|
||||
|
||||
impl<Id> OverflowingAdd<Self> for &FheInt<Id>
|
||||
@@ -537,3 +537,73 @@ where
|
||||
<&Self as OverflowingMul<&Self>>::overflowing_mul(&self, other)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Id> OverflowingNeg for &FheInt<Id>
|
||||
where
|
||||
Id: FheIntId,
|
||||
{
|
||||
type Output = FheInt<Id>;
|
||||
|
||||
/// Negates self, overflowing if this is equal to the minimum value.
|
||||
///
|
||||
/// * The operation is modular, i.e. on overflow the result wraps around.
|
||||
/// * On overflow the [FheBool] is true (if self encrypts the minimum value), otherwise false
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use tfhe::prelude::*;
|
||||
/// use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheInt16};
|
||||
///
|
||||
/// let (client_key, server_key) = generate_keys(ConfigBuilder::default());
|
||||
/// set_server_key(server_key);
|
||||
///
|
||||
/// let a = FheInt16::encrypt(i16::MIN, &client_key);
|
||||
///
|
||||
/// let (result, overflowed) = a.overflowing_neg();
|
||||
/// let (expected_result, expected_overflowed) = i16::MIN.overflowing_neg();
|
||||
/// let result: i16 = result.decrypt(&client_key);
|
||||
/// assert_eq!(result, expected_result);
|
||||
/// assert_eq!(overflowed.decrypt(&client_key), expected_overflowed);
|
||||
/// assert!(overflowed.decrypt(&client_key));
|
||||
/// ```
|
||||
fn overflowing_neg(self) -> (Self::Output, FheBool) {
|
||||
global_state::with_internal_keys(|key| match key {
|
||||
InternalServerKey::Cpu(cpu_key) => {
|
||||
let (result, overflow) = cpu_key
|
||||
.pbs_key()
|
||||
.overflowing_neg_parallelized(&*self.ciphertext.on_cpu());
|
||||
(
|
||||
FheInt::new(result, cpu_key.tag.clone()),
|
||||
FheBool::new(overflow, cpu_key.tag.clone()),
|
||||
)
|
||||
}
|
||||
#[cfg(feature = "gpu")]
|
||||
InternalServerKey::Cuda(cuda_key) => {
|
||||
let (result, overflow) = cuda_key.pbs_key().overflowing_neg(
|
||||
&*self.ciphertext.on_gpu(&cuda_key.streams),
|
||||
&cuda_key.streams,
|
||||
);
|
||||
(
|
||||
FheInt::new(result, cuda_key.tag.clone()),
|
||||
FheBool::new(overflow, cuda_key.tag.clone()),
|
||||
)
|
||||
}
|
||||
#[cfg(feature = "hpu")]
|
||||
InternalServerKey::Hpu(_device) => {
|
||||
panic!("Hpu does not support this overflowing_neg yet.")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<Id> OverflowingNeg for FheInt<Id>
|
||||
where
|
||||
Id: FheIntId,
|
||||
{
|
||||
type Output = Self;
|
||||
|
||||
fn overflowing_neg(self) -> (Self::Output, FheBool) {
|
||||
<&Self as OverflowingNeg>::overflowing_neg(&self)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ use crate::high_level_api::global_state;
|
||||
use crate::high_level_api::integers::FheUintId;
|
||||
use crate::high_level_api::keys::InternalServerKey;
|
||||
use crate::integer::block_decomposition::DecomposableInto;
|
||||
use crate::prelude::{CastInto, OverflowingAdd, OverflowingMul, OverflowingSub};
|
||||
use crate::prelude::{CastInto, OverflowingAdd, OverflowingMul, OverflowingNeg, OverflowingSub};
|
||||
use crate::{FheBool, FheUint};
|
||||
|
||||
impl<Id> OverflowingAdd<Self> for &FheUint<Id>
|
||||
@@ -530,3 +530,50 @@ where
|
||||
<&Self as OverflowingMul<&Self>>::overflowing_mul(&self, other)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Id> OverflowingNeg for &FheUint<Id>
|
||||
where
|
||||
Id: FheUintId,
|
||||
{
|
||||
type Output = FheUint<Id>;
|
||||
|
||||
fn overflowing_neg(self) -> (Self::Output, FheBool) {
|
||||
global_state::with_internal_keys(|key| match key {
|
||||
InternalServerKey::Cpu(cpu_key) => {
|
||||
let (result, overflow) = cpu_key
|
||||
.pbs_key()
|
||||
.overflowing_neg_parallelized(&*self.ciphertext.on_cpu());
|
||||
(
|
||||
FheUint::new(result, cpu_key.tag.clone()),
|
||||
FheBool::new(overflow, cpu_key.tag.clone()),
|
||||
)
|
||||
}
|
||||
#[cfg(feature = "gpu")]
|
||||
InternalServerKey::Cuda(cuda_key) => {
|
||||
let (result, overflow) = cuda_key.pbs_key().overflowing_neg(
|
||||
&*self.ciphertext.on_gpu(&cuda_key.streams),
|
||||
&cuda_key.streams,
|
||||
);
|
||||
(
|
||||
FheUint::new(result, cuda_key.tag.clone()),
|
||||
FheBool::new(overflow, cuda_key.tag.clone()),
|
||||
)
|
||||
}
|
||||
#[cfg(feature = "hpu")]
|
||||
InternalServerKey::Hpu(_device) => {
|
||||
panic!("Hpu does not support this overflowing_neg yet.")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<Id> OverflowingNeg for FheUint<Id>
where
Id: FheUintId,
{
type Output = Self;

fn overflowing_neg(self) -> (Self::Output, FheBool) {
<&Self as OverflowingNeg>::overflowing_neg(&self)
}
}
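Unlike the signed implementation, the `FheUint` one above carries no doc example; a hedged usage sketch in the style of the `FheInt16` doc-test (the choice of `FheUint16` is arbitrary):

```rust
use tfhe::prelude::*;
use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheUint16};

fn main() {
    let (client_key, server_key) = generate_keys(ConfigBuilder::default());
    set_server_key(server_key);

    // For unsigned integers, negating any non-zero value wraps around and is
    // reported as an overflow; only -0 does not overflow.
    let a = FheUint16::encrypt(3u16, &client_key);

    let (result, overflowed) = a.overflowing_neg();
    let (expected_result, expected_overflowed) = 3u16.overflowing_neg();

    let result: u16 = result.decrypt(&client_key);
    assert_eq!(result, expected_result);
    assert_eq!(overflowed.decrypt(&client_key), expected_overflowed);
}
```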
|
||||
|
||||
@@ -9,8 +9,8 @@
pub use crate::high_level_api::traits::{
BitSlice, CiphertextList, DivRem, FheDecrypt, FheEncrypt, FheEq, FheKeyswitch, FheMax, FheMin,
FheOrd, FheTrivialEncrypt, FheTryEncrypt, FheTryTrivialEncrypt, FheWait, IfThenElse,
OverflowingAdd, OverflowingMul, OverflowingSub, RotateLeft, RotateLeftAssign, RotateRight,
RotateRightAssign, ScalarIfThenElse, SquashNoise, Tagged,
OverflowingAdd, OverflowingMul, OverflowingNeg, OverflowingSub, RotateLeft, RotateLeftAssign,
RotateRight, RotateRightAssign, ScalarIfThenElse, SquashNoise, Tagged,
};
#[cfg(feature = "hpu")]
pub use crate::high_level_api::traits::{FheHpu, HpuHandle};

@@ -180,6 +180,12 @@ pub trait OverflowingMul<Rhs> {
fn overflowing_mul(self, rhs: Rhs) -> (Self::Output, FheBool);
}

pub trait OverflowingNeg {
type Output;

fn overflowing_neg(self) -> (Self::Output, FheBool);
}

pub trait BitSlice<Bounds> {
type Output;

@@ -77,30 +77,6 @@ impl CudaRadixCiphertextInfo {
|
||||
new_block_info
|
||||
}
|
||||
|
||||
pub(crate) fn after_extend_radix_with_trivial_zero_blocks_msb(
|
||||
&self,
|
||||
num_blocks: usize,
|
||||
) -> Self {
|
||||
assert!(num_blocks > 0);
|
||||
|
||||
let mut new_block_info = Self {
|
||||
blocks: Vec::with_capacity(self.blocks.len() + num_blocks),
|
||||
};
|
||||
for &b in self.blocks.iter() {
|
||||
new_block_info.blocks.push(b);
|
||||
}
|
||||
for _ in 0..num_blocks {
|
||||
new_block_info.blocks.push(CudaBlockInfo {
|
||||
degree: Degree::new(0),
|
||||
message_modulus: self.blocks.first().unwrap().message_modulus,
|
||||
carry_modulus: self.blocks.first().unwrap().carry_modulus,
|
||||
atomic_pattern: self.blocks.first().unwrap().atomic_pattern,
|
||||
noise_level: NoiseLevel::ZERO,
|
||||
});
|
||||
}
|
||||
new_block_info
|
||||
}
|
||||
|
||||
pub(crate) fn after_trim_radix_blocks_lsb(&self, num_blocks: usize) -> Self {
|
||||
let mut new_block_info = Self {
|
||||
blocks: Vec::with_capacity(self.blocks.len().saturating_sub(num_blocks)),
|
||||
|
||||
@@ -5387,3 +5387,31 @@ pub unsafe fn unchecked_negate_integer_radix_async(
|
||||
);
|
||||
update_noise_degree(radix_lwe_out, &cuda_ffi_radix_lwe_out);
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must not
|
||||
/// be dropped until streams is synchronized
|
||||
pub unsafe fn extend_radix_with_trivial_zero_blocks_msb_async(
|
||||
output: &mut CudaRadixCiphertext,
|
||||
input: &CudaRadixCiphertext,
|
||||
streams: &CudaStreams,
|
||||
) {
|
||||
let mut input_degrees = input.info.blocks.iter().map(|b| b.degree.0).collect();
|
||||
let mut input_noise_levels = input.info.blocks.iter().map(|b| b.noise_level.0).collect();
|
||||
let mut output_degrees = output.info.blocks.iter().map(|b| b.degree.0).collect();
|
||||
let mut output_noise_levels = output.info.blocks.iter().map(|b| b.noise_level.0).collect();
|
||||
|
||||
let mut cuda_ffi_output =
|
||||
prepare_cuda_radix_ffi(output, &mut output_degrees, &mut output_noise_levels);
|
||||
|
||||
let cuda_ffi_input = prepare_cuda_radix_ffi(input, &mut input_degrees, &mut input_noise_levels);
|
||||
|
||||
extend_radix_with_trivial_zero_blocks_msb_64(
|
||||
&raw mut cuda_ffi_output,
|
||||
&raw const cuda_ffi_input,
|
||||
streams.ptr.as_ptr(),
|
||||
streams.gpu_indexes_ptr(),
|
||||
);
|
||||
update_noise_degree(output, &cuda_ffi_output);
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ use crate::core_crypto::gpu::algorithms::{
use crate::core_crypto::gpu::vec::CudaVec;
use crate::core_crypto::gpu::CudaStreams;
use crate::core_crypto::prelude::LweBskGroupingFactor;
use crate::integer::gpu::ciphertext::boolean_value::CudaBooleanBlock;
use crate::integer::gpu::ciphertext::CudaIntegerRadixCiphertext;
use crate::integer::gpu::server_key::CudaBootstrappingKey;
use crate::integer::gpu::{
@@ -96,6 +97,31 @@ impl CudaServerKey {
ct.as_mut().info = ct.as_ref().info.after_bitnot();
}

pub(crate) unsafe fn unchecked_boolean_bitnot_assign_async(
&self,
ct: &mut CudaBooleanBlock,
streams: &CudaStreams,
) {
// We do (-ciphertext) + (msg_mod -1) as it allows to avoid an allocation
cuda_lwe_ciphertext_negate_assign(&mut ct.0.as_mut().d_blocks, streams);

let ct_blocks = ct.0.as_ref().d_blocks.lwe_ciphertext_count().0;

let shift_plaintext = self.encoding().encode(Cleartext(1u64)).0;

let scalar_vector = vec![shift_plaintext; ct_blocks];
let mut d_decomposed_scalar =
CudaVec::<u64>::new_async(ct.0.as_ref().d_blocks.lwe_ciphertext_count().0, streams, 0);
d_decomposed_scalar.copy_from_cpu_async(scalar_vector.as_slice(), streams, 0);

cuda_lwe_ciphertext_plaintext_add_assign(
&mut ct.0.as_mut().d_blocks,
&d_decomposed_scalar,
streams,
);
// Neither noise level nor the degree changes
}
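The new boolean bitnot above leans on the identity NOT(b) = 1 - b = (-b) + 1 for a block encrypting b in {0, 1}: negate the ciphertext, then add an encoding of 1. The same identity on clear values (encoding and GPU calls elided):

```rust
fn main() {
    // A boolean block encrypts 0 or 1; NOT(b) = 1 - b = (-b) + 1.
    for b in [0i64, 1i64] {
        let negated = -b; // analogue of the ciphertext negation
        let not_b = negated + 1; // analogue of the plaintext add of the encoding of 1
        assert_eq!(not_b, 1 - b);
    }
}
```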
|
||||
|
||||
pub fn unchecked_bitnot_assign<T: CudaIntegerRadixCiphertext>(
|
||||
&self,
|
||||
ct: &mut T,
|
||||
@@ -165,7 +191,7 @@ impl CudaServerKey {
|
||||
/// # Safety
|
||||
///
|
||||
/// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must
|
||||
/// not be dropped until streams is synchronised
|
||||
/// not be dropped until streams is synchronized
|
||||
pub unsafe fn unchecked_bitop_assign_async<T: CudaIntegerRadixCiphertext>(
|
||||
&self,
|
||||
ct_left: &mut T,
|
||||
@@ -560,7 +586,7 @@ impl CudaServerKey {
|
||||
/// # Safety
|
||||
///
|
||||
/// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must
|
||||
/// not be dropped until streams is synchronised
|
||||
/// not be dropped until streams is synchronized
|
||||
pub unsafe fn bitand_assign_async<T: CudaIntegerRadixCiphertext>(
|
||||
&self,
|
||||
ct_left: &mut T,
|
||||
@@ -666,7 +692,7 @@ impl CudaServerKey {
|
||||
/// # Safety
|
||||
///
|
||||
/// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must
|
||||
/// not be dropped until streams is synchronised
|
||||
/// not be dropped until streams is synchronized
|
||||
pub unsafe fn bitor_assign_async<T: CudaIntegerRadixCiphertext>(
|
||||
&self,
|
||||
ct_left: &mut T,
|
||||
@@ -771,7 +797,7 @@ impl CudaServerKey {
|
||||
/// # Safety
|
||||
///
|
||||
/// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must
|
||||
/// not be dropped until streams is synchronised
|
||||
/// not be dropped until streams is synchronized
|
||||
pub unsafe fn bitxor_assign_async<T: CudaIntegerRadixCiphertext>(
|
||||
&self,
|
||||
ct_left: &mut T,
|
||||
@@ -869,7 +895,7 @@ impl CudaServerKey {
|
||||
/// # Safety
|
||||
///
|
||||
/// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must
|
||||
/// not be dropped until streams is synchronised
|
||||
/// not be dropped until streams is synchronized
|
||||
pub unsafe fn bitnot_assign_async<T: CudaIntegerRadixCiphertext>(
|
||||
&self,
|
||||
ct: &mut T,
|
||||
|
||||
@@ -16,8 +16,8 @@ use crate::integer::gpu::server_key::CudaBootstrappingKey;
|
||||
use crate::integer::gpu::{
|
||||
add_and_propagate_single_carry_assign_async, apply_bivariate_lut_kb_async,
|
||||
apply_many_univariate_lut_kb_async, apply_univariate_lut_kb_async,
|
||||
compute_prefix_sum_hillis_steele_async, full_propagate_assign_async,
|
||||
propagate_single_carry_assign_async, CudaServerKey, PBSType,
|
||||
compute_prefix_sum_hillis_steele_async, extend_radix_with_trivial_zero_blocks_msb_async,
|
||||
full_propagate_assign_async, propagate_single_carry_assign_async, CudaServerKey, PBSType,
|
||||
};
|
||||
use crate::integer::server_key::radix_parallel::OutputFlag;
|
||||
use crate::shortint::ciphertext::{Degree, NoiseLevel};
|
||||
@@ -577,27 +577,17 @@ impl CudaServerKey {
|
||||
num_blocks: usize,
|
||||
streams: &CudaStreams,
|
||||
) -> T {
|
||||
if num_blocks == 0 {
|
||||
return ct.duplicate_async(streams);
|
||||
let mut output: T = unsafe {
|
||||
self.create_trivial_zero_radix_async(
|
||||
ct.as_ref().d_blocks.lwe_ciphertext_count().0 + num_blocks,
|
||||
streams,
|
||||
)
|
||||
};
|
||||
|
||||
unsafe {
|
||||
extend_radix_with_trivial_zero_blocks_msb_async(output.as_mut(), ct.as_ref(), streams);
|
||||
}
|
||||
let new_num_blocks = ct.as_ref().d_blocks.lwe_ciphertext_count().0 + num_blocks;
|
||||
let ciphertext_modulus = ct.as_ref().d_blocks.ciphertext_modulus();
|
||||
let lwe_size = ct.as_ref().d_blocks.lwe_dimension().to_lwe_size();
|
||||
|
||||
let mut extended_ct_vec = CudaVec::new_async(new_num_blocks * lwe_size.0, streams, 0);
|
||||
extended_ct_vec.memset_async(0u64, streams, 0);
|
||||
extended_ct_vec.copy_from_gpu_async(&ct.as_ref().d_blocks.0.d_vec, streams, 0);
|
||||
let extended_ct_list = CudaLweCiphertextList::from_cuda_vec(
|
||||
extended_ct_vec,
|
||||
LweCiphertextCount(new_num_blocks),
|
||||
ciphertext_modulus,
|
||||
);
|
||||
|
||||
let extended_ct_info = ct
|
||||
.as_ref()
|
||||
.info
|
||||
.after_extend_radix_with_trivial_zero_blocks_msb(num_blocks);
|
||||
T::from(CudaRadixCiphertext::new(extended_ct_list, extended_ct_info))
|
||||
output
|
||||
}
|
||||
|
||||
/// Remove LSB blocks from an existing [`CudaUnsignedRadixCiphertext`] or
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
use crate::core_crypto::gpu::CudaStreams;
|
||||
use crate::integer::gpu::ciphertext::CudaIntegerRadixCiphertext;
|
||||
use crate::integer::gpu::ciphertext::boolean_value::CudaBooleanBlock;
|
||||
use crate::integer::gpu::ciphertext::{
|
||||
CudaIntegerRadixCiphertext, CudaSignedRadixCiphertext, CudaUnsignedRadixCiphertext,
|
||||
};
|
||||
use crate::integer::gpu::server_key::CudaServerKey;
|
||||
use crate::integer::gpu::unchecked_negate_integer_radix_async;
|
||||
use crate::integer::server_key::radix_parallel::OutputFlag;
|
||||
@@ -126,7 +129,7 @@ impl CudaServerKey {
|
||||
/// # Safety
|
||||
///
|
||||
/// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must
|
||||
/// not be dropped until streams is synchronised
|
||||
/// not be dropped until streams is synchronized
|
||||
pub unsafe fn neg_async<T: CudaIntegerRadixCiphertext>(
|
||||
&self,
|
||||
ctxt: &T,
|
||||
@@ -147,4 +150,53 @@ impl CudaServerKey {
|
||||
self.propagate_single_carry_assign_async(&mut res, streams, None, OutputFlag::None);
|
||||
res
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
///
|
||||
/// - `streams` __must__ be synchronized to guarantee computation has finished, and inputs must
|
||||
/// not be dropped until streams is synchronized
|
||||
pub unsafe fn overflowing_neg_async<T>(
|
||||
&self,
|
||||
ctxt: &T,
|
||||
streams: &CudaStreams,
|
||||
) -> (T, CudaBooleanBlock)
|
||||
where
|
||||
T: CudaIntegerRadixCiphertext,
|
||||
{
|
||||
let mut ct = if ctxt.block_carries_are_empty() {
|
||||
ctxt.duplicate_async(streams)
|
||||
} else {
|
||||
let mut ct = ctxt.duplicate_async(streams);
|
||||
self.full_propagate_assign_async(&mut ct, streams);
|
||||
ct
|
||||
};
|
||||
|
||||
self.bitnot_assign_async(&mut ct, streams);
|
||||
|
||||
if T::IS_SIGNED {
|
||||
let tmp = CudaSignedRadixCiphertext {
|
||||
ciphertext: ct.into_inner(),
|
||||
};
|
||||
let (result, overflowed) = self.signed_overflowing_scalar_add(&tmp, 1, streams);
|
||||
let result = T::from(result.into_inner());
|
||||
(result, overflowed)
|
||||
} else {
|
||||
let mut tmp = CudaUnsignedRadixCiphertext {
|
||||
ciphertext: ct.into_inner(),
|
||||
};
|
||||
let mut overflowed = self.unsigned_overflowing_scalar_add_assign(&mut tmp, 1, streams);
|
||||
self.unchecked_boolean_bitnot_assign_async(&mut overflowed, streams);
|
||||
let result = T::from(tmp.into_inner());
|
||||
(result, overflowed)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn overflowing_neg<T>(&self, ctxt: &T, streams: &CudaStreams) -> (T, CudaBooleanBlock)
|
||||
where
|
||||
T: CudaIntegerRadixCiphertext,
|
||||
{
|
||||
let result = unsafe { self.overflowing_neg_async(ctxt, streams) };
|
||||
streams.synchronize();
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,39 +17,39 @@ where
|
||||
P: Into<TestParameters> + Clone,
|
||||
{
|
||||
// Binary Ops Executors
|
||||
let add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::add);
|
||||
let sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::sub);
|
||||
//let add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::add);
|
||||
//let sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::sub);
|
||||
let bitwise_and_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitand);
|
||||
let bitwise_or_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitor);
|
||||
let bitwise_xor_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitxor);
|
||||
let mul_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::mul);
|
||||
let rotate_left_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_left);
|
||||
let left_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::left_shift);
|
||||
let rotate_right_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_right);
|
||||
let right_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::right_shift);
|
||||
let max_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::max);
|
||||
let min_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::min);
|
||||
//let rotate_left_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_left);
|
||||
//let left_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::left_shift);
|
||||
//let rotate_right_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_right);
|
||||
//let right_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::right_shift);
|
||||
//let max_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::max);
|
||||
//let min_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::min);
|
||||
|
||||
// Binary Ops Clear functions
|
||||
let clear_add = |x, y| x + y;
|
||||
let clear_sub = |x, y| x - y;
|
||||
//let clear_add = |x, y| x + y;
|
||||
//let clear_sub = |x, y| x - y;
|
||||
let clear_bitwise_and = |x, y| x & y;
|
||||
let clear_bitwise_or = |x, y| x | y;
|
||||
let clear_bitwise_xor = |x, y| x ^ y;
|
||||
let clear_mul = |x, y| x * y;
|
||||
// Warning this rotate definition only works with 64-bit ciphertexts
|
||||
let clear_rotate_left = |x: u64, y: u64| x.rotate_left(y as u32);
|
||||
let clear_left_shift = |x, y| x << y;
|
||||
// Warning this rotate definition only works with 64-bit ciphertexts
|
||||
let clear_rotate_right = |x: u64, y: u64| x.rotate_right(y as u32);
|
||||
let clear_right_shift = |x, y| x >> y;
|
||||
let clear_max = |x: u64, y: u64| max(x, y);
|
||||
let clear_min = |x: u64, y: u64| min(x, y);
|
||||
//let clear_rotate_left = |x: u64, y: u64| x.rotate_left(y as u32);
|
||||
//let clear_left_shift = |x, y| x << y;
|
||||
//// Warning this rotate definition only works with 64-bit ciphertexts
|
||||
//let clear_rotate_right = |x: u64, y: u64| x.rotate_right(y as u32);
|
||||
//let clear_right_shift = |x, y| x >> y;
|
||||
//let clear_max = |x: u64, y: u64| max(x, y);
|
||||
//let clear_min = |x: u64, y: u64| min(x, y);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut binary_ops: Vec<(BinaryOpExecutor, &dyn Fn(u64, u64) -> u64, String)> = vec![
|
||||
(Box::new(add_executor), &clear_add, "add".to_string()),
|
||||
(Box::new(sub_executor), &clear_sub, "sub".to_string()),
|
||||
//(Box::new(add_executor), &clear_add, "add".to_string()),
|
||||
//(Box::new(sub_executor), &clear_sub, "sub".to_string()),
|
||||
(
|
||||
Box::new(bitwise_and_executor),
|
||||
&clear_bitwise_and,
|
||||
@@ -66,28 +66,28 @@ where
|
||||
"bitxor".to_string(),
|
||||
),
|
||||
(Box::new(mul_executor), &clear_mul, "mul".to_string()),
|
||||
(
|
||||
Box::new(rotate_left_executor),
|
||||
&clear_rotate_left,
|
||||
"rotate left".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(left_shift_executor),
|
||||
&clear_left_shift,
|
||||
"left shift".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(rotate_right_executor),
|
||||
&clear_rotate_right,
|
||||
"rotate right".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(right_shift_executor),
|
||||
&clear_right_shift,
|
||||
"right shift".to_string(),
|
||||
),
|
||||
(Box::new(max_executor), &clear_max, "max".to_string()),
|
||||
(Box::new(min_executor), &clear_min, "min".to_string()),
|
||||
//(
|
||||
// Box::new(rotate_left_executor),
|
||||
// &clear_rotate_left,
|
||||
// "rotate left".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(left_shift_executor),
|
||||
// &clear_left_shift,
|
||||
// "left shift".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(rotate_right_executor),
|
||||
// &clear_rotate_right,
|
||||
// "rotate right".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(right_shift_executor),
|
||||
// &clear_right_shift,
|
||||
// "right shift".to_string(),
|
||||
//),
|
||||
//(Box::new(max_executor), &clear_max, "max".to_string()),
|
||||
//(Box::new(min_executor), &clear_min, "min".to_string()),
|
||||
];
|
||||
|
||||
// Unary Ops Executors
|
||||
@@ -115,8 +115,8 @@ where
|
||||
];
|
||||
|
||||
// Scalar binary Ops Executors
|
||||
let scalar_add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_add);
|
||||
let scalar_sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_sub);
|
||||
//let scalar_add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_add);
|
||||
//let scalar_sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_sub);
|
||||
let scalar_bitwise_and_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitand);
|
||||
let scalar_bitwise_or_executor =
|
||||
@@ -124,27 +124,27 @@ where
|
||||
let scalar_bitwise_xor_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitxor);
|
||||
let scalar_mul_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_mul);
|
||||
let scalar_rotate_left_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_left);
|
||||
let scalar_left_shift_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_left_shift);
|
||||
let scalar_rotate_right_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_right);
|
||||
let scalar_right_shift_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_right_shift);
|
||||
//let scalar_rotate_left_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_left);
|
||||
//let scalar_left_shift_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_left_shift);
|
||||
//let scalar_rotate_right_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_right);
|
||||
//let scalar_right_shift_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_right_shift);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut scalar_binary_ops: Vec<(ScalarBinaryOpExecutor, &dyn Fn(u64, u64) -> u64, String)> = vec![
|
||||
(
|
||||
Box::new(scalar_add_executor),
|
||||
&clear_add,
|
||||
"scalar add".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_sub_executor),
|
||||
&clear_sub,
|
||||
"scalar sub".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(scalar_add_executor),
|
||||
// &clear_add,
|
||||
// "scalar add".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_sub_executor),
|
||||
// &clear_sub,
|
||||
// "scalar sub".to_string(),
|
||||
//),
|
||||
(
|
||||
Box::new(scalar_bitwise_and_executor),
|
||||
&clear_bitwise_and,
|
||||
@@ -165,26 +165,26 @@ where
|
||||
&clear_mul,
|
||||
"scalar mul".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_rotate_left_executor),
|
||||
&clear_rotate_left,
|
||||
"scalar rotate left".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_left_shift_executor),
|
||||
&clear_left_shift,
|
||||
"scalar left shift".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_rotate_right_executor),
|
||||
&clear_rotate_right,
|
||||
"scalar rotate right".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_right_shift_executor),
|
||||
&clear_right_shift,
|
||||
"scalar right shift".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(scalar_rotate_left_executor),
|
||||
// &clear_rotate_left,
|
||||
// "scalar rotate left".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_left_shift_executor),
|
||||
// &clear_left_shift,
|
||||
// "scalar left shift".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_rotate_right_executor),
|
||||
// &clear_rotate_right,
|
||||
// "scalar rotate right".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_right_shift_executor),
|
||||
// &clear_right_shift,
|
||||
// "scalar right shift".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
// Overflowing Ops Executors
|
||||
@@ -249,37 +249,37 @@ where
|
||||
|
||||
// Comparison Ops Executors
|
||||
let gt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::gt);
|
||||
let ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ge);
|
||||
let lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::lt);
|
||||
let le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::le);
|
||||
let eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::eq);
|
||||
let ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ne);
|
||||
//let ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ge);
|
||||
//let lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::lt);
|
||||
//let le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::le);
|
||||
//let eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::eq);
|
||||
//let ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ne);
|
||||
|
||||
// Comparison Ops Clear functions
|
||||
let clear_gt = |x: u64, y: u64| -> bool { x > y };
|
||||
let clear_ge = |x: u64, y: u64| -> bool { x >= y };
|
||||
let clear_lt = |x: u64, y: u64| -> bool { x < y };
|
||||
let clear_le = |x: u64, y: u64| -> bool { x <= y };
|
||||
let clear_eq = |x: u64, y: u64| -> bool { x == y };
|
||||
let clear_ne = |x: u64, y: u64| -> bool { x != y };
|
||||
//let clear_ge = |x: u64, y: u64| -> bool { x >= y };
|
||||
//let clear_lt = |x: u64, y: u64| -> bool { x < y };
|
||||
//let clear_le = |x: u64, y: u64| -> bool { x <= y };
|
||||
//let clear_eq = |x: u64, y: u64| -> bool { x == y };
|
||||
//let clear_ne = |x: u64, y: u64| -> bool { x != y };
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut comparison_ops: Vec<(ComparisonOpExecutor, &dyn Fn(u64, u64) -> bool, String)> = vec![
|
||||
(Box::new(gt_executor), &clear_gt, "gt".to_string()),
|
||||
(Box::new(ge_executor), &clear_ge, "ge".to_string()),
|
||||
(Box::new(lt_executor), &clear_lt, "lt".to_string()),
|
||||
(Box::new(le_executor), &clear_le, "le".to_string()),
|
||||
(Box::new(eq_executor), &clear_eq, "eq".to_string()),
|
||||
(Box::new(ne_executor), &clear_ne, "ne".to_string()),
|
||||
//(Box::new(ge_executor), &clear_ge, "ge".to_string()),
|
||||
//(Box::new(lt_executor), &clear_lt, "lt".to_string()),
|
||||
//(Box::new(le_executor), &clear_le, "le".to_string()),
|
||||
//(Box::new(eq_executor), &clear_eq, "eq".to_string()),
|
||||
//(Box::new(ne_executor), &clear_ne, "ne".to_string()),
|
||||
];
|
||||
|
||||
// Scalar Comparison Ops Executors
|
||||
let scalar_gt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_gt);
|
||||
let scalar_ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ge);
|
||||
let scalar_lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_lt);
|
||||
let scalar_le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_le);
|
||||
let scalar_eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_eq);
|
||||
let scalar_ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ne);
|
||||
//let scalar_ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ge);
|
||||
//let scalar_lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_lt);
|
||||
//let scalar_le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_le);
|
||||
//let scalar_eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_eq);
|
||||
//let scalar_ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ne);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut scalar_comparison_ops: Vec<(
|
||||
@@ -292,31 +292,31 @@ where
|
||||
&clear_gt,
|
||||
"scalar gt".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_ge_executor),
|
||||
&clear_ge,
|
||||
"scalar ge".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_lt_executor),
|
||||
&clear_lt,
|
||||
"scalar lt".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_le_executor),
|
||||
&clear_le,
|
||||
"scalar le".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_eq_executor),
|
||||
&clear_eq,
|
||||
"scalar eq".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_ne_executor),
|
||||
&clear_ne,
|
||||
"scalar ne".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(scalar_ge_executor),
|
||||
// &clear_ge,
|
||||
// "scalar ge".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_lt_executor),
|
||||
// &clear_lt,
|
||||
// "scalar lt".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_le_executor),
|
||||
// &clear_le,
|
||||
// "scalar le".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_eq_executor),
|
||||
// &clear_eq,
|
||||
// "scalar eq".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_ne_executor),
|
||||
// &clear_ne,
|
||||
// "scalar ne".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
// Select Executor
|
||||
|
||||
@@ -19,29 +19,29 @@ where
|
||||
P: Into<TestParameters> + Clone,
|
||||
{
|
||||
// Binary Ops Executors
|
||||
let add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::add);
|
||||
let sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::sub);
|
||||
//let add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::add);
|
||||
//let sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::sub);
|
||||
let bitwise_and_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitand);
|
||||
let bitwise_or_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitor);
|
||||
let bitwise_xor_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::bitxor);
|
||||
let mul_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::mul);
|
||||
let max_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::max);
|
||||
let min_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::min);
|
||||
//let max_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::max);
|
||||
//let min_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::min);
|
||||
|
||||
// Binary Ops Clear functions
|
||||
let clear_add = |x, y| x + y;
|
||||
let clear_sub = |x, y| x - y;
|
||||
//let clear_add = |x, y| x + y;
|
||||
//let clear_sub = |x, y| x - y;
|
||||
let clear_bitwise_and = |x, y| x & y;
|
||||
let clear_bitwise_or = |x, y| x | y;
|
||||
let clear_bitwise_xor = |x, y| x ^ y;
|
||||
let clear_mul = |x, y| x * y;
|
||||
let clear_max = |x: i64, y: i64| max(x, y);
|
||||
let clear_min = |x: i64, y: i64| min(x, y);
|
||||
//let clear_max = |x: i64, y: i64| max(x, y);
|
||||
//let clear_min = |x: i64, y: i64| min(x, y);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut binary_ops: Vec<(SignedBinaryOpExecutor, &dyn Fn(i64, i64) -> i64, String)> = vec![
|
||||
(Box::new(add_executor), &clear_add, "add".to_string()),
|
||||
(Box::new(sub_executor), &clear_sub, "sub".to_string()),
|
||||
//(Box::new(add_executor), &clear_add, "add".to_string()),
|
||||
//(Box::new(sub_executor), &clear_sub, "sub".to_string()),
|
||||
(
|
||||
Box::new(bitwise_and_executor),
|
||||
&clear_bitwise_and,
|
||||
@@ -58,14 +58,14 @@ where
|
||||
"bitxor".to_string(),
|
||||
),
|
||||
(Box::new(mul_executor), &clear_mul, "mul".to_string()),
|
||||
(Box::new(max_executor), &clear_max, "max".to_string()),
|
||||
(Box::new(min_executor), &clear_min, "min".to_string()),
|
||||
//(Box::new(max_executor), &clear_max, "max".to_string()),
|
||||
//(Box::new(min_executor), &clear_min, "min".to_string()),
|
||||
];
|
||||
|
||||
let rotate_left_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_left);
|
||||
let left_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::left_shift);
|
||||
let rotate_right_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_right);
|
||||
let right_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::right_shift);
|
||||
//let left_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::left_shift);
|
||||
//let rotate_right_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::rotate_right);
|
||||
//let right_shift_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::right_shift);
|
||||
// Warning this rotate definition only works with 64-bit ciphertexts
|
||||
let clear_rotate_left = |x: i64, y: u64| x.rotate_left(y as u32);
|
||||
let clear_left_shift = |x: i64, y: u64| x << y;
|
||||
@@ -83,21 +83,21 @@ where
|
||||
&clear_rotate_left,
|
||||
"rotate left".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(left_shift_executor),
|
||||
&clear_left_shift,
|
||||
"left shift".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(rotate_right_executor),
|
||||
&clear_rotate_right,
|
||||
"rotate right".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(right_shift_executor),
|
||||
&clear_right_shift,
|
||||
"right shift".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(left_shift_executor),
|
||||
// &clear_left_shift,
|
||||
// "left shift".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(rotate_right_executor),
|
||||
// &clear_rotate_right,
|
||||
// "rotate right".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(right_shift_executor),
|
||||
// &clear_right_shift,
|
||||
// "right shift".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
// Unary Ops Executors
|
||||
@@ -125,8 +125,8 @@ where
|
||||
];
|
||||
|
||||
// Scalar binary Ops Executors
|
||||
let scalar_add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_add);
|
||||
let scalar_sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_sub);
|
||||
//let scalar_add_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_add);
|
||||
//let scalar_sub_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_sub);
|
||||
let scalar_bitwise_and_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_bitand);
|
||||
let scalar_bitwise_or_executor =
|
||||
@@ -141,16 +141,16 @@ where
|
||||
&dyn Fn(i64, i64) -> i64,
|
||||
String,
|
||||
)> = vec![
|
||||
(
|
||||
Box::new(scalar_add_executor),
|
||||
&clear_add,
|
||||
"scalar add".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_sub_executor),
|
||||
&clear_sub,
|
||||
"scalar sub".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(scalar_add_executor),
|
||||
// &clear_add,
|
||||
// "scalar add".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_sub_executor),
|
||||
// &clear_sub,
|
||||
// "scalar sub".to_string(),
|
||||
//),
|
||||
(
|
||||
Box::new(scalar_bitwise_and_executor),
|
||||
&clear_bitwise_and,
|
||||
@@ -175,12 +175,12 @@ where
|
||||
|
||||
let scalar_rotate_left_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_left);
|
||||
let scalar_left_shift_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_left_shift);
|
||||
let scalar_rotate_right_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_right);
|
||||
let scalar_right_shift_executor =
|
||||
GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_right_shift);
|
||||
//let scalar_left_shift_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_left_shift);
|
||||
//let scalar_rotate_right_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_rotate_right);
|
||||
//let scalar_right_shift_executor =
|
||||
// GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_right_shift);
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut scalar_shift_rotate_ops: Vec<(
|
||||
SignedScalarShiftRotateExecutor,
|
||||
@@ -192,21 +192,21 @@ where
|
||||
&clear_rotate_left,
|
||||
"scalar rotate left".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_left_shift_executor),
|
||||
&clear_left_shift,
|
||||
"scalar left shift".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_rotate_right_executor),
|
||||
&clear_rotate_right,
|
||||
"scalar rotate right".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_right_shift_executor),
|
||||
&clear_right_shift,
|
||||
"scalar right shift".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(scalar_left_shift_executor),
|
||||
// &clear_left_shift,
|
||||
// "scalar left shift".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_rotate_right_executor),
|
||||
// &clear_rotate_right,
|
||||
// "scalar rotate right".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_right_shift_executor),
|
||||
// &clear_right_shift,
|
||||
// "scalar right shift".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
// Overflowing Ops Executors
|
||||
@@ -271,11 +271,11 @@ where
|
||||
|
||||
// Comparison Ops Executors
|
||||
let gt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::gt);
|
||||
let ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ge);
|
||||
let lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::lt);
|
||||
let le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::le);
|
||||
let eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::eq);
|
||||
let ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ne);
|
||||
//let ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ge);
|
||||
//let lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::lt);
|
||||
//let le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::le);
|
||||
//let eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::eq);
|
||||
//let ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::ne);
|
||||
|
||||
// Comparison Ops Clear functions
|
||||
let clear_gt = |x: i64, y: i64| -> bool { x > y };
|
||||
@@ -292,20 +292,20 @@ where
|
||||
String,
|
||||
)> = vec![
|
||||
(Box::new(gt_executor), &clear_gt, "gt".to_string()),
|
||||
(Box::new(ge_executor), &clear_ge, "ge".to_string()),
|
||||
(Box::new(lt_executor), &clear_lt, "lt".to_string()),
|
||||
(Box::new(le_executor), &clear_le, "le".to_string()),
|
||||
(Box::new(eq_executor), &clear_eq, "eq".to_string()),
|
||||
(Box::new(ne_executor), &clear_ne, "ne".to_string()),
|
||||
//(Box::new(ge_executor), &clear_ge, "ge".to_string()),
|
||||
//(Box::new(lt_executor), &clear_lt, "lt".to_string()),
|
||||
//(Box::new(le_executor), &clear_le, "le".to_string()),
|
||||
//(Box::new(eq_executor), &clear_eq, "eq".to_string()),
|
||||
//(Box::new(ne_executor), &clear_ne, "ne".to_string()),
|
||||
];
|
||||
|
||||
// Scalar Comparison Ops Executors
|
||||
let scalar_gt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_gt);
|
||||
let scalar_ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ge);
|
||||
let scalar_lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_lt);
|
||||
let scalar_le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_le);
|
||||
let scalar_eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_eq);
|
||||
let scalar_ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ne);
|
||||
//let scalar_ge_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ge);
|
||||
//let scalar_lt_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_lt);
|
||||
//let scalar_le_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_le);
|
||||
//let scalar_eq_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_eq);
|
||||
//let scalar_ne_executor = GpuMultiDeviceFunctionExecutor::new(&CudaServerKey::scalar_ne);
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
let mut scalar_comparison_ops: Vec<(
|
||||
@@ -318,31 +318,31 @@ where
|
||||
&clear_gt,
|
||||
"scalar gt".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_ge_executor),
|
||||
&clear_ge,
|
||||
"scalar ge".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_lt_executor),
|
||||
&clear_lt,
|
||||
"scalar lt".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_le_executor),
|
||||
&clear_le,
|
||||
"scalar le".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_eq_executor),
|
||||
&clear_eq,
|
||||
"scalar eq".to_string(),
|
||||
),
|
||||
(
|
||||
Box::new(scalar_ne_executor),
|
||||
&clear_ne,
|
||||
"scalar ne".to_string(),
|
||||
),
|
||||
//(
|
||||
// Box::new(scalar_ge_executor),
|
||||
// &clear_ge,
|
||||
// "scalar ge".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_lt_executor),
|
||||
// &clear_lt,
|
||||
// "scalar lt".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_le_executor),
|
||||
// &clear_le,
|
||||
// "scalar le".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_eq_executor),
|
||||
// &clear_eq,
|
||||
// "scalar eq".to_string(),
|
||||
//),
|
||||
//(
|
||||
// Box::new(scalar_ne_executor),
|
||||
// &clear_ne,
|
||||
// "scalar ne".to_string(),
|
||||
//),
|
||||
];
|
||||
|
||||
// Select Executor
|
||||
|
||||
@@ -79,6 +79,40 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<&'a SignedRadixCiphertext, (SignedRadixCiphertext, BooleanBlock)>
|
||||
for GpuFunctionExecutor<F>
|
||||
where
|
||||
F: Fn(
|
||||
&CudaServerKey,
|
||||
&CudaSignedRadixCiphertext,
|
||||
&CudaStreams,
|
||||
) -> (CudaSignedRadixCiphertext, CudaBooleanBlock),
|
||||
{
|
||||
fn setup(&mut self, cks: &RadixClientKey, sks: Arc<ServerKey>) {
|
||||
self.setup_from_keys(cks, &sks);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
input: &'a SignedRadixCiphertext,
|
||||
) -> (SignedRadixCiphertext, BooleanBlock) {
|
||||
let context = self
|
||||
.context
|
||||
.as_ref()
|
||||
.expect("setup was not properly called");
|
||||
|
||||
let d_ctxt =
|
||||
CudaSignedRadixCiphertext::from_signed_radix_ciphertext(input, &context.streams);
|
||||
|
||||
let (gpu_result_0, gpu_result_1) = (self.func)(&context.sks, &d_ctxt, &context.streams);
|
||||
|
||||
(
|
||||
gpu_result_0.to_signed_radix_ciphertext(&context.streams),
|
||||
gpu_result_1.to_boolean_block(&context.streams),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> FunctionExecutor<&'a SignedRadixCiphertext, (RadixCiphertext, BooleanBlock)>
|
||||
for GpuFunctionExecutor<F>
|
||||
where
|
||||
|
||||
@@ -3,13 +3,14 @@ use crate::integer::gpu::server_key::radix::tests_unsigned::{
|
||||
};
|
||||
use crate::integer::gpu::CudaServerKey;
|
||||
use crate::integer::server_key::radix_parallel::tests_signed::test_neg::{
|
||||
signed_default_neg_test, signed_unchecked_neg_test,
|
||||
default_overflowing_neg_test, signed_default_neg_test, signed_unchecked_neg_test,
|
||||
};
|
||||
use crate::shortint::parameters::test_params::*;
|
||||
use crate::shortint::parameters::*;
|
||||
|
||||
create_gpu_parameterized_test!(integer_unchecked_neg);
|
||||
create_gpu_parameterized_test!(integer_neg);
|
||||
create_gpu_parameterized_test!(integer_overflowing_neg);
|
||||
|
||||
fn integer_unchecked_neg<P>(param: P)
|
||||
where
|
||||
@@ -26,3 +27,8 @@ where
|
||||
let executor = GpuFunctionExecutor::new(&CudaServerKey::neg);
|
||||
signed_default_neg_test(param, executor);
|
||||
}
|
||||
|
||||
fn integer_overflowing_neg(param: impl Into<TestParameters>) {
|
||||
let executor = GpuFunctionExecutor::new(&CudaServerKey::overflowing_neg);
|
||||
default_overflowing_neg_test(param, executor);
|
||||
}
|
||||
|
||||
@@ -5,11 +5,13 @@ use crate::integer::gpu::CudaServerKey;
|
||||
use crate::integer::server_key::radix_parallel::tests_cases_unsigned::{
|
||||
default_neg_test, unchecked_neg_test,
|
||||
};
|
||||
use crate::integer::server_key::radix_parallel::tests_unsigned::test_neg::default_overflowing_neg_test;
|
||||
use crate::shortint::parameters::test_params::*;
|
||||
use crate::shortint::parameters::*;
|
||||
|
||||
create_gpu_parameterized_test!(integer_unchecked_neg);
|
||||
create_gpu_parameterized_test!(integer_neg);
|
||||
create_gpu_parameterized_test!(integer_overflowing_neg);
|
||||
|
||||
fn integer_unchecked_neg<P>(param: P)
|
||||
where
|
||||
@@ -26,3 +28,8 @@ where
|
||||
let executor = GpuFunctionExecutor::new(&CudaServerKey::neg);
|
||||
default_neg_test(param, executor);
|
||||
}
|
||||
|
||||
fn integer_overflowing_neg(param: impl Into<TestParameters>) {
|
||||
let executor = GpuFunctionExecutor::new(&CudaServerKey::overflowing_neg);
|
||||
default_overflowing_neg_test(param, executor);
|
||||
}
|
||||
|
||||
@@ -486,7 +486,7 @@ impl ServerKey {
|
||||
|
||||
/// Computes the result of `lhs += rhs + input_carry`
|
||||
///
|
||||
/// This will selects what seems to be best algorithm to propagate carries
|
||||
/// This will select what seems to be the best algorithm to propagate carries
|
||||
/// (fully parallel vs sequential) by looking at the number of blocks and
|
||||
/// number of threads.
|
||||
///
|
||||
|
||||
@@ -1,5 +1,5 @@
use crate::integer::ciphertext::IntegerRadixCiphertext;
use crate::integer::ServerKey;
use crate::integer::{BooleanBlock, ServerKey};

impl ServerKey {
/// Homomorphically computes the opposite of a ciphertext encrypting an integer message.
@@ -75,11 +75,32 @@ impl ServerKey {
/// assert_eq!(255, dec);
/// ```
pub fn neg_parallelized<T>(&self, ctxt: &T) -> T
where
T: IntegerRadixCiphertext,
{
if ctxt.block_carries_are_empty() {
let mut result = self.bitnot(ctxt);
self.scalar_add_assign_parallelized(&mut result, 1);
result
} else if self.is_neg_possible(ctxt).is_ok() {
let mut result = self.unchecked_neg(ctxt);
self.full_propagate_parallelized(&mut result);
result
} else {
let mut cleaned_ctxt = ctxt.clone();
self.full_propagate_parallelized(&mut cleaned_ctxt);
self.neg_parallelized(&cleaned_ctxt)
}
}
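The carries-empty branch above negates through the two's complement identity -x = !x + 1, which is what lets it replace `unchecked_neg` plus a full carry propagation with a bitnot and a scalar add. The identity on clear wrapping integers, for reference:

```rust
fn main() {
    // Two's complement: -x == !x + 1 under wrapping arithmetic,
    // the identity the bitnot + scalar-add path relies on.
    for x in [0u8, 1, 7, 200, u8::MAX] {
        assert_eq!(x.wrapping_neg(), (!x).wrapping_add(1));
    }
}
```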

pub fn overflowing_neg_parallelized<T>(&self, ctxt: &T) -> (T, BooleanBlock)
where
T: IntegerRadixCiphertext,
{
let mut tmp_ctxt;

// As we want to compute the overflow we need a truly clean state
// And so we cannot avoid the full_propagate like we may in non overflowing_block
let ct = if ctxt.block_carries_are_empty() {
ctxt
} else {
@@ -88,8 +109,19 @@ impl ServerKey {
&tmp_ctxt
};

let mut ct = self.unchecked_neg(ct);
self.full_propagate_parallelized(&mut ct);
ct
let mut result = self.bitnot(ct);
let mut overflowed = self.overflowing_scalar_add_assign_parallelized(&mut result, 1);

if !T::IS_SIGNED {
// Computing overflow of !input + 1 only really works for signed integers
// However for unsigned integers we can still get the correct result as the only
// case where `!input + 1` overflows, is when `!input` == MAX (0b111..111) =>
// `input == 0`.
// And in unsigned integers, the only case that is not an overflow is -0,
// so we can just invert the result
self.boolean_bitnot_assign(&mut overflowed);
}

(result, overflowed)
}
}
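The comments above argue that the overflow of `!x + 1` matches the overflow of `-x` for signed integers, while for unsigned integers it fires exactly in the one case (`x == 0`) where negation does not overflow, hence the final flag inversion. A small clear-value check of both claims:

```rust
fn main() {
    // Signed: the carry out of !x + 1 coincides with the overflow of -x.
    for x in [i8::MIN, -1, 0, 1, i8::MAX] {
        let (_, add_overflowed) = (!x).overflowing_add(1);
        assert_eq!(add_overflowed, x.overflowing_neg().1);
    }

    // Unsigned: !x + 1 overflows only for x == 0, which is precisely the one
    // case where -x does NOT overflow, so the flag must be inverted.
    for x in [0u8, 1, 42, u8::MAX] {
        let (_, add_overflowed) = (!x).overflowing_add(1);
        assert_eq!(!add_overflowed, x.overflowing_neg().1);
    }
}
```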
|
||||
|
||||
@@ -3,4 +3,4 @@ pub(crate) mod test_random_op_sequence;
|
||||
pub(crate) mod test_signed_erc20;
|
||||
pub(crate) mod test_signed_random_op_sequence;
|
||||
pub(crate) const NB_CTXT_LONG_RUN: usize = 32;
|
||||
pub(crate) const NB_TESTS_LONG_RUN: usize = 20000;
|
||||
pub(crate) const NB_TESTS_LONG_RUN: usize = 200;
|
||||
|
||||
@@ -578,16 +578,19 @@ pub(crate) fn random_op_sequence_test<P>(
"Noise level greater than nominal value on op {fn_name} for block {k}",
)
});
// Determinism check
let res_1 = binary_op_executor.execute((&left_vec[i], &right_vec[i]));
assert_eq!(
res, res_1,
"Determinism check failed on binary op {fn_name} with clear inputs {clear_left} and {clear_right}.",
);
let input_degrees_left: Vec<u64> =
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let input_degrees_right: Vec<u64> =
right_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
// Determinism check
let res_1 = binary_op_executor.execute((&left_vec[i], &right_vec[i]));
assert_eq!(
res, res_1,
"Determinism check failed on binary op {fn_name} with clear inputs {clear_left} and {clear_right} with input degrees {input_degrees_left:?} and {input_degrees_right:?}",
);
println!("Input degrees left: {input_degrees_left:?}, right {input_degrees_right:?}, Output degrees {:?}", output_degrees);
let decrypted_res: u64 = cks.decrypt(&res);
let expected_res: u64 = clear_fn(clear_left, clear_right);

@@ -633,13 +636,13 @@ pub(crate) fn random_op_sequence_test<P>(
"Noise level greater than nominal value on op {fn_name} for block {k}",
)
});
let input_degrees: Vec<u64> = input.blocks.iter().map(|b| b.degree.0).collect();
// Determinism check
let res_1 = unary_op_executor.execute(input);
assert_eq!(
res, res_1,
"Determinism check failed on unary op {fn_name} with clear input {clear_input}.",
"Determinism check failed on unary op {fn_name} with clear input {clear_input} with input degrees {input_degrees:?}.",
);
let input_degrees: Vec<u64> = input.blocks.iter().map(|b| b.degree.0).collect();
let decrypted_res: u64 = cks.decrypt(&res);
let expected_res: u64 = clear_fn(clear_input);
if i % 2 == 0 {
@@ -675,16 +678,16 @@ pub(crate) fn random_op_sequence_test<P>(
"Noise level greater than nominal value on op {fn_name} for block {k}",
)
});
let input_degrees_left: Vec<u64> =
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
// Determinism check
let res_1 = scalar_binary_op_executor.execute((&left_vec[i], clear_right_vec[i]));
assert_eq!(
res, res_1,
"Determinism check failed on binary op {fn_name} with clear inputs {clear_left} and {clear_right}.",
"Determinism check failed on binary op {fn_name} with clear inputs {clear_left} and {clear_right} with input degrees {input_degrees_left:?}.",
);
let decrypted_res: u64 = cks.decrypt(&res);
let expected_res: u64 = clear_fn(clear_left, clear_right);
let input_degrees_left: Vec<u64> =
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();

if i % 2 == 0 {
left_vec[j] = res.clone();
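
The hunks above (and their signed counterparts below) move the determinism check after the degree collection so a failing assert can report the per-block degrees of its inputs. A minimal, self-contained sketch of that pattern; `Block` and `Ct` here are stand-ins for the tfhe-rs radix types, not the real API.

#[derive(Clone, Debug, PartialEq)]
struct Block {
    degree: u64,
}

#[derive(Clone, Debug, PartialEq)]
struct Ct {
    blocks: Vec<Block>,
}

fn determinism_check(op: impl Fn(&Ct) -> Ct, input: &Ct, first: &Ct, name: &str) {
    // Capture the degrees before re-executing so the failure message can include them.
    let input_degrees: Vec<u64> = input.blocks.iter().map(|b| b.degree).collect();
    let second = op(input);
    assert_eq!(
        first, &second,
        "Determinism check failed on {name} with input degrees {input_degrees:?}",
    );
    let output_degrees: Vec<u64> = second.blocks.iter().map(|b| b.degree).collect();
    println!("Input degrees {input_degrees:?}, output degrees {output_degrees:?}");
}

fn main() {
    let ct = Ct {
        blocks: vec![Block { degree: 3 }, Block { degree: 1 }],
    };
    let first = ct.clone();
    determinism_check(|c| c.clone(), &ct, &first, "identity");
}
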
@@ -670,16 +670,19 @@ pub(crate) fn signed_random_op_sequence_test<P>(
"Noise level greater than nominal value on op {fn_name} for block {k}",
)
});
// Determinism check
let res_1 = binary_op_executor.execute((&left_vec[i], &right_vec[i]));
assert_eq!(
res, res_1,
"Determinism check failed on binary op {fn_name} with clear inputs {clear_left} and {clear_right}.",
);
let input_degrees_left: Vec<u64> =
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let input_degrees_right: Vec<u64> =
right_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
// Determinism check
let res_1 = binary_op_executor.execute((&left_vec[i], &right_vec[i]));
assert_eq!(
res, res_1,
"Determinism check failed on binary op {fn_name} with clear inputs {clear_left} and {clear_right} with input degrees {input_degrees_left:?} and {input_degrees_right:?}",
);
println!("Input degrees left: {input_degrees_left:?}, right {input_degrees_right:?}, Output degrees {:?}", output_degrees);
let decrypt_signed_res: i64 = cks.decrypt_signed(&res);
let expected_res: i64 = clear_fn(clear_left, clear_right);

@@ -731,6 +734,9 @@ pub(crate) fn signed_random_op_sequence_test<P>(
"Determinism check failed on unary op {fn_name} with clear input {clear_input}.",
);
let input_degrees: Vec<u64> = input.blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees);
let decrypt_signed_res: i64 = cks.decrypt_signed(&res);
let expected_res: i64 = clear_fn(clear_input);
if i % 2 == 0 {
@@ -774,6 +780,9 @@ pub(crate) fn signed_random_op_sequence_test<P>(
);
let input_degrees_left: Vec<u64> =
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees);
let decrypt_signed_res: i64 = cks.decrypt_signed(&res);
let expected_res: i64 = clear_fn(clear_left, clear_right);

@@ -829,6 +838,9 @@ pub(crate) fn signed_random_op_sequence_test<P>(
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let input_degrees_right: Vec<u64> =
right_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees);
let decrypt_signed_res: i64 = cks.decrypt_signed(&res);
let decrypt_signed_overflow = cks.decrypt_bool(&overflow);
let (expected_res, expected_overflow) = clear_fn(clear_left, clear_right);
@@ -889,6 +901,9 @@ pub(crate) fn signed_random_op_sequence_test<P>(
);
let input_degrees_left: Vec<u64> =
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees);
let decrypt_signed_res: i64 = cks.decrypt_signed(&res);
let decrypt_signed_overflow = cks.decrypt_bool(&overflow);
let (expected_res, expected_overflow) = clear_fn(clear_left, clear_right);
@@ -1020,6 +1035,9 @@ pub(crate) fn signed_random_op_sequence_test<P>(
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let input_degrees_right: Vec<u64> =
right_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees);
let decrypt_signed_res: i64 = cks.decrypt_signed(&res);
let expected_res = clear_fn(clear_bool, clear_left, clear_right);

@@ -1081,6 +1099,12 @@ pub(crate) fn signed_random_op_sequence_test<P>(
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let input_degrees_right: Vec<u64> =
right_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_degrees_q: Vec<u64> =
res_q.blocks.iter().map(|b| b.degree.0).collect();
let output_degrees_r: Vec<u64> =
res_r.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees_q);
println!("Output degrees {:?}", output_degrees_r);
let decrypt_signed_res_q: i64 = cks.decrypt_signed(&res_q);
let decrypt_signed_res_r: i64 = cks.decrypt_signed(&res_r);
let (expected_res_q, expected_res_r) = clear_fn(clear_left, clear_right);
@@ -1147,6 +1171,12 @@ pub(crate) fn signed_random_op_sequence_test<P>(
);
let input_degrees_left: Vec<u64> =
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_q_degrees: Vec<u64> =
res_q.blocks.iter().map(|b| b.degree.0).collect();
let output_r_degrees: Vec<u64> =
res_r.blocks.iter().map(|b| b.degree.0).collect();
println!("Output r degrees {:?}", output_r_degrees);
println!("Output q degrees {:?}", output_q_degrees);
let decrypt_signed_res_q: i64 = cks.decrypt_signed(&res_q);
let decrypt_signed_res_r: i64 = cks.decrypt_signed(&res_r);
let (expected_res_q, expected_res_r) = clear_fn(clear_left, clear_right);
@@ -1205,6 +1235,9 @@ pub(crate) fn signed_random_op_sequence_test<P>(
"Determinism check failed on op {fn_name} with clear input {clear_input}.",
);
let input_degrees: Vec<u64> = input.blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees);
let cast_res = sks.cast_to_signed(res, NB_CTXT_LONG_RUN);
let decrypt_signed_res: i64 = cks.decrypt_signed(&cast_res);
let expected_res = clear_fn(clear_input) as i64;
@@ -1252,6 +1285,9 @@ pub(crate) fn signed_random_op_sequence_test<P>(
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let input_degrees_right: Vec<u64> =
unsigned_right.blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees);
let decrypt_signed_res: i64 = cks.decrypt_signed(&res);
let expected_res: i64 = clear_fn(clear_left, clear_right as u64);

@@ -1297,6 +1333,9 @@ pub(crate) fn signed_random_op_sequence_test<P>(
);
let input_degrees_left: Vec<u64> =
left_vec[i].blocks.iter().map(|b| b.degree.0).collect();
let output_degrees: Vec<u64> =
res.blocks.iter().map(|b| b.degree.0).collect();
println!("Output degrees {:?}", output_degrees);
let decrypt_signed_res: i64 = cks.decrypt_signed(&res);
let expected_res: i64 = clear_fn(clear_left, clear_right as u64);
@@ -4,10 +4,12 @@ use crate::integer::server_key::radix_parallel::tests_signed::{
create_iterator_of_signed_random_pairs, signed_neg_under_modulus, NB_CTXT,
};
use crate::integer::server_key::radix_parallel::tests_unsigned::{
nb_tests_smaller_for_params, nb_unchecked_tests_for_params, CpuFunctionExecutor,
nb_tests_smaller_for_params, nb_unchecked_tests_for_params, CpuFunctionExecutor, MAX_NB_CTXT,
};
use crate::integer::tests::create_parameterized_test;
use crate::integer::{IntegerKeyKind, RadixClientKey, ServerKey, SignedRadixCiphertext};
use crate::integer::{
BooleanBlock, IntegerKeyKind, RadixClientKey, ServerKey, SignedRadixCiphertext,
};
#[cfg(tarpaulin)]
use crate::shortint::parameters::coverage_parameters::*;
use crate::shortint::parameters::test_params::*;
@@ -18,6 +20,7 @@ use std::sync::Arc;
create_parameterized_test!(integer_signed_unchecked_neg);
create_parameterized_test!(integer_signed_smart_neg);
create_parameterized_test!(integer_signed_default_neg);
create_parameterized_test!(integer_signed_default_overflowing_neg);

fn integer_signed_unchecked_neg<P>(param: P)
where
@@ -43,6 +46,14 @@ where
signed_default_neg_test(param, executor);
}

fn integer_signed_default_overflowing_neg<P>(param: P)
where
P: Into<TestParameters>,
{
let executor = CpuFunctionExecutor::new(&ServerKey::overflowing_neg_parallelized);
default_overflowing_neg_test(param, executor);
}

pub(crate) fn signed_unchecked_neg_test<P, T>(param: P, mut executor: T)
where
P: Into<TestParameters>,
@@ -186,3 +197,98 @@ where
assert_eq!(clear_result, dec);
}
}

pub(crate) fn default_overflowing_neg_test<P, T>(param: P, mut overflowing_neg: T)
where
P: Into<TestParameters>,
T: for<'a> FunctionExecutor<&'a SignedRadixCiphertext, (SignedRadixCiphertext, BooleanBlock)>,
{
let param = param.into();
let nb_tests_smaller = nb_tests_smaller_for_params(param);
let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);
let cks = RadixClientKey::from((
cks,
crate::integer::server_key::radix_parallel::tests_cases_unsigned::NB_CTXT,
));

sks.set_deterministic_pbs_execution(true);
let sks = Arc::new(sks);

let mut rng = rand::thread_rng();

overflowing_neg.setup(&cks, sks);

let cks: crate::integer::ClientKey = cks.into();

for num_blocks in 1..MAX_NB_CTXT {
let modulus = (cks.parameters().message_modulus().0.pow(num_blocks as u32) / 2) as i64;

if modulus <= 1 {
continue;
}

for _ in 0..nb_tests_smaller {
let clear = rng.gen_range(-modulus + 1..modulus);
let ctxt = cks.encrypt_signed_radix(clear, num_blocks);

let (ct_res, flag) = overflowing_neg.execute(&ctxt);

assert_eq!(flag.0.noise_level(), NoiseLevel::NOMINAL);
assert_eq!(flag.0.degree.get(), 1);

let dec_flag = cks.decrypt_bool(&flag);
assert!(
!dec_flag,
"Invalid flag result for overflowing_neg({clear}),\n\
Expected false, got true\n\
num_blocks: {num_blocks}, modulus: {:?}",
-modulus..modulus
);

let dec_ct: i64 = cks.decrypt_signed_radix(&ct_res);
let expected = clear.wrapping_neg() % modulus;
assert_eq!(
dec_ct,
expected,
"Invalid result for overflowing_neg({clear}),\n\
Expected {expected}, got {dec_ct}\n\
num_blocks: {num_blocks}, modulus: {:?}",
-modulus..modulus
);

let (ct_res2, flag2) = overflowing_neg.execute(&ctxt);
assert_eq!(ct_res, ct_res2, "Failed determinism check");
assert_eq!(flag, flag2, "Failed determinism check");
}

// The only case where signed neg does overflow
let ctxt = cks.encrypt_signed_radix(-modulus, num_blocks);

let (ct_res, flag) = overflowing_neg.execute(&ctxt);

assert_eq!(flag.0.noise_level(), NoiseLevel::NOMINAL);
assert_eq!(flag.0.degree.get(), 1);

let dec_flag = cks.decrypt_bool(&flag);
assert!(
dec_flag,
"Invalid flag result for overflowing_neg({}),\n\
Expected true, got false\n\
num_blocks: {num_blocks}, modulus: {:?}",
-modulus,
-modulus..modulus
);

let dec_ct: i64 = cks.decrypt_signed_radix(&ct_res);
assert_eq!(
dec_ct,
-modulus,
"Invalid result for overflowing_neg({}),\n\
Expected {}, got {dec_ct}\n\
num_blocks: {num_blocks}, modulus: {:?}",
-modulus,
-modulus,
-modulus..modulus
);
}
}
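
The tail of the test above exercises the single input whose signed negation overflows, namely -modulus (the most negative representable value). A clear-value analogue where i8::MIN plays the role of -modulus:

fn main() {
    // The minimum value is the only signed input whose negation overflows...
    assert_eq!(i8::MIN.checked_neg(), None);
    // ...and the wrapping result lands back on the minimum itself.
    assert_eq!(i8::MIN.wrapping_neg(), i8::MIN);
    // Every other value negates without overflow.
    for x in (i8::MIN + 1)..=i8::MAX {
        assert!(x.checked_neg().is_some());
    }
    println!("only i8::MIN overflows on negation");
}
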
@@ -4,11 +4,11 @@ use crate::integer::server_key::radix_parallel::tests_cases_unsigned::{FunctionE
use crate::integer::server_key::radix_parallel::tests_unsigned::{
nb_tests_for_params, nb_tests_smaller_for_params,
panic_if_any_block_info_exceeds_max_degree_or_noise, panic_if_any_block_is_not_clean,
panic_if_any_block_values_exceeds_its_degree, unsigned_modulus, CpuFunctionExecutor,
ExpectedDegrees, ExpectedNoiseLevels,
panic_if_any_block_values_exceeds_its_degree, random_non_zero_value, unsigned_modulus,
CpuFunctionExecutor, ExpectedDegrees, ExpectedNoiseLevels, MAX_NB_CTXT,
};
use crate::integer::tests::create_parameterized_test;
use crate::integer::{IntegerKeyKind, RadixCiphertext, RadixClientKey, ServerKey};
use crate::integer::{BooleanBlock, IntegerKeyKind, RadixCiphertext, RadixClientKey, ServerKey};
#[cfg(tarpaulin)]
use crate::shortint::parameters::coverage_parameters::*;
use crate::shortint::parameters::test_params::*;
@@ -18,6 +18,7 @@ use std::sync::Arc;

create_parameterized_test!(integer_smart_neg);
create_parameterized_test!(integer_default_neg);
create_parameterized_test!(integer_default_overflowing_neg);

fn integer_smart_neg<P>(param: P)
where
@@ -35,6 +36,14 @@ where
default_neg_test(param, executor);
}

fn integer_default_overflowing_neg<P>(param: P)
where
P: Into<TestParameters>,
{
let executor = CpuFunctionExecutor::new(&ServerKey::overflowing_neg_parallelized);
default_overflowing_neg_test(param, executor);
}

impl ExpectedDegrees {
fn after_unchecked_neg(&mut self, lhs: &RadixCiphertext) -> &Self {
self.set_with(NegatedDegreeIter::new(
@@ -165,7 +174,7 @@ where
// Default Tests
//=============================================================================

pub(crate) fn default_neg_test<P, T>(param: P, mut executor: T)
pub(crate) fn default_neg_test<P, T>(param: P, mut neg: T)
where
P: Into<TestParameters>,
T: for<'a> FunctionExecutor<&'a RadixCiphertext, RadixCiphertext>,
@@ -175,28 +184,132 @@ where
let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);
let cks = RadixClientKey::from((cks, NB_CTXT));

sks.set_deterministic_pbs_execution(true);
let sks = Arc::new(sks.clone());

let mut rng = rand::thread_rng();

neg.setup(&cks, sks.clone());

let cks: crate::integer::ClientKey = cks.into();

for num_blocks in 1..MAX_NB_CTXT {
let modulus = unsigned_modulus(cks.parameters().message_modulus(), num_blocks as u32);

for _ in 0..nb_tests_smaller {
let mut clear = rng.gen_range(0..modulus);
let mut ctxt = cks.encrypt_radix(clear, num_blocks);

let ct_res = neg.execute(&ctxt);
panic_if_any_block_is_not_clean(&ct_res, &cks);

let dec_ct: u64 = cks.decrypt_radix(&ct_res);
let expected = clear.wrapping_neg() % modulus;
assert_eq!(
dec_ct, expected,
"Invalid result for neg({clear}),\n\
Expected {expected}, got {dec_ct}\n\
num_blocks: {num_blocks}, modulus: {modulus}"
);

let ct_res2 = neg.execute(&ctxt);
assert_eq!(ct_res, ct_res2, "Failed determinism check");

// Test with non clean carries
let random_non_zero = random_non_zero_value(&mut rng, modulus);
sks.unchecked_scalar_add_assign(&mut ctxt, random_non_zero);
clear = clear.wrapping_add(random_non_zero) % modulus;

let ct_res = neg.execute(&ctxt);
panic_if_any_block_is_not_clean(&ct_res, &cks);

let dec_ct: u64 = cks.decrypt_radix(&ct_res);
let expected = clear.wrapping_neg() % modulus;
assert_eq!(
dec_ct, expected,
"Invalid result for neg({clear}),\n\
Expected {expected}, got {dec_ct}\n\
num_blocks: {num_blocks}, modulus: {modulus}"
);
let ct_res2 = neg.execute(&ctxt);
assert_eq!(ct_res, ct_res2, "Failed determinism check");
}
}
}

pub(crate) fn default_overflowing_neg_test<P, T>(param: P, mut overflowing_neg: T)
where
P: Into<TestParameters>,
T: for<'a> FunctionExecutor<&'a RadixCiphertext, (RadixCiphertext, BooleanBlock)>,
{
let param = param.into();
let nb_tests_smaller = nb_tests_smaller_for_params(param);
let (cks, mut sks) = KEY_CACHE.get_from_params(param, IntegerKeyKind::Radix);
let cks = RadixClientKey::from((cks, NB_CTXT));

sks.set_deterministic_pbs_execution(true);
let sks = Arc::new(sks);

let mut rng = rand::thread_rng();

let modulus = unsigned_modulus(cks.parameters().message_modulus(), NB_CTXT as u32);
overflowing_neg.setup(&cks, sks);

executor.setup(&cks, sks);
let cks: crate::integer::ClientKey = cks.into();

for _ in 0..nb_tests_smaller {
let clear = rng.gen::<u64>() % modulus;
for num_blocks in 1..MAX_NB_CTXT {
let modulus = unsigned_modulus(cks.parameters().message_modulus(), num_blocks as u32);

let ctxt = cks.encrypt(clear);
panic_if_any_block_is_not_clean(&ctxt, &cks);
for _ in 0..nb_tests_smaller {
let clear = rng.gen_range(1..modulus);
let ctxt = cks.encrypt_radix(clear, num_blocks);

let ct_res = executor.execute(&ctxt);
let tmp = executor.execute(&ctxt);
assert!(ct_res.block_carries_are_empty());
assert_eq!(ct_res, tmp);
let (ct_res, flag) = overflowing_neg.execute(&ctxt);

let dec: u64 = cks.decrypt(&ct_res);
let clear_result = clear.wrapping_neg() % modulus;
assert_eq!(clear_result, dec);
panic_if_any_block_is_not_clean(&ct_res, &cks);
assert_eq!(flag.0.noise_level(), NoiseLevel::NOMINAL);
assert_eq!(flag.0.degree.get(), 1);

let dec_flag = cks.decrypt_bool(&flag);
assert!(
dec_flag,
"Invalid value for overflowing_neg flag, expected true, got false"
);

let dec_ct: u64 = cks.decrypt_radix(&ct_res);
let expected = clear.wrapping_neg() % modulus;
assert_eq!(
dec_ct, expected,
"Invalid result for overflowing_neg({clear}),\n\
Expected {expected}, got {dec_ct}\n\
num_blocks: {num_blocks}, modulus: {modulus}"
);

let (ct_res2, flag2) = overflowing_neg.execute(&ctxt);
assert_eq!(ct_res, ct_res2, "Failed determinism check");
assert_eq!(flag, flag2, "Failed determinism check");
}

// The only case where unsigned neg does not overflow
let ctxt = cks.encrypt_radix(0u32, num_blocks);

let (ct_res, flag) = overflowing_neg.execute(&ctxt);

panic_if_any_block_is_not_clean(&ct_res, &cks);
assert_eq!(flag.0.noise_level(), NoiseLevel::NOMINAL);
assert_eq!(flag.0.degree.get(), 1);

let dec_flag = cks.decrypt_bool(&flag);
assert!(
!dec_flag,
"Invalid value for overflowing_neg flag, expected false, got true"
);

let dec_ct: u64 = cks.decrypt_radix(&ct_res);
assert_eq!(
dec_ct, 0,
"Invalid result for overflowing_neg(0),\n\
Expected 0, got {dec_ct}\n\
num_blocks: {num_blocks}, modulus: {modulus}"
);
}
}
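
Symmetrically, the unsigned test above ends with the one value whose negation does not overflow, zero. The standard library's overflowing_neg on clear integers shows the behaviour the encrypted flag is expected to mirror:

fn main() {
    // Negating zero is the only unsigned case without overflow.
    assert_eq!(0u8.overflowing_neg(), (0, false));
    // Every non-zero value overflows and wraps to 2^8 - x.
    for x in 1u8..=u8::MAX {
        let (wrapped, overflowed) = x.overflowing_neg();
        assert!(overflowed);
        assert_eq!(wrapped, x.wrapping_neg());
    }
    println!("unsigned neg overflows for every non-zero value");
}
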
@@ -118,14 +118,7 @@ pub use shortint::server_key::pbs_stats::*;
/// cbindgen:ignore
mod js_on_wasm_api;

#[cfg(all(
doctest,
feature = "shortint",
feature = "boolean",
feature = "integer",
feature = "zk-pok",
feature = "strings"
))]
#[cfg(doctest)]
mod test_user_docs;

#[cfg(feature = "strings")]
@@ -171,6 +171,7 @@ impl Degree {
pub(crate) fn after_bitxor(self, other: Self) -> Self {
let max = cmp::max(self.0, other.0);
let min = cmp::min(self.0, other.0);
println!("max {max}, min {min}");
let mut result = max;

//Try every possibility to find the worst case
@@ -179,6 +180,7 @@ impl Degree {
result = max ^ i;
}
}
println!("result {result}");

Self(result)
}
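
For context on the values the new println! calls report, here is a clear-value sketch of the worst-case search after_bitxor performs: given two blocks whose values are bounded by the two degrees, it tries every value up to the smaller bound against the larger one to find the largest reachable XOR. The function below is illustrative, not the tfhe-rs API.

fn worst_case_bitxor_degree(d_lhs: u64, d_rhs: u64) -> u64 {
    let max = d_lhs.max(d_rhs);
    let min = d_lhs.min(d_rhs);
    let mut result = max;
    // Try every possibility to find the worst case, mirroring the loop above.
    for i in 0..=min {
        result = result.max(max ^ i);
    }
    result
}

fn main() {
    assert_eq!(worst_case_bitxor_degree(3, 0), 3);
    assert_eq!(worst_case_bitxor_degree(4, 3), 7); // XOR can set the low bits that max leaves clear
    assert_eq!(worst_case_bitxor_degree(3, 3), 3);
}
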
@@ -1,4 +1,4 @@
#[cfg(not(feature = "gpu"))]
#[cfg(not(any(feature = "gpu", feature = "hpu")))]
mod test_cpu_doc {
use doc_comment::doctest;

@@ -253,8 +253,4 @@ mod test_hpu_doc {
"../docs/configuration/hpu_acceleration/run_on_hpu.md",
configuration_hpu_acceleration_run_on_hpu
);
doctest!(
"../docs/configuration/hpu_acceleration/benchmark.md",
configuration_hpu_acceleration_benchmark
);
}