Compare commits

..

1 Commits

Author SHA1 Message Date
Baptiste Roux
51401dcf24 feat(hpu): Add Shift/Rot/Min/Max operation for Hpu in hlapi bench 2025-06-20 09:24:11 +02:00
575 changed files with 8387 additions and 25662 deletions

2
.gitattributes vendored
View File

@@ -1,3 +1 @@
*.hpu filter=lfs diff=lfs merge=lfs -text
*.bcode filter=lfs diff=lfs merge=lfs -text
*.cbor filter=lfs diff=lfs merge=lfs -text

View File

@@ -23,7 +23,6 @@ runs:
echo "${CMAKE_SCRIPT_SHA} cmake-${CMAKE_VERSION}-linux-x86_64.sh" > checksum
sha256sum -c checksum
sudo bash cmake-"${CMAKE_VERSION}"-linux-x86_64.sh --skip-license --prefix=/usr/ --exclude-subdir
sudo apt remove -y unattended-upgrades
sudo apt update
sudo apt install -y cmake-format libclang-dev
env:
@@ -51,13 +50,11 @@ runs:
- name: Export CUDA variables
shell: bash
run: |
find /usr/local -executable -name "nvcc"
CUDA_PATH=/usr/local/cuda-"${CUDA_VERSION}"
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib64:$LD_LIBRARY_PATH";
echo "CUDA_MODULE_LOADER=EAGER";
echo "PATH=$PATH:$CUDA_PATH/bin";
} >> "${GITHUB_ENV}"
{
echo "PATH=$PATH:$CUDA_PATH/bin";
@@ -77,11 +74,6 @@ runs:
env:
GCC_VERSION: ${{ inputs.gcc-version }}
- name: Check setup
shell: bash
run: |
which nvcc
- name: Check device is detected
shell: bash
run: nvidia-smi

View File

@@ -71,26 +71,45 @@ jobs:
with:
toolchain: stable
# Cache key is an aggregated hash of lfs files hashes
- name: Get LFS data sha
id: hash-lfs-data
- name: Use specific data branch
if: ${{ contains(github.event.pull_request.labels.*.name, 'data_PR') }}
env:
PR_BRANCH: ${{ github.head_ref || github.ref_name }}
run: |
SHA=$(git lfs ls-files -l -I utils/tfhe-backward-compat-data | sha256sum | cut -d' ' -f1)
echo "BACKWARD_COMPAT_DATA_BRANCH=${PR_BRANCH}" >> "${GITHUB_ENV}"
- name: Get backward compat branch
id: backward_compat_branch
run: |
BRANCH="$(make backward_compat_branch)"
echo "branch=${BRANCH}" >> "${GITHUB_OUTPUT}"
- name: Get backward compat branch head SHA
id: backward_compat_sha
run: |
SHA=$(git ls-remote "${REPO_URL}" refs/heads/"${BACKWARD_COMPAT_BRANCH}" | awk '{print $1}')
echo "sha=${SHA}" >> "${GITHUB_OUTPUT}"
env:
REPO_URL: "https://github.com/zama-ai/tfhe-backward-compat-data"
BACKWARD_COMPAT_BRANCH: ${{ steps.backward_compat_branch.outputs.branch }}
- name: Retrieve data from cache
id: retrieve-data-cache
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
with:
path: |
utils/tfhe-backward-compat-data/**/*.cbor
utils/tfhe-backward-compat-data/**/*.bcode
key: ${{ steps.hash-lfs-data.outputs.sha }}
path: tests/tfhe-backward-compat-data
key: ${{ steps.backward_compat_branch.outputs.branch }}_${{ steps.backward_compat_sha.outputs.sha }}
- name: Pull test data
- name: Clone test data
if: steps.retrieve-data-cache.outputs.cache-hit != 'true'
run: |
make pull_backward_compat_data
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
repository: zama-ai/tfhe-backward-compat-data
path: tests/tfhe-backward-compat-data
lfs: 'true'
ref: ${{ steps.backward_compat_branch.outputs.branch }}
- name: Run backward compatibility tests
run: |
@@ -101,10 +120,8 @@ jobs:
continue-on-error: true
uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
with:
path: |
utils/tfhe-backward-compat-data/**/*.cbor
utils/tfhe-backward-compat-data/**/*.bcode
key: ${{ steps.hash-lfs-data.outputs.sha }}
path: tests/tfhe-backward-compat-data
key: ${{ steps.backward_compat_branch.outputs.branch }}_${{ steps.backward_compat_sha.outputs.sha }}
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}

View File

@@ -103,7 +103,7 @@ jobs:
name: Unsigned integer tests
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:

View File

@@ -1,113 +0,0 @@
name: Run noise checks on CPU
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
permissions:
contents: read
jobs:
setup-instance:
name: Setup instance (noise-checks)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
steps:
- name: Start remote instance
id: start-remote-instance
if: env.SECRETS_AVAILABLE == 'true'
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
# We want an hpc7a more compute, will be faster
profile: bench
# This instance will be spawned especially for pull-request from forked repository
- name: Start GitHub instance
id: start-github-instance
if: env.SECRETS_AVAILABLE == 'false'
run: |
echo "Cannot run this without secrets"
exit 1
noise-checks:
name: CPU noise checks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1 # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: stable
- name: Run noise checks
run: |
make test_noise_check
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
env:
PR_BASE_URL: ${{ vars.PR_BASE_URL }}
PR_NUMBER: ${{ github.event.pull_request.number }}
- name: Slack Notification
if: ${{ failure() && env.SECRETS_AVAILABLE == 'true' }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Noise checks tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (noise-checks)
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, noise-checks ]
runs-on: ubuntu-latest
steps:
- name: Stop remote instance
id: stop-instance
if: env.SECRETS_AVAILABLE == 'true'
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (noise-checks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -104,7 +104,7 @@ jobs:
name: Signed integer tests
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:

View File

@@ -31,7 +31,6 @@ on:
- ks
- ks_pbs
- integer_zk
- hlapi_noise_squash
op_flavor:
description: "Operations set to run"
type: choice

View File

@@ -192,7 +192,7 @@ jobs:
matrix:
# explicit include-based build matrix, of known valid options
include:
- cuda: "12.8"
- cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs repo
@@ -224,7 +224,7 @@ jobs:
params_type: ${{ fromJSON(needs.prepare-matrix.outputs.params_type) }}
# explicit include-based build matrix, of known valid options
include:
- cuda: "12.8"
- cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}

View File

@@ -100,7 +100,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs repo with tags

View File

@@ -101,7 +101,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs repo with tags

View File

@@ -10,16 +10,37 @@ on:
permissions: {}
jobs:
run-benchmarks-8-h100-sxm5-integer:
name: Run integer benchmarks (8xH100-SXM5)
run-benchmarks-1-h100:
name: Run integer benchmarks (1xH100)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: multi-h100-sxm5
hardware_name: n3-H100x8-SXM5
profile: single-h100
hardware_name: n3-H100x1
command: integer,integer_multi_bit
op_flavor: default
bench_type: latency
all_precisions: true
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
JOB_SECRET: ${{ secrets.JOB_SECRET }}
SLAB_ACTION_TOKEN: ${{ secrets.SLAB_ACTION_TOKEN }}
SLAB_URL: ${{ secrets.SLAB_URL }}
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-2-h100:
name: Run integer benchmarks (2xH100)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: 2-h100
hardware_name: n3-H100x2
command: integer_multi_bit
op_flavor: default
bench_type: both
bench_type: latency
all_precisions: true
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
@@ -31,16 +52,16 @@ jobs:
SLAB_URL: ${{ secrets.SLAB_URL }}
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-8-h100-sxm5-integer-compression:
name: Run integer compression benchmarks (8xH100-SXM5)
run-benchmarks-8-h100:
name: Run integer benchmarks (8xH100)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: multi-h100-sxm5
hardware_name: n3-H100x8-SXM5
command: integer_compression
profile: multi-h100
hardware_name: n3-H100x8
command: integer_multi_bit
op_flavor: default
bench_type: both
bench_type: latency
all_precisions: true
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
@@ -52,37 +73,16 @@ jobs:
SLAB_URL: ${{ secrets.SLAB_URL }}
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-8-h100-sxm5-integer-zk:
name: Run integer zk benchmarks (8xH100-SXM5)
run-benchmarks-l40:
name: Run integer benchmarks (L40)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: multi-h100-sxm5
hardware_name: n3-H100x8-SXM5
command: integer_zk
profile: l40
hardware_name: n3-L40x1
command: integer_multi_bit,integer_compression,pbs,ks
op_flavor: default
bench_type: both
all_precisions: true
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
JOB_SECRET: ${{ secrets.JOB_SECRET }}
SLAB_ACTION_TOKEN: ${{ secrets.SLAB_ACTION_TOKEN }}
SLAB_URL: ${{ secrets.SLAB_URL }}
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-8-h100-sxm5-noise-squash:
name: Run integer zk benchmarks (8xH100-SXM5)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: multi-h100-sxm5
hardware_name: n3-H100x8-SXM5
command: hlapi_noise_squash
op_flavor: default
bench_type: both
bench_type: latency
all_precisions: true
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}

View File

@@ -33,7 +33,6 @@ jobs:
with:
fetch-depth: 0
persist-credentials: 'false'
lfs: true
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Get benchmark details
@@ -62,7 +61,6 @@ jobs:
- name: Run benchmarks
run: |
make pull_hpu_files
make bench_integer_hpu
make bench_hlapi_erc20_hpu

View File

@@ -48,7 +48,7 @@ jobs:
name: Execute FFT benchmarks in EC2
needs: setup-ec2
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:

View File

@@ -48,7 +48,7 @@ jobs:
name: Execute NTT benchmarks in EC2
needs: setup-ec2
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:

View File

@@ -49,14 +49,6 @@ jobs:
mv linelint-linux-amd64 /usr/local/bin/linelint
make check_newline
# This is needed for the ws tests clippy checks
- name: Use specific data branch
if: ${{ contains(github.event.pull_request.labels.*.name, 'data_PR') }}
env:
PR_BRANCH: ${{ github.head_ref || github.ref_name }}
run: |
echo "BACKWARD_COMPAT_DATA_BRANCH=${PR_BRANCH}" >> "${GITHUB_ENV}"
- name: Run pcc checks
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |

View File

@@ -13,7 +13,7 @@ env:
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
permissions:

View File

@@ -13,7 +13,7 @@ env:
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
permissions:

63
.github/workflows/data_pr_close.yml vendored Normal file
View File

@@ -0,0 +1,63 @@
name: Close or Merge corresponding PR on the data repo
# When a PR with the data_PR tag is closed or merged, this will close the corresponding PR in the data repo.
env:
DATA_REPO: zama-ai/tfhe-backward-compat-data
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
PR_BRANCH: ${{ github.head_ref || github.ref_name }}
CLOSE_TYPE: ${{ github.event.pull_request.merged && 'merge' || 'close' }}
# only trigger on pull request closed events
on:
pull_request:
types: [ closed ]
permissions: {}
jobs:
auto_close_job:
if: ${{ contains(github.event.pull_request.labels.*.name, 'data_PR') && github.repository == 'zama-ai/tfhe-rs' }}
runs-on: ubuntu-latest
env:
GH_TOKEN: ${{ secrets.FHE_ACTIONS_TOKEN }} # Needed for gh CLI commands
steps:
- name: Fetch PR number
run: |
PR_NUMBER=$(gh pr view "${PR_BRANCH}" --repo "${DATA_REPO}" --json number | jq '.number')
echo "DATA_REPO_PR_NUMBER=${PR_NUMBER}" >> "${GITHUB_ENV}"
- name: Comment on the PR to indicate the reason of the close
run: |
gh pr comment "${PR_BRANCH}" \
--repo "${DATA_REPO}" \
--body "PR ${CLOSE_TYPE}d because the corresponding PR in main repo was ${CLOSE_TYPE}d: ${REPO}#${EVENT_NUMBER}"
env:
REPO: ${{ github.repository }}
EVENT_NUMBER: ${{ github.event.number }}
- name: Merge the Pull Request in the data repo
if: ${{ github.event.pull_request.merged }}
run: |
gh pr merge "${PR_BRANCH}" \
--repo "${DATA_REPO}" \
--rebase \
--delete-branch
- name: Close the Pull Request in the data repo
if: ${{ !github.event.pull_request.merged }}
run: |
gh pr close "${PR_BRANCH}" \
--repo "${DATA_REPO}" \
--delete-branch
- name: Slack Notification
if: ${{ always() && job.status == 'failure' }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Failed to auto-${{ env.CLOSE_TYPE }} PR on data repo: https://github.com/${{ env.DATA_REPO }}/pull/${{ env.DATA_REPO_PR_NUMBER }}"

View File

@@ -1,148 +0,0 @@
# Compile and test tfhe-cuda-backend on an AWS instance
name: Cuda - Code Validation
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
EXTERNAL_CONTRIBUTION_RUNNER: "gpu_ubuntu-22.04"
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
permissions:
contents: read
jobs:
setup-instance:
name: Setup instance (cuda-tests)
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' ||
(github.event.action == 'labeled' && github.event.label.name == 'approved')
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
steps:
- name: Start remote instance
id: start-remote-instance
if: env.SECRETS_AVAILABLE == 'true'
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: gpu-test
# This instance will be spawned especially for pull-request from forked repository
- name: Start GitHub instance
id: start-github-instance
if: env.SECRETS_AVAILABLE == 'false'
run: |
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA Code Validation tests
needs: [ setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
gcc-version: ${{ matrix.gcc }}
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Find tools
run: |
find /usr -executable -name "compute-sanitizer"
- name: Install latest stable
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: stable
- name: Run memory sanitizer
run: |
make test_high_level_api_gpu_debug
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${PR_BASE_URL}${PR_NUMBER}), " >> "${GITHUB_ENV}"
env:
PR_BASE_URL: ${{ vars.PR_BASE_URL }}
PR_NUMBER: ${{ github.event.pull_request.number }}
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "GPU code validation tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
- name: Stop remote instance
id: stop-instance
if: env.SECRETS_AVAILABLE == 'true'
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -122,7 +122,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -107,7 +107,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -62,7 +62,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -109,7 +109,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs
@@ -137,7 +137,7 @@ jobs:
# No need to test core_crypto and classic PBS in integer since it's already tested on single GPU.
- name: Run multi-bit CUDA integer tests
run: |
BIG_TESTS_INSTANCE=TRUE NO_BIG_PARAMS_GPU=TRUE make test_integer_multi_bit_gpu_ci
BIG_TESTS_INSTANCE=TRUE make test_integer_multi_bit_gpu_ci
- name: Run user docs tests
run: |

View File

@@ -11,7 +11,6 @@ env:
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
IS_PR: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
@@ -19,8 +18,6 @@ on:
schedule:
# Nightly tests will be triggered each evening 8p.m.
- cron: "0 20 * * *"
pull_request:
permissions:
contents: read
@@ -58,7 +55,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
timeout-minutes: 4320 # 72 hours
steps:
@@ -81,11 +78,7 @@ jobs:
- name: Run tests
run: |
if [[ "${IS_PR}" == "true" ]]; then
make test_integer_short_run_gpu
else
make test_integer_long_run_gpu
fi
make test_integer_long_run_gpu
slack-notify:
name: Slack Notification

View File

@@ -109,7 +109,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -122,7 +122,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -25,6 +25,9 @@ on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
schedule:
# Nightly tests @ 1AM after each work day
- cron: "0 1 * * MON-FRI"
permissions:
contents: read
@@ -110,7 +113,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -109,7 +109,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -122,7 +122,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -25,6 +25,9 @@ on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
schedule:
# Nightly tests @ 1AM after each work day
- cron: "0 1 * * MON-FRI"
permissions:
contents: read
@@ -110,7 +113,7 @@ jobs:
matrix:
include:
- os: ubuntu-22.04
cuda: "12.8"
cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs

View File

@@ -13,7 +13,7 @@ env:
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true

View File

@@ -21,7 +21,7 @@ jobs:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: git-sync
uses: valtech-sd/git-sync@e734cfe9485a92e720eac5af8a4555dde5fecf88
uses: wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83
with:
source_repo: "zama-ai/tfhe-rs"
source_branch: "main"

View File

@@ -1,26 +0,0 @@
name: 'Close unverified PRs'
on:
schedule:
- cron: '30 1 * * *'
permissions: {}
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: read
pull-requests: write
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
with:
stale-pr-message: 'This PR is unverified and has been open for 2 days, it will now be closed. If you want to contribute please sign the CLA as indicated by the bot.'
days-before-stale: 2
days-before-close: 0
# We are not interested in suppressing issues so have a currently non existent label
# if we ever accept issues to become stale/closable this label will be the signal for that
only-issue-labels: can-be-auto-closed
# Only unverified PRs are an issue
exempt-pr-labels: cla-signed
# We don't want people commenting to keep an unverified PR
ignore-updates: true

3
.gitignore vendored
View File

@@ -36,6 +36,9 @@ package-lock.json
.env
__pycache__
# Dir used for backward compatibility test data
# First directive is to ignore symlinks
tests/tfhe-backward-compat-data
ci/
# In case someone clones the lattice-estimator locally to verify security

View File

@@ -10,7 +10,6 @@ ignore:
- keys
- coverage
- utils/tfhe-lints/ui/main.stderr
- utils/tfhe-backward-compat-data/**/*.ron # ron files are autogenerated
rules:
# checks if file ends in a newline character

View File

@@ -170,8 +170,6 @@ On the contrary, these changes are *not* data breaking:
* Renaming a type (unless it implements the `Named` trait).
* Adding a variant to the end of an enum.
Historical data from previous TFHE-rs versions are stored inside `utils/tfhe-backward-compat-data`. They are used to check on every PR that backward compatibility has been preserved.
## Example: adding a field
Suppose you want to add an i32 field to a type named `MyType`. The original type is defined as:

View File

@@ -18,7 +18,7 @@ members = [
]
exclude = [
"utils/tfhe-backward-compat-data",
"tests/backward_compatibility_tests",
"utils/tfhe-lints",
"apps/trivium",
]

129
Makefile
View File

@@ -22,7 +22,10 @@ BENCH_TYPE?=latency
BENCH_PARAM_TYPE?=classical
BENCH_PARAMS_SET?=default
NODE_VERSION=22.6
BACKWARD_COMPAT_DATA_DIR=utils/tfhe-backward-compat-data
BACKWARD_COMPAT_DATA_URL=https://github.com/zama-ai/tfhe-backward-compat-data.git
BACKWARD_COMPAT_DATA_BRANCH?=$(shell ./scripts/backward_compat_data_version.py)
BACKWARD_COMPAT_DATA_PROJECT=tfhe-backward-compat-data
BACKWARD_COMPAT_DATA_DIR=$(BACKWARD_COMPAT_DATA_PROJECT)
TFHE_SPEC:=tfhe
WASM_PACK_VERSION="0.13.1"
# We are kind of hacking the cut here, the version cannot contain a quote '"'
@@ -156,23 +159,23 @@ install_tarpaulin: install_rs_build_toolchain
.PHONY: install_cargo_dylint # Install custom tfhe-rs lints
install_cargo_dylint:
cargo install --locked cargo-dylint dylint-link
cargo install cargo-dylint dylint-link
.PHONY: install_typos_checker # Install typos checker
install_typos_checker: install_rs_build_toolchain
@typos --version > /dev/null 2>&1 || \
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install --locked typos-cli || \
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install typos-cli || \
( echo "Unable to install typos-cli, unknown error." && exit 1 )
.PHONY: install_zizmor # Install zizmor workflow security checker
install_zizmor: install_rs_build_toolchain
@zizmor --version > /dev/null 2>&1 || \
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install --locked zizmor --version ~1.9 || \
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install zizmor --version ~1.9 || \
( echo "Unable to install zizmor, unknown error." && exit 1 )
.PHONY: install_cargo_cross # Install cross for big endian tests
.PHONY: install_cargo_cross # Install custom tfhe-rs lints
install_cargo_cross: install_rs_build_toolchain
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install --locked cross
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install cross
.PHONY: setup_venv # Setup Python virtualenv for wasm tests
setup_venv:
@@ -249,9 +252,6 @@ install_mlc: install_rs_build_toolchain
.PHONY: fmt # Format rust code
fmt: install_rs_check_toolchain
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options -C $(BACKWARD_COMPAT_DATA_DIR) fmt
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options -C utils/tfhe-lints fmt
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options -C apps/trivium fmt
.PHONY: fmt_js # Format javascript code
fmt_js: check_nvm_installed
@@ -273,9 +273,6 @@ fmt_c_tests:
.PHONY: check_fmt # Check rust code format
check_fmt: install_rs_check_toolchain
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt --check
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options -C $(BACKWARD_COMPAT_DATA_DIR) fmt --check
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options -C utils/tfhe-lints fmt --check
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options -C apps/trivium fmt --check
.PHONY: check_fmt_c_tests # Check C tests format
check_fmt_c_tests:
@@ -486,22 +483,10 @@ clippy_param_dedup: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
-p param_dedup -- --no-deps -D warnings
.PHONY: clippy_backward_compat_data # Run clippy lints on tfhe-backward-compat-data
clippy_backward_compat_data: install_rs_check_toolchain # the toolchain is selected with toolchain.toml
@# Some old crates are x86 specific, only run in that case
@if uname -a | grep -q x86; then \
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options \
-C $(BACKWARD_COMPAT_DATA_DIR) clippy --all-targets \
-- --no-deps -D warnings; \
else \
echo "Cannot run clippy for backward compat crate on non x86 platform for now."; \
fi
.PHONY: clippy_all # Run all clippy targets
clippy_all: clippy_rustdoc clippy clippy_boolean clippy_shortint clippy_integer clippy_all_targets \
clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core clippy_tfhe_csprng clippy_zk_pok clippy_trivium \
clippy_versionable clippy_tfhe_lints clippy_ws_tests clippy_bench clippy_param_dedup \
clippy_backward_compat_data
clippy_versionable clippy_tfhe_lints clippy_ws_tests clippy_bench clippy_param_dedup
.PHONY: clippy_fast # Run main clippy targets
clippy_fast: clippy_rustdoc clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks \
@@ -517,12 +502,6 @@ clippy_hpu_backend: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
-p tfhe-hpu-backend -- --no-deps -D warnings
.PHONY: clippy_hpu_mockup # Run clippy lints on tfhe-hpu-mockup
clippy_hpu_mockup: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--all-targets \
-p tfhe-hpu-mockup -- --no-deps -D warnings
.PHONY: check_rust_bindings_did_not_change # Check rust bindings are up to date for tfhe-cuda-backend
check_rust_bindings_did_not_change:
cargo build -p tfhe-cuda-backend && "$(MAKE)" fmt_gpu && \
@@ -535,9 +514,6 @@ check_rust_bindings_did_not_change:
tfhe_lints: install_cargo_dylint
RUSTFLAGS="$(RUSTFLAGS)" cargo dylint --all -p tfhe --no-deps -- \
--features=boolean,shortint,integer,strings,zk-pok
RUSTFLAGS="$(RUSTFLAGS)" cargo dylint --all -p tfhe-zk-pok --no-deps -- \
--features=experimental
.PHONY: build_core # Build core_crypto without experimental features
build_core: install_rs_build_toolchain install_rs_check_toolchain
@@ -679,30 +655,6 @@ test_integer_gpu: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --doc --profile $(CARGO_PROFILE) \
--features=integer,gpu -p $(TFHE_SPEC) -- integer::gpu::server_key::
.PHONY: test_integer_gpu_debug # Run the tests of the integer module with Debug flags for CUDA
test_integer_gpu_debug: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile release_lto_off \
--features=integer,gpu-debug -vv -p $(TFHE_SPEC) -- integer::gpu::server_key:: --test-threads=1 --nocapture
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --doc --profile release_lto_off \
--features=integer,gpu-debug -p $(TFHE_SPEC) -- integer::gpu::server_key::
.PHONY: test_high_level_api_gpu_debug # Run the tests of the integer module with Debug flags for CUDA
test_high_level_api_gpu_debug: install_rs_build_toolchain install_cargo_nextest
export RUSTFLAGS="$(RUSTFLAGS)" && \
export CARGO_RS_BUILD_TOOLCHAIN="$(CARGO_RS_BUILD_TOOLCHAIN)" && \
export TFHE_SPEC="$(TFHE_SPEC)" && \
export CARGO_PROFILE="$(CARGO_PROFILE)" && scripts/check_memory_errors.sh
.PHONY: test_integer_hl_test_gpu_check_warnings
test_integer_hl_test_gpu_check_warnings: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build \
--features=integer,internal-keycache,gpu-debug,zk-pok -vv -p $(TFHE_SPEC) &> /tmp/gpu_compile_output
WARNINGS=$$(cat /tmp/gpu_compile_output | grep ": warning:" | grep "\[tfhe-cuda-backend" | grep -v "inline function" || true) && \
if [[ "$${WARNINGS}" != "" ]]; then \
echo "$${WARNINGS}" && exit 1; \
fi
.PHONY: test_integer_long_run_gpu # Run the long run integer tests on the gpu backend
test_integer_long_run_gpu: install_rs_check_toolchain install_cargo_nextest
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
@@ -711,12 +663,6 @@ test_integer_long_run_gpu: install_rs_check_toolchain install_cargo_nextest
--cargo-profile "$(CARGO_PROFILE)" --avx512-support "$(AVX512_SUPPORT)" \
--tfhe-package "$(TFHE_SPEC)" --backend "gpu"
.PHONY: test_integer_short_run_gpu # Run the long run integer tests on the gpu backend
test_integer_short_run_gpu: install_rs_check_toolchain install_cargo_nextest
TFHE_RS_TEST_LONG_TESTS_MINIMAL=TRUE \
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=integer,gpu -p $(TFHE_SPEC) -- integer::gpu::server_key::radix::tests_long_run::test_random_op_sequence integer::gpu::server_key::radix::tests_long_run::test_signed_random_op_sequence --test-threads=1 --nocapture
.PHONY: test_integer_compression
test_integer_compression: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
@@ -927,15 +873,6 @@ test_integer_long_run: install_rs_check_toolchain install_cargo_nextest
--cargo-profile "$(CARGO_PROFILE)" --avx512-support "$(AVX512_SUPPORT)" \
--tfhe-package "$(TFHE_SPEC)"
.PHONY: test_noise_check # Run dedicated noise and pfail check tests
test_noise_check: install_rs_check_toolchain
@# First run the sanity checks to make sure the atomic patterns are correct
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=boolean,shortint,integer,nightly-avx512 -p $(TFHE_SPEC) -- sanity_check
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=boolean,shortint,integer,nightly-avx512 -p $(TFHE_SPEC) -- noise_check \
--test-threads=1
.PHONY: test_safe_serialization # Run the tests for safe serialization
test_safe_serialization: install_rs_build_toolchain install_cargo_nextest
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
@@ -1088,11 +1025,16 @@ test_tfhe_lints: install_cargo_dylint
# Here we use the "patch" functionality of Cargo to make sure the repo used for the data is the same as the one used for the code.
.PHONY: test_backward_compatibility_ci
test_backward_compatibility_ci: install_rs_build_toolchain
TFHE_BACKWARD_COMPAT_DATA_DIR="../$(BACKWARD_COMPAT_DATA_DIR)" RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
TFHE_BACKWARD_COMPAT_DATA_DIR="$(BACKWARD_COMPAT_DATA_DIR)" RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--config "patch.'$(BACKWARD_COMPAT_DATA_URL)'.$(BACKWARD_COMPAT_DATA_PROJECT).path=\"tests/$(BACKWARD_COMPAT_DATA_DIR)\"" \
--features=shortint,integer,zk-pok -p tests test_backward_compatibility -- --nocapture
.PHONY: test_backward_compatibility # Same as test_backward_compatibility_ci but tries to clone the data repo first if needed
test_backward_compatibility: pull_backward_compat_data test_backward_compatibility_ci
test_backward_compatibility: tests/$(BACKWARD_COMPAT_DATA_DIR) test_backward_compatibility_ci
.PHONY: backward_compat_branch # Prints the required backward compatibility branch
backward_compat_branch:
@echo "$(BACKWARD_COMPAT_DATA_BRANCH)"
.PHONY: doc # Build rust doc
doc: install_rs_check_toolchain
@@ -1137,10 +1079,6 @@ check_intra_md_links: install_mlc
check_md_links: install_mlc
mlc --match-file-extension tfhe/docs
.PHONY: check_doc_paths_use_dash # Check paths use "-" instead of "_" in docs for gitbook compatibility
check_doc_paths_use_dash:
python3 ./scripts/check_doc_paths_use_dash.py
.PHONY: check_parameter_export_ok # Checks exported "current" shortint parameter module is correct
check_parameter_export_ok:
python3 ./scripts/check_current_param_export.py
@@ -1498,20 +1436,6 @@ bench_tfhe_zk_pok: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench -p tfhe-zk-pok --
.PHONY: bench_hlapi_noise_squash # Run benchmarks for noise squash operation
bench_hlapi_noise_squash: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi-noise-squash \
--features=integer,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --
.PHONY: bench_hlapi_noise_squash_gpu # Run benchmarks for noise squash operation on GPU
bench_hlapi_noise_squash_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi-noise-squash \
--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --
#
# Utility tools
#
@@ -1569,13 +1493,11 @@ write_params_to_file: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) run \
--example write_params_to_file --features=boolean,shortint,hpu,internal-keycache
.PHONY: pull_backward_compat_data # Pull the data files needed for backward compatibility tests
pull_backward_compat_data:
./scripts/pull_lfs_data.sh $(BACKWARD_COMPAT_DATA_DIR)
.PHONY: clone_backward_compat_data # Clone the data repo needed for backward compatibility tests
clone_backward_compat_data:
./scripts/clone_backward_compat_data.sh $(BACKWARD_COMPAT_DATA_URL) $(BACKWARD_COMPAT_DATA_BRANCH) tests/$(BACKWARD_COMPAT_DATA_DIR)
.PHONY: pull_hpu_files # Pull the hpu files
pull_hpu_files:
./scripts/pull_lfs_data.sh backends/tfhe-hpu-backend/
tests/$(BACKWARD_COMPAT_DATA_DIR): clone_backward_compat_data
#
# Real use case examples
@@ -1601,20 +1523,19 @@ sha256_bool: install_rs_check_toolchain
.PHONY: pcc # pcc stands for pre commit checks (except GPU)
pcc: no_tfhe_typo no_dbg_log check_parameter_export_ok check_fmt check_typos lint_doc \
check_md_docs_are_tested check_intra_md_links check_doc_paths_use_dash \
clippy_all check_compile_tests test_tfhe_lints \
check_md_docs_are_tested check_intra_md_links clippy_all check_compile_tests test_tfhe_lints \
tfhe_lints
.PHONY: pcc_gpu # pcc stands for pre commit checks for GPU compilation
pcc_gpu: check_rust_bindings_did_not_change clippy_rustdoc_gpu \
clippy_gpu clippy_cuda_backend clippy_bench_gpu check_compile_tests_benches_gpu test_integer_hl_test_gpu_check_warnings
clippy_gpu clippy_cuda_backend clippy_bench_gpu check_compile_tests_benches_gpu
.PHONY: pcc_hpu # pcc stands for pre commit checks for HPU compilation
pcc_hpu: clippy_hpu clippy_hpu_backend clippy_hpu_mockup test_integer_hpu_mockup_ci_fast
pcc_hpu: clippy_hpu clippy_hpu_backend test_integer_hpu_mockup_ci_fast
.PHONY: fpcc # pcc stands for pre commit checks, the f stands for fast
fpcc: no_tfhe_typo no_dbg_log check_parameter_export_ok check_fmt check_typos lint_doc \
check_md_docs_are_tested check_intra_md_links check_doc_paths_use_dash clippy_fast check_compile_tests
check_md_docs_are_tested clippy_fast check_compile_tests
.PHONY: conformance # Automatically fix problems that can be fixed
conformance: fix_newline fmt fmt_js

View File

@@ -18,7 +18,6 @@
<a href="https://github.com/zama-ai/tfhe-rs/releases"><img src="https://img.shields.io/github/v/release/zama-ai/tfhe-rs?style=flat-square"></a>
<a href="LICENSE"><img src="https://img.shields.io/badge/License-BSD--3--Clause--Clear-%23ffb243?style=flat-square"></a>
<a href="https://github.com/zama-ai/bounty-program"><img src="https://img.shields.io/badge/Contribute-Zama%20Bounty%20Program-%23ffd208?style=flat-square"></a>
<a href="https://slsa.dev"><img alt="SLSA 3" src="https://slsa.dev/images/gh-badge-level3.svg" /></a>
</p>
## About
@@ -149,7 +148,7 @@ To run this code, use the following command:
> Note that when running code that uses `TFHE-rs`, it is highly recommended
to run in release mode with cargo's `--release` flag to have the best performances possible.
*Find an example with more explanations in [this part of the documentation](https://docs.zama.ai/tfhe-rs/get-started/quick-start)*
*Find an example with more explanations in [this part of the documentation](https://docs.zama.ai/tfhe-rs/get-started/quick_start)*
<p align="right">
<a href="#about" > ↑ Back to top </a>
@@ -204,7 +203,7 @@ When a new update is published in the Lattice Estimator, we update parameters ac
By default, the parameter sets used in the High-Level API with the x86 CPU backend have a failure probability $\le 2^{128}$ to securely work in the IND-CPA^D model using the algorithmic techniques provided in our code base [1].
If you want to work within the IND-CPA security model, which is less strict than the IND-CPA-D model, the parameter sets can easily be changed and would have slightly better performance. More details can be found in the [TFHE-rs documentation](https://docs.zama.ai/tfhe-rs).
The default parameters used in the High-Level API with the GPU backend are chosen considering the IND-CPA security model, and are selected with a bootstrapping failure probability fixed at $p_{error} \le 2^{-128}$. In particular, it is assumed that the results of decrypted computations are not shared by the secret key owner with any third parties, as such an action can lead to leakage of the secret encryption key. If you are designing an application where decryptions must be shared, you will need to craft custom encryption parameters which are chosen in consideration of the IND-CPA^D security model [2].
The default parameters used in the High-Level API with the GPU backend are chosen considering the IND-CPA security model, and are selected with a bootstrapping failure probability fixed at $p_{error} \le 2^{-64}$. In particular, it is assumed that the results of decrypted computations are not shared by the secret key owner with any third parties, as such an action can lead to leakage of the secret encryption key. If you are designing an application where decryptions must be shared, you will need to craft custom encryption parameters which are chosen in consideration of the IND-CPA^D security model [2].
[1] Bernard, Olivier, et al. "Drifting Towards Better Error Probabilities in Fully Homomorphic Encryption Schemes". https://eprint.iacr.org/2024/1718.pdf

View File

@@ -1,6 +1,6 @@
[package]
name = "tfhe-cuda-backend"
version = "0.11.0"
version = "0.10.0"
edition = "2021"
authors = ["Zama team"]
license = "BSD-3-Clause-Clear"
@@ -19,4 +19,3 @@ bindgen = "0.71"
[features]
experimental-multi-arch = []
profile = []
debug = []

View File

@@ -53,10 +53,6 @@ fn main() {
cmake_config.define("USE_NVTOOLS", "OFF");
}
if cfg!(feature = "debug") {
cmake_config.define("CMAKE_BUILD_TYPE", "Debug");
}
// Build the CMake project
let dest = cmake_config.build();
println!("cargo:rustc-link-search=native={}", dest.display());

View File

@@ -52,8 +52,6 @@ endif()
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
else()
message("Building CUDA backend in ${CMAKE_BUILD_TYPE}")
endif()
# Add OpenMP support
@@ -78,10 +76,8 @@ endif()
add_compile_definitions(CUDA_ARCH=${CUDA_ARCH})
string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LOWERCASE)
# Check if the DEBUG flag is defined
if(CMAKE_BUILD_TYPE_LOWERCASE STREQUAL "debug")
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
# Debug mode
message("Compiling in Debug mode")
add_definitions(-DDEBUG)

View File

@@ -26,7 +26,6 @@ inline void cuda_error(cudaError_t code, const char *file, int line) {
std::abort(); \
}
uint32_t cuda_get_device();
void cuda_set_device(uint32_t gpu_index);
cudaEvent_t cuda_create_event(uint32_t gpu_index);
@@ -50,13 +49,12 @@ void *cuda_malloc(uint64_t size, uint32_t gpu_index);
void *cuda_malloc_with_size_tracking_async(uint64_t size, cudaStream_t stream,
uint32_t gpu_index,
uint64_t &size_tracker,
uint64_t *size_tracker,
bool allocate_gpu_memory);
void *cuda_malloc_async(uint64_t size, cudaStream_t stream, uint32_t gpu_index);
bool cuda_check_valid_malloc(uint64_t size, uint32_t gpu_index);
uint64_t cuda_device_total_memory(uint32_t gpu_index);
void cuda_memcpy_with_size_tracking_async_to_gpu(void *dest, const void *src,
uint64_t size,

View File

@@ -20,7 +20,7 @@ template <typename Torus> struct int_compression {
uint32_t gpu_count, int_radix_params compression_params,
uint32_t num_radix_blocks, uint32_t lwe_per_glwe,
uint32_t storage_log_modulus, bool allocate_gpu_memory,
uint64_t &size_tracker) {
uint64_t *size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
this->compression_params = compression_params;
this->lwe_per_glwe = lwe_per_glwe;
@@ -38,7 +38,7 @@ template <typename Torus> struct int_compression {
lwe_per_glwe * glwe_accumulator_size * sizeof(Torus), streams[0],
gpu_indexes[0], size_tracker, allocate_gpu_memory);
size_tracker += scratch_packing_keyswitch_lwe_list_to_glwe_64(
*size_tracker += scratch_packing_keyswitch_lwe_list_to_glwe_64(
streams[0], gpu_indexes[0], &fp_ks_buffer,
compression_params.small_lwe_dimension,
compression_params.glwe_dimension, compression_params.polynomial_size,
@@ -76,7 +76,7 @@ template <typename Torus> struct int_decompression {
int_radix_params compression_params,
uint32_t num_radix_blocks, uint32_t body_count,
uint32_t storage_log_modulus, bool allocate_gpu_memory,
uint64_t &size_tracker) {
uint64_t *size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
this->encryption_params = encryption_params;
this->compression_params = compression_params;
@@ -124,7 +124,7 @@ template <typename Torus> struct int_decompression {
encryption_params.carry_modulus, decompression_rescale_f,
gpu_memory_allocated);
decompression_rescale_lut->broadcast_lut(streams, gpu_indexes);
decompression_rescale_lut->broadcast_lut(streams, gpu_indexes, 0);
}
void release(cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count) {

View File

@@ -48,34 +48,6 @@ typedef struct {
uint32_t lwe_dimension;
} CudaRadixCiphertextFFI;
typedef struct {
uint64_t const *chosen_multiplier_has_at_least_one_set;
uint64_t const *decomposed_chosen_multiplier;
uint32_t const num_scalars;
uint32_t const active_bits;
uint64_t const shift_pre;
uint32_t const shift_post;
uint32_t const ilog2_chosen_multiplier;
uint32_t const chosen_multiplier_num_bits;
bool const is_chosen_multiplier_zero;
bool const is_abs_chosen_multiplier_one;
bool const is_chosen_multiplier_negative;
bool const is_chosen_multiplier_pow2;
bool const chosen_multiplier_has_more_bits_than_numerator;
// if signed: test if chosen_multiplier >= 2^{num_bits - 1}
bool const is_chosen_multiplier_geq_two_pow_numerator;
uint32_t const ilog2_divisor;
bool const is_divisor_zero;
bool const is_abs_divisor_one;
bool const is_divisor_negative;
bool const is_divisor_pow2;
bool const divisor_has_more_bits_than_numerator;
} CudaScalarDivisorFFI;
uint64_t scratch_cuda_apply_univariate_lut_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, void const *input_lut, uint32_t lwe_dimension,
@@ -423,14 +395,14 @@ uint64_t scratch_cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks_in_radix, uint32_t max_num_radix_in_vec,
uint32_t message_modulus, uint32_t carry_modulus, PBS_TYPE pbs_type,
bool reduce_degrees_for_single_carry_propagation, bool allocate_gpu_memory,
bool allocate_ms_array);
bool allocate_gpu_memory, bool allocate_ms_array);
void cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *radix_lwe_out,
CudaRadixCiphertextFFI *radix_lwe_vec, int8_t *mem_ptr, void *const *bsks,
void *const *ksks,
CudaRadixCiphertextFFI *radix_lwe_vec,
bool reduce_degrees_for_single_carry_propagation, int8_t *mem_ptr,
void *const *bsks, void *const *ksks,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key);
void cleanup_cuda_integer_radix_partial_sum_ciphertexts_vec(
@@ -577,6 +549,27 @@ void trim_radix_blocks_lsb_64(CudaRadixCiphertextFFI *output,
void *const *streams,
uint32_t const *gpu_indexes);
uint64_t scratch_cuda_integer_radix_scalar_mul_high_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, uint32_t num_scalar_bits, bool anticipated_buffer_drop,
bool allocate_gpu_memory, bool allocate_ms_array);
void cuda_integer_radix_scalar_mul_high_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *ct, int8_t *mem_ptr, void *const *ksks,
uint64_t rhs, uint64_t const *decomposed_scalar,
uint64_t const *has_at_least_one_set,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
void *const *bsks, uint32_t num_scalars);
void cleanup_cuda_integer_radix_scalar_mul_high_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr_void);
uint64_t scratch_cuda_apply_noise_squashing_kb(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, uint32_t lwe_dimension, uint32_t glwe_dimension,
@@ -622,26 +615,6 @@ void cleanup_cuda_sub_and_propagate_single_carry(void *const *streams,
uint32_t gpu_count,
int8_t **mem_ptr_void);
uint64_t scratch_cuda_integer_unsigned_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, const CudaScalarDivisorFFI *scalar_divisor_ffi,
bool allocate_gpu_memory, bool allocate_ms_array);
void cuda_integer_unsigned_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *numerator_ct, int8_t *mem_ptr, void *const *bsks,
void *const *ksks,
const CudaModulusSwitchNoiseReductionKeyFFI *ms_noise_reduction_key,
const CudaScalarDivisorFFI *scalar_divisor_ffi);
void cleanup_cuda_integer_unsigned_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr_void);
uint64_t scratch_cuda_extend_radix_with_sign_msb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
@@ -662,74 +635,5 @@ void cleanup_cuda_extend_radix_with_sign_msb_64(void *const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count,
int8_t **mem_ptr_void);
uint64_t scratch_cuda_integer_signed_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, const CudaScalarDivisorFFI *scalar_divisor_ffi,
bool allocate_gpu_memory, bool allocate_ms_array);
void cuda_integer_signed_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *numerator_ct, int8_t *mem_ptr, void *const *bsks,
void *const *ksks,
const CudaModulusSwitchNoiseReductionKeyFFI *ms_noise_reduction_key,
const CudaScalarDivisorFFI *scalar_divisor_ffi, uint32_t numerator_bits);
void cleanup_cuda_integer_signed_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr_void);
uint64_t scratch_integer_unsigned_scalar_div_rem_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, const CudaScalarDivisorFFI *scalar_divisor_ffi,
uint32_t const active_bits_divisor, bool allocate_gpu_memory,
bool allocate_ms_array);
void cuda_integer_unsigned_scalar_div_rem_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *quotient_ct, CudaRadixCiphertextFFI *remainder_ct,
int8_t *mem_ptr, void *const *bsks, void *const *ksks,
const CudaModulusSwitchNoiseReductionKeyFFI *ms_noise_reduction_key,
const CudaScalarDivisorFFI *scalar_divisor_ffi,
uint64_t const *divisor_has_at_least_one_set,
uint64_t const *decomposed_divisor, uint32_t const num_scalars_divisor,
void const *clear_blocks, void const *h_clear_blocks,
uint32_t num_clear_blocks);
void cleanup_cuda_integer_unsigned_scalar_div_rem_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr_void);
uint64_t scratch_integer_signed_scalar_div_rem_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, const CudaScalarDivisorFFI *scalar_divisor_ffi,
uint32_t const active_bits_divisor, bool allocate_gpu_memory,
bool allocate_ms_array);
void cuda_integer_signed_scalar_div_rem_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *quotient_ct, CudaRadixCiphertextFFI *remainder_ct,
int8_t *mem_ptr, void *const *bsks, void *const *ksks,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
const CudaScalarDivisorFFI *scalar_divisor_ffi,
uint64_t const *divisor_has_at_least_one_set,
uint64_t const *decomposed_divisor, uint32_t const num_scalars_divisor,
uint32_t numerator_bits);
void cleanup_cuda_integer_signed_scalar_div_rem_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr_void);
} // extern C
#endif // CUDA_INTEGER_H

View File

@@ -66,9 +66,6 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
uint32_t num_many_lut, uint32_t lut_stride);
template <typename Torus>
uint64_t get_buffer_size_full_sm_multibit_programmable_bootstrap_128_keybundle(
uint32_t polynomial_size);
template <typename Torus>
uint64_t get_buffer_size_full_sm_multibit_programmable_bootstrap_keybundle(
uint32_t polynomial_size);
template <typename Torus>
@@ -98,12 +95,8 @@ uint64_t get_buffer_size_full_sm_tbc_multibit_programmable_bootstrap(
template <typename Torus, class params>
uint32_t get_lwe_chunk_size(uint32_t gpu_index, uint32_t max_num_pbs,
uint32_t polynomial_size,
uint64_t full_sm_keybundle);
template <typename Torus, class params>
uint32_t get_lwe_chunk_size_128(uint32_t gpu_index, uint32_t max_num_pbs,
uint32_t polynomial_size,
uint64_t full_sm_keybundle);
uint32_t polynomial_size);
template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::MULTI_BIT> {
int8_t *d_mem_keybundle = NULL;
int8_t *d_mem_acc_step_one = NULL;
@@ -122,7 +115,7 @@ template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::MULTI_BIT> {
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t lwe_chunk_size,
PBS_VARIANT pbs_variant, bool allocate_gpu_memory,
uint64_t &size_tracker) {
uint64_t *size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
cuda_set_device(gpu_index);
@@ -288,146 +281,4 @@ template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::MULTI_BIT> {
}
};
template <typename InputTorus>
struct pbs_buffer_128<InputTorus, PBS_TYPE::MULTI_BIT> {
int8_t *d_mem_keybundle = NULL;
int8_t *d_mem_acc_step_one = NULL;
int8_t *d_mem_acc_step_two = NULL;
int8_t *d_mem_acc_cg = NULL;
int8_t *d_mem_acc_tbc = NULL;
uint32_t lwe_chunk_size;
double *keybundle_fft;
__uint128_t *global_accumulator;
double *global_join_buffer;
PBS_VARIANT pbs_variant;
bool gpu_memory_allocated;
pbs_buffer_128(cudaStream_t stream, uint32_t gpu_index,
uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t input_lwe_ciphertext_count,
uint32_t lwe_chunk_size, PBS_VARIANT pbs_variant,
bool allocate_gpu_memory, uint64_t &size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
cuda_set_device(gpu_index);
this->pbs_variant = pbs_variant;
this->lwe_chunk_size = lwe_chunk_size;
auto max_shared_memory = cuda_get_max_shared_memory(gpu_index);
// default
uint64_t full_sm_keybundle =
get_buffer_size_full_sm_multibit_programmable_bootstrap_128_keybundle<
__uint128_t>(polynomial_size);
uint64_t full_sm_accumulate_step_one =
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_one<
__uint128_t>(polynomial_size);
uint64_t full_sm_accumulate_step_two =
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_two<
__uint128_t>(polynomial_size);
uint64_t partial_sm_accumulate_step_one =
get_buffer_size_partial_sm_multibit_programmable_bootstrap_step_one<
__uint128_t>(polynomial_size);
// cg
uint64_t full_sm_cg_accumulate =
get_buffer_size_full_sm_cg_multibit_programmable_bootstrap<__uint128_t>(
polynomial_size);
uint64_t partial_sm_cg_accumulate =
get_buffer_size_partial_sm_cg_multibit_programmable_bootstrap<
__uint128_t>(polynomial_size);
auto num_blocks_keybundle = input_lwe_ciphertext_count * lwe_chunk_size *
(glwe_dimension + 1) * (glwe_dimension + 1) *
level_count;
auto num_blocks_acc_step_one =
level_count * (glwe_dimension + 1) * input_lwe_ciphertext_count;
auto num_blocks_acc_step_two =
input_lwe_ciphertext_count * (glwe_dimension + 1);
auto num_blocks_acc_cg =
level_count * (glwe_dimension + 1) * input_lwe_ciphertext_count;
// Keybundle
if (max_shared_memory < full_sm_keybundle)
d_mem_keybundle = (int8_t *)cuda_malloc_with_size_tracking_async(
num_blocks_keybundle * full_sm_keybundle, stream, gpu_index,
size_tracker, allocate_gpu_memory);
switch (pbs_variant) {
case PBS_VARIANT::CG:
// Accumulator CG
if (max_shared_memory < partial_sm_cg_accumulate)
d_mem_acc_cg = (int8_t *)cuda_malloc_with_size_tracking_async(
num_blocks_acc_cg * full_sm_cg_accumulate, stream, gpu_index,
size_tracker, allocate_gpu_memory);
else if (max_shared_memory < full_sm_cg_accumulate)
d_mem_acc_cg = (int8_t *)cuda_malloc_with_size_tracking_async(
num_blocks_acc_cg * partial_sm_cg_accumulate, stream, gpu_index,
size_tracker, allocate_gpu_memory);
break;
case PBS_VARIANT::DEFAULT:
// Accumulator step one
if (max_shared_memory < partial_sm_accumulate_step_one)
d_mem_acc_step_one = (int8_t *)cuda_malloc_with_size_tracking_async(
num_blocks_acc_step_one * full_sm_accumulate_step_one, stream,
gpu_index, size_tracker, allocate_gpu_memory);
else if (max_shared_memory < full_sm_accumulate_step_one)
d_mem_acc_step_one = (int8_t *)cuda_malloc_with_size_tracking_async(
num_blocks_acc_step_one * partial_sm_accumulate_step_one, stream,
gpu_index, size_tracker, allocate_gpu_memory);
// Accumulator step two
if (max_shared_memory < full_sm_accumulate_step_two)
d_mem_acc_step_two = (int8_t *)cuda_malloc_with_size_tracking_async(
num_blocks_acc_step_two * full_sm_accumulate_step_two, stream,
gpu_index, size_tracker, allocate_gpu_memory);
break;
default:
PANIC("Cuda error (PBS): unsupported implementation variant.")
}
keybundle_fft = (double *)cuda_malloc_with_size_tracking_async(
num_blocks_keybundle * (polynomial_size / 2) * 4 * sizeof(double),
stream, gpu_index, size_tracker, allocate_gpu_memory);
global_accumulator = (__uint128_t *)cuda_malloc_with_size_tracking_async(
input_lwe_ciphertext_count * (glwe_dimension + 1) * polynomial_size *
sizeof(__uint128_t),
stream, gpu_index, size_tracker, allocate_gpu_memory);
global_join_buffer = (double *)cuda_malloc_with_size_tracking_async(
level_count * (glwe_dimension + 1) * input_lwe_ciphertext_count *
(polynomial_size / 2) * 4 * sizeof(double),
stream, gpu_index, size_tracker, allocate_gpu_memory);
}
void release(cudaStream_t stream, uint32_t gpu_index) {
if (d_mem_keybundle)
cuda_drop_with_size_tracking_async(d_mem_keybundle, stream, gpu_index,
gpu_memory_allocated);
switch (pbs_variant) {
case DEFAULT:
if (d_mem_acc_step_one)
cuda_drop_with_size_tracking_async(d_mem_acc_step_one, stream,
gpu_index, gpu_memory_allocated);
if (d_mem_acc_step_two)
cuda_drop_with_size_tracking_async(d_mem_acc_step_two, stream,
gpu_index, gpu_memory_allocated);
break;
case CG:
if (d_mem_acc_cg)
cuda_drop_with_size_tracking_async(d_mem_acc_cg, stream, gpu_index,
gpu_memory_allocated);
break;
default:
PANIC("Cuda error (PBS): unsupported implementation variant.")
}
cuda_drop_with_size_tracking_async(keybundle_fft, stream, gpu_index,
gpu_memory_allocated);
cuda_drop_with_size_tracking_async(global_accumulator, stream, gpu_index,
gpu_memory_allocated);
cuda_drop_with_size_tracking_async(global_join_buffer, stream, gpu_index,
gpu_memory_allocated);
}
};
#endif // CUDA_MULTI_BIT_UTILITIES_H

View File

@@ -90,7 +90,7 @@ template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::CLASSICAL> {
uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t input_lwe_ciphertext_count,
PBS_VARIANT pbs_variant, bool allocate_gpu_memory,
bool allocate_ms_array, uint64_t &size_tracker) {
bool allocate_ms_array, uint64_t *size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
cuda_set_device(gpu_index);
this->uses_noise_reduction = allocate_ms_array;
@@ -240,10 +240,7 @@ template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::CLASSICAL> {
}
};
template <typename Torus, PBS_TYPE pbs_type> struct pbs_buffer_128;
template <typename InputTorus>
struct pbs_buffer_128<InputTorus, PBS_TYPE::CLASSICAL> {
template <typename InputTorus, PBS_TYPE pbs_type> struct pbs_buffer_128 {
int8_t *d_mem;
__uint128_t *global_accumulator;
@@ -260,7 +257,7 @@ struct pbs_buffer_128<InputTorus, PBS_TYPE::CLASSICAL> {
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, PBS_VARIANT pbs_variant,
bool allocate_gpu_memory, bool allocate_ms_array,
uint64_t &size_tracker) {
uint64_t *size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
cuda_set_device(gpu_index);
this->pbs_variant = pbs_variant;

View File

@@ -15,11 +15,6 @@ void cuda_convert_lwe_multi_bit_programmable_bootstrap_key_64(
uint32_t input_lwe_dim, uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size, uint32_t grouping_factor);
void cuda_convert_lwe_multi_bit_programmable_bootstrap_key_128(
void *stream, uint32_t gpu_index, void *dest, void const *src,
uint32_t input_lwe_dim, uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size, uint32_t grouping_factor);
uint64_t scratch_cuda_multi_bit_programmable_bootstrap_64(
void *stream, uint32_t gpu_index, int8_t **pbs_buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
@@ -38,25 +33,6 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_64(
void cleanup_cuda_multi_bit_programmable_bootstrap(void *stream,
uint32_t gpu_index,
int8_t **pbs_buffer);
uint64_t scratch_cuda_multi_bit_programmable_bootstrap_128_vector_64(
void *stream, uint32_t gpu_index, int8_t **buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, bool allocate_gpu_memory);
void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_128(
void *stream, uint32_t gpu_index, void *lwe_array_out,
void const *lwe_output_indexes, void const *lut_vector,
void const *lut_vector_indexes, void const *lwe_array_in,
void const *lwe_input_indexes, void const *bootstrapping_key,
int8_t *mem_ptr, uint32_t lwe_dimension, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t grouping_factor, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_many_lut,
uint32_t lut_stride);
void cleanup_cuda_multi_bit_programmable_bootstrap_128(void *stream,
const uint32_t gpu_index,
int8_t **buffer);
}
#endif // CUDA_MULTI_BIT_H

View File

@@ -27,7 +27,7 @@ template <typename Torus> struct zk_expand_mem {
int_radix_params casting_params, KS_TYPE casting_key_type,
const uint32_t *num_lwes_per_compact_list,
const bool *is_boolean_array, uint32_t num_compact_lists,
bool allocate_gpu_memory, uint64_t &size_tracker)
bool allocate_gpu_memory, uint64_t *size_tracker)
: computing_params(computing_params), casting_params(casting_params),
num_compact_lists(num_compact_lists),
casting_key_type(casting_key_type) {
@@ -232,7 +232,7 @@ template <typename Torus> struct zk_expand_mem {
num_lwes * sizeof(uint32_t), streams[0], gpu_indexes[0],
allocate_gpu_memory);
message_and_carry_extract_luts->broadcast_lut(streams, gpu_indexes);
message_and_carry_extract_luts->broadcast_lut(streams, gpu_indexes, 0);
// The expanded LWEs will always be on the casting key format
tmp_expanded_lwes = (Torus *)cuda_malloc_with_size_tracking_async(

View File

@@ -1,6 +1,5 @@
file(GLOB_RECURSE SOURCES "*.cu")
add_library(tfhe_cuda_backend STATIC ${SOURCES} pbs/programmable_bootstrap_multibit_128.cuh
pbs/programmable_bootstrap_multibit_128.cu)
add_library(tfhe_cuda_backend STATIC ${SOURCES})
set_target_properties(tfhe_cuda_backend PROPERTIES CUDA_SEPARABLE_COMPILATION ON CUDA_RESOLVE_DEVICE_SYMBOLS ON)
target_link_libraries(tfhe_cuda_backend PUBLIC cudart OpenMP::OpenMP_CXX)
target_include_directories(tfhe_cuda_backend PRIVATE .)

View File

@@ -38,16 +38,6 @@ __device__ Torus *get_ith_block(Torus *ksk, int i, int level,
// Each thread in x are used to calculate one output.
// threads in y are used to paralelize the lwe_dimension_in loop.
// shared memory is used to store intermediate results of the reduction.
// Note: To reduce register pressure we have slightly changed the algorithm,
// the idea consists in calculating the negate value of the output. So, instead
// of accumulating subtractions using -=, we accumulate additions using += in
// the local_lwe_out. This seems to work better cause profits madd ops and save
// some regs. For this to work, we need to negate the input
// lwe_array_in[lwe_dimension_in], and negate back the output at the end to get
// the correct results. Additionally, we split the calculation of the ksk offset
// in two parts, a constant part is calculated before the loop, and a variable
// part is calculated inside the loop. This seems to help with the register
// pressure as well.
template <typename Torus>
__global__ void
keyswitch(Torus *lwe_array_out, const Torus *__restrict__ lwe_output_indexes,
@@ -70,7 +60,7 @@ keyswitch(Torus *lwe_array_out, const Torus *__restrict__ lwe_output_indexes,
lwe_array_in, lwe_input_indexes[blockIdx.x], lwe_dimension_in + 1);
if (tid == lwe_dimension_out && threadIdx.y == 0) {
local_lwe_out = -block_lwe_array_in[lwe_dimension_in];
local_lwe_out = block_lwe_array_in[lwe_dimension_in];
}
const Torus mask_mod_b = (1ll << base_log) - 1ll;
@@ -83,12 +73,12 @@ keyswitch(Torus *lwe_array_out, const Torus *__restrict__ lwe_output_indexes,
for (int i = start_i; i < end_i; i++) {
Torus state =
init_decomposer_state(block_lwe_array_in[i], base_log, level_count);
uint32_t offset = i * level_count * (lwe_dimension_out + 1);
for (int j = 0; j < level_count; j++) {
for (int j = 0; j < level_count; j++) {
auto ksk_block =
get_ith_block(ksk, i, j, lwe_dimension_out, level_count);
Torus decomposed = decompose_one<Torus>(state, mask_mod_b, base_log);
local_lwe_out +=
(Torus)ksk[tid + j * (lwe_dimension_out + 1) + offset] * decomposed;
local_lwe_out -= (Torus)ksk_block[tid] * decomposed;
}
}
@@ -103,7 +93,7 @@ keyswitch(Torus *lwe_array_out, const Torus *__restrict__ lwe_output_indexes,
lwe_acc_out[shmem_index + offset * blockDim.x];
}
if (threadIdx.y == 0)
block_lwe_array_out[tid] = -lwe_acc_out[shmem_index];
block_lwe_array_out[tid] = lwe_acc_out[shmem_index];
}
}
@@ -125,7 +115,7 @@ __host__ void host_keyswitch_lwe_ciphertext_vector(
int shared_mem = sizeof(Torus) * num_threads_y * num_threads_x;
if (num_blocks_per_sample > 65536)
PANIC("Cuda error (Keyswitch): number of blocks per sample is too large");
PANIC("Cuda error (Keyswith): number of blocks per sample is too large");
// In multiplication of large integers (512, 1024, 2048), the number of
// samples can be larger than 65536, so we need to set it in the first
@@ -186,10 +176,10 @@ __host__ uint64_t scratch_packing_keyswitch_lwe_list_to_glwe(
? glwe_accumulator_size
: lwe_dimension * 2;
uint64_t size_tracker = 0;
uint64_t size_tracker;
uint64_t buffer_size = 2 * num_lwes * memory_unit * sizeof(Torus);
*fp_ks_buffer = (int8_t *)cuda_malloc_with_size_tracking_async(
buffer_size, stream, gpu_index, size_tracker, allocate_gpu_memory);
buffer_size, stream, gpu_index, &size_tracker, allocate_gpu_memory);
return size_tracker;
}

View File

@@ -66,13 +66,6 @@ __device__ inline void typecast_torus_to_double<uint64_t>(uint64_t x,
r = __ll2double_rn(x);
}
template <>
__device__ inline void typecast_torus_to_double<__uint128_t>(__uint128_t x,
double &r) {
// We truncate x
r = __ll2double_rn(static_cast<uint64_t>(x));
}
template <typename T>
__device__ inline T init_decomposer_state(T input, uint32_t base_log,
uint32_t level_count) {

View File

@@ -2,12 +2,6 @@
#include <cstdint>
#include <cuda_runtime.h>
uint32_t cuda_get_device() {
int device;
check_cuda_error(cudaGetDevice(&device));
return static_cast<uint32_t>(device);
}
void cuda_set_device(uint32_t gpu_index) {
check_cuda_error(cudaSetDevice(gpu_index));
}
@@ -80,9 +74,10 @@ void *cuda_malloc(uint64_t size, uint32_t gpu_index) {
/// asynchronously.
void *cuda_malloc_with_size_tracking_async(uint64_t size, cudaStream_t stream,
uint32_t gpu_index,
uint64_t &size_tracker,
uint64_t *size_tracker,
bool allocate_gpu_memory) {
size_tracker += size;
if (size_tracker != nullptr)
*size_tracker += size;
void *ptr = nullptr;
if (!allocate_gpu_memory)
return ptr;
@@ -111,9 +106,8 @@ void *cuda_malloc_with_size_tracking_async(uint64_t size, cudaStream_t stream,
/// asynchronously.
void *cuda_malloc_async(uint64_t size, cudaStream_t stream,
uint32_t gpu_index) {
uint64_t size_tracker = 0;
return cuda_malloc_with_size_tracking_async(size, stream, gpu_index,
size_tracker, true);
return cuda_malloc_with_size_tracking_async(size, stream, gpu_index, nullptr,
true);
}
/// Check that allocation is valid
@@ -128,13 +122,6 @@ bool cuda_check_valid_malloc(uint64_t size, uint32_t gpu_index) {
}
}
uint64_t cuda_device_total_memory(uint32_t gpu_index) {
cuda_set_device(gpu_index);
size_t total_mem = 0, free_mem = 0;
check_cuda_error(cudaMemGetInfo(&free_mem, &total_mem));
return total_mem;
}
/// Returns
/// false if Cooperative Groups is not supported.
/// true otherwise

View File

@@ -234,29 +234,6 @@ __device__ void convert_u128_to_f128_as_torus(
}
}
// params is expected to be full degree not half degree
// same as convert_u128_to_f128_as_torus() but expects input to be on registers
template <class params>
__device__ void convert_u128_on_regs_to_f128_as_torus(
double *out_re_hi, double *out_re_lo, double *out_im_hi, double *out_im_lo,
const __uint128_t *in_re_on_regs, const __uint128_t *in_im_on_regs) {
const double normalization = pow(2., -128.);
Index tid = threadIdx.x;
// #pragma unroll
for (Index i = 0; i < params::opt / 2; i++) {
auto out_re = u128_to_signed_to_f128(in_re_on_regs[i]);
auto out_im = u128_to_signed_to_f128(in_im_on_regs[i]);
out_re_hi[tid] = out_re.hi * normalization;
out_re_lo[tid] = out_re.lo * normalization;
out_im_hi[tid] = out_im.hi * normalization;
out_im_lo[tid] = out_im.lo * normalization;
tid += params::degree / params::opt;
}
}
template <class params>
__device__ void
convert_f128_to_u128_as_torus(__uint128_t *out_re, __uint128_t *out_im,
@@ -295,7 +272,7 @@ batch_convert_u128_to_f128_as_integer(double *out_re_hi, double *out_re_lo,
}
// params is expected to be full degree not half degree
// converts standard input into complex<128> represented by 4 double
// converts standqard input into complex<128> represented by 4 double
// with following pattern: [re_hi_0, re_hi_1, ... re_hi_n, re_lo_0, re_lo_1,
// ... re_lo_n, im_hi_0, im_hi_1, ..., im_hi_n, im_lo_0, im_lo_1, ..., im_lo_n]
template <class params>
@@ -314,7 +291,7 @@ batch_convert_u128_to_f128_as_torus(double *out_re_hi, double *out_re_lo,
}
// params is expected to be full degree not half degree
// converts standard input into complex<128> represented by 4 double
// converts standqard input into complex<128> represented by 4 double
// with following pattern: [re_hi_0, re_lo_0, im_hi_0, im_lo_0, re_hi_1,
// re_lo_1, im_hi_1, im_lo_1,
// ...,re_hi_n, re_lo_n, im_hi_n, im_lo_n, ]

View File

@@ -26,7 +26,7 @@ __host__ uint64_t scratch_cuda_integer_abs_kb(
if (is_signed) {
*mem_ptr = new int_abs_buffer<Torus>(streams, gpu_indexes, gpu_count,
params, num_blocks,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
}
return size_tracker;
}
@@ -53,8 +53,7 @@ __host__ void host_integer_abs_kb(
streams, gpu_indexes, gpu_count, mask, num_bits_in_ciphertext - 1,
mem_ptr->arithmetic_scalar_shift_mem, bsks, ksks, ms_noise_reduction_key);
host_addition<Torus>(streams[0], gpu_indexes[0], ct, mask, ct,
ct->num_radix_blocks, mem_ptr->params.message_modulus,
mem_ptr->params.carry_modulus);
ct->num_radix_blocks);
uint32_t requested_flag = outputFlag::FLAG_NONE;
uint32_t uses_carry = 0;

View File

@@ -61,7 +61,7 @@ __host__ uint64_t scratch_cuda_integer_radix_bitop_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_bitop_buffer<Torus>(streams, gpu_indexes, gpu_count, op,
params, num_radix_blocks,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -44,7 +44,7 @@ __host__ uint64_t scratch_extend_radix_with_sign_msb(
*mem_ptr = new int_extend_radix_with_sign_msb_buffer<Torus>(
streams, gpu_indexes, gpu_count, params, num_radix_blocks,
num_additional_blocks, allocate_gpu_memory, size_tracker);
num_additional_blocks, allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -17,12 +17,11 @@ uint64_t scratch_cuda_integer_radix_cmux_kb_64(
std::function<uint64_t(uint64_t)> predicate_lut_f =
[](uint64_t x) -> uint64_t { return x == 1; };
uint64_t ret = scratch_cuda_integer_radix_cmux_kb<uint64_t>(
return scratch_cuda_integer_radix_cmux_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
(int_cmux_buffer<uint64_t> **)mem_ptr, predicate_lut_f,
lwe_ciphertext_count, params, allocate_gpu_memory);
POP_RANGE()
return ret;
}
void cuda_cmux_integer_radix_ciphertext_kb_64(

View File

@@ -84,8 +84,7 @@ __host__ void host_integer_radix_cmux_kb(
num_radix_blocks, 2 * num_radix_blocks);
host_addition<Torus>(streams[0], gpu_indexes[0], &mem_true, &mem_true,
&mem_false, num_radix_blocks, params.message_modulus,
params.carry_modulus);
&mem_false, num_radix_blocks);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, &mem_true, bsks, ksks,
@@ -101,7 +100,7 @@ __host__ uint64_t scratch_cuda_integer_radix_cmux_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_cmux_buffer<Torus>(
streams, gpu_indexes, gpu_count, predicate_lut_f, params,
num_radix_blocks, allocate_gpu_memory, size_tracker);
num_radix_blocks, allocate_gpu_memory, &size_tracker);
return size_tracker;
}
#endif

View File

@@ -148,7 +148,7 @@ __host__ void are_all_comparisons_block_true(
cuda_memcpy_async_to_gpu(is_max_value_lut->get_lut_indexes(0, 0),
h_lut_indexes, num_chunks * sizeof(Torus),
streams[0], gpu_indexes[0]);
is_max_value_lut->broadcast_lut(streams, gpu_indexes);
is_max_value_lut->broadcast_lut(streams, gpu_indexes, 0);
}
lut = is_max_value_lut;
}
@@ -167,7 +167,7 @@ __host__ void are_all_comparisons_block_true(
is_max_value_lut->h_lut_indexes,
is_max_value_lut->num_blocks * sizeof(Torus),
streams[0], gpu_indexes[0]);
is_max_value_lut->broadcast_lut(streams, gpu_indexes);
is_max_value_lut->broadcast_lut(streams, gpu_indexes, 0);
reset_radix_ciphertext_blocks(lwe_array_out, 1);
return;
} else {
@@ -499,7 +499,7 @@ __host__ void tree_sign_reduction(
streams[0], gpu_indexes[0], last_lut->get_lut(0, 0),
last_lut->get_degree(0), last_lut->get_max_degree(0), glwe_dimension,
polynomial_size, message_modulus, carry_modulus, f, true);
last_lut->broadcast_lut(streams, gpu_indexes);
last_lut->broadcast_lut(streams, gpu_indexes, 0);
// Last leaf
integer_radix_apply_univariate_lookup_table_kb<Torus>(
@@ -684,7 +684,7 @@ __host__ uint64_t scratch_cuda_integer_radix_comparison_check_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_comparison_buffer<Torus>(
streams, gpu_indexes, gpu_count, op, params, num_radix_blocks, is_signed,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -370,7 +370,7 @@ __host__ uint64_t scratch_cuda_compress_integer_radix_ciphertext(
uint64_t size_tracker = 0;
*mem_ptr = new int_compression<Torus>(
streams, gpu_indexes, gpu_count, compression_params, num_radix_blocks,
lwe_per_glwe, storage_log_modulus, allocate_gpu_memory, size_tracker);
lwe_per_glwe, storage_log_modulus, allocate_gpu_memory, &size_tracker);
return size_tracker;
}
@@ -386,7 +386,7 @@ __host__ uint64_t scratch_cuda_integer_decompress_radix_ciphertext(
*mem_ptr = new int_decompression<Torus>(
streams, gpu_indexes, gpu_count, encryption_params, compression_params,
num_radix_blocks, body_count, storage_log_modulus, allocate_gpu_memory,
size_tracker);
&size_tracker);
return size_tracker;
}
#endif

View File

@@ -28,7 +28,7 @@ __host__ uint64_t scratch_cuda_integer_div_rem_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_div_rem_memory<Torus>(streams, gpu_indexes, gpu_count,
params, is_signed, num_blocks,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}
@@ -268,11 +268,10 @@ __host__ void host_unsigned_integer_div_rem_kb(
// but in that position, interesting_remainder2 always has a 0
auto merged_interesting_remainder = interesting_remainder1;
host_addition<Torus>(
streams[0], gpu_indexes[0], merged_interesting_remainder,
merged_interesting_remainder, interesting_remainder2,
merged_interesting_remainder->num_radix_blocks,
radix_params.message_modulus, radix_params.carry_modulus);
host_addition<Torus>(streams[0], gpu_indexes[0],
merged_interesting_remainder,
merged_interesting_remainder, interesting_remainder2,
merged_interesting_remainder->num_radix_blocks);
// after create_clean_version_of_merged_remainder
// `merged_interesting_remainder` will be reused as
@@ -383,10 +382,9 @@ __host__ void host_unsigned_integer_div_rem_kb(
cuda_synchronize_stream(mem_ptr->sub_streams_3[j], gpu_indexes[j]);
}
host_addition<Torus>(
streams[0], gpu_indexes[0], overflow_sum, subtraction_overflowed,
at_least_one_upper_block_is_non_zero, 1, radix_params.message_modulus,
radix_params.carry_modulus);
host_addition<Torus>(streams[0], gpu_indexes[0], overflow_sum,
subtraction_overflowed,
at_least_one_upper_block_is_non_zero, 1);
auto message_modulus = radix_params.message_modulus;
int factor = (i) ? message_modulus - 1 : message_modulus - 2;
@@ -436,9 +434,7 @@ __host__ void host_unsigned_integer_div_rem_kb(
as_radix_ciphertext_slice<Torus>(&quotient_block, quotient, block_of_bit,
block_of_bit + 1);
host_addition<Torus>(streams[0], gpu_indexes[0], &quotient_block,
&quotient_block, mem_ptr->did_not_overflow, 1,
radix_params.message_modulus,
radix_params.carry_modulus);
&quotient_block, mem_ptr->did_not_overflow, 1);
};
for (uint j = 0; j < gpu_count; j++) {
@@ -481,9 +477,7 @@ __host__ void host_unsigned_integer_div_rem_kb(
// Clean the quotient and remainder
// as even though they have no carries, they are not at nominal noise level
host_addition<Torus>(streams[0], gpu_indexes[0], remainder, remainder1,
remainder2, remainder1->num_radix_blocks,
radix_params.message_modulus,
radix_params.carry_modulus);
remainder2, remainder1->num_radix_blocks);
for (uint j = 0; j < gpu_count; j++) {
cuda_synchronize_stream(streams[j], gpu_indexes[j]);

View File

@@ -398,7 +398,7 @@ uint64_t scratch_cuda_apply_noise_squashing_mem(
*mem_ptr = new int_noise_squashing_lut<uint64_t>(
(cudaStream_t *)streams, gpu_indexes, gpu_count, params, glwe_dimension,
polynomial_size, num_radix_blocks, original_num_blocks,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -409,8 +409,7 @@ __host__ void host_pack_bivariate_blocks(
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
Torus const *lwe_indexes_out, CudaRadixCiphertextFFI const *lwe_array_1,
CudaRadixCiphertextFFI const *lwe_array_2, Torus const *lwe_indexes_in,
uint32_t shift, uint32_t num_radix_blocks, uint32_t const message_modulus,
uint32_t const carry_modulus) {
uint32_t shift, uint32_t num_radix_blocks) {
if (lwe_array_out->lwe_dimension != lwe_array_1->lwe_dimension ||
lwe_array_out->lwe_dimension != lwe_array_2->lwe_dimension)
@@ -434,15 +433,6 @@ __host__ void host_pack_bivariate_blocks(
(Torus *)lwe_array_1->ptr, (Torus *)lwe_array_2->ptr, lwe_indexes_in,
lwe_dimension, shift, num_radix_blocks);
check_cuda_error(cudaGetLastError());
for (uint i = 0; i < num_radix_blocks; i++) {
lwe_array_out->degrees[i] =
lwe_array_1->degrees[i] * shift + lwe_array_2->degrees[i];
lwe_array_out->noise_levels[i] =
lwe_array_1->noise_levels[i] * shift + lwe_array_2->noise_levels[i];
CHECK_NOISE_LEVEL(lwe_array_out->noise_levels[i], message_modulus,
carry_modulus);
}
}
// polynomial_size threads
@@ -607,8 +597,6 @@ __host__ void integer_radix_apply_univariate_lookup_table_kb(
auto degrees_index = lut->h_lut_indexes[i];
lwe_array_out->degrees[i] = lut->degrees[degrees_index];
lwe_array_out->noise_levels[i] = NoiseLevel::NOMINAL;
CHECK_NOISE_LEVEL(lwe_array_out->noise_levels[i], params.message_modulus,
params.carry_modulus);
}
POP_RANGE()
}
@@ -714,8 +702,6 @@ __host__ void integer_radix_apply_many_univariate_lookup_table_kb(
auto degrees_index = lut->h_lut_indexes[i % lut->num_blocks];
lwe_array_out->degrees[i] = lut->degrees[degrees_index];
lwe_array_out->noise_levels[i] = NoiseLevel::NOMINAL;
CHECK_NOISE_LEVEL(lwe_array_out->noise_levels[i], params.message_modulus,
params.carry_modulus);
}
POP_RANGE()
}
@@ -764,7 +750,7 @@ __host__ void integer_radix_apply_bivariate_lookup_table_kb(
host_pack_bivariate_blocks<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_pbs_in,
lut->lwe_trivial_indexes, lwe_array_1, lwe_array_2, lut->lwe_indexes_in,
shift, num_radix_blocks, params.message_modulus, params.carry_modulus);
shift, num_radix_blocks);
check_cuda_error(cudaGetLastError());
/// For multi GPU execution we create vectors of pointers for inputs and
@@ -832,8 +818,6 @@ __host__ void integer_radix_apply_bivariate_lookup_table_kb(
auto degrees_index = lut->h_lut_indexes[i];
lwe_array_out->degrees[i] = lut->degrees[degrees_index];
lwe_array_out->noise_levels[i] = NoiseLevel::NOMINAL;
CHECK_NOISE_LEVEL(lwe_array_out->noise_levels[i], params.message_modulus,
params.carry_modulus);
}
POP_RANGE()
}
@@ -1462,8 +1446,6 @@ void host_full_propagate_inplace(
auto degrees_index = mem_ptr->lut->h_lut_indexes[0];
input_blocks->degrees[i] = mem_ptr->lut->degrees[degrees_index];
input_blocks->noise_levels[i] = NoiseLevel::NOMINAL;
CHECK_NOISE_LEVEL(input_blocks->noise_levels[i], params.message_modulus,
params.carry_modulus);
if (i < num_blocks - 1) {
CudaRadixCiphertextFFI next_input_block;
@@ -1474,8 +1456,7 @@ void host_full_propagate_inplace(
mem_ptr->tmp_big_lwe_vector, 1, 2);
host_addition<Torus>(streams[0], gpu_indexes[0], &next_input_block,
&next_input_block, &second_input, 1,
params.message_modulus, params.carry_modulus);
&next_input_block, &second_input, 1);
}
}
}
@@ -1491,7 +1472,7 @@ uint64_t scratch_cuda_full_propagation(cudaStream_t const *streams,
uint64_t size_tracker = 0;
*mem_ptr =
new int_fullprop_buffer<Torus>(streams, gpu_indexes, gpu_count, params,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}
@@ -1657,7 +1638,7 @@ __host__ void reduce_signs(
streams[0], gpu_indexes[0], lut->get_lut(0, 0), lut->get_degree(0),
lut->get_max_degree(0), glwe_dimension, polynomial_size,
message_modulus, carry_modulus, reduce_two_orderings_function, true);
lut->broadcast_lut(streams, gpu_indexes);
lut->broadcast_lut(streams, gpu_indexes, 0);
while (num_sign_blocks > 2) {
pack_blocks<Torus>(streams[0], gpu_indexes[0], signs_b, signs_a,
@@ -1688,7 +1669,7 @@ __host__ void reduce_signs(
streams[0], gpu_indexes[0], lut->get_lut(0, 0), lut->get_degree(0),
lut->get_max_degree(0), glwe_dimension, polynomial_size,
message_modulus, carry_modulus, final_lut_f, true);
lut->broadcast_lut(streams, gpu_indexes);
lut->broadcast_lut(streams, gpu_indexes, 0);
pack_blocks<Torus>(streams[0], gpu_indexes[0], signs_b, signs_a,
num_sign_blocks, message_modulus);
@@ -1708,7 +1689,7 @@ __host__ void reduce_signs(
streams[0], gpu_indexes[0], lut->get_lut(0, 0), lut->get_degree(0),
lut->get_max_degree(0), glwe_dimension, polynomial_size,
message_modulus, carry_modulus, final_lut_f, true);
lut->broadcast_lut(streams, gpu_indexes);
lut->broadcast_lut(streams, gpu_indexes, 0);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, signs_array_out, signs_a, bsks, ksks,
@@ -1726,7 +1707,7 @@ uint64_t scratch_cuda_apply_univariate_lut_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_radix_lut<Torus>(streams, gpu_indexes, gpu_count, params,
1, num_radix_blocks, allocate_gpu_memory,
size_tracker);
&size_tracker);
// It is safe to do this copy on GPU 0, because all LUTs always reside on GPU
// 0
cuda_memcpy_with_size_tracking_async_to_gpu(
@@ -1734,7 +1715,7 @@ uint64_t scratch_cuda_apply_univariate_lut_kb(
(params.glwe_dimension + 1) * params.polynomial_size * sizeof(Torus),
streams[0], gpu_indexes[0], allocate_gpu_memory);
*(*mem_ptr)->get_degree(0) = lut_degree;
(*mem_ptr)->broadcast_lut(streams, gpu_indexes);
(*mem_ptr)->broadcast_lut(streams, gpu_indexes, 0);
return size_tracker;
}
@@ -1762,7 +1743,7 @@ uint64_t scratch_cuda_apply_many_univariate_lut_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_radix_lut<Torus>(streams, gpu_indexes, gpu_count, params,
1, num_radix_blocks, num_many_lut,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
// It is safe to do this copy on GPU 0, because all LUTs always reside on GPU
// 0
cuda_memcpy_with_size_tracking_async_to_gpu(
@@ -1770,7 +1751,7 @@ uint64_t scratch_cuda_apply_many_univariate_lut_kb(
(params.glwe_dimension + 1) * params.polynomial_size * sizeof(Torus),
streams[0], gpu_indexes[0], allocate_gpu_memory);
*(*mem_ptr)->get_degree(0) = lut_degree;
(*mem_ptr)->broadcast_lut(streams, gpu_indexes);
(*mem_ptr)->broadcast_lut(streams, gpu_indexes, 0);
return size_tracker;
}
@@ -1798,7 +1779,7 @@ uint64_t scratch_cuda_apply_bivariate_lut_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_radix_lut<Torus>(streams, gpu_indexes, gpu_count, params,
1, num_radix_blocks, allocate_gpu_memory,
size_tracker);
&size_tracker);
// It is safe to do this copy on GPU 0, because all LUTs always reside on GPU
// 0
cuda_memcpy_with_size_tracking_async_to_gpu(
@@ -1806,7 +1787,7 @@ uint64_t scratch_cuda_apply_bivariate_lut_kb(
(params.glwe_dimension + 1) * params.polynomial_size * sizeof(Torus),
streams[0], gpu_indexes[0], allocate_gpu_memory);
*(*mem_ptr)->get_degree(0) = lut_degree;
(*mem_ptr)->broadcast_lut(streams, gpu_indexes);
(*mem_ptr)->broadcast_lut(streams, gpu_indexes, 0);
return size_tracker;
}
@@ -1836,7 +1817,7 @@ uint64_t scratch_cuda_propagate_single_carry_kb_inplace(
uint64_t size_tracker = 0;
*mem_ptr = new int_sc_prop_memory<Torus>(
streams, gpu_indexes, gpu_count, params, num_radix_blocks, requested_flag,
uses_carry, allocate_gpu_memory, size_tracker);
uses_carry, allocate_gpu_memory, &size_tracker);
return size_tracker;
}
// This function perform the three steps of Thomas' new carry propagation
@@ -1866,8 +1847,7 @@ void host_propagate_single_carry(
"pointer")
if (uses_carry == 1) {
host_addition<Torus>(streams[0], gpu_indexes[0], lwe_array, lwe_array,
input_carries, 1, params.message_modulus,
params.carry_modulus);
input_carries, 1);
}
// Step 1
@@ -1893,8 +1873,7 @@ void host_propagate_single_carry(
auto shifted_blocks = mem->shifted_blocks_state_mem->shifted_blocks;
host_addition<Torus>(
streams[0], gpu_indexes[0], prepared_blocks, shifted_blocks,
mem->prop_simu_group_carries_mem->simulators, num_radix_blocks,
params.message_modulus, params.carry_modulus);
mem->prop_simu_group_carries_mem->simulators, num_radix_blocks);
if (requested_flag == outputFlag::FLAG_OVERFLOW ||
requested_flag == outputFlag::FLAG_CARRY) {
@@ -1903,8 +1882,7 @@ void host_propagate_single_carry(
&shifted_simulators, mem->prop_simu_group_carries_mem->simulators,
num_radix_blocks - 1, num_radix_blocks);
host_addition<Torus>(streams[0], gpu_indexes[0], &output_flag, &output_flag,
&shifted_simulators, 1, params.message_modulus,
params.carry_modulus);
&shifted_simulators, 1);
}
host_radix_sum_in_groups<Torus>(
@@ -1918,8 +1896,7 @@ void host_propagate_single_carry(
mem->prop_simu_group_carries_mem->resolved_carries, mem->num_groups - 1,
mem->num_groups);
host_addition<Torus>(streams[0], gpu_indexes[0], &output_flag, &output_flag,
&shifted_resolved_carries, 1, params.message_modulus,
params.carry_modulus);
&shifted_resolved_carries, 1);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], prepared_blocks, num_radix_blocks,
@@ -1958,26 +1935,15 @@ void host_add_and_propagate_single_carry(
PUSH_RANGE("add & propagate sc")
if (lhs_array->num_radix_blocks != rhs_array->num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks must be the same")
// Check input carries if used
if (uses_carry == 1) {
if (input_carries == nullptr)
PANIC("Cuda error: if uses_carry is enabled, input_carries cannot be a "
"null pointer");
if (lhs_array->lwe_dimension != input_carries->lwe_dimension)
PANIC(
"Cuda error: input and input_carries lwe dimension must be the same");
}
// Allow nullptr for carry_out if FLAG_NONE is requested
if (lhs_array->lwe_dimension != rhs_array->lwe_dimension ||
lhs_array->lwe_dimension != input_carries->lwe_dimension ||
lhs_array->lwe_dimension != carry_out->lwe_dimension)
PANIC("Cuda error: input and output lwe dimension must be the same")
if ((requested_flag == outputFlag::FLAG_OVERFLOW ||
requested_flag == outputFlag::FLAG_CARRY)) {
if (carry_out == nullptr)
PANIC("Cuda error: when requesting FLAG_CARRY or FLAG_OVERFLOW, "
"carry_out must be a valid pointer")
if (lhs_array->lwe_dimension != carry_out->lwe_dimension)
PANIC("Cuda error: input and carry_out lwe dimension must be the same")
}
requested_flag == outputFlag::FLAG_CARRY) &&
carry_out == nullptr)
PANIC("Cuda error: when requesting FLAG_CARRY, carry_out must be a valid "
"pointer")
auto num_radix_blocks = lhs_array->num_radix_blocks;
auto params = mem->params;
@@ -2000,13 +1966,11 @@ void host_add_and_propagate_single_carry(
}
host_addition<Torus>(streams[0], gpu_indexes[0], lhs_array, lhs_array,
rhs_array, num_radix_blocks, params.message_modulus,
params.carry_modulus);
rhs_array, num_radix_blocks);
if (uses_carry == 1) {
host_addition<Torus>(streams[0], gpu_indexes[0], lhs_array, lhs_array,
input_carries, 1, params.message_modulus,
params.carry_modulus);
input_carries, 1);
}
// Step 1
host_compute_shifted_blocks_and_states<Torus>(
@@ -2037,8 +2001,7 @@ void host_add_and_propagate_single_carry(
auto shifted_blocks = mem->shifted_blocks_state_mem->shifted_blocks;
host_addition<Torus>(
streams[0], gpu_indexes[0], prepared_blocks, shifted_blocks,
mem->prop_simu_group_carries_mem->simulators, num_radix_blocks,
params.message_modulus, params.carry_modulus);
mem->prop_simu_group_carries_mem->simulators, num_radix_blocks);
if (requested_flag == outputFlag::FLAG_OVERFLOW ||
requested_flag == outputFlag::FLAG_CARRY) {
@@ -2047,8 +2010,7 @@ void host_add_and_propagate_single_carry(
&shifted_simulators, mem->prop_simu_group_carries_mem->simulators,
num_radix_blocks - 1, num_radix_blocks);
host_addition<Torus>(streams[0], gpu_indexes[0], &output_flag, &output_flag,
&shifted_simulators, 1, params.message_modulus,
params.carry_modulus);
&shifted_simulators, 1);
}
// Step 3
@@ -2063,8 +2025,7 @@ void host_add_and_propagate_single_carry(
if (num_radix_blocks == 1 && requested_flag == outputFlag::FLAG_OVERFLOW &&
uses_carry == 1) {
host_addition<Torus>(streams[0], gpu_indexes[0], &output_flag,
&output_flag, input_carries, 1,
params.message_modulus, params.carry_modulus);
&output_flag, input_carries, 1);
} else {
CudaRadixCiphertextFFI shifted_resolved_carries;
@@ -2073,8 +2034,7 @@ void host_add_and_propagate_single_carry(
mem->prop_simu_group_carries_mem->resolved_carries,
mem->num_groups - 1, mem->num_groups);
host_addition<Torus>(streams[0], gpu_indexes[0], &output_flag,
&output_flag, &shifted_resolved_carries, 1,
params.message_modulus, params.carry_modulus);
&output_flag, &shifted_resolved_carries, 1);
}
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], prepared_blocks, num_radix_blocks,
@@ -2087,7 +2047,6 @@ void host_add_and_propagate_single_carry(
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], lhs_array, 0, num_radix_blocks,
mem->output_flag, 0, num_radix_blocks);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], carry_out, 0, 1, mem->output_flag,
num_radix_blocks, num_radix_blocks + 1);
@@ -2109,7 +2068,7 @@ uint64_t scratch_cuda_integer_overflowing_sub(
uint64_t size_tracker = 0;
*mem_ptr = new int_borrow_prop_memory<Torus>(
streams, gpu_indexes, gpu_count, params, num_radix_blocks,
compute_overflow, allocate_gpu_memory, size_tracker);
compute_overflow, allocate_gpu_memory, &size_tracker);
return size_tracker;
}
@@ -2179,8 +2138,7 @@ void host_single_borrow_propagate(
&shifted_simulators, mem->prop_simu_group_carries_mem->simulators,
num_radix_blocks - 1, num_radix_blocks);
host_addition<Torus>(streams[0], gpu_indexes[0], mem->overflow_block,
mem->overflow_block, &shifted_simulators, 1,
params.message_modulus, params.carry_modulus);
mem->overflow_block, &shifted_simulators, 1);
}
CudaRadixCiphertextFFI resolved_borrows;
as_radix_ciphertext_slice<Torus>(
@@ -2192,8 +2150,7 @@ void host_single_borrow_propagate(
// borrows
if (compute_overflow == outputFlag::FLAG_OVERFLOW) {
host_addition<Torus>(streams[0], gpu_indexes[0], mem->overflow_block,
mem->overflow_block, &resolved_borrows, 1,
params.message_modulus, params.carry_modulus);
mem->overflow_block, &resolved_borrows, 1);
}
cuda_event_record(mem->incoming_events[0], streams[0], gpu_indexes[0]);
@@ -2343,8 +2300,6 @@ __host__ void integer_radix_apply_noise_squashing_kb(
for (uint i = 0; i < lut->num_blocks; i++) {
lwe_array_out->degrees[i] = lut->degrees[0];
lwe_array_out->noise_levels[i] = NoiseLevel::NOMINAL;
CHECK_NOISE_LEVEL(lwe_array_out->noise_levels[i], params.message_modulus,
params.carry_modulus);
}
POP_RANGE()
}

View File

@@ -210,8 +210,7 @@ uint64_t scratch_cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks_in_radix, uint32_t max_num_radix_in_vec,
uint32_t message_modulus, uint32_t carry_modulus, PBS_TYPE pbs_type,
bool reduce_degrees_for_single_carry_propagation, bool allocate_gpu_memory,
bool allocate_ms_array) {
bool allocate_gpu_memory, bool allocate_ms_array) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
glwe_dimension * polynomial_size, lwe_dimension,
@@ -221,26 +220,79 @@ uint64_t scratch_cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
return scratch_cuda_integer_partial_sum_ciphertexts_vec_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
(int_sum_ciphertexts_vec_memory<uint64_t> **)mem_ptr, num_blocks_in_radix,
max_num_radix_in_vec, reduce_degrees_for_single_carry_propagation, params,
allocate_gpu_memory);
max_num_radix_in_vec, params, allocate_gpu_memory);
}
void cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *radix_lwe_out,
CudaRadixCiphertextFFI *radix_lwe_vec, int8_t *mem_ptr, void *const *bsks,
void *const *ksks,
CudaRadixCiphertextFFI *radix_lwe_vec,
bool reduce_degrees_for_single_carry_propagation, int8_t *mem_ptr,
void *const *bsks, void *const *ksks,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key) {
auto mem = (int_sum_ciphertexts_vec_memory<uint64_t> *)mem_ptr;
if (radix_lwe_vec->num_radix_blocks % radix_lwe_out->num_radix_blocks != 0)
PANIC("Cuda error: input vector length should be a multiple of the "
"output's number of radix blocks")
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, bsks, (uint64_t **)(ksks), ms_noise_reduction_key, mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks);
switch (mem->params.polynomial_size) {
case 512:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t, AmortizedDegree<512>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, reduce_degrees_for_single_carry_propagation, bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks);
break;
case 1024:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<1024>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, reduce_degrees_for_single_carry_propagation, bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks);
break;
case 2048:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<2048>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, reduce_degrees_for_single_carry_propagation, bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks);
break;
case 4096:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<4096>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, reduce_degrees_for_single_carry_propagation, bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks);
break;
case 8192:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<8192>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, reduce_degrees_for_single_carry_propagation, bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks);
break;
case 16384:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<16384>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, reduce_degrees_for_single_carry_propagation, bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks);
break;
default:
PANIC("Cuda error (integer multiplication): unsupported polynomial size. "
"Supported N's are powers of two in the interval [256..16384].")
}
}
void cleanup_cuda_integer_radix_partial_sum_ciphertexts_vec(

View File

@@ -20,6 +20,7 @@
#include <fstream>
#include <iostream>
#include <omp.h>
#include <queue>
#include <sstream>
#include <string>
#include <vector>
@@ -94,10 +95,18 @@ __global__ inline void radix_vec_to_columns(uint32_t *const *const columns,
columns_counter[idx] = cnt;
}
__global__ inline void prepare_new_columns(
template <typename Torus>
__global__ inline void prepare_new_columns_and_pbs_indexes(
uint32_t *const *const new_columns, uint32_t *const new_columns_counter,
const uint32_t *const *const columns, const uint32_t *const columns_counter,
const uint32_t chunk_size) {
Torus *const pbs_indexes_in, Torus *const pbs_indexes_out,
Torus *const lut_indexes, const uint32_t *const *const columns,
const uint32_t *const columns_counter, const uint32_t chunk_size) {
__shared__ uint32_t counter;
if (threadIdx.x == 0) {
counter = 0;
}
__syncthreads();
const uint32_t base_id = threadIdx.x;
const uint32_t column_len = columns_counter[base_id];
@@ -108,8 +117,13 @@ __global__ inline void prepare_new_columns(
// for message ciphertexts in and out index should be same
const uint32_t in_index = columns[base_id][i];
new_columns[base_id][ct_count] = in_index;
const uint32_t pbs_index = atomicAdd(&counter, 1);
pbs_indexes_in[pbs_index] = in_index;
pbs_indexes_out[pbs_index] = in_index;
lut_indexes[pbs_index] = 0;
++ct_count;
}
__syncthreads();
if (base_id > 0) {
const uint32_t prev_base_id = base_id - 1;
@@ -119,8 +133,13 @@ __global__ inline void prepare_new_columns(
// those indexes are for carry ciphertexts
// for carry ciphertexts input is same as for message
// output will be placed to next block in the column
const uint32_t in_index = columns[prev_base_id][i];
const uint32_t out_index = columns[prev_base_id][i + 1];
new_columns[base_id][ct_count] = out_index;
const uint32_t pbs_index = atomicAdd(&counter, 1);
pbs_indexes_in[pbs_index] = in_index;
pbs_indexes_out[pbs_index] = out_index;
lut_indexes[pbs_index] = 1;
++ct_count;
}
}
@@ -134,6 +153,16 @@ __global__ inline void prepare_new_columns(
new_columns_counter[base_id] = ct_count;
}
template <typename Torus>
__global__ inline void prepare_final_pbs_indexes(
Torus *const pbs_indexes_in, Torus *const pbs_indexes_out,
Torus *const lut_indexes, const uint32_t num_radix_blocks) {
int idx = threadIdx.x;
pbs_indexes_in[idx] = idx % num_radix_blocks;
pbs_indexes_out[idx] = idx + idx / num_radix_blocks;
lut_indexes[idx] = idx / num_radix_blocks;
}
template <typename Torus>
__global__ void calculate_chunks(Torus *const input_blocks,
const uint32_t *const *const columns,
@@ -244,27 +273,133 @@ __global__ void fill_radix_from_lsb_msb(Torus *result_blocks, Torus *lsb_blocks,
}
}
struct radix_columns {
std::vector<size_t> columns_counter;
size_t num_blocks;
size_t num_radix_in_vec;
size_t chunk_size;
radix_columns(const uint64_t *const input_degrees, size_t num_blocks,
size_t num_radix_in_vec, size_t chunk_size,
bool &needs_processing)
: num_blocks(num_blocks), num_radix_in_vec(num_radix_in_vec),
chunk_size(chunk_size) {
needs_processing = false;
columns_counter.resize(num_blocks, 0);
for (size_t i = 0; i < num_radix_in_vec; ++i) {
for (size_t j = 0; j < num_blocks; ++j) {
if (input_degrees[i * num_blocks + j])
columns_counter[j] += 1;
}
}
for (size_t i = 0; i < num_blocks; ++i) {
if (columns_counter[i] > chunk_size) {
needs_processing = true;
break;
}
}
}
void next_accumulation(size_t &total_ciphertexts, size_t &message_ciphertexts,
bool &needs_processing) {
message_ciphertexts = 0;
total_ciphertexts = 0;
needs_processing = false;
for (int i = num_blocks - 1; i > 0; --i) {
size_t cur_count = columns_counter[i];
size_t prev_count = columns_counter[i - 1];
size_t new_count = 0;
// accumulated_blocks from current columns
new_count += cur_count / chunk_size;
// all accumulated message blocks needs pbs
message_ciphertexts += new_count;
// carry blocks from previous columns
new_count += prev_count / chunk_size;
// both carry and message blocks that needs pbs
total_ciphertexts += new_count;
// now add remaining non accumulated blocks that does not require pbs
new_count += cur_count % chunk_size;
columns_counter[i] = new_count;
if (new_count > chunk_size)
needs_processing = true;
}
// now do it for 0th block
size_t new_count = columns_counter[0] / chunk_size;
message_ciphertexts += new_count;
total_ciphertexts += new_count;
new_count += columns_counter[0] % chunk_size;
columns_counter[0] = new_count;
if (new_count > chunk_size) {
needs_processing = true;
}
}
};
inline void calculate_final_degrees(uint64_t *const out_degrees,
const uint64_t *const input_degrees,
size_t num_blocks, size_t num_radix_in_vec,
size_t chunk_size,
uint64_t message_modulus) {
auto get_degree = [message_modulus](uint64_t degree) -> uint64_t {
return std::min(message_modulus - 1, degree);
};
std::vector<std::queue<uint64_t>> columns(num_blocks);
for (size_t i = 0; i < num_radix_in_vec; ++i) {
for (size_t j = 0; j < num_blocks; ++j) {
if (input_degrees[i * num_blocks + j])
columns[j].push(input_degrees[i * num_blocks + j]);
}
}
for (size_t i = 0; i < num_blocks; ++i) {
auto &col = columns[i];
while (col.size() > 1) {
uint32_t cur_degree = 0;
size_t mn = std::min(chunk_size, col.size());
for (int j = 0; j < mn; ++j) {
cur_degree += col.front();
col.pop();
}
const uint64_t new_degree = get_degree(cur_degree);
col.push(new_degree);
if ((i + 1) < num_blocks) {
columns[i + 1].push(new_degree);
}
}
}
for (int i = 0; i < num_blocks; i++) {
out_degrees[i] = (columns[i].empty()) ? 0 : columns[i].front();
}
}
template <typename Torus>
__host__ uint64_t scratch_cuda_integer_partial_sum_ciphertexts_vec_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, int_sum_ciphertexts_vec_memory<Torus> **mem_ptr,
uint32_t num_blocks_in_radix, uint32_t max_num_radix_in_vec,
bool reduce_degrees_for_single_carry_propagation, int_radix_params params,
bool allocate_gpu_memory) {
int_radix_params params, bool allocate_gpu_memory) {
uint64_t size_tracker = 0;
*mem_ptr = new int_sum_ciphertexts_vec_memory<Torus>(
streams, gpu_indexes, gpu_count, params, num_blocks_in_radix,
max_num_radix_in_vec, reduce_degrees_for_single_carry_propagation,
allocate_gpu_memory, size_tracker);
max_num_radix_in_vec, allocate_gpu_memory, &size_tracker);
return size_tracker;
}
template <typename Torus>
template <typename Torus, class params>
__host__ void host_integer_partial_sum_ciphertexts_vec_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, CudaRadixCiphertextFFI *radix_lwe_out,
CudaRadixCiphertextFFI *terms, void *const *bsks, uint64_t *const *ksks,
CudaRadixCiphertextFFI *terms,
bool reduce_degrees_for_single_carry_propagation, void *const *bsks,
uint64_t *const *ksks,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
int_sum_ciphertexts_vec_memory<uint64_t> *mem_ptr,
uint32_t num_radix_blocks, uint32_t num_radix_in_vec) {
@@ -289,6 +424,10 @@ __host__ void host_integer_partial_sum_ciphertexts_vec_kb(
auto d_columns_counter = mem_ptr->d_columns_counter;
auto d_new_columns = mem_ptr->d_new_columns;
auto d_new_columns_counter = mem_ptr->d_new_columns_counter;
auto d_pbs_indexes_in = mem_ptr->luts_message_carry->lwe_indexes_in;
auto d_pbs_indexes_out = mem_ptr->luts_message_carry->lwe_indexes_out;
auto luts_message_carry = mem_ptr->luts_message_carry;
auto glwe_dimension = mem_ptr->params.glwe_dimension;
auto polynomial_size = mem_ptr->params.polynomial_size;
@@ -303,9 +442,8 @@ __host__ void host_integer_partial_sum_ciphertexts_vec_kb(
uint32_t num_many_lut = 1;
uint32_t lut_stride = 0;
if (terms->num_radix_blocks == 0) {
if (terms->num_radix_blocks == 0)
return;
}
if (num_radix_in_vec == 1) {
copy_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0],
radix_lwe_out, 0, num_radix_blocks,
@@ -318,12 +456,14 @@ __host__ void host_integer_partial_sum_ciphertexts_vec_kb(
as_radix_ciphertext_slice<Torus>(&terms_slice, terms, num_radix_blocks,
2 * num_radix_blocks);
host_addition<Torus>(streams[0], gpu_indexes[0], radix_lwe_out, terms,
&terms_slice, num_radix_blocks,
mem_ptr->params.message_modulus,
mem_ptr->params.carry_modulus);
&terms_slice, num_radix_blocks);
return;
}
if (mem_ptr->mem_reuse) {
mem_ptr->setup_lookup_tables(streams, gpu_indexes, gpu_count);
}
if (current_blocks != terms) {
copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
current_blocks, terms);
@@ -339,51 +479,27 @@ __host__ void host_integer_partial_sum_ciphertexts_vec_kb(
num_radix_in_vec);
bool needs_processing = false;
radix_columns<Torus> current_columns(current_blocks->degrees,
num_radix_blocks, num_radix_in_vec,
chunk_size, needs_processing);
int number_of_threads = std::min(256, (int)mem_ptr->params.polynomial_size);
radix_columns current_columns(current_blocks->degrees, num_radix_blocks,
num_radix_in_vec, chunk_size, needs_processing);
int number_of_threads = min(256, params::degree);
int part_count = (big_lwe_size + number_of_threads - 1) / number_of_threads;
const dim3 number_of_blocks_2d(num_radix_blocks, part_count, 1);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
mem_ptr->setup_lookup_tables(streams, gpu_indexes, gpu_count,
num_radix_in_vec, current_blocks->degrees);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
while (needs_processing) {
auto luts_message_carry = mem_ptr->luts_message_carry;
auto d_pbs_indexes_in = mem_ptr->luts_message_carry->lwe_indexes_in;
auto d_pbs_indexes_out = mem_ptr->luts_message_carry->lwe_indexes_out;
cuda_set_device(gpu_indexes[0]);
calculate_chunks<Torus>
<<<number_of_blocks_2d, number_of_threads, 0, streams[0]>>>(
(Torus *)(current_blocks->ptr), d_columns, d_columns_counter,
chunk_size, big_lwe_size);
prepare_new_columns<<<1, num_radix_blocks, 0, streams[0]>>>(
d_new_columns, d_new_columns_counter, d_columns, d_columns_counter,
chunk_size);
prepare_new_columns_and_pbs_indexes<<<1, num_radix_blocks, 0, streams[0]>>>(
d_new_columns, d_new_columns_counter, d_pbs_indexes_in,
d_pbs_indexes_out, luts_message_carry->get_lut_indexes(0, 0), d_columns,
d_columns_counter, chunk_size);
uint32_t total_ciphertexts = 0;
uint32_t total_messages = 0;
current_columns.next_accumulation(luts_message_carry->h_lwe_indexes_in,
luts_message_carry->h_lwe_indexes_out,
luts_message_carry->h_lut_indexes,
total_ciphertexts, total_messages,
size_t total_ciphertexts;
size_t total_messages;
current_columns.next_accumulation(total_ciphertexts, total_messages,
needs_processing);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
luts_message_carry->set_lwe_indexes(streams[0], gpu_indexes[0],
luts_message_carry->h_lwe_indexes_in,
luts_message_carry->h_lwe_indexes_out);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
cuda_memcpy_with_size_tracking_async_to_gpu(
luts_message_carry->get_lut_indexes(0, 0),
luts_message_carry->h_lut_indexes,
luts_message_carry->num_blocks * sizeof(Torus), streams[0],
gpu_indexes[0], true);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
luts_message_carry->broadcast_lut(streams, gpu_indexes);
auto active_gpu_count = get_active_gpu_count(total_ciphertexts, gpu_count);
if (active_gpu_count == 1) {
@@ -405,45 +521,57 @@ __host__ void host_integer_partial_sum_ciphertexts_vec_kb(
total_ciphertexts, mem_ptr->params.pbs_type, num_many_lut,
lut_stride);
} else {
Torus *h_lwe_indexes_in_pinned;
Torus *h_lwe_indexes_out_pinned;
cudaMallocHost((void **)&h_lwe_indexes_in_pinned,
total_ciphertexts * sizeof(Torus));
cudaMallocHost((void **)&h_lwe_indexes_out_pinned,
total_ciphertexts * sizeof(Torus));
for (uint32_t i = 0; i < total_ciphertexts; i++) {
h_lwe_indexes_in_pinned[i] = luts_message_carry->h_lwe_indexes_in[i];
h_lwe_indexes_out_pinned[i] = luts_message_carry->h_lwe_indexes_out[i];
}
cuda_memcpy_async_to_cpu(
h_lwe_indexes_in_pinned, luts_message_carry->lwe_indexes_in,
total_ciphertexts * sizeof(Torus), streams[0], gpu_indexes[0]);
cuda_memcpy_async_to_cpu(
h_lwe_indexes_out_pinned, luts_message_carry->lwe_indexes_out,
total_ciphertexts * sizeof(Torus), streams[0], gpu_indexes[0]);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
for (uint32_t i = 0; i < total_ciphertexts; i++) {
luts_message_carry->h_lwe_indexes_in[i] = h_lwe_indexes_in_pinned[i];
luts_message_carry->h_lwe_indexes_out[i] = h_lwe_indexes_out_pinned[i];
}
cudaFreeHost(h_lwe_indexes_in_pinned);
cudaFreeHost(h_lwe_indexes_out_pinned);
luts_message_carry->broadcast_lut(streams, gpu_indexes, 0);
luts_message_carry->using_trivial_lwe_indexes = false;
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, current_blocks, current_blocks, bsks,
ksks, ms_noise_reduction_key, luts_message_carry, total_ciphertexts);
streams, gpu_indexes, active_gpu_count, current_blocks,
current_blocks, bsks, ksks, ms_noise_reduction_key,
luts_message_carry, total_ciphertexts);
}
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
cuda_set_device(gpu_indexes[0]);
std::swap(d_columns, d_new_columns);
std::swap(d_columns_counter, d_new_columns_counter);
}
cuda_set_device(gpu_indexes[0]);
calculate_final_chunk_into_radix<Torus>
<<<number_of_blocks_2d, number_of_threads, 0, streams[0]>>>(
(Torus *)(radix_lwe_out->ptr), (Torus *)(current_blocks->ptr),
d_columns, d_columns_counter, chunk_size, big_lwe_size);
if (mem_ptr->reduce_degrees_for_single_carry_propagation) {
auto luts_message_carry = mem_ptr->luts_message_carry;
auto d_pbs_indexes_in = mem_ptr->luts_message_carry->lwe_indexes_in;
auto d_pbs_indexes_out = mem_ptr->luts_message_carry->lwe_indexes_out;
auto h_pbs_indexes_in = mem_ptr->luts_message_carry->h_lwe_indexes_in;
auto h_pbs_indexes_out = mem_ptr->luts_message_carry->h_lwe_indexes_out;
auto h_lut_indexes = mem_ptr->luts_message_carry->h_lut_indexes;
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
current_columns.final_calculation(luts_message_carry->h_lwe_indexes_in,
luts_message_carry->h_lwe_indexes_out,
luts_message_carry->h_lut_indexes);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
mem_ptr->luts_message_carry->set_lwe_indexes(
streams[0], gpu_indexes[0], h_pbs_indexes_in, h_pbs_indexes_out);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
cuda_memcpy_with_size_tracking_async_to_gpu(
luts_message_carry->get_lut_indexes(0, 0), h_lut_indexes,
2 * num_radix_blocks * sizeof(Torus), streams[0], gpu_indexes[0], true);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
luts_message_carry->broadcast_lut(streams, gpu_indexes);
if (reduce_degrees_for_single_carry_propagation) {
prepare_final_pbs_indexes<Torus>
<<<1, 2 * num_radix_blocks, 0, streams[0]>>>(
d_pbs_indexes_in, d_pbs_indexes_out,
luts_message_carry->get_lut_indexes(0, 0), num_radix_blocks);
set_zero_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], current_blocks, num_radix_blocks,
num_radix_blocks + 1);
cuda_memset_async(
(Torus *)(current_blocks->ptr) + big_lwe_size * num_radix_blocks, 0,
big_lwe_size * sizeof(Torus), streams[0], gpu_indexes[0]);
auto active_gpu_count =
get_active_gpu_count(2 * num_radix_blocks, gpu_count);
@@ -466,12 +594,39 @@ __host__ void host_integer_partial_sum_ciphertexts_vec_kb(
2 * num_radix_blocks, mem_ptr->params.pbs_type, num_many_lut,
lut_stride);
} else {
uint32_t num_blocks_in_apply_lut = 2 * num_radix_blocks;
Torus *h_lwe_indexes_in_pinned;
Torus *h_lwe_indexes_out_pinned;
cudaMallocHost((void **)&h_lwe_indexes_in_pinned,
num_blocks_in_apply_lut * sizeof(Torus));
cudaMallocHost((void **)&h_lwe_indexes_out_pinned,
num_blocks_in_apply_lut * sizeof(Torus));
for (uint32_t i = 0; i < num_blocks_in_apply_lut; i++) {
h_lwe_indexes_in_pinned[i] = luts_message_carry->h_lwe_indexes_in[i];
h_lwe_indexes_out_pinned[i] = luts_message_carry->h_lwe_indexes_out[i];
}
cuda_memcpy_async_to_cpu(
h_lwe_indexes_in_pinned, luts_message_carry->lwe_indexes_in,
num_blocks_in_apply_lut * sizeof(Torus), streams[0], gpu_indexes[0]);
cuda_memcpy_async_to_cpu(
h_lwe_indexes_out_pinned, luts_message_carry->lwe_indexes_out,
num_blocks_in_apply_lut * sizeof(Torus), streams[0], gpu_indexes[0]);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
for (uint32_t i = 0; i < num_blocks_in_apply_lut; i++) {
luts_message_carry->h_lwe_indexes_in[i] = h_lwe_indexes_in_pinned[i];
luts_message_carry->h_lwe_indexes_out[i] = h_lwe_indexes_out_pinned[i];
}
cudaFreeHost(h_lwe_indexes_in_pinned);
cudaFreeHost(h_lwe_indexes_out_pinned);
luts_message_carry->broadcast_lut(streams, gpu_indexes, 0);
luts_message_carry->using_trivial_lwe_indexes = false;
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, active_gpu_count, current_blocks, radix_lwe_out,
bsks, ksks, ms_noise_reduction_key, luts_message_carry,
2 * num_radix_blocks);
num_blocks_in_apply_lut);
}
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
calculate_final_degrees(radix_lwe_out->degrees, terms->degrees,
num_radix_blocks, num_radix_in_vec, chunk_size,
mem_ptr->params.message_modulus);
@@ -482,8 +637,7 @@ __host__ void host_integer_partial_sum_ciphertexts_vec_kb(
host_addition<Torus>(streams[0], gpu_indexes[0], radix_lwe_out,
current_blocks, &current_blocks_slice,
num_radix_blocks, mem_ptr->params.message_modulus,
mem_ptr->params.carry_modulus);
num_radix_blocks);
}
}
@@ -618,10 +772,10 @@ __host__ void host_integer_mult_radix_kb(
size_t b_id = i % num_blocks;
terms_degree_msb[i] = (b_id > r_id) ? message_modulus - 2 : 0;
}
host_integer_partial_sum_ciphertexts_vec_kb<Torus>(
streams, gpu_indexes, gpu_count, radix_lwe_out, vector_result_sb, bsks,
ksks, ms_noise_reduction_key, mem_ptr->sum_ciphertexts_mem, num_blocks,
2 * num_blocks);
host_integer_partial_sum_ciphertexts_vec_kb<Torus, params>(
streams, gpu_indexes, gpu_count, radix_lwe_out, vector_result_sb, true,
bsks, ksks, ms_noise_reduction_key, mem_ptr->sum_ciphertexts_mem,
num_blocks, 2 * num_blocks);
auto scp_mem_ptr = mem_ptr->sc_prop_mem;
uint32_t requested_flag = outputFlag::FLAG_NONE;
@@ -642,7 +796,7 @@ __host__ uint64_t scratch_cuda_integer_mult_radix_ciphertext_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_mul_memory<Torus>(
streams, gpu_indexes, gpu_count, params, is_boolean_left,
is_boolean_right, num_radix_blocks, allocate_gpu_memory, size_tracker);
is_boolean_right, num_radix_blocks, allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -106,8 +106,6 @@ __host__ void host_integer_radix_negation(
lwe_array_out->degrees[i] = z - static_cast<uint64_t>(zb);
lwe_array_out->noise_levels[i] = lwe_array_in->noise_levels[i];
CHECK_NOISE_LEVEL(lwe_array_out->noise_levels[i], message_modulus,
carry_modulus);
zb = z / message_modulus;
}
}
@@ -123,7 +121,7 @@ __host__ uint64_t scratch_cuda_integer_overflowing_sub_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_overflowing_sub_memory<Torus>(
streams, gpu_indexes, gpu_count, params, num_blocks, allocate_gpu_memory,
allocate_ms_array, size_tracker);
allocate_ms_array, &size_tracker);
POP_RANGE()
return size_tracker;
}

View File

@@ -13,7 +13,7 @@ void create_zero_radix_ciphertext_async(cudaStream_t const stream,
CudaRadixCiphertextFFI *radix,
const uint32_t num_radix_blocks,
const uint32_t lwe_dimension,
uint64_t &size_tracker,
uint64_t *size_tracker,
bool allocate_gpu_memory) {
PUSH_RANGE("create zero radix ct");
radix->lwe_dimension = lwe_dimension;

View File

@@ -47,7 +47,7 @@ __host__ void host_integer_radix_scalar_bitop_kb(
cuda_memcpy_async_gpu_to_gpu(lut->get_lut_indexes(0, 0), clear_blocks,
num_clear_blocks * sizeof(Torus), streams[0],
gpu_indexes[0]);
lut->broadcast_lut(streams, gpu_indexes);
lut->broadcast_lut(streams, gpu_indexes, 0);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, output, input, bsks, ksks,

View File

@@ -154,7 +154,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
streams[0], gpu_indexes[0], lut->get_lut(0, 0), lut->get_degree(0),
lut->get_max_degree(0), glwe_dimension, polynomial_size,
message_modulus, carry_modulus, scalar_last_leaf_lut_f, true);
lut->broadcast_lut(streams, gpu_indexes);
lut->broadcast_lut(streams, gpu_indexes, 0);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out,
@@ -253,7 +253,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
streams[0], gpu_indexes[0], lut->get_lut(0, 0), lut->get_degree(0),
lut->get_max_degree(0), glwe_dimension, polynomial_size,
message_modulus, carry_modulus, scalar_bivariate_last_leaf_lut_f, true);
lut->broadcast_lut(streams, gpu_indexes);
lut->broadcast_lut(streams, gpu_indexes, 0);
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, lwe_array_lsb_out,
@@ -277,9 +277,8 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
auto overflowed = x_0 < x_1;
return (Torus)(invert_flags.second ^ overflowed);
};
uint64_t size = 0;
int_radix_lut<Torus> *one_block_lut = new int_radix_lut<Torus>(
streams, gpu_indexes, gpu_count, params, 1, 1, true, size);
streams, gpu_indexes, gpu_count, params, 1, 1, true, nullptr);
generate_device_accumulator<Torus>(
streams[0], gpu_indexes[0], one_block_lut->get_lut(0, 0),
@@ -287,7 +286,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, one_block_lut_f, true);
one_block_lut->broadcast_lut(streams, gpu_indexes);
one_block_lut->broadcast_lut(streams, gpu_indexes, 0);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, lwe_array_in, bsks,
@@ -434,7 +433,7 @@ __host__ void integer_radix_signed_scalar_difference_check_kb(
streams[0], gpu_indexes[0], lut->get_lut(0, 0), lut->get_degree(0),
lut->get_max_degree(0), glwe_dimension, polynomial_size,
message_modulus, carry_modulus, scalar_bivariate_last_leaf_lut_f, true);
lut->broadcast_lut(streams, gpu_indexes);
lut->broadcast_lut(streams, gpu_indexes, 0);
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, are_all_msb_zeros,
@@ -540,7 +539,7 @@ __host__ void integer_radix_signed_scalar_difference_check_kb(
signed_msb_lut->get_degree(0), signed_msb_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, lut_f, true);
signed_msb_lut->broadcast_lut(streams, gpu_indexes);
signed_msb_lut->broadcast_lut(streams, gpu_indexes, 0);
CudaRadixCiphertextFFI sign_block;
as_radix_ciphertext_slice<Torus>(
@@ -579,9 +578,8 @@ __host__ void integer_radix_signed_scalar_difference_check_kb(
is_x_less_than_y_given_input_borrow<Torus>(x_0, x_1, 0,
message_modulus);
};
uint64_t size = 0;
int_radix_lut<Torus> *one_block_lut = new int_radix_lut<Torus>(
streams, gpu_indexes, gpu_count, params, 1, 1, true, size);
streams, gpu_indexes, gpu_count, params, 1, 1, true, nullptr);
generate_device_accumulator<Torus>(
streams[0], gpu_indexes[0], one_block_lut->get_lut(0, 0),
@@ -589,7 +587,7 @@ __host__ void integer_radix_signed_scalar_difference_check_kb(
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, one_block_lut_f, true);
one_block_lut->broadcast_lut(streams, gpu_indexes);
one_block_lut->broadcast_lut(streams, gpu_indexes, 0);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, lwe_array_in, bsks,
@@ -819,7 +817,7 @@ __host__ void host_integer_radix_scalar_equality_check_kb(
num_halved_scalar_blocks * sizeof(Torus), lsb_streams[0],
gpu_indexes[0]);
}
scalar_comparison_luts->broadcast_lut(lsb_streams, gpu_indexes);
scalar_comparison_luts->broadcast_lut(lsb_streams, gpu_indexes, 0);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
lsb_streams, gpu_indexes, gpu_count, mem_ptr->tmp_lwe_array_out,

View File

@@ -1,202 +0,0 @@
#include "scalar_div.cuh"
uint64_t scratch_cuda_integer_unsigned_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, const CudaScalarDivisorFFI *scalar_divisor_ffi,
bool allocate_gpu_memory, bool allocate_ms_array) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
glwe_dimension * polynomial_size, lwe_dimension,
ks_level, ks_base_log, pbs_level, pbs_base_log,
grouping_factor, message_modulus, carry_modulus,
allocate_ms_array);
return scratch_integer_unsigned_scalar_div_radix<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, params,
(int_unsigned_scalar_div_mem<uint64_t> **)mem_ptr, num_blocks,
scalar_divisor_ffi, allocate_gpu_memory);
}
void cuda_integer_unsigned_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *numerator_ct, int8_t *mem_ptr, void *const *bsks,
void *const *ksks,
const CudaModulusSwitchNoiseReductionKeyFFI *ms_noise_reduction_key,
const CudaScalarDivisorFFI *scalar_divisor_ffi) {
host_integer_unsigned_scalar_div_radix<uint64_t>(
(cudaStream_t *)streams, gpu_indexes, gpu_count, numerator_ct,
(int_unsigned_scalar_div_mem<uint64_t> *)mem_ptr, bsks, (uint64_t **)ksks,
ms_noise_reduction_key, scalar_divisor_ffi);
}
void cleanup_cuda_integer_unsigned_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr_void) {
int_unsigned_scalar_div_mem<uint64_t> *mem_ptr =
(int_unsigned_scalar_div_mem<uint64_t> *)(*mem_ptr_void);
mem_ptr->release((cudaStream_t *)streams, gpu_indexes, gpu_count);
delete mem_ptr;
*mem_ptr_void = nullptr;
}
/* C API: builds the scratch state for the signed scalar division and
 * returns the GPU byte count tracked during construction (honoring
 * `allocate_gpu_memory`). The typed buffer is written through `mem_ptr`. */
uint64_t scratch_cuda_integer_signed_scalar_div_radix_kb_64(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
    uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
    uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
    uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
    PBS_TYPE pbs_type, const CudaScalarDivisorFFI *scalar_divisor_ffi,
    bool allocate_gpu_memory, bool allocate_ms_array) {

  // The "big LWE" dimension is glwe_dimension * polynomial_size by definition.
  const int_radix_params radix_params(
      pbs_type, glwe_dimension, polynomial_size,
      glwe_dimension * polynomial_size, lwe_dimension, ks_level, ks_base_log,
      pbs_level, pbs_base_log, grouping_factor, message_modulus, carry_modulus,
      allocate_ms_array);

  return scratch_integer_signed_scalar_div_radix_kb<uint64_t>(
      (cudaStream_t *)streams, gpu_indexes, gpu_count, radix_params,
      (int_signed_scalar_div_mem<uint64_t> **)mem_ptr, num_blocks,
      scalar_divisor_ffi, allocate_gpu_memory);
}
void cuda_integer_signed_scalar_div_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *numerator_ct, int8_t *mem_ptr, void *const *bsks,
void *const *ksks,
const CudaModulusSwitchNoiseReductionKeyFFI *ms_noise_reduction_key,
const CudaScalarDivisorFFI *scalar_divisor_ffi, uint32_t numerator_bits) {
host_integer_signed_scalar_div_radix_kb<uint64_t>(
(cudaStream_t *)streams, gpu_indexes, gpu_count, numerator_ct,
(int_signed_scalar_div_mem<uint64_t> *)mem_ptr, bsks, (uint64_t **)ksks,
ms_noise_reduction_key, scalar_divisor_ffi, numerator_bits);
}
/* Tears down the signed scalar-division scratch buffer: releases device
 * allocations, deletes the host object and nulls the opaque handle. */
void cleanup_cuda_integer_signed_scalar_div_radix_kb_64(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    int8_t **mem_ptr_void) {

  auto *buffer = (int_signed_scalar_div_mem<uint64_t> *)(*mem_ptr_void);
  buffer->release((cudaStream_t *)streams, gpu_indexes, gpu_count);
  delete buffer;
  // Clearing the handle guards against use-after-free on double cleanup.
  *mem_ptr_void = nullptr;
}
/* C API: builds the scratch state for the unsigned scalar div/rem
 * operation and returns the GPU byte count tracked during construction. */
uint64_t scratch_integer_unsigned_scalar_div_rem_radix_kb_64(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
    uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
    uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
    uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
    PBS_TYPE pbs_type, const CudaScalarDivisorFFI *scalar_divisor_ffi,
    uint32_t const active_bits_divisor, bool allocate_gpu_memory,
    bool allocate_ms_array) {

  // The "big LWE" dimension is glwe_dimension * polynomial_size by definition.
  const int_radix_params radix_params(
      pbs_type, glwe_dimension, polynomial_size,
      glwe_dimension * polynomial_size, lwe_dimension, ks_level, ks_base_log,
      pbs_level, pbs_base_log, grouping_factor, message_modulus, carry_modulus,
      allocate_ms_array);

  return scratch_integer_unsigned_scalar_div_rem_radix<uint64_t>(
      (cudaStream_t *)streams, gpu_indexes, gpu_count, radix_params,
      (int_unsigned_scalar_div_rem_buffer<uint64_t> **)mem_ptr, num_blocks,
      scalar_divisor_ffi, active_bits_divisor, allocate_gpu_memory);
}
/* C API entry point: unsigned scalar division with remainder for 64-bit
 * torus elements. `quotient_ct` must hold the numerator on entry; on exit
 * it holds the quotient and `remainder_ct` the remainder.
 *
 * Fix: the original cast the `void const *` parameters `clear_blocks` /
 * `h_clear_blocks` to non-const `uint64_t *`, silently dropping const via
 * the C-style cast. The host routine takes `Torus const *`, so cast to
 * `uint64_t const *` and keep const-correctness intact. */
void cuda_integer_unsigned_scalar_div_rem_radix_kb_64(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    CudaRadixCiphertextFFI *quotient_ct, CudaRadixCiphertextFFI *remainder_ct,
    int8_t *mem_ptr, void *const *bsks, void *const *ksks,
    const CudaModulusSwitchNoiseReductionKeyFFI *ms_noise_reduction_key,
    const CudaScalarDivisorFFI *scalar_divisor_ffi,
    uint64_t const *divisor_has_at_least_one_set,
    uint64_t const *decomposed_divisor, uint32_t const num_scalars_divisor,
    void const *clear_blocks, void const *h_clear_blocks,
    uint32_t num_clear_blocks) {

  host_integer_unsigned_scalar_div_rem_radix<uint64_t>(
      (cudaStream_t *)streams, gpu_indexes, gpu_count, quotient_ct,
      remainder_ct, (int_unsigned_scalar_div_rem_buffer<uint64_t> *)mem_ptr,
      bsks, (uint64_t **)ksks, ms_noise_reduction_key, scalar_divisor_ffi,
      divisor_has_at_least_one_set, decomposed_divisor, num_scalars_divisor,
      (uint64_t const *)clear_blocks, (uint64_t const *)h_clear_blocks,
      num_clear_blocks);
}
/* Tears down the unsigned scalar div/rem scratch buffer: releases device
 * allocations, deletes the host object and nulls the opaque handle. */
void cleanup_cuda_integer_unsigned_scalar_div_rem_radix_kb_64(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    int8_t **mem_ptr_void) {

  auto *buffer = (int_unsigned_scalar_div_rem_buffer<uint64_t> *)(*mem_ptr_void);
  buffer->release((cudaStream_t *)streams, gpu_indexes, gpu_count);
  delete buffer;
  // Clearing the handle guards against use-after-free on double cleanup.
  *mem_ptr_void = nullptr;
}
/* C API: builds the scratch state for the signed scalar div/rem operation
 * and returns the GPU byte count tracked during construction. */
uint64_t scratch_integer_signed_scalar_div_rem_radix_kb_64(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
    uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
    uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
    uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
    PBS_TYPE pbs_type, const CudaScalarDivisorFFI *scalar_divisor_ffi,
    uint32_t const active_bits_divisor, bool allocate_gpu_memory,
    bool allocate_ms_array) {

  // The "big LWE" dimension is glwe_dimension * polynomial_size by definition.
  const int_radix_params radix_params(
      pbs_type, glwe_dimension, polynomial_size,
      glwe_dimension * polynomial_size, lwe_dimension, ks_level, ks_base_log,
      pbs_level, pbs_base_log, grouping_factor, message_modulus, carry_modulus,
      allocate_ms_array);

  return scratch_integer_signed_scalar_div_rem_radix<uint64_t>(
      (cudaStream_t *)streams, gpu_indexes, gpu_count, radix_params,
      (int_signed_scalar_div_rem_buffer<uint64_t> **)mem_ptr, num_blocks,
      scalar_divisor_ffi, active_bits_divisor, allocate_gpu_memory);
}
void cuda_integer_signed_scalar_div_rem_radix_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *quotient_ct, CudaRadixCiphertextFFI *remainder_ct,
int8_t *mem_ptr, void *const *bsks, void *const *ksks,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
const CudaScalarDivisorFFI *scalar_divisor_ffi,
uint64_t const *divisor_has_at_least_one_set,
uint64_t const *decomposed_divisor, uint32_t const num_scalars_divisor,
uint32_t numerator_bits) {
host_integer_signed_scalar_div_rem_radix<uint64_t>(
(cudaStream_t *)streams, gpu_indexes, gpu_count, quotient_ct,
remainder_ct, (int_signed_scalar_div_rem_buffer<uint64_t> *)mem_ptr, bsks,
(uint64_t **)ksks, ms_noise_reduction_key, scalar_divisor_ffi,
divisor_has_at_least_one_set, decomposed_divisor, num_scalars_divisor,
numerator_bits);
}
/* Tears down the signed scalar div/rem scratch buffer: releases device
 * allocations, deletes the host object and nulls the opaque handle. */
void cleanup_cuda_integer_signed_scalar_div_rem_radix_kb_64(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    int8_t **mem_ptr_void) {

  auto *buffer = (int_signed_scalar_div_rem_buffer<uint64_t> *)(*mem_ptr_void);
  buffer->release((cudaStream_t *)streams, gpu_indexes, gpu_count);
  delete buffer;
  // Clearing the handle guards against use-after-free on double cleanup.
  *mem_ptr_void = nullptr;
}

View File

@@ -1,415 +0,0 @@
#ifndef SCALAR_DIV_CUH
#define SCALAR_DIV_CUH
#include "integer/integer_utilities.h"
#include "integer/scalar_bitops.cuh"
#include "integer/scalar_mul.cuh"
#include "integer/scalar_shifts.cuh"
#include "integer/subtraction.cuh"
// Allocates the scratch buffer for the unsigned scalar division, stores it
// through `mem_ptr`, and reports the GPU byte count tracked by the buffer's
// constructor (honoring `allocate_gpu_memory`).
template <typename Torus>
__host__ uint64_t scratch_integer_unsigned_scalar_div_radix(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, const int_radix_params params,
    int_unsigned_scalar_div_mem<Torus> **mem_ptr, uint32_t num_radix_blocks,
    const CudaScalarDivisorFFI *scalar_divisor_ffi,
    const bool allocate_gpu_memory) {

  uint64_t tracked_bytes = 0;
  auto *buffer = new int_unsigned_scalar_div_mem<Torus>(
      streams, gpu_indexes, gpu_count, params, num_radix_blocks,
      scalar_divisor_ffi, allocate_gpu_memory, tracked_bytes);
  *mem_ptr = buffer;
  return tracked_bytes;
}
// In-place homomorphic unsigned division of `numerator_ct` by a clear
// scalar. The divisor value itself never appears here: the caller
// precomputes the chosen multiplier, pre/post shifts and case flags into
// `scalar_divisor_ffi`. The shift_pre / multiply-high / shift_post shape
// matches the classic multiply-and-shift constant-division scheme
// (presumably Granlund-Montgomery style — confirm against the CPU
// implementation).
template <typename Torus>
__host__ void host_integer_unsigned_scalar_div_radix(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, CudaRadixCiphertextFFI *numerator_ct,
    int_unsigned_scalar_div_mem<Torus> *mem_ptr, void *const *bsks,
    Torus *const *ksks,
    CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
    const CudaScalarDivisorFFI *scalar_divisor_ffi) {

  // |divisor| == 1: the numerator already is the quotient; nothing to do.
  if (scalar_divisor_ffi->is_abs_divisor_one) {
    return;
  }

  // Power-of-two divisor: a single scalar shift by ilog2(divisor) suffices
  // (shift direction is baked into logical_scalar_shift_mem at scratch
  // construction — presumably a right shift for division; confirm there).
  if (scalar_divisor_ffi->is_divisor_pow2) {
    host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, numerator_ct,
        scalar_divisor_ffi->ilog2_divisor, mem_ptr->logical_scalar_shift_mem,
        bsks, ksks, ms_noise_reduction_key, numerator_ct->num_radix_blocks);
    return;
  }

  // Divisor wider than the numerator: the quotient is zero.
  // NOTE(review): this overwrites the numerator with tmp_ffi, which assumes
  // tmp_ffi holds an all-zero ciphertext at this point — confirm against
  // int_unsigned_scalar_div_mem's initialization.
  if (scalar_divisor_ffi->divisor_has_more_bits_than_numerator) {
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], numerator_ct,
                                       mem_ptr->tmp_ffi);
    return;
  }

  if (scalar_divisor_ffi->is_chosen_multiplier_geq_two_pow_numerator) {
    // In this regime the precomputation must have produced shift_pre == 0
    // and shift_post >= 1; fail loudly on inconsistent FFI data.
    if (scalar_divisor_ffi->shift_pre != (uint64_t)0) {
      PANIC("shift_pre should be == 0");
    }
    if (scalar_divisor_ffi->shift_post == (uint32_t)0) {
      PANIC("shift_post should be > 0");
    }

    // Let t = mul_high(n, multiplier). The code below computes
    //   q = (((n - t) >> 1) + t) >> (shift_post - 1)
    // using tmp_ffi as scratch for t; all steps run on stream 0's ordering.
    CudaRadixCiphertextFFI *numerator_cpy = mem_ptr->tmp_ffi;

    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
                                       numerator_cpy, numerator_ct);

    // numerator_cpy <- t = mul_high(n, multiplier)
    host_integer_radix_scalar_mul_high_kb<Torus>(
        streams, gpu_indexes, gpu_count, numerator_cpy,
        mem_ptr->scalar_mul_high_mem, ksks, ms_noise_reduction_key, bsks,
        scalar_divisor_ffi);

    // numerator_ct <- n - t
    host_sub_and_propagate_single_carry<Torus>(
        streams, gpu_indexes, gpu_count, numerator_ct, numerator_cpy, nullptr,
        nullptr, mem_ptr->sub_and_propagate_mem, bsks, ksks,
        ms_noise_reduction_key, FLAG_NONE, (uint32_t)0);

    // numerator_ct <- (n - t) >> 1
    host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, numerator_ct, (uint32_t)1,
        mem_ptr->logical_scalar_shift_mem, bsks, ksks, ms_noise_reduction_key,
        numerator_ct->num_radix_blocks);

    // numerator_ct <- ((n - t) >> 1) + t
    host_add_and_propagate_single_carry<Torus>(
        streams, gpu_indexes, gpu_count, numerator_ct, numerator_cpy, nullptr,
        nullptr, mem_ptr->scp_mem, bsks, ksks, ms_noise_reduction_key,
        FLAG_NONE, (uint32_t)0);

    // numerator_ct <- result >> (shift_post - 1); the "- 1" accounts for
    // the single-bit shift already applied above.
    host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, numerator_ct,
        scalar_divisor_ffi->shift_post - (uint32_t)1,
        mem_ptr->logical_scalar_shift_mem, bsks, ksks, ms_noise_reduction_key,
        numerator_ct->num_radix_blocks);

    return;
  }

  // General case: q = mul_high(n >> shift_pre, multiplier) >> shift_post.
  host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
      streams, gpu_indexes, gpu_count, numerator_ct,
      scalar_divisor_ffi->shift_pre, mem_ptr->logical_scalar_shift_mem, bsks,
      ksks, ms_noise_reduction_key, numerator_ct->num_radix_blocks);

  host_integer_radix_scalar_mul_high_kb<Torus>(
      streams, gpu_indexes, gpu_count, numerator_ct,
      mem_ptr->scalar_mul_high_mem, ksks, ms_noise_reduction_key, bsks,
      scalar_divisor_ffi);

  host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
      streams, gpu_indexes, gpu_count, numerator_ct,
      scalar_divisor_ffi->shift_post, mem_ptr->logical_scalar_shift_mem, bsks,
      ksks, ms_noise_reduction_key, numerator_ct->num_radix_blocks);
}
// Allocates the scratch buffer for the signed scalar division, stores it
// through `mem_ptr`, and reports the GPU byte count tracked by the buffer's
// constructor (honoring `allocate_gpu_memory`).
template <typename Torus>
__host__ uint64_t scratch_integer_signed_scalar_div_radix_kb(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, int_radix_params params,
    int_signed_scalar_div_mem<Torus> **mem_ptr, uint32_t num_radix_blocks,
    const CudaScalarDivisorFFI *scalar_divisor_ffi,
    const bool allocate_gpu_memory) {

  uint64_t tracked_bytes = 0;
  auto *buffer = new int_signed_scalar_div_mem<Torus>(
      streams, gpu_indexes, gpu_count, params, num_radix_blocks,
      scalar_divisor_ffi, allocate_gpu_memory, tracked_bytes);
  *mem_ptr = buffer;
  return tracked_bytes;
}
// In-place homomorphic signed division of `numerator_ct` by a clear
// scalar. As in the unsigned variant, the divisor is represented only by
// its precomputed multiplier/shift/flag data in `scalar_divisor_ffi`.
// `numerator_bits` is the bit width of the (clear-equivalent) numerator.
// The quotient is computed for |divisor| and negated at the end if the
// divisor is negative.
template <typename Torus>
__host__ void host_integer_signed_scalar_div_radix_kb(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, CudaRadixCiphertextFFI *numerator_ct,
    int_signed_scalar_div_mem<Torus> *mem_ptr, void *const *bsks,
    Torus *const *ksks,
    CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
    const CudaScalarDivisorFFI *scalar_divisor_ffi, uint32_t numerator_bits) {

  // |divisor| == 1: the result is n (divisor == 1) or -n (divisor == -1).
  if (scalar_divisor_ffi->is_abs_divisor_one) {
    if (scalar_divisor_ffi->is_divisor_negative) {
      CudaRadixCiphertextFFI *tmp = mem_ptr->tmp_ffi;
      host_integer_radix_negation<Torus>(
          streams, gpu_indexes, gpu_count, tmp, numerator_ct,
          mem_ptr->params.message_modulus, mem_ptr->params.carry_modulus,
          numerator_ct->num_radix_blocks);
      copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
                                         numerator_ct, tmp);
    }
    return;
  }

  // Chosen multiplier wider than the numerator: the quotient is zero.
  if (scalar_divisor_ffi->chosen_multiplier_has_more_bits_than_numerator) {
    set_zero_radix_ciphertext_slice_async<Torus>(
        streams[0], gpu_indexes[0], numerator_ct, 0,
        numerator_ct->num_radix_blocks);
    return;
  }

  // `tmp` accumulates the magnitude quotient in all remaining branches.
  CudaRadixCiphertextFFI *tmp = mem_ptr->tmp_ffi;

  if (scalar_divisor_ffi->is_divisor_pow2) {
    // Power-of-two |divisor| = 2^k with k = chosen_multiplier_num_bits:
    // round toward zero by adding (2^k - 1) for negative numerators — the
    // correction term is built from the sign via arithmetic + logical
    // shifts — then arithmetic-shift right by k.
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], tmp,
                                       numerator_ct);
    // tmp <- n >> (k - 1) arithmetically (replicates the sign).
    host_integer_radix_arithmetic_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, tmp,
        scalar_divisor_ffi->chosen_multiplier_num_bits - 1,
        mem_ptr->arithmetic_scalar_shift_mem, bsks, ksks,
        ms_noise_reduction_key);
    // tmp <- tmp >>> (numerator_bits - k) logically, yielding the rounding
    // correction (2^k - 1 for negative n, 0 otherwise).
    host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, tmp,
        numerator_bits - scalar_divisor_ffi->chosen_multiplier_num_bits,
        mem_ptr->logical_scalar_shift_mem, bsks, ksks, ms_noise_reduction_key,
        tmp->num_radix_blocks);
    // tmp <- n + correction
    host_add_and_propagate_single_carry<Torus>(
        streams, gpu_indexes, gpu_count, tmp, numerator_ct, nullptr, nullptr,
        mem_ptr->scp_mem, bsks, ksks, ms_noise_reduction_key, FLAG_NONE,
        (uint32_t)0);
    // tmp <- tmp >> k arithmetically: the truncating signed quotient.
    host_integer_radix_arithmetic_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, tmp,
        scalar_divisor_ffi->chosen_multiplier_num_bits,
        mem_ptr->arithmetic_scalar_shift_mem, bsks, ksks,
        ms_noise_reduction_key);
  } else if (!scalar_divisor_ffi->is_chosen_multiplier_geq_two_pow_numerator) {
    // Small-multiplier case:
    //   q = (mul_high_signed(n, m) >> shift_post) - (n >> (numerator_bits-1))
    // The subtracted term is the sign bit of n, correcting the rounding
    // direction for negative numerators.
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], tmp,
                                       numerator_ct);
    host_integer_radix_signed_scalar_mul_high_kb<Torus>(
        streams, gpu_indexes, gpu_count, tmp, mem_ptr->scalar_mul_high_mem,
        ksks, scalar_divisor_ffi, ms_noise_reduction_key, bsks);
    host_integer_radix_arithmetic_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, tmp, scalar_divisor_ffi->shift_post,
        mem_ptr->arithmetic_scalar_shift_mem, bsks, ksks,
        ms_noise_reduction_key);
    // xsign <- n >> (numerator_bits - 1): all-ones for negative n, else 0.
    CudaRadixCiphertextFFI *xsign = mem_ptr->xsign_ffi;
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], xsign,
                                       numerator_ct);
    host_integer_radix_arithmetic_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, xsign, numerator_bits - 1,
        mem_ptr->arithmetic_scalar_shift_mem, bsks, ksks,
        ms_noise_reduction_key);
    host_sub_and_propagate_single_carry<Torus>(
        streams, gpu_indexes, gpu_count, tmp, xsign, nullptr, nullptr,
        mem_ptr->sub_and_propagate_mem, bsks, ksks, ms_noise_reduction_key,
        FLAG_NONE, (uint32_t)0);
  } else {
    // Large-multiplier case: identical to the previous branch except that
    // n is added back after the multiply-high (compensating the wrapped
    // multiplier) before the arithmetic post-shift:
    //   q = ((mul_high_signed(n, m) + n) >> shift_post) - sign(n)
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], tmp,
                                       numerator_ct);
    host_integer_radix_signed_scalar_mul_high_kb<Torus>(
        streams, gpu_indexes, gpu_count, tmp, mem_ptr->scalar_mul_high_mem,
        ksks, scalar_divisor_ffi, ms_noise_reduction_key, bsks);
    host_add_and_propagate_single_carry<Torus>(
        streams, gpu_indexes, gpu_count, tmp, numerator_ct, nullptr, nullptr,
        mem_ptr->scp_mem, bsks, ksks, ms_noise_reduction_key, FLAG_NONE,
        (uint32_t)0);
    host_integer_radix_arithmetic_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, tmp, scalar_divisor_ffi->shift_post,
        mem_ptr->arithmetic_scalar_shift_mem, bsks, ksks,
        ms_noise_reduction_key);
    CudaRadixCiphertextFFI *xsign = mem_ptr->xsign_ffi;
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], xsign,
                                       numerator_ct);
    host_integer_radix_arithmetic_scalar_shift_kb_inplace<Torus>(
        streams, gpu_indexes, gpu_count, xsign, numerator_bits - 1,
        mem_ptr->arithmetic_scalar_shift_mem, bsks, ksks,
        ms_noise_reduction_key);
    host_sub_and_propagate_single_carry<Torus>(
        streams, gpu_indexes, gpu_count, tmp, xsign, nullptr, nullptr,
        mem_ptr->sub_and_propagate_mem, bsks, ksks, ms_noise_reduction_key,
        FLAG_NONE, (uint32_t)0);
  }

  // Apply the divisor's sign: negate the magnitude quotient if needed,
  // otherwise copy it back into the caller's ciphertext.
  if (scalar_divisor_ffi->is_divisor_negative) {
    host_integer_radix_negation<Torus>(
        streams, gpu_indexes, gpu_count, numerator_ct, tmp,
        mem_ptr->params.message_modulus, mem_ptr->params.carry_modulus,
        numerator_ct->num_radix_blocks);
  } else {
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], numerator_ct,
                                       tmp);
  }
}
// Allocates the scratch buffer for the unsigned scalar div/rem operation,
// stores it through `mem_ptr`, and reports the GPU byte count tracked by
// the buffer's constructor (honoring `allocate_gpu_memory`).
template <typename Torus>
__host__ uint64_t scratch_integer_unsigned_scalar_div_rem_radix(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, const int_radix_params params,
    int_unsigned_scalar_div_rem_buffer<Torus> **mem_ptr,
    uint32_t num_radix_blocks, const CudaScalarDivisorFFI *scalar_divisor_ffi,
    uint32_t const active_bits_divisor, const bool allocate_gpu_memory) {

  uint64_t tracked_bytes = 0;
  auto *buffer = new int_unsigned_scalar_div_rem_buffer<Torus>(
      streams, gpu_indexes, gpu_count, params, num_radix_blocks,
      scalar_divisor_ffi, active_bits_divisor, allocate_gpu_memory,
      tracked_bytes);
  *mem_ptr = buffer;
  return tracked_bytes;
}
// Homomorphic unsigned division with remainder. On entry `quotient_ct`
// holds the numerator; on exit it holds q = n / divisor and `remainder_ct`
// holds r = n - q * divisor. `clear_blocks` / `h_clear_blocks` are the
// device/host sides of the clear operand fed to the scalar bitop in the
// power-of-two path — presumably a (divisor - 1) mask; confirm caller-side.
template <typename Torus>
__host__ void host_integer_unsigned_scalar_div_rem_radix(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, CudaRadixCiphertextFFI *quotient_ct,
    CudaRadixCiphertextFFI *remainder_ct,
    int_unsigned_scalar_div_rem_buffer<Torus> *mem_ptr, void *const *bsks,
    Torus *const *ksks,
    CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
    const CudaScalarDivisorFFI *scalar_divisor_ffi,
    uint64_t const *divisor_has_at_least_one_set,
    uint64_t const *decomposed_divisor, uint32_t const num_scalars_divisor,
    Torus const *clear_blocks, Torus const *h_clear_blocks,
    uint32_t num_clear_blocks) {

  // Snapshot the numerator before the in-place division overwrites it.
  auto numerator_ct = mem_ptr->numerator_ct;
  copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], numerator_ct,
                                     quotient_ct);

  // quotient_ct <- n / divisor (in place).
  host_integer_unsigned_scalar_div_radix(
      streams, gpu_indexes, gpu_count, quotient_ct, mem_ptr->unsigned_div_mem,
      bsks, ksks, ms_noise_reduction_key, scalar_divisor_ffi);

  if (scalar_divisor_ffi->is_divisor_pow2) {
    // Power-of-two divisor: the remainder is a bit-masked copy of n.
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], remainder_ct,
                                       numerator_ct);

    host_integer_radix_scalar_bitop_kb(
        streams, gpu_indexes, gpu_count, remainder_ct, remainder_ct,
        clear_blocks, h_clear_blocks, num_clear_blocks, mem_ptr->bitop_mem,
        bsks, ksks, ms_noise_reduction_key);
  } else {
    // General case: r = n - q * divisor.
    if (!scalar_divisor_ffi->is_divisor_zero) {
      copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
                                         remainder_ct, quotient_ct);

      // Multiply q by the divisor (skipped when |divisor| == 1, where
      // q * 1 == q already).
      if (!scalar_divisor_ffi->is_abs_divisor_one &&
          remainder_ct->num_radix_blocks != 0) {
        host_integer_scalar_mul_radix<Torus>(
            streams, gpu_indexes, gpu_count, remainder_ct, decomposed_divisor,
            divisor_has_at_least_one_set, mem_ptr->scalar_mul_mem, bsks, ksks,
            ms_noise_reduction_key, mem_ptr->params.message_modulus,
            num_scalars_divisor);
      }
    }

    // numerator_ct <- n - q * divisor, then publish it as the remainder.
    host_sub_and_propagate_single_carry(
        streams, gpu_indexes, gpu_count, numerator_ct, remainder_ct, nullptr,
        nullptr, mem_ptr->sub_and_propagate_mem, bsks, ksks,
        ms_noise_reduction_key, FLAG_NONE, (uint32_t)0);

    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], remainder_ct,
                                       numerator_ct);
  }
}
// Allocates the scratch buffer for the signed scalar div/rem operation,
// stores it through `mem_ptr`, and reports the GPU byte count tracked by
// the buffer's constructor (honoring `allocate_gpu_memory`).
template <typename Torus>
__host__ uint64_t scratch_integer_signed_scalar_div_rem_radix(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, const int_radix_params params,
    int_signed_scalar_div_rem_buffer<Torus> **mem_ptr,
    uint32_t num_radix_blocks, const CudaScalarDivisorFFI *scalar_divisor_ffi,
    uint32_t const active_bits_divisor, const bool allocate_gpu_memory) {

  uint64_t tracked_bytes = 0;
  auto *buffer = new int_signed_scalar_div_rem_buffer<Torus>(
      streams, gpu_indexes, gpu_count, params, num_radix_blocks,
      scalar_divisor_ffi, active_bits_divisor, allocate_gpu_memory,
      tracked_bytes);
  *mem_ptr = buffer;
  return tracked_bytes;
}
// Homomorphic signed division with remainder. On entry `quotient_ct` holds
// the numerator; on exit it holds the signed quotient and `remainder_ct`
// holds r = n - q * divisor.
template <typename Torus>
__host__ void host_integer_signed_scalar_div_rem_radix(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, CudaRadixCiphertextFFI *quotient_ct,
    CudaRadixCiphertextFFI *remainder_ct,
    int_signed_scalar_div_rem_buffer<Torus> *mem_ptr, void *const *bsks,
    Torus *const *ksks,
    CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
    const CudaScalarDivisorFFI *scalar_divisor_ffi,
    uint64_t const *divisor_has_at_least_one_set,
    uint64_t const *decomposed_divisor, uint32_t const num_scalars_divisor,
    uint32_t numerator_bits) {

  // Snapshot the numerator before the in-place division overwrites it.
  auto numerator_ct = mem_ptr->numerator_ct;
  copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], numerator_ct,
                                     quotient_ct);

  // quotient_ct <- n / divisor (in place, signed).
  host_integer_signed_scalar_div_radix_kb(
      streams, gpu_indexes, gpu_count, quotient_ct, mem_ptr->signed_div_mem,
      bsks, ksks, ms_noise_reduction_key, scalar_divisor_ffi, numerator_bits);

  // Normalize carries before the quotient is reused as a multiplicand below.
  host_propagate_single_carry<Torus>(
      streams, gpu_indexes, gpu_count, quotient_ct, nullptr, nullptr,
      mem_ptr->scp_mem, bsks, ksks, ms_noise_reduction_key, FLAG_NONE,
      (uint32_t)0);

  if (!scalar_divisor_ffi->is_divisor_negative &&
      scalar_divisor_ffi->is_divisor_pow2) {
    // Positive power-of-two divisor: q * divisor is just a scalar shift of q
    // (direction baked into logical_scalar_shift_mem — presumably left).
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], remainder_ct,
                                       quotient_ct);

    host_integer_radix_logical_scalar_shift_kb_inplace(
        streams, gpu_indexes, gpu_count, remainder_ct,
        scalar_divisor_ffi->ilog2_divisor, mem_ptr->logical_scalar_shift_mem,
        bsks, ksks, ms_noise_reduction_key, remainder_ct->num_radix_blocks);
  } else if (!scalar_divisor_ffi->is_divisor_zero) {
    // General case: remainder_ct <- q * divisor via clear-scalar multiply.
    copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], remainder_ct,
                                       quotient_ct);

    // divisor == +1 needs no multiply (q * 1 == q). A negative divisor of
    // magnitude one still goes through the multiply to apply the sign.
    bool is_divisor_one = scalar_divisor_ffi->is_abs_divisor_one &&
                          !scalar_divisor_ffi->is_divisor_negative;

    if (!is_divisor_one && remainder_ct->num_radix_blocks != 0) {
      host_integer_scalar_mul_radix<Torus>(
          streams, gpu_indexes, gpu_count, remainder_ct, decomposed_divisor,
          divisor_has_at_least_one_set, mem_ptr->scalar_mul_mem, bsks, ksks,
          ms_noise_reduction_key, mem_ptr->params.message_modulus,
          num_scalars_divisor);
    }
  }

  // numerator_ct <- n - q * divisor, then publish it as the remainder.
  host_sub_and_propagate_single_carry(
      streams, gpu_indexes, gpu_count, numerator_ct, remainder_ct, nullptr,
      nullptr, mem_ptr->sub_and_propagate_mem, bsks, ksks,
      ms_noise_reduction_key, FLAG_NONE, (uint32_t)0);

  copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0], remainder_ct,
                                     numerator_ct);
}
#endif

View File

@@ -21,6 +21,27 @@ uint64_t scratch_cuda_integer_scalar_mul_kb_64(
num_scalar_bits, allocate_gpu_memory);
}
/* C API: builds the scratch state for the scalar multiply-high operation
 * and returns the GPU byte count tracked during construction. */
uint64_t scratch_cuda_integer_radix_scalar_mul_high_kb_64(
    void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
    int8_t **mem_ptr, uint32_t glwe_dimension, uint32_t polynomial_size,
    uint32_t lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
    uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
    uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
    PBS_TYPE pbs_type, uint32_t num_scalar_bits, bool anticipated_buffer_drop,
    bool allocate_gpu_memory, bool allocate_ms_array) {

  // The "big LWE" dimension is glwe_dimension * polynomial_size by definition.
  const int_radix_params radix_params(
      pbs_type, glwe_dimension, polynomial_size,
      glwe_dimension * polynomial_size, lwe_dimension, ks_level, ks_base_log,
      pbs_level, pbs_base_log, grouping_factor, message_modulus, carry_modulus,
      allocate_ms_array);

  return scratch_cuda_integer_radix_scalar_mul_high_kb<uint64_t>(
      (cudaStream_t *)streams, gpu_indexes, gpu_count,
      (int_scalar_mul_high<uint64_t> **)mem_ptr, num_blocks, radix_params,
      num_scalar_bits, anticipated_buffer_drop, allocate_gpu_memory);
}
void cuda_scalar_multiplication_integer_radix_ciphertext_64_inplace(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *lwe_array, uint64_t const *decomposed_scalar,
@@ -29,11 +50,73 @@ void cuda_scalar_multiplication_integer_radix_ciphertext_64_inplace(
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
uint32_t polynomial_size, uint32_t message_modulus, uint32_t num_scalars) {
host_integer_scalar_mul_radix<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array,
decomposed_scalar, has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, message_modulus,
switch (polynomial_size) {
case 512:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<512>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array,
decomposed_scalar, has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, message_modulus,
num_scalars);
break;
case 1024:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<1024>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array,
decomposed_scalar, has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, message_modulus,
num_scalars);
break;
case 2048:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<2048>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array,
decomposed_scalar, has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, message_modulus,
num_scalars);
break;
case 4096:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<4096>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array,
decomposed_scalar, has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, message_modulus,
num_scalars);
break;
case 8192:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<8192>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array,
decomposed_scalar, has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, message_modulus,
num_scalars);
break;
case 16384:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<16384>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array,
decomposed_scalar, has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsks,
(uint64_t **)(ksks), ms_noise_reduction_key, message_modulus,
num_scalars);
break;
default:
PANIC("Cuda error (scalar multiplication): unsupported polynomial size. "
"Only N = 512, 1024, 2048, 4096, 8192, 16384 are supported.")
}
}
void cuda_integer_radix_scalar_mul_high_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *ct, int8_t *mem_ptr, void *const *ksks,
uint64_t rhs, uint64_t const *decomposed_scalar,
uint64_t const *has_at_least_one_set,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
void *const *bsks, uint32_t num_scalars) {
host_integer_radix_scalar_mul_high_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, ct,
(int_scalar_mul_high<uint64_t> *)mem_ptr, (uint64_t **)ksks, rhs,
decomposed_scalar, has_at_least_one_set, ms_noise_reduction_key, bsks,
num_scalars);
}
@@ -47,3 +130,13 @@ void cleanup_cuda_integer_radix_scalar_mul(void *const *streams,
mem_ptr->release((cudaStream_t *)(streams), gpu_indexes, gpu_count);
}
void cleanup_cuda_integer_radix_scalar_mul_high_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
int8_t **mem_ptr_void) {
int_scalar_mul_high<uint64_t> *mem_ptr =
(int_scalar_mul_high<uint64_t> *)(*mem_ptr_void);
mem_ptr->release((cudaStream_t *)streams, gpu_indexes, gpu_count);
}

View File

@@ -38,11 +38,11 @@ __host__ uint64_t scratch_cuda_integer_radix_scalar_mul_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_scalar_mul_buffer<T>(
streams, gpu_indexes, gpu_count, params, num_radix_blocks,
num_scalar_bits, allocate_gpu_memory, true, size_tracker);
num_scalar_bits, allocate_gpu_memory, true, &size_tracker);
return size_tracker;
}
template <typename T>
template <typename T, class params>
__host__ void host_integer_scalar_mul_radix(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array,
@@ -80,7 +80,7 @@ __host__ void host_integer_scalar_mul_radix(
}
}
size_t j = 0;
for (size_t i = 0; i < std::min(num_scalars, num_ciphertext_bits); i++) {
for (size_t i = 0; i < min(num_scalars, num_ciphertext_bits); i++) {
if (decomposed_scalar[i] == 1) {
// Perform a block shift
CudaRadixCiphertextFFI preshifted_radix_ct;
@@ -116,9 +116,9 @@ __host__ void host_integer_scalar_mul_radix(
set_zero_radix_ciphertext_slice_async<T>(streams[0], gpu_indexes[0],
lwe_array, 0, num_radix_blocks);
} else {
host_integer_partial_sum_ciphertexts_vec_kb<T>(
streams, gpu_indexes, gpu_count, lwe_array, all_shifted_buffer, bsks,
ksks, ms_noise_reduction_key, mem->sum_ciphertexts_vec_mem,
host_integer_partial_sum_ciphertexts_vec_kb<T, params>(
streams, gpu_indexes, gpu_count, lwe_array, all_shifted_buffer, true,
bsks, ksks, ms_noise_reduction_key, mem->sum_ciphertexts_vec_mem,
num_radix_blocks, j);
auto scp_mem_ptr = mem->sc_prop_mem;
@@ -136,8 +136,7 @@ template <typename T>
__host__ void host_integer_small_scalar_mul_radix(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, CudaRadixCiphertextFFI *output_lwe_array,
CudaRadixCiphertextFFI *input_lwe_array, T scalar,
const uint32_t message_modulus, const uint32_t carry_modulus) {
CudaRadixCiphertextFFI *input_lwe_array, T scalar) {
if (output_lwe_array->num_radix_blocks != input_lwe_array->num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks must be the same")
@@ -167,20 +166,37 @@ __host__ void host_integer_small_scalar_mul_radix(
output_lwe_array->noise_levels[i] =
input_lwe_array->noise_levels[i] * scalar;
output_lwe_array->degrees[i] = input_lwe_array->degrees[i] * scalar;
CHECK_NOISE_LEVEL(output_lwe_array->noise_levels[i], message_modulus,
carry_modulus);
}
}
// Allocates the scratch buffer for the scalar multiply-high operation,
// stores it through `mem_ptr`, and reports the GPU byte count tracked by
// the buffer's constructor (honoring `allocate_gpu_memory`).
template <typename Torus>
__host__ uint64_t scratch_cuda_integer_radix_scalar_mul_high_kb(
    cudaStream_t const *streams, uint32_t const *gpu_indexes,
    uint32_t gpu_count, int_scalar_mul_high<Torus> **mem_ptr,
    uint32_t num_radix_blocks, int_radix_params params,
    uint32_t num_scalar_bits, bool anticipated_buffer_drop,
    bool allocate_gpu_memory) {

  uint64_t tracked_bytes = 0;
  auto *buffer = new int_scalar_mul_high<Torus>(
      streams, gpu_indexes, gpu_count, params, num_radix_blocks,
      allocate_gpu_memory, LEFT_SHIFT, num_scalar_bits, anticipated_buffer_drop,
      &tracked_bytes);
  *mem_ptr = buffer;
  return tracked_bytes;
}
template <typename Torus>
__host__ void host_integer_radix_scalar_mul_high_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, CudaRadixCiphertextFFI *ct,
int_scalar_mul_high_buffer<Torus> *mem_ptr, Torus *const *ksks,
int_scalar_mul_high<Torus> *mem_ptr, Torus *const *ksks, uint64_t rhs,
uint64_t const *decomposed_scalar, uint64_t const *has_at_least_one_set,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
void *const *bsks, const CudaScalarDivisorFFI *scalar_divisor_ffi) {
void *const *bsks, uint32_t num_scalars) {
if (scalar_divisor_ffi->is_chosen_multiplier_zero) {
if (rhs == (uint64_t)0) {
set_zero_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0], ct,
0, ct->num_radix_blocks);
return;
@@ -191,71 +207,66 @@ __host__ void host_integer_radix_scalar_mul_high_kb(
host_extend_radix_with_trivial_zero_blocks_msb<Torus>(tmp_ffi, ct, streams,
gpu_indexes);
if (scalar_divisor_ffi->active_bits != (uint32_t)0 &&
!scalar_divisor_ffi->is_abs_chosen_multiplier_one &&
tmp_ffi->num_radix_blocks != 0) {
if (rhs != (uint64_t)1 || tmp_ffi->num_radix_blocks != 0) {
if ((rhs & (rhs - 1)) == 0) {
uint32_t shift = std::log2(rhs);
if (scalar_divisor_ffi->is_chosen_multiplier_pow2) {
host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
streams, gpu_indexes, gpu_count, tmp_ffi,
scalar_divisor_ffi->ilog2_chosen_multiplier,
streams, gpu_indexes, gpu_count, tmp_ffi, shift,
mem_ptr->logical_scalar_shift_mem, bsks, (uint64_t **)ksks,
ms_noise_reduction_key, tmp_ffi->num_radix_blocks);
} else {
host_integer_scalar_mul_radix<Torus>(
streams, gpu_indexes, gpu_count, tmp_ffi,
scalar_divisor_ffi->decomposed_chosen_multiplier,
scalar_divisor_ffi->chosen_multiplier_has_at_least_one_set,
mem_ptr->scalar_mul_mem, bsks, (uint64_t **)ksks,
ms_noise_reduction_key, mem_ptr->params.message_modulus,
scalar_divisor_ffi->num_scalars);
}
}
host_trim_radix_blocks_lsb<Torus>(ct, tmp_ffi, streams, gpu_indexes);
}
template <typename Torus>
__host__ void host_integer_radix_signed_scalar_mul_high_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, CudaRadixCiphertextFFI *ct,
int_signed_scalar_mul_high_buffer<Torus> *mem_ptr, Torus *const *ksks,
const CudaScalarDivisorFFI *scalar_divisor_ffi,
CudaModulusSwitchNoiseReductionKeyFFI const *ms_noise_reduction_key,
void *const *bsks) {
if (scalar_divisor_ffi->is_chosen_multiplier_zero) {
set_zero_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0], ct,
0, ct->num_radix_blocks);
return;
}
CudaRadixCiphertextFFI *tmp_ffi = mem_ptr->tmp;
host_extend_radix_with_sign_msb<Torus>(
streams, gpu_indexes, gpu_count, tmp_ffi, ct, mem_ptr->extend_radix_mem,
ct->num_radix_blocks, bsks, (uint64_t **)ksks, ms_noise_reduction_key);
if (scalar_divisor_ffi->active_bits != (uint32_t)0 &&
!scalar_divisor_ffi->is_abs_chosen_multiplier_one &&
tmp_ffi->num_radix_blocks != 0) {
if (scalar_divisor_ffi->is_chosen_multiplier_pow2) {
host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
streams, gpu_indexes, gpu_count, tmp_ffi,
scalar_divisor_ffi->ilog2_chosen_multiplier,
mem_ptr->logical_scalar_shift_mem, bsks, (uint64_t **)ksks,
ms_noise_reduction_key, tmp_ffi->num_radix_blocks);
} else {
host_integer_scalar_mul_radix<Torus>(
streams, gpu_indexes, gpu_count, tmp_ffi,
scalar_divisor_ffi->decomposed_chosen_multiplier,
scalar_divisor_ffi->chosen_multiplier_has_at_least_one_set,
mem_ptr->scalar_mul_mem, bsks, (uint64_t **)ksks,
ms_noise_reduction_key, mem_ptr->params.message_modulus,
scalar_divisor_ffi->num_scalars);
switch (mem_ptr->params.polynomial_size) {
case 512:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<512>>(
streams, gpu_indexes, gpu_count, tmp_ffi, decomposed_scalar,
has_at_least_one_set, mem_ptr->scalar_mul_mem, bsks,
(uint64_t **)ksks, ms_noise_reduction_key,
mem_ptr->params.message_modulus, num_scalars);
break;
case 1024:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<1024>>(
streams, gpu_indexes, gpu_count, tmp_ffi, decomposed_scalar,
has_at_least_one_set, mem_ptr->scalar_mul_mem, bsks,
(uint64_t **)ksks, ms_noise_reduction_key,
mem_ptr->params.message_modulus, num_scalars);
break;
case 2048:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<2048>>(
streams, gpu_indexes, gpu_count, tmp_ffi, decomposed_scalar,
has_at_least_one_set, mem_ptr->scalar_mul_mem, bsks,
(uint64_t **)ksks, ms_noise_reduction_key,
mem_ptr->params.message_modulus, num_scalars);
break;
case 4096:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<4096>>(
streams, gpu_indexes, gpu_count, tmp_ffi, decomposed_scalar,
has_at_least_one_set, mem_ptr->scalar_mul_mem, bsks,
(uint64_t **)ksks, ms_noise_reduction_key,
mem_ptr->params.message_modulus, num_scalars);
break;
case 8192:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<8192>>(
streams, gpu_indexes, gpu_count, tmp_ffi, decomposed_scalar,
has_at_least_one_set, mem_ptr->scalar_mul_mem, bsks,
(uint64_t **)ksks, ms_noise_reduction_key,
mem_ptr->params.message_modulus, num_scalars);
break;
case 16384:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<16384>>(
streams, gpu_indexes, gpu_count, tmp_ffi, decomposed_scalar,
has_at_least_one_set, mem_ptr->scalar_mul_mem, bsks,
(uint64_t **)ksks, ms_noise_reduction_key,
mem_ptr->params.message_modulus, num_scalars);
break;
default:
PANIC(
"Cuda error (scalar multiplication): unsupported polynomial size. "
"Only N = 512, 1024, 2048, 4096, 8192, 16384 are supported.")
}
}
}

View File

@@ -21,7 +21,7 @@ __host__ uint64_t scratch_cuda_integer_radix_scalar_rotate_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_logical_scalar_shift_buffer<Torus>(
streams, gpu_indexes, gpu_count, shift_type, params, num_radix_blocks,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -21,7 +21,7 @@ __host__ uint64_t scratch_cuda_integer_radix_logical_scalar_shift_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_logical_scalar_shift_buffer<Torus>(
streams, gpu_indexes, gpu_count, shift_type, params, num_radix_blocks,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}
@@ -133,7 +133,7 @@ __host__ uint64_t scratch_cuda_integer_radix_arithmetic_scalar_shift_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_arithmetic_scalar_shift_buffer<Torus>(
streams, gpu_indexes, gpu_count, shift_type, params, num_radix_blocks,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -21,7 +21,7 @@ __host__ uint64_t scratch_cuda_integer_radix_shift_and_rotate_kb(
uint64_t size_tracker = 0;
*mem_ptr = new int_shift_and_rotate_buffer<Torus>(
streams, gpu_indexes, gpu_count, shift_type, is_signed, params,
num_radix_blocks, allocate_gpu_memory, size_tracker);
num_radix_blocks, allocate_gpu_memory, &size_tracker);
return size_tracker;
}
@@ -159,13 +159,11 @@ __host__ void host_integer_radix_shift_and_rotate_kb_inplace(
// control_bit|b|a
host_pack_bivariate_blocks<Torus>(
streams, gpu_indexes, gpu_count, mux_inputs, mux_lut->lwe_indexes_out,
rotated_input, input_bits_a, mux_lut->lwe_indexes_in, 2, total_nb_bits,
mem->params.message_modulus, mem->params.carry_modulus);
rotated_input, input_bits_a, mux_lut->lwe_indexes_in, 2, total_nb_bits);
// The shift bit is already properly aligned/positioned
host_add_the_same_block_to_all_blocks<Torus>(
streams[0], gpu_indexes[0], mux_inputs, mux_inputs, &shift_bit,
mem->params.message_modulus, mem->params.carry_modulus);
streams[0], gpu_indexes[0], mux_inputs, mux_inputs, &shift_bit);
// we have
// control_bit|b|a
@@ -185,9 +183,8 @@ __host__ void host_integer_radix_shift_and_rotate_kb_inplace(
// Bitshift and add the other bits
for (int i = bits_per_block - 2; i >= 0; i--) {
host_integer_small_scalar_mul_radix<Torus>(
streams, gpu_indexes, gpu_count, lwe_array, lwe_array, 2,
mem->params.message_modulus, mem->params.carry_modulus);
host_integer_small_scalar_mul_radix<Torus>(streams, gpu_indexes, gpu_count,
lwe_array, lwe_array, 2);
for (int j = 0; j < num_radix_blocks; j++) {
CudaRadixCiphertextFFI block;
CudaRadixCiphertextFFI bit_to_add;
@@ -196,8 +193,7 @@ __host__ void host_integer_radix_shift_and_rotate_kb_inplace(
i + j * bits_per_block,
i + j * bits_per_block + 1);
host_addition<Torus>(streams[0], gpu_indexes[0], &block, &block,
&bit_to_add, 1, mem->params.message_modulus,
mem->params.carry_modulus);
&bit_to_add, 1);
}
// To give back a clean ciphertext

View File

@@ -23,7 +23,7 @@ uint64_t scratch_cuda_sub_and_propagate_single_carry(
*mem_ptr = new int_sub_and_propagate<Torus>(
streams, gpu_indexes, gpu_count, params, num_radix_blocks, requested_flag,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}
@@ -73,7 +73,6 @@ __host__ void host_integer_radix_subtraction(
streams, gpu_indexes, gpu_count, lwe_array_out, lwe_array_in_2,
message_modulus, carry_modulus, num_radix_blocks);
host_addition<Torus>(streams[0], gpu_indexes[0], lwe_array_out, lwe_array_out,
lwe_array_in_1, num_radix_blocks, message_modulus,
carry_modulus);
lwe_array_in_1, num_radix_blocks);
}
#endif

View File

@@ -10,7 +10,7 @@ void cuda_add_lwe_ciphertext_vector_32(void *stream, uint32_t gpu_index,
output->num_radix_blocks != input_2->num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks must be the same")
host_addition<uint32_t>(static_cast<cudaStream_t>(stream), gpu_index, output,
input_1, input_2, output->num_radix_blocks, 0, 0);
input_1, input_2, output->num_radix_blocks);
}
/*
@@ -48,7 +48,7 @@ void cuda_add_lwe_ciphertext_vector_64(void *stream, uint32_t gpu_index,
output->num_radix_blocks != input_2->num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks must be the same")
host_addition<uint64_t>(static_cast<cudaStream_t>(stream), gpu_index, output,
input_1, input_2, output->num_radix_blocks, 0, 0);
input_1, input_2, output->num_radix_blocks);
}
/*

View File

@@ -9,7 +9,6 @@
#include "device.h"
#include "helper_multi_gpu.h"
#include "integer/integer.h"
#include "integer/integer_utilities.h"
#include "linear_algebra.h"
#include "utils/kernel_dimensions.cuh"
#include <stdio.h>
@@ -103,12 +102,11 @@ __global__ void addition(T *output, T const *input_1, T const *input_2,
// Coefficient-wise addition
// num_radix_blocks selects the amount of blocks to be added from the inputs
template <typename T>
__host__ void
host_addition(cudaStream_t stream, uint32_t gpu_index,
CudaRadixCiphertextFFI *output,
CudaRadixCiphertextFFI const *input_1,
CudaRadixCiphertextFFI const *input_2, uint32_t num_radix_blocks,
const uint32_t message_modulus, const uint32_t carry_modulus) {
__host__ void host_addition(cudaStream_t stream, uint32_t gpu_index,
CudaRadixCiphertextFFI *output,
CudaRadixCiphertextFFI const *input_1,
CudaRadixCiphertextFFI const *input_2,
uint32_t num_radix_blocks) {
if (output->lwe_dimension != input_1->lwe_dimension ||
output->lwe_dimension != input_2->lwe_dimension)
PANIC("Cuda error: input and output num radix blocks must be the same")
@@ -137,7 +135,6 @@ host_addition(cudaStream_t stream, uint32_t gpu_index,
output->degrees[i] = input_1->degrees[i] + input_2->degrees[i];
output->noise_levels[i] =
input_1->noise_levels[i] + input_2->noise_levels[i];
CHECK_NOISE_LEVEL(output->noise_levels[i], message_modulus, carry_modulus);
}
}
@@ -163,8 +160,7 @@ template <typename T>
__host__ void host_add_the_same_block_to_all_blocks(
cudaStream_t stream, uint32_t gpu_index, CudaRadixCiphertextFFI *output,
CudaRadixCiphertextFFI const *input_with_multiple_blocks,
CudaRadixCiphertextFFI const *input_with_single_block,
const uint32_t message_modulus, const uint32_t carry_modulus) {
CudaRadixCiphertextFFI const *input_with_single_block) {
if (output->num_radix_blocks != input_with_multiple_blocks->num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks must be the same")
if (input_with_single_block->num_radix_blocks != 1)
@@ -196,7 +192,6 @@ __host__ void host_add_the_same_block_to_all_blocks(
input_with_single_block->degrees[0];
output->noise_levels[i] = input_with_multiple_blocks->noise_levels[i] +
input_with_single_block->noise_levels[0];
CHECK_NOISE_LEVEL(output->noise_levels[i], message_modulus, carry_modulus);
}
}
@@ -385,7 +380,6 @@ __host__ void host_unchecked_sub_with_correcting_term(
output->noise_levels[i] =
input_1->noise_levels[i] + input_2->noise_levels[i];
zb = z / message_modulus;
CHECK_NOISE_LEVEL(output->noise_levels[i], message_modulus, carry_modulus);
}
}

View File

@@ -35,20 +35,6 @@ void cuda_convert_lwe_multi_bit_programmable_bootstrap_key_64(
static_cast<cudaStream_t>(stream), gpu_index);
}
void cuda_convert_lwe_multi_bit_programmable_bootstrap_key_128(
void *stream, uint32_t gpu_index, void *dest, void const *src,
uint32_t input_lwe_dim, uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size, uint32_t grouping_factor) {
uint32_t total_polynomials = input_lwe_dim * (glwe_dim + 1) * (glwe_dim + 1) *
level_count * (1 << grouping_factor) /
grouping_factor;
size_t buffer_size =
total_polynomials * polynomial_size * sizeof(__uint128_t);
cuda_memcpy_async_to_gpu((__uint128_t *)dest, (__uint128_t *)src, buffer_size,
static_cast<cudaStream_t>(stream), gpu_index);
}
// We need these lines so the compiler knows how to specialize these functions
template __device__ const uint64_t *
get_ith_mask_kth_block(const uint64_t *ptr, int i, int k, int level,
@@ -94,14 +80,6 @@ template __device__ double2 *get_ith_body_kth_block(double2 *ptr, int i, int k,
int glwe_dimension,
uint32_t level_count);
template __device__ const __uint128_t *
get_multi_bit_ith_lwe_gth_group_kth_block(const __uint128_t *ptr, int g, int i,
int k, int level,
uint32_t grouping_factor,
uint32_t polynomial_size,
uint32_t glwe_dimension,
uint32_t level_count);
template __device__ const uint64_t *get_multi_bit_ith_lwe_gth_group_kth_block(
const uint64_t *ptr, int g, int i, int k, int level,
uint32_t grouping_factor, uint32_t polynomial_size, uint32_t glwe_dimension,

View File

@@ -83,62 +83,6 @@ mul_ggsw_glwe_in_fourier_domain(double2 *fft, double2 *join_buffer,
__syncthreads();
}
/** Perform the matrix multiplication between the GGSW and the GLWE,
 * each block operating on a single level for mask and body.
 * Both operands should be at fourier domain
 *
 * This function assumes:
 * - Thread blocks at dimension z relates to the decomposition level.
 * - Thread blocks at dimension y relates to the glwe dimension.
 * - polynomial_size / params::degree threads... (polynomial_size / params::opt
 *   threads are available per block)
 *
 * NOTE(review): `fft` and `join_buffer` hold 128-bit fourier coefficients
 * split across plain doubles (see the `* 4` stride below — presumably
 * re_hi/re_lo/im_hi/im_lo per complex coefficient; confirm against
 * get_ith_mask_kth_block_128's layout).
 */
template <typename G, class params>
__device__ void mul_ggsw_glwe_in_fourier_domain_128(
    double *fft, double *join_buffer,
    const double *__restrict__ bootstrapping_key, int iteration, G &group,
    bool support_dsm = false) {
  // Grid geometry encodes the problem shape: y spans glwe_dimension + 1
  // polynomials, z spans the decomposition levels.
  const uint32_t polynomial_size = params::degree;
  const uint32_t glwe_dimension = gridDim.y - 1;
  const uint32_t level_count = gridDim.z;

  // The first product is used to initialize level_join_buffer
  auto this_block_rank = get_this_block_rank<G>(group, support_dsm);

  // Continues multiplying fft by every polynomial in that particular bsk level
  // Each y-block accumulates in a different polynomial at each iteration
  auto bsk_slice = get_ith_mask_kth_block_128(
      bootstrapping_key, iteration, blockIdx.y, blockIdx.z, polynomial_size,
      glwe_dimension, level_count);
  for (int j = 0; j < glwe_dimension + 1; j++) {
    // Rotate the starting polynomial by this block's rank so concurrent
    // blocks write to different join-buffer slots on each pass.
    int idx = (j + this_block_rank) % (glwe_dimension + 1);

    // Stride of polynomial_size / 2 complex coefficients, 4 doubles each.
    auto bsk_poly = bsk_slice + idx * polynomial_size / 2 * 4;
    auto buffer_slice = get_join_buffer_element_128<G>(
        blockIdx.z, idx, group, join_buffer, polynomial_size, glwe_dimension,
        support_dsm);

    // j == 0 tells the accumulator to overwrite instead of add.
    polynomial_product_accumulate_in_fourier_domain_128<params>(
        buffer_slice, fft, bsk_poly, j == 0);
    group.sync();
  }

  // -----------------------------------------------------------------
  // All blocks are synchronized here; after this sync, level_join_buffer has
  // the values needed from every other block

  // accumulate rest of the products into fft buffer
  for (int l = 0; l < level_count; l++) {
    auto cur_src_acc = get_join_buffer_element_128<G>(
        l, blockIdx.y, group, join_buffer, polynomial_size, glwe_dimension,
        support_dsm);
    // l == 0 initializes the accumulator; subsequent levels add into it.
    polynomial_accumulate_in_fourier_domain_128<params>(fft, cur_src_acc,
                                                        l == 0);
  }
  __syncthreads();
}
template <typename Torus>
void execute_pbs_async(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
@@ -279,7 +223,7 @@ void execute_scratch_pbs(cudaStream_t stream, uint32_t gpu_index,
uint32_t level_count, uint32_t grouping_factor,
uint32_t input_lwe_ciphertext_count, PBS_TYPE pbs_type,
bool allocate_gpu_memory, bool allocate_ms_array,
uint64_t &size_tracker) {
uint64_t *size_tracker) {
switch (sizeof(Torus)) {
case sizeof(uint32_t):
// 32 bits
@@ -287,7 +231,7 @@ void execute_scratch_pbs(cudaStream_t stream, uint32_t gpu_index,
case MULTI_BIT:
PANIC("Error: 32-bit multibit PBS is not supported.\n")
case CLASSICAL:
size_tracker = scratch_cuda_programmable_bootstrap_32(
*size_tracker = scratch_cuda_programmable_bootstrap_32(
stream, gpu_index, pbs_buffer, lwe_dimension, glwe_dimension,
polynomial_size, level_count, input_lwe_ciphertext_count,
allocate_gpu_memory, allocate_ms_array);
@@ -302,12 +246,12 @@ void execute_scratch_pbs(cudaStream_t stream, uint32_t gpu_index,
case MULTI_BIT:
if (grouping_factor == 0)
PANIC("Multi-bit PBS error: grouping factor should be > 0.")
size_tracker = scratch_cuda_multi_bit_programmable_bootstrap_64(
*size_tracker = scratch_cuda_multi_bit_programmable_bootstrap_64(
stream, gpu_index, pbs_buffer, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
break;
case CLASSICAL:
size_tracker = scratch_cuda_programmable_bootstrap_64(
*size_tracker = scratch_cuda_programmable_bootstrap_64(
stream, gpu_index, pbs_buffer, lwe_dimension, glwe_dimension,
polynomial_size, level_count, input_lwe_ciphertext_count,
allocate_gpu_memory, allocate_ms_array);

View File

@@ -8,9 +8,9 @@ execute_scratch_pbs_128(void *stream, uint32_t gpu_index, int8_t **pbs_buffer,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count,
bool allocate_gpu_memory, bool allocate_ms_array,
uint64_t &size_tracker_on_gpu) {
uint64_t *size_tracker_on_gpu) {
// The squash noise function receives as input 64-bit integers
size_tracker_on_gpu = scratch_cuda_programmable_bootstrap_128_vector_64(
*size_tracker_on_gpu = scratch_cuda_programmable_bootstrap_128_vector_64(
stream, gpu_index, pbs_buffer, lwe_dimension, glwe_dimension,
polynomial_size, level_count, input_lwe_ciphertext_count,
allocate_gpu_memory, allocate_ms_array);

View File

@@ -286,7 +286,7 @@ __host__ uint64_t scratch_programmable_bootstrap_amortized(
glwe_dimension, polynomial_size, input_lwe_ciphertext_count,
max_shared_memory);
*pbs_buffer = (int8_t *)cuda_malloc_with_size_tracking_async(
buffer_size, stream, gpu_index, size_tracker, allocate_gpu_memory);
buffer_size, stream, gpu_index, &size_tracker, allocate_gpu_memory);
check_cuda_error(cudaGetLastError());
return size_tracker;
}

View File

@@ -225,7 +225,7 @@ __host__ uint64_t scratch_programmable_bootstrap_cg(
*buffer = new pbs_buffer<Torus, CLASSICAL>(
stream, gpu_index, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, PBS_VARIANT::CG,
allocate_gpu_memory, allocate_ms_array, size_tracker);
allocate_gpu_memory, allocate_ms_array, &size_tracker);
return size_tracker;
}

View File

@@ -280,14 +280,13 @@ __host__ uint64_t scratch_cg_multi_bit_programmable_bootstrap(
check_cuda_error(cudaGetLastError());
}
auto lwe_chunk_size =
get_lwe_chunk_size<Torus, params>(gpu_index, input_lwe_ciphertext_count,
polynomial_size, full_sm_keybundle);
auto lwe_chunk_size = get_lwe_chunk_size<Torus, params>(
gpu_index, input_lwe_ciphertext_count, polynomial_size);
uint64_t size_tracker = 0;
*buffer = new pbs_buffer<Torus, MULTI_BIT>(
stream, gpu_index, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, lwe_chunk_size, PBS_VARIANT::CG,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -386,7 +386,7 @@ __host__ uint64_t scratch_programmable_bootstrap(
*buffer = new pbs_buffer<Torus, CLASSICAL>(
stream, gpu_index, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, PBS_VARIANT::DEFAULT,
allocate_gpu_memory, allocate_ms_array, size_tracker);
allocate_gpu_memory, allocate_ms_array, &size_tracker);
return size_tracker;
}

View File

@@ -18,6 +18,62 @@
#include "programmable_bootstrap.cuh"
#include "types/complex/operations.cuh"
/** Perform the matrix multiplication between the GGSW and the GLWE,
 * each block operating on a single level for mask and body.
 * Both operands should be at fourier domain
 *
 * This function assumes:
 * - Thread blocks at dimension z relates to the decomposition level.
 * - Thread blocks at dimension y relates to the glwe dimension.
 * - polynomial_size / params::opt threads are available per block
 *
 * NOTE(review): `fft` and `join_buffer` hold 128-bit fourier coefficients
 * split across plain doubles (see the `* 4` stride below — presumably
 * re_hi/re_lo/im_hi/im_lo per complex coefficient; confirm against
 * get_ith_mask_kth_block_128's layout).
 */
template <typename G, class params>
__device__ void mul_ggsw_glwe_in_fourier_domain_128(
    double *fft, double *join_buffer,
    const double *__restrict__ bootstrapping_key, int iteration, G &group,
    bool support_dsm = false) {
  // Grid geometry encodes the problem shape: y spans glwe_dimension + 1
  // polynomials, z spans the decomposition levels.
  const uint32_t polynomial_size = params::degree;
  const uint32_t glwe_dimension = gridDim.y - 1;
  const uint32_t level_count = gridDim.z;

  // The first product is used to initialize level_join_buffer
  auto this_block_rank = get_this_block_rank<G>(group, support_dsm);

  // Continues multiplying fft by every polynomial in that particular bsk level
  // Each y-block accumulates in a different polynomial at each iteration
  auto bsk_slice = get_ith_mask_kth_block_128(
      bootstrapping_key, iteration, blockIdx.y, blockIdx.z, polynomial_size,
      glwe_dimension, level_count);
  for (int j = 0; j < glwe_dimension + 1; j++) {
    // Rotate the starting polynomial by this block's rank so concurrent
    // blocks write to different join-buffer slots on each pass.
    int idx = (j + this_block_rank) % (glwe_dimension + 1);

    // Stride of polynomial_size / 2 complex coefficients, 4 doubles each.
    auto bsk_poly = bsk_slice + idx * polynomial_size / 2 * 4;
    auto buffer_slice = get_join_buffer_element_128<G>(
        blockIdx.z, idx, group, join_buffer, polynomial_size, glwe_dimension,
        support_dsm);

    // j == 0 tells the accumulator to overwrite instead of add.
    polynomial_product_accumulate_in_fourier_domain_128<params>(
        buffer_slice, fft, bsk_poly, j == 0);
    group.sync();
  }

  // -----------------------------------------------------------------
  // All blocks are synchronized here; after this sync, level_join_buffer has
  // the values needed from every other block

  // accumulate rest of the products into fft buffer
  for (int l = 0; l < level_count; l++) {
    auto cur_src_acc = get_join_buffer_element_128<G>(
        l, blockIdx.y, group, join_buffer, polynomial_size, glwe_dimension,
        support_dsm);
    // l == 0 initializes the accumulator; subsequent levels add into it.
    polynomial_accumulate_in_fourier_domain_128<params>(fft, cur_src_acc,
                                                        l == 0);
  }
  __syncthreads();
}
template <typename InputTorus, class params, sharedMemDegree SMD,
bool first_iter>
__global__ void __launch_bounds__(params::degree / params::opt)
@@ -118,6 +174,9 @@ __global__ void __launch_bounds__(params::degree / params::opt)
accumulator);
gadget_acc.decompose_and_compress_level_128(accumulator_fft, blockIdx.z);
// We are using the same memory space for accumulator_fft and
// accumulator_rotated, so we need to synchronize here to make sure they
// don't modify the same memory space at the same time
// Switch to the FFT space
auto acc_fft_re_hi = accumulator_fft + 0 * params::degree / 2;
auto acc_fft_re_lo = accumulator_fft + 1 * params::degree / 2;
@@ -396,7 +455,6 @@ __host__ uint64_t scratch_programmable_bootstrap_cg_128(
uint32_t level_count, uint32_t input_lwe_ciphertext_count,
bool allocate_gpu_memory, bool allocate_ms_array) {
cuda_set_device(gpu_index);
uint64_t full_sm =
get_buffer_size_full_sm_programmable_bootstrap_cg<__uint128_t>(
polynomial_size);
@@ -426,7 +484,7 @@ __host__ uint64_t scratch_programmable_bootstrap_cg_128(
*buffer = new pbs_buffer_128<InputTorus, PBS_TYPE::CLASSICAL>(
stream, gpu_index, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, PBS_VARIANT::CG,
allocate_gpu_memory, allocate_ms_array, size_tracker);
allocate_gpu_memory, allocate_ms_array, &size_tracker);
return size_tracker;
}
@@ -533,7 +591,7 @@ __host__ uint64_t scratch_programmable_bootstrap_128(
*buffer = new pbs_buffer_128<InputTorus, PBS_TYPE::CLASSICAL>(
stream, gpu_index, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, PBS_VARIANT::DEFAULT,
allocate_gpu_memory, allocate_ms_array, size_tracker);
allocate_gpu_memory, allocate_ms_array, &size_tracker);
return size_tracker;
}

View File

@@ -455,8 +455,11 @@ void cleanup_cuda_multi_bit_programmable_bootstrap(void *stream,
*/
template <typename Torus, class params>
uint32_t get_lwe_chunk_size(uint32_t gpu_index, uint32_t max_num_pbs,
uint32_t polynomial_size,
uint64_t full_sm_keybundle) {
uint32_t polynomial_size) {
uint64_t full_sm_keybundle =
get_buffer_size_full_sm_multibit_programmable_bootstrap_keybundle<Torus>(
polynomial_size);
int max_blocks_per_sm;
auto max_shared_memory = cuda_get_max_shared_memory(gpu_index);

View File

@@ -521,14 +521,13 @@ __host__ uint64_t scratch_multi_bit_programmable_bootstrap(
check_cuda_error(cudaGetLastError());
}
auto lwe_chunk_size =
get_lwe_chunk_size<Torus, params>(gpu_index, input_lwe_ciphertext_count,
polynomial_size, full_sm_keybundle);
auto lwe_chunk_size = get_lwe_chunk_size<Torus, params>(
gpu_index, input_lwe_ciphertext_count, polynomial_size);
uint64_t size_tracker = 0;
*buffer = new pbs_buffer<Torus, MULTI_BIT>(
stream, gpu_index, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, lwe_chunk_size, PBS_VARIANT::DEFAULT,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -1,361 +0,0 @@
#include "programmable_bootstrap_cg_multibit.cuh"
#include "programmable_bootstrap_multibit_128.cuh"
/* Scratch-buffer setup for the 128-bit multi-bit PBS.
 *
 * Dispatches the runtime `polynomial_size` onto the matching compile-time
 * AmortizedDegree specialization and forwards every argument unchanged.
 * Returns whatever the underlying scratch routine returns (a size tracker,
 * judging by the sibling scratch functions — TODO confirm).
 *
 * Panics for any degree outside {256, 512, 1024, 2048, 4096}.
 */
template <typename InputTorus>
uint64_t scratch_cuda_multi_bit_programmable_bootstrap_128(
    void *stream, uint32_t gpu_index,
    pbs_buffer_128<InputTorus, MULTI_BIT> **buffer, uint32_t glwe_dimension,
    uint32_t polynomial_size, uint32_t level_count,
    uint32_t input_lwe_ciphertext_count, bool allocate_gpu_memory) {
  // Hoist the stream cast; every branch needs it.
  auto cuda_stream = static_cast<cudaStream_t>(stream);

  // Runtime degree -> compile-time template specialization.
  if (polynomial_size == 256)
    return scratch_multi_bit_programmable_bootstrap_128<InputTorus,
                                                        AmortizedDegree<256>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  if (polynomial_size == 512)
    return scratch_multi_bit_programmable_bootstrap_128<InputTorus,
                                                        AmortizedDegree<512>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  if (polynomial_size == 1024)
    return scratch_multi_bit_programmable_bootstrap_128<InputTorus,
                                                        AmortizedDegree<1024>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  if (polynomial_size == 2048)
    return scratch_multi_bit_programmable_bootstrap_128<InputTorus,
                                                        AmortizedDegree<2048>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  if (polynomial_size == 4096)
    return scratch_multi_bit_programmable_bootstrap_128<InputTorus,
                                                        AmortizedDegree<4096>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);

  PANIC("Cuda error (multi-bit PBS): unsupported polynomial size. Supported "
        "N's are powers of two"
        " in the interval [256..4096].")
}
/* Scratch-buffer setup for the cooperative-groups variant of the 128-bit
 * multi-bit PBS.
 *
 * Mirrors scratch_cuda_multi_bit_programmable_bootstrap_128: maps the
 * runtime `polynomial_size` onto the matching AmortizedDegree specialization
 * of the CG scratch routine, forwarding all arguments unchanged.
 *
 * Panics for any degree outside {256, 512, 1024, 2048, 4096}.
 */
template <typename InputTorus>
uint64_t scratch_cuda_cg_multi_bit_programmable_bootstrap_128(
    void *stream, uint32_t gpu_index,
    pbs_buffer_128<InputTorus, MULTI_BIT> **buffer, uint32_t glwe_dimension,
    uint32_t polynomial_size, uint32_t level_count,
    uint32_t input_lwe_ciphertext_count, bool allocate_gpu_memory) {
  // Single cast shared by all branches.
  auto cuda_stream = static_cast<cudaStream_t>(stream);

  if (polynomial_size == 256)
    return scratch_cg_multi_bit_programmable_bootstrap_128<
        InputTorus, AmortizedDegree<256>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  if (polynomial_size == 512)
    return scratch_cg_multi_bit_programmable_bootstrap_128<
        InputTorus, AmortizedDegree<512>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  if (polynomial_size == 1024)
    return scratch_cg_multi_bit_programmable_bootstrap_128<
        InputTorus, AmortizedDegree<1024>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  if (polynomial_size == 2048)
    return scratch_cg_multi_bit_programmable_bootstrap_128<
        InputTorus, AmortizedDegree<2048>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  if (polynomial_size == 4096)
    return scratch_cg_multi_bit_programmable_bootstrap_128<
        InputTorus, AmortizedDegree<4096>>(
        cuda_stream, gpu_index, buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);

  PANIC("Cuda error (multi-bit PBS): unsupported polynomial size. Supported "
        "N's are powers of two"
        " in the interval [256..4096].")
}
/* Entry point for allocating the 128-bit multi-bit PBS scratch buffer when
 * the inputs are 64-bit LWE ciphertexts.
 *
 * Probes the device's shared-memory budget once and picks between the
 * cooperative-groups implementation and the default one; the chosen scratch
 * routine's return value (a size tracker, per the sibling scratch functions
 * — TODO confirm) is passed straight through.
 */
uint64_t scratch_cuda_multi_bit_programmable_bootstrap_128_vector_64(
    void *stream, uint32_t gpu_index, int8_t **buffer, uint32_t glwe_dimension,
    uint32_t polynomial_size, uint32_t level_count,
    uint32_t input_lwe_ciphertext_count, bool allocate_gpu_memory) {
  // Both implementations expect the opaque buffer handle with its real type.
  auto typed_buffer =
      reinterpret_cast<pbs_buffer_128<uint64_t, MULTI_BIT> **>(buffer);

  // CG support depends on the kernel's shared-memory needs vs. the device cap.
  const auto max_shared_memory = cuda_get_max_shared_memory(gpu_index);
  const bool use_cg =
      supports_cooperative_groups_on_multibit_programmable_bootstrap<
          __uint128_t>(glwe_dimension, polynomial_size, level_count,
                       input_lwe_ciphertext_count, max_shared_memory);

  if (use_cg)
    return scratch_cuda_cg_multi_bit_programmable_bootstrap_128<uint64_t>(
        stream, gpu_index, typed_buffer, glwe_dimension, polynomial_size,
        level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
  return scratch_cuda_multi_bit_programmable_bootstrap_128<uint64_t>(
      stream, gpu_index, typed_buffer, glwe_dimension, polynomial_size,
      level_count, input_lwe_ciphertext_count, allocate_gpu_memory);
}
/* Launch the 128-bit multi-bit programmable bootstrap over a vector of LWE
 * ciphertexts (default, non-cooperative-groups implementation).
 *
 * Pure dispatch: selects the AmortizedDegree specialization matching the
 * runtime `polynomial_size` and forwards every argument unchanged to
 * host_multi_bit_programmable_bootstrap_128. Note the host routine takes
 * glwe_dimension before lwe_dimension — the order differs from this
 * signature.
 *
 * Panics for any degree outside {256, 512, 1024, 2048, 4096}.
 */
template <typename InputTorus>
void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_128(
    void *stream, uint32_t gpu_index, __uint128_t *lwe_array_out,
    InputTorus const *lwe_output_indexes, __uint128_t const *lut_vector,
    InputTorus const *lut_vector_indexes, InputTorus const *lwe_array_in,
    InputTorus const *lwe_input_indexes, __uint128_t const *bootstrapping_key,
    pbs_buffer_128<InputTorus, MULTI_BIT> *pbs_buffer, uint32_t lwe_dimension,
    uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
    uint32_t base_log, uint32_t level_count, uint32_t num_samples,
    uint32_t num_many_lut, uint32_t lut_stride) {
  auto cuda_stream = static_cast<cudaStream_t>(stream);

  if (polynomial_size == 256) {
    host_multi_bit_programmable_bootstrap_128<InputTorus, AmortizedDegree<256>>(
        cuda_stream, gpu_index, lwe_array_out, lwe_output_indexes, lut_vector,
        lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
        pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
        grouping_factor, base_log, level_count, num_samples, num_many_lut,
        lut_stride);
  } else if (polynomial_size == 512) {
    host_multi_bit_programmable_bootstrap_128<InputTorus, AmortizedDegree<512>>(
        cuda_stream, gpu_index, lwe_array_out, lwe_output_indexes, lut_vector,
        lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
        pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
        grouping_factor, base_log, level_count, num_samples, num_many_lut,
        lut_stride);
  } else if (polynomial_size == 1024) {
    host_multi_bit_programmable_bootstrap_128<InputTorus,
                                              AmortizedDegree<1024>>(
        cuda_stream, gpu_index, lwe_array_out, lwe_output_indexes, lut_vector,
        lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
        pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
        grouping_factor, base_log, level_count, num_samples, num_many_lut,
        lut_stride);
  } else if (polynomial_size == 2048) {
    host_multi_bit_programmable_bootstrap_128<InputTorus,
                                              AmortizedDegree<2048>>(
        cuda_stream, gpu_index, lwe_array_out, lwe_output_indexes, lut_vector,
        lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
        pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
        grouping_factor, base_log, level_count, num_samples, num_many_lut,
        lut_stride);
  } else if (polynomial_size == 4096) {
    host_multi_bit_programmable_bootstrap_128<InputTorus,
                                              AmortizedDegree<4096>>(
        cuda_stream, gpu_index, lwe_array_out, lwe_output_indexes, lut_vector,
        lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
        pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
        grouping_factor, base_log, level_count, num_samples, num_many_lut,
        lut_stride);
  } else {
    PANIC("Cuda error (multi-bit PBS): unsupported polynomial size. Supported "
          "N's are powers of two"
          " in the interval [256..4096].")
  }
}
template <typename InputTorus>
void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_128(
    void *stream, uint32_t gpu_index, __uint128_t *lwe_array_out,
    InputTorus const *lwe_output_indexes, __uint128_t const *lut_vector,
    InputTorus const *lut_vector_indexes, InputTorus const *lwe_array_in,
    InputTorus const *lwe_input_indexes, __uint128_t const *bootstrapping_key,
    pbs_buffer_128<InputTorus, MULTI_BIT> *pbs_buffer, uint32_t lwe_dimension,
    uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
    uint32_t base_log, uint32_t level_count, uint32_t num_samples,
    uint32_t num_many_lut, uint32_t lut_stride) {
  // Dispatches the cooperative-groups (CG) 128-bit multi-bit PBS to the
  // template instantiation matching the runtime polynomial size. Every case
  // forwards the exact same argument list and only AmortizedDegree<N>
  // differs, so the per-degree call is factored into a local macro.
#define HOST_CG_MB_PBS_128(N)                                                  \
  host_cg_multi_bit_programmable_bootstrap_128<InputTorus,                     \
                                               AmortizedDegree<N>>(            \
      static_cast<cudaStream_t>(stream), gpu_index, lwe_array_out,             \
      lwe_output_indexes, lut_vector, lut_vector_indexes, lwe_array_in,        \
      lwe_input_indexes, bootstrapping_key, pbs_buffer, glwe_dimension,        \
      lwe_dimension, polynomial_size, grouping_factor, base_log, level_count,  \
      num_samples, num_many_lut, lut_stride)

  switch (polynomial_size) {
  case 256:
    HOST_CG_MB_PBS_128(256);
    break;
  case 512:
    HOST_CG_MB_PBS_128(512);
    break;
  case 1024:
    HOST_CG_MB_PBS_128(1024);
    break;
  case 2048:
    HOST_CG_MB_PBS_128(2048);
    break;
  case 4096:
    HOST_CG_MB_PBS_128(4096);
    break;
  default:
    PANIC("Cuda error (multi-bit PBS): unsupported polynomial size. Supported "
          "N's are powers of two"
          " in the interval [256..4096].")
  }
#undef HOST_CG_MB_PBS_128
}
void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_128(
void *stream, uint32_t gpu_index, void *lwe_array_out,
void const *lwe_output_indexes, void const *lut_vector,
void const *lut_vector_indexes, void const *lwe_array_in,
void const *lwe_input_indexes, void const *bootstrapping_key,
int8_t *mem_ptr, uint32_t lwe_dimension, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t grouping_factor, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_many_lut,
uint32_t lut_stride) {
if (base_log > 64)
PANIC("Cuda error (multi-bit PBS): base log should be <= 64")
auto *buffer =
reinterpret_cast<pbs_buffer_128<uint64_t, MULTI_BIT> *>(mem_ptr);
switch (buffer->pbs_variant) {
case PBS_VARIANT::CG:
cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_128<
uint64_t>(stream, gpu_index, static_cast<__uint128_t *>(lwe_array_out),
static_cast<const uint64_t *>(lwe_output_indexes),
static_cast<const __uint128_t *>(lut_vector),
static_cast<const uint64_t *>(lut_vector_indexes),
static_cast<const uint64_t *>(lwe_array_in),
static_cast<const uint64_t *>(lwe_input_indexes),
static_cast<const __uint128_t *>(bootstrapping_key), buffer,
lwe_dimension, glwe_dimension, polynomial_size,
grouping_factor, base_log, level_count, num_samples,
num_many_lut, lut_stride);
break;
case PBS_VARIANT::DEFAULT:
cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_128<uint64_t>(
stream, gpu_index, static_cast<__uint128_t *>(lwe_array_out),
static_cast<const uint64_t *>(lwe_output_indexes),
static_cast<const __uint128_t *>(lut_vector),
static_cast<const uint64_t *>(lut_vector_indexes),
static_cast<const uint64_t *>(lwe_array_in),
static_cast<const uint64_t *>(lwe_input_indexes),
static_cast<const __uint128_t *>(bootstrapping_key), buffer,
lwe_dimension, glwe_dimension, polynomial_size, grouping_factor,
base_log, level_count, num_samples, num_many_lut, lut_stride);
break;
default:
PANIC("Cuda error (multi-bit PBS): unsupported implementation variant.")
}
}
void cleanup_cuda_multi_bit_programmable_bootstrap_128(void *stream,
                                                       const uint32_t gpu_index,
                                                       int8_t **buffer) {
  // Hand the scratch buffer back via pbs_buffer_128::release on the given
  // stream/GPU. NOTE(review): the pbs_buffer_128 object itself is not
  // deleted here — confirm who owns and frees it.
  auto *pbs_mem =
      reinterpret_cast<pbs_buffer_128<uint64_t, MULTI_BIT> *>(*buffer);
  pbs_mem->release(static_cast<cudaStream_t>(stream), gpu_index);
}
/**
 * Computes divisors of the product of num_sms (streaming multiprocessors on
 * the GPU) and max_blocks_per_sm (maximum active blocks per SM to launch
 * device_multi_bit_programmable_bootstrap_keybundle) no larger than its
 * square root, based on max_num_pbs. If log2(max_num_pbs) <= 13 (or on
 * SM90+ builds), the first (largest) such divisor is selected; otherwise the
 * (log2(max_num_pbs) - 11)-th divisor below the square root is used, i.e. a
 * smaller chunk size on pre-H100 GPUs.
 *
 * The value 13 was empirically determined based on memory requirements for
 * benchmarking on an RTX 4090 GPU, balancing performance and resource use.
 */
template <typename Torus, class params>
uint32_t get_lwe_chunk_size_128(uint32_t gpu_index, uint32_t max_num_pbs,
                                uint32_t polynomial_size,
                                uint64_t full_sm_keybundle) {
  // Zero-initialize: the occupancy queries below ignore their return status
  // (as at the original call sites), so a failed query now degenerates to
  // x == 0 and divisor == 1 instead of reading an uninitialized value.
  int max_blocks_per_sm = 0;
  auto max_shared_memory = cuda_get_max_shared_memory(gpu_index);
  cuda_set_device(gpu_index);
  // NOTE(review): the NOSM query passes full_sm_keybundle as dynamic shared
  // memory while the FULLSM query passes 0 — this looks inverted relative to
  // the variant names; confirm intent.
  if (max_shared_memory < full_sm_keybundle)
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(
        &max_blocks_per_sm,
        device_multi_bit_programmable_bootstrap_keybundle_128<Torus, params,
                                                              NOSM>,
        polynomial_size / params::opt, full_sm_keybundle);
  else
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(
        &max_blocks_per_sm,
        device_multi_bit_programmable_bootstrap_keybundle_128<Torus, params,
                                                              FULLSM>,
        polynomial_size / params::opt, 0);
  int num_sms = 0;
  check_cuda_error(cudaDeviceGetAttribute(
      &num_sms, cudaDevAttrMultiProcessorCount, gpu_index));
  // x = total number of keybundle blocks the device can keep resident.
  int x = num_sms * max_blocks_per_sm;
  int count = 0;
  int divisor = 1;
  int ith_divisor = 0;
#if CUDA_ARCH < 900
  // We pick a smaller divisor on GPUs other than H100, so 256-bit integer
  // multiplication can run
  int log2_max_num_pbs = log2_int(max_num_pbs);
  if (log2_max_num_pbs > 13)
    ith_divisor = log2_max_num_pbs - 11;
#endif
  // Walk divisor candidates downward from sqrt(x) and return the
  // ith_divisor-th divisor found (the largest one when ith_divisor == 0);
  // falls back to 1 when fewer divisors exist.
  for (int i = sqrt(x); i >= 1; i--) {
    if (x % i == 0) {
      if (count == ith_divisor) {
        divisor = i;
        break;
      } else {
        count++;
      }
    }
  }
  return divisor;
}

View File

@@ -253,7 +253,7 @@ __host__ uint64_t scratch_programmable_bootstrap_tbc(
*buffer = new pbs_buffer<Torus, CLASSICAL>(
stream, gpu_index, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, PBS_VARIANT::TBC,
allocate_gpu_memory, allocate_ms_array, size_tracker);
allocate_gpu_memory, allocate_ms_array, &size_tracker);
return size_tracker;
}

View File

@@ -283,14 +283,13 @@ __host__ uint64_t scratch_tbc_multi_bit_programmable_bootstrap(
check_cuda_error(cudaGetLastError());
}
auto lwe_chunk_size =
get_lwe_chunk_size<Torus, params>(gpu_index, input_lwe_ciphertext_count,
polynomial_size, full_sm_keybundle);
auto lwe_chunk_size = get_lwe_chunk_size<Torus, params>(
gpu_index, input_lwe_ciphertext_count, polynomial_size);
uint64_t size_tracker = 0;
*buffer = new pbs_buffer<uint64_t, MULTI_BIT>(
stream, gpu_index, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, lwe_chunk_size, PBS_VARIANT::TBC,
allocate_gpu_memory, size_tracker);
allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -5,15 +5,15 @@
#include <stdio.h>
#include <type_traits>
template <typename T> __device__ inline const char *get_format();
template <typename T> inline __device__ const char *get_format();
template <> __device__ inline const char *get_format<int>() { return "%d, "; }
template <> inline __device__ const char *get_format<int>() { return "%d, "; }
template <> __device__ inline const char *get_format<unsigned int>() {
template <> inline __device__ const char *get_format<unsigned int>() {
return "%u, ";
}
template <> __device__ inline const char *get_format<uint64_t>() {
template <> inline __device__ const char *get_format<uint64_t>() {
return "%lu, ";
}
@@ -23,15 +23,6 @@ template <typename T> __global__ void print_debug_kernel(const T *src, int N) {
}
}
template <>
__global__ inline void print_debug_kernel(const __uint128_t *src, int N) {
for (int i = 0; i < N; i++) {
uint64_t low = static_cast<uint64_t>(src[i]);
uint64_t high = static_cast<uint64_t>(src[i] >> 64);
printf("(%llu, %llu), ", high, low);
}
}
template <>
__global__ inline void print_debug_kernel(const double2 *src, int N) {
for (int i = 0; i < N; i++) {

View File

@@ -9,7 +9,7 @@ void multi_gpu_alloc_array_async(cudaStream_t const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count, std::vector<Torus *> &dest,
uint32_t elements_per_gpu,
uint64_t &size_tracker_on_gpu_0,
uint64_t *size_tracker_on_gpu_0,
bool allocate_gpu_memory) {
dest.resize(gpu_count);
@@ -17,10 +17,10 @@ void multi_gpu_alloc_array_async(cudaStream_t const *streams,
uint64_t size_tracker_on_gpu_i = 0;
Torus *d_array = (Torus *)cuda_malloc_with_size_tracking_async(
elements_per_gpu * sizeof(Torus), streams[i], gpu_indexes[i],
size_tracker_on_gpu_i, allocate_gpu_memory);
&size_tracker_on_gpu_i, allocate_gpu_memory);
dest[i] = d_array;
if (i == 0) {
size_tracker_on_gpu_0 += size_tracker_on_gpu_i;
if (i == 0 && size_tracker_on_gpu_0 != nullptr) {
*size_tracker_on_gpu_0 = size_tracker_on_gpu_i;
}
}
}
@@ -46,7 +46,7 @@ void multi_gpu_alloc_lwe_async(cudaStream_t const *streams,
uint32_t const *gpu_indexes, uint32_t gpu_count,
std::vector<Torus *> &dest, uint32_t num_inputs,
uint32_t lwe_size,
uint64_t &size_tracker_on_gpu_0,
uint64_t *size_tracker_on_gpu_0,
bool allocate_gpu_memory) {
dest.resize(gpu_count);
for (uint i = 0; i < gpu_count; i++) {
@@ -54,10 +54,10 @@ void multi_gpu_alloc_lwe_async(cudaStream_t const *streams,
auto inputs_on_gpu = get_num_inputs_on_gpu(num_inputs, i, gpu_count);
Torus *d_array = (Torus *)cuda_malloc_with_size_tracking_async(
inputs_on_gpu * lwe_size * sizeof(Torus), streams[i], gpu_indexes[i],
size_tracker_on_gpu_i, allocate_gpu_memory);
&size_tracker_on_gpu_i, allocate_gpu_memory);
dest[i] = d_array;
if (i == 0) {
size_tracker_on_gpu_0 += size_tracker_on_gpu_i;
if (i == 0 && size_tracker_on_gpu_0 != nullptr) {
*size_tracker_on_gpu_0 = size_tracker_on_gpu_i;
}
}
}
@@ -65,7 +65,7 @@ void multi_gpu_alloc_lwe_async(cudaStream_t const *streams,
template void multi_gpu_alloc_lwe_async<__uint128_t>(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, std::vector<__uint128_t *> &dest, uint32_t num_inputs,
uint32_t lwe_size, uint64_t &size_tracker_on_gpu_0,
uint32_t lwe_size, uint64_t *size_tracker_on_gpu_0,
bool allocate_gpu_memory);
/// Allocates the input/output vector for all devices
@@ -75,7 +75,7 @@ template <typename Torus>
void multi_gpu_alloc_lwe_many_lut_output_async(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, std::vector<Torus *> &dest, uint32_t num_inputs,
uint32_t num_many_lut, uint32_t lwe_size, uint64_t &size_tracker_on_gpu_0,
uint32_t num_many_lut, uint32_t lwe_size, uint64_t *size_tracker_on_gpu_0,
bool allocate_gpu_memory) {
dest.resize(gpu_count);
for (uint i = 0; i < gpu_count; i++) {
@@ -83,10 +83,10 @@ void multi_gpu_alloc_lwe_many_lut_output_async(
auto inputs_on_gpu = get_num_inputs_on_gpu(num_inputs, i, gpu_count);
Torus *d_array = (Torus *)cuda_malloc_with_size_tracking_async(
num_many_lut * inputs_on_gpu * lwe_size * sizeof(Torus), streams[i],
gpu_indexes[i], size_tracker, allocate_gpu_memory);
gpu_indexes[i], &size_tracker, allocate_gpu_memory);
dest[i] = d_array;
if (i == 0) {
size_tracker_on_gpu_0 += size_tracker;
if (i == 0 && size_tracker_on_gpu_0 != nullptr) {
*size_tracker_on_gpu_0 = size_tracker;
}
}
}

View File

@@ -94,7 +94,7 @@ __host__ uint64_t scratch_cuda_expand_without_verification(
*mem_ptr = new zk_expand_mem<Torus>(
streams, gpu_indexes, gpu_count, computing_params, casting_params,
casting_key_type, num_lwes_per_compact_list, is_boolean_array,
num_compact_lists, allocate_gpu_memory, size_tracker);
num_compact_lists, allocate_gpu_memory, &size_tracker);
return size_tracker;
}

View File

@@ -238,85 +238,6 @@ const _: () = {
["Offset of field: CudaRadixCiphertextFFI::lwe_dimension"]
[::std::mem::offset_of!(CudaRadixCiphertextFFI, lwe_dimension) - 32usize];
};
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CudaScalarDivisorFFI {
pub chosen_multiplier_has_at_least_one_set: *const u64,
pub decomposed_chosen_multiplier: *const u64,
pub num_scalars: u32,
pub active_bits: u32,
pub shift_pre: u64,
pub shift_post: u32,
pub ilog2_chosen_multiplier: u32,
pub chosen_multiplier_num_bits: u32,
pub is_chosen_multiplier_zero: bool,
pub is_abs_chosen_multiplier_one: bool,
pub is_chosen_multiplier_negative: bool,
pub is_chosen_multiplier_pow2: bool,
pub chosen_multiplier_has_more_bits_than_numerator: bool,
pub is_chosen_multiplier_geq_two_pow_numerator: bool,
pub ilog2_divisor: u32,
pub is_divisor_zero: bool,
pub is_abs_divisor_one: bool,
pub is_divisor_negative: bool,
pub is_divisor_pow2: bool,
pub divisor_has_more_bits_than_numerator: bool,
}
#[allow(clippy::unnecessary_operation, clippy::identity_op)]
const _: () = {
["Size of CudaScalarDivisorFFI"][::std::mem::size_of::<CudaScalarDivisorFFI>() - 64usize];
["Alignment of CudaScalarDivisorFFI"][::std::mem::align_of::<CudaScalarDivisorFFI>() - 8usize];
["Offset of field: CudaScalarDivisorFFI::chosen_multiplier_has_at_least_one_set"][::std::mem::offset_of!(
CudaScalarDivisorFFI,
chosen_multiplier_has_at_least_one_set
) - 0usize];
["Offset of field: CudaScalarDivisorFFI::decomposed_chosen_multiplier"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, decomposed_chosen_multiplier) - 8usize];
["Offset of field: CudaScalarDivisorFFI::num_scalars"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, num_scalars) - 16usize];
["Offset of field: CudaScalarDivisorFFI::active_bits"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, active_bits) - 20usize];
["Offset of field: CudaScalarDivisorFFI::shift_pre"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, shift_pre) - 24usize];
["Offset of field: CudaScalarDivisorFFI::shift_post"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, shift_post) - 32usize];
["Offset of field: CudaScalarDivisorFFI::ilog2_chosen_multiplier"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, ilog2_chosen_multiplier) - 36usize];
["Offset of field: CudaScalarDivisorFFI::chosen_multiplier_num_bits"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, chosen_multiplier_num_bits) - 40usize];
["Offset of field: CudaScalarDivisorFFI::is_chosen_multiplier_zero"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, is_chosen_multiplier_zero) - 44usize];
["Offset of field: CudaScalarDivisorFFI::is_abs_chosen_multiplier_one"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, is_abs_chosen_multiplier_one) - 45usize];
["Offset of field: CudaScalarDivisorFFI::is_chosen_multiplier_negative"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, is_chosen_multiplier_negative) - 46usize];
["Offset of field: CudaScalarDivisorFFI::is_chosen_multiplier_pow2"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, is_chosen_multiplier_pow2) - 47usize];
["Offset of field: CudaScalarDivisorFFI::chosen_multiplier_has_more_bits_than_numerator"][::std::mem::offset_of!(
CudaScalarDivisorFFI,
chosen_multiplier_has_more_bits_than_numerator
)
- 48usize];
["Offset of field: CudaScalarDivisorFFI::is_chosen_multiplier_geq_two_pow_numerator"][::std::mem::offset_of!(
CudaScalarDivisorFFI,
is_chosen_multiplier_geq_two_pow_numerator
)
- 49usize];
["Offset of field: CudaScalarDivisorFFI::ilog2_divisor"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, ilog2_divisor) - 52usize];
["Offset of field: CudaScalarDivisorFFI::is_divisor_zero"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, is_divisor_zero) - 56usize];
["Offset of field: CudaScalarDivisorFFI::is_abs_divisor_one"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, is_abs_divisor_one) - 57usize];
["Offset of field: CudaScalarDivisorFFI::is_divisor_negative"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, is_divisor_negative) - 58usize];
["Offset of field: CudaScalarDivisorFFI::is_divisor_pow2"]
[::std::mem::offset_of!(CudaScalarDivisorFFI, is_divisor_pow2) - 59usize];
["Offset of field: CudaScalarDivisorFFI::divisor_has_more_bits_than_numerator"][::std::mem::offset_of!(
CudaScalarDivisorFFI,
divisor_has_more_bits_than_numerator
) - 60usize];
};
unsafe extern "C" {
pub fn scratch_cuda_apply_univariate_lut_kb_64(
streams: *const *mut ffi::c_void,
@@ -1086,7 +1007,6 @@ unsafe extern "C" {
message_modulus: u32,
carry_modulus: u32,
pbs_type: PBS_TYPE,
reduce_degrees_for_single_carry_propagation: bool,
allocate_gpu_memory: bool,
allocate_ms_array: bool,
) -> u64;
@@ -1098,6 +1018,7 @@ unsafe extern "C" {
gpu_count: u32,
radix_lwe_out: *mut CudaRadixCiphertextFFI,
radix_lwe_vec: *mut CudaRadixCiphertextFFI,
reduce_degrees_for_single_carry_propagation: bool,
mem_ptr: *mut i8,
bsks: *const *mut ffi::c_void,
ksks: *const *mut ffi::c_void,
@@ -1413,6 +1334,54 @@ unsafe extern "C" {
gpu_indexes: *const u32,
);
}
unsafe extern "C" {
pub fn scratch_cuda_integer_radix_scalar_mul_high_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr: *mut *mut i8,
glwe_dimension: u32,
polynomial_size: u32,
lwe_dimension: u32,
ks_level: u32,
ks_base_log: u32,
pbs_level: u32,
pbs_base_log: u32,
grouping_factor: u32,
num_blocks: u32,
message_modulus: u32,
carry_modulus: u32,
pbs_type: PBS_TYPE,
num_scalar_bits: u32,
anticipated_buffer_drop: bool,
allocate_gpu_memory: bool,
allocate_ms_array: bool,
) -> u64;
}
unsafe extern "C" {
pub fn cuda_integer_radix_scalar_mul_high_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
ct: *mut CudaRadixCiphertextFFI,
mem_ptr: *mut i8,
ksks: *const *mut ffi::c_void,
rhs: u64,
decomposed_scalar: *const u64,
has_at_least_one_set: *const u64,
ms_noise_reduction_key: *const CudaModulusSwitchNoiseReductionKeyFFI,
bsks: *const *mut ffi::c_void,
num_scalars: u32,
);
}
unsafe extern "C" {
pub fn cleanup_cuda_integer_radix_scalar_mul_high_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr_void: *mut *mut i8,
);
}
unsafe extern "C" {
pub fn scratch_cuda_apply_noise_squashing_kb(
streams: *const *mut ffi::c_void,
@@ -1508,50 +1477,6 @@ unsafe extern "C" {
mem_ptr_void: *mut *mut i8,
);
}
unsafe extern "C" {
pub fn scratch_cuda_integer_unsigned_scalar_div_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr: *mut *mut i8,
glwe_dimension: u32,
polynomial_size: u32,
lwe_dimension: u32,
ks_level: u32,
ks_base_log: u32,
pbs_level: u32,
pbs_base_log: u32,
grouping_factor: u32,
num_blocks: u32,
message_modulus: u32,
carry_modulus: u32,
pbs_type: PBS_TYPE,
scalar_divisor_ffi: *const CudaScalarDivisorFFI,
allocate_gpu_memory: bool,
allocate_ms_array: bool,
) -> u64;
}
unsafe extern "C" {
pub fn cuda_integer_unsigned_scalar_div_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
numerator_ct: *mut CudaRadixCiphertextFFI,
mem_ptr: *mut i8,
bsks: *const *mut ffi::c_void,
ksks: *const *mut ffi::c_void,
ms_noise_reduction_key: *const CudaModulusSwitchNoiseReductionKeyFFI,
scalar_divisor_ffi: *const CudaScalarDivisorFFI,
);
}
unsafe extern "C" {
pub fn cleanup_cuda_integer_unsigned_scalar_div_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr_void: *mut *mut i8,
);
}
unsafe extern "C" {
pub fn scratch_cuda_extend_radix_with_sign_msb_64(
streams: *const *mut ffi::c_void,
@@ -1597,153 +1522,6 @@ unsafe extern "C" {
mem_ptr_void: *mut *mut i8,
);
}
unsafe extern "C" {
pub fn scratch_cuda_integer_signed_scalar_div_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr: *mut *mut i8,
glwe_dimension: u32,
polynomial_size: u32,
lwe_dimension: u32,
ks_level: u32,
ks_base_log: u32,
pbs_level: u32,
pbs_base_log: u32,
grouping_factor: u32,
num_blocks: u32,
message_modulus: u32,
carry_modulus: u32,
pbs_type: PBS_TYPE,
scalar_divisor_ffi: *const CudaScalarDivisorFFI,
allocate_gpu_memory: bool,
allocate_ms_array: bool,
) -> u64;
}
unsafe extern "C" {
pub fn cuda_integer_signed_scalar_div_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
numerator_ct: *mut CudaRadixCiphertextFFI,
mem_ptr: *mut i8,
bsks: *const *mut ffi::c_void,
ksks: *const *mut ffi::c_void,
ms_noise_reduction_key: *const CudaModulusSwitchNoiseReductionKeyFFI,
scalar_divisor_ffi: *const CudaScalarDivisorFFI,
numerator_bits: u32,
);
}
unsafe extern "C" {
pub fn cleanup_cuda_integer_signed_scalar_div_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr_void: *mut *mut i8,
);
}
unsafe extern "C" {
pub fn scratch_integer_unsigned_scalar_div_rem_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr: *mut *mut i8,
glwe_dimension: u32,
polynomial_size: u32,
lwe_dimension: u32,
ks_level: u32,
ks_base_log: u32,
pbs_level: u32,
pbs_base_log: u32,
grouping_factor: u32,
num_blocks: u32,
message_modulus: u32,
carry_modulus: u32,
pbs_type: PBS_TYPE,
scalar_divisor_ffi: *const CudaScalarDivisorFFI,
active_bits_divisor: u32,
allocate_gpu_memory: bool,
allocate_ms_array: bool,
) -> u64;
}
unsafe extern "C" {
pub fn cuda_integer_unsigned_scalar_div_rem_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
quotient_ct: *mut CudaRadixCiphertextFFI,
remainder_ct: *mut CudaRadixCiphertextFFI,
mem_ptr: *mut i8,
bsks: *const *mut ffi::c_void,
ksks: *const *mut ffi::c_void,
ms_noise_reduction_key: *const CudaModulusSwitchNoiseReductionKeyFFI,
scalar_divisor_ffi: *const CudaScalarDivisorFFI,
divisor_has_at_least_one_set: *const u64,
decomposed_divisor: *const u64,
num_scalars_divisor: u32,
clear_blocks: *const ffi::c_void,
h_clear_blocks: *const ffi::c_void,
num_clear_blocks: u32,
);
}
unsafe extern "C" {
pub fn cleanup_cuda_integer_unsigned_scalar_div_rem_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr_void: *mut *mut i8,
);
}
unsafe extern "C" {
pub fn scratch_integer_signed_scalar_div_rem_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr: *mut *mut i8,
glwe_dimension: u32,
polynomial_size: u32,
lwe_dimension: u32,
ks_level: u32,
ks_base_log: u32,
pbs_level: u32,
pbs_base_log: u32,
grouping_factor: u32,
num_blocks: u32,
message_modulus: u32,
carry_modulus: u32,
pbs_type: PBS_TYPE,
scalar_divisor_ffi: *const CudaScalarDivisorFFI,
active_bits_divisor: u32,
allocate_gpu_memory: bool,
allocate_ms_array: bool,
) -> u64;
}
unsafe extern "C" {
pub fn cuda_integer_signed_scalar_div_rem_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
quotient_ct: *mut CudaRadixCiphertextFFI,
remainder_ct: *mut CudaRadixCiphertextFFI,
mem_ptr: *mut i8,
bsks: *const *mut ffi::c_void,
ksks: *const *mut ffi::c_void,
ms_noise_reduction_key: *const CudaModulusSwitchNoiseReductionKeyFFI,
scalar_divisor_ffi: *const CudaScalarDivisorFFI,
divisor_has_at_least_one_set: *const u64,
decomposed_divisor: *const u64,
num_scalars_divisor: u32,
numerator_bits: u32,
);
}
unsafe extern "C" {
pub fn cleanup_cuda_integer_signed_scalar_div_rem_radix_kb_64(
streams: *const *mut ffi::c_void,
gpu_indexes: *const u32,
gpu_count: u32,
mem_ptr_void: *mut *mut i8,
);
}
pub const KS_TYPE_BIG_TO_SMALL: KS_TYPE = 0;
pub const KS_TYPE_SMALL_TO_BIG: KS_TYPE = 1;
pub type KS_TYPE = ffi::c_uint;
@@ -2338,19 +2116,6 @@ unsafe extern "C" {
grouping_factor: u32,
);
}
unsafe extern "C" {
pub fn cuda_convert_lwe_multi_bit_programmable_bootstrap_key_128(
stream: *mut ffi::c_void,
gpu_index: u32,
dest: *mut ffi::c_void,
src: *const ffi::c_void,
input_lwe_dim: u32,
glwe_dim: u32,
level_count: u32,
polynomial_size: u32,
grouping_factor: u32,
);
}
unsafe extern "C" {
pub fn scratch_cuda_multi_bit_programmable_bootstrap_64(
stream: *mut ffi::c_void,
@@ -2393,45 +2158,3 @@ unsafe extern "C" {
pbs_buffer: *mut *mut i8,
);
}
unsafe extern "C" {
pub fn scratch_cuda_multi_bit_programmable_bootstrap_128_vector_64(
stream: *mut ffi::c_void,
gpu_index: u32,
buffer: *mut *mut i8,
glwe_dimension: u32,
polynomial_size: u32,
level_count: u32,
input_lwe_ciphertext_count: u32,
allocate_gpu_memory: bool,
) -> u64;
}
unsafe extern "C" {
pub fn cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_128(
stream: *mut ffi::c_void,
gpu_index: u32,
lwe_array_out: *mut ffi::c_void,
lwe_output_indexes: *const ffi::c_void,
lut_vector: *const ffi::c_void,
lut_vector_indexes: *const ffi::c_void,
lwe_array_in: *const ffi::c_void,
lwe_input_indexes: *const ffi::c_void,
bootstrapping_key: *const ffi::c_void,
mem_ptr: *mut i8,
lwe_dimension: u32,
glwe_dimension: u32,
polynomial_size: u32,
grouping_factor: u32,
base_log: u32,
level_count: u32,
num_samples: u32,
num_many_lut: u32,
lut_stride: u32,
);
}
unsafe extern "C" {
pub fn cleanup_cuda_multi_bit_programmable_bootstrap_128(
stream: *mut ffi::c_void,
gpu_index: u32,
buffer: *mut *mut i8,
);
}

View File

@@ -23,7 +23,6 @@ extern "C" {
pub fn cuda_malloc_async(size: u64, stream: *mut c_void, gpu_index: u32) -> *mut c_void;
pub fn cuda_check_valid_malloc(size: u64, gpu_index: u32) -> bool;
pub fn cuda_device_total_memory(gpu_index: u32) -> u64;
pub fn cuda_memcpy_with_size_tracking_async_to_gpu(
dest: *mut c_void,

Some files were not shown because too many files have changed in this diff Show More