Compare commits


1 commit

Author: J-B Orfila · SHA1: d0937aae20 · Message: pbs count · Date: 2024-03-20 18:28:02 +01:00
732 changed files with 28592 additions and 67719 deletions

View File

@@ -21,7 +21,7 @@ jobs:
uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
with:
# We use a PAT to have the same user (zama-bot) for label deletion as for creation.
github_token: ${{ secrets.FHE_ACTIONS_TOKEN }}
github_token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
labels: approved
# Add label only if the review is approved and if the label doesn't already exist
@@ -30,5 +30,5 @@ jobs:
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && !contains(fromJSON(env.LABELS), 'approved') }}
with:
# We need to use a PAT to be able to trigger `labeled` event for the other workflow.
github_token: ${{ secrets.FHE_ACTIONS_TOKEN }}
github_token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
labels: approved
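
The hunk above only swaps the PAT secret (FHE_ACTIONS_TOKEN to CONCRETE_ACTIONS_TOKEN); the shape of the label automation is unchanged. A minimal sketch of that shape follows, assuming the add step uses actions-ecosystem/action-add-labels (version tag assumed, since its pinned SHA is not shown in the diff) and populating LABELS from the event payload for simplicity. A PAT matters here because events produced by the default GITHUB_TOKEN do not trigger other workflows:

name: PR label automation (sketch)
on:
  pull_request:
    types: [ synchronize ]
  pull_request_review:
    types: [ submitted ]

env:
  LABELS: ${{ toJSON(github.event.pull_request.labels.*.name) }}

jobs:
  manage-approved-label:
    runs-on: ubuntu-latest
    steps:
      # Drop the label on new pushes so a stale approval is not carried over.
      - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
        if: ${{ github.event_name == 'pull_request' }}
        with:
          # PAT so the deletion is attributed to the same bot user as the creation.
          github_token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
          labels: approved
      # Re-add the label once a review is approved, unless it is already present.
      - uses: actions-ecosystem/action-add-labels@v1   # assumed; pin to a SHA in practice
        if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && !contains(fromJSON(env.LABELS), 'approved') }}
        with:
          # PAT so the resulting `labeled` event can trigger the downstream workflow.
          github_token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
          labels: approved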

View File

@@ -23,16 +23,17 @@ jobs:
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
instance-id: ${{ steps.start-instance.outputs.ec2-instance-id }}
aws-region: ${{ steps.start-instance.outputs.aws-region }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
fast-tests:
@@ -44,14 +45,14 @@ jobs:
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
@@ -59,10 +60,6 @@ jobs:
run: |
make test_concrete_csprng
- name: Run tfhe-zk-pok tests
run: |
make test_zk_pok
- name: Run core tests
run: |
AVX512_SUPPORT=ON make test_core_crypto
@@ -110,7 +107,7 @@ jobs:
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Fast AWS tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
@@ -123,18 +120,19 @@ jobs:
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
region: ${{ needs.setup-ec2.outputs.aws-region }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (fast-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "EC2 teardown (fast-tests) failed. (${{ env.ACTION_RUN_URL }})"
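
Taken together, this file keeps the three-job lifecycle that most of the CI workflows in this commit share: a setup job asks Slab to start an instance and exports its runner label plus region, the test job runs on that label, and an always-run teardown job stops the instance. A condensed sketch of the post-change shape, with the test job body reduced to a placeholder (action SHAs, secrets, and profile names as they appear after this change):

jobs:
  setup-ec2:
    runs-on: ubuntu-latest
    outputs:
      runner-name: ${{ steps.start-instance.outputs.label }}
      aws-region: ${{ steps.start-instance.outputs.aws-region }}
    steps:
      - name: Start instance
        id: start-instance
        uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
        with:
          mode: start
          github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
          slab-url: ${{ secrets.SLAB_BASE_URL }}
          job-secret: ${{ secrets.JOB_SECRET }}
          backend: aws
          profile: cpu-big

  fast-tests:
    needs: setup-ec2
    runs-on: ${{ needs.setup-ec2.outputs.runner-name }}   # the freshly started instance
    steps:
      - run: echo "checkout, toolchain install and make targets go here"

  teardown-ec2:
    # Always runs, so the instance is stopped even when the tests fail.
    if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
    needs: [ setup-ec2, fast-tests ]
    runs-on: ubuntu-latest
    steps:
      - name: Stop instance
        uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
        with:
          mode: stop
          github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
          slab-url: ${{ secrets.SLAB_BASE_URL }}
          job-secret: ${{ secrets.JOB_SECRET }}
          region: ${{ needs.setup-ec2.outputs.aws-region }}
          label: ${{ needs.setup-ec2.outputs.runner-name }}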

View File

@@ -29,10 +29,10 @@ jobs:
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
@@ -61,7 +61,7 @@ jobs:
make test_high_level_api_gpu
- uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
if: ${{ always() && github.event_name == 'pull_request' }}
if: ${{ github.event_name == 'pull_request' }}
with:
labels: 4090_test
github_token: ${{ secrets.GITHUB_TOKEN }}
@@ -69,7 +69,7 @@ jobs:
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CUDA RTX 4090 tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -23,16 +23,17 @@ jobs:
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
instance-id: ${{ steps.start-instance.outputs.ec2-instance-id }}
aws-region: ${{ steps.start-instance.outputs.aws-region }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: gpu-test
cuda-tests-linux:
@@ -55,7 +56,7 @@ jobs:
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up home
run: |
@@ -112,7 +113,7 @@ jobs:
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CUDA AWS tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
@@ -125,18 +126,19 @@ jobs:
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
region: ${{ needs.setup-ec2.outputs.aws-region }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (cuda-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "EC2 teardown (cuda-tests) failed. (${{ env.ACTION_RUN_URL }})"

View File

@@ -24,16 +24,17 @@ jobs:
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
instance-id: ${{ steps.start-instance.outputs.ec2-instance-id }}
aws-region: ${{ steps.start-instance.outputs.aws-region }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
unsigned-integer-tests:
@@ -45,14 +46,14 @@ jobs:
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
@@ -75,7 +76,7 @@ jobs:
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Unsigned Integer tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
@@ -88,18 +89,19 @@ jobs:
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
region: ${{ needs.setup-ec2.outputs.aws-region }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (unsigned-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "EC2 teardown (unsigned-integer-tests) failed. (${{ env.ACTION_RUN_URL }})"

View File

@@ -24,16 +24,17 @@ jobs:
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
instance-id: ${{ steps.start-instance.outputs.ec2-instance-id }}
aws-region: ${{ steps.start-instance.outputs.aws-region }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
signed-integer-tests:
@@ -45,14 +46,14 @@ jobs:
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
@@ -79,7 +80,7 @@ jobs:
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Signed Integer tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
@@ -92,18 +93,19 @@ jobs:
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
region: ${{ needs.setup-ec2.outputs.aws-region }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (signed-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "EC2 teardown (signed-integer-tests) failed. (${{ env.ACTION_RUN_URL }})"

View File

@@ -24,16 +24,17 @@ jobs:
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
instance-id: ${{ steps.start-instance.outputs.ec2-instance-id }}
aws-region: ${{ steps.start-instance.outputs.aws-region }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
cpu-tests:
@@ -45,14 +46,14 @@ jobs:
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
@@ -60,10 +61,6 @@ jobs:
run: |
make test_concrete_csprng
- name: Run tfhe-zk-pok tests
run: |
make test_zk_pok
- name: Run core tests
run: |
AVX512_SUPPORT=ON make test_core_crypto
@@ -105,7 +102,7 @@ jobs:
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CPU tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
@@ -118,18 +115,19 @@ jobs:
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
region: ${{ needs.setup-ec2.outputs.aws-region }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (cpu-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "EC2 teardown (cpu-tests) failed. (${{ env.ACTION_RUN_URL }})"

View File

@@ -24,16 +24,17 @@ jobs:
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
instance-id: ${{ steps.start-instance.outputs.ec2-instance-id }}
aws-region: ${{ steps.start-instance.outputs.aws-region }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-small
wasm-tests:
@@ -45,37 +46,30 @@ jobs:
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
- name: Install Node
run: |
make install_node
- name: Run fmt checks
run: |
make check_fmt_js
- name: Run js on wasm API tests
run: |
make test_nodejs_wasm_api_in_docker
- name: Run parallel wasm tests
run: |
make install_node
make ci_test_web_js_api_parallel
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "WASM tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
@@ -88,18 +82,19 @@ jobs:
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
region: ${{ needs.setup-ec2.outputs.aws-region }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (wasm-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "EC2 teardown (wasm-tests) failed. (${{ env.ACTION_RUN_URL }})"

View File

@@ -53,7 +53,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -63,7 +63,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -103,11 +103,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -126,11 +126,11 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Boolean benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Boolean benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -23,7 +23,7 @@ jobs:
fail-fast: false
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Install and run newline linter checks
if: matrix.os == 'ubuntu-latest'

View File

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Get actionlint
run: |

View File

@@ -53,7 +53,7 @@ jobs:
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: ${{ inputs.fork_repo }}
ref: ${{ inputs.fork_git_sha }}
@@ -63,13 +63,13 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@2d756ea4c53f7f6b397767d8723b3a10a9f35bf2
uses: tj-actions/changed-files@aa08304bd477b800d468db44fe10f6c61f7f7b11
with:
files_yaml: |
tfhe:
@@ -99,7 +99,7 @@ jobs:
make test_shortint_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71
uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
@@ -113,7 +113,7 @@ jobs:
make test_integer_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@7afa10ed9b269c561c2336fd862446844e0cbf71
uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
@@ -124,7 +124,7 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
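
The pattern in this coverage workflow is to gate both the coverage runs and the Codecov uploads on whether tfhe sources actually changed, via the `<key>_any_changed` output that tj-actions/changed-files derives from its `files_yaml` input. A trimmed sketch, assuming a `tfhe/src/**` glob (the real file list is elided in the diff):

      - name: Check for file changes
        id: changed-files
        uses: tj-actions/changed-files@aa08304bd477b800d468db44fe10f6c61f7f7b11
        with:
          files_yaml: |
            tfhe:
              - tfhe/src/**        # assumed glob
      # The key "tfhe" above yields the output "tfhe_any_changed".
      - name: Run shortint coverage
        if: steps.changed-files.outputs.tfhe_any_changed == 'true'
        run: |
          make test_shortint_cov
      - name: Upload tfhe coverage to Codecov
        if: steps.changed-files.outputs.tfhe_any_changed == 'true'
        uses: codecov/codecov-action@54bcd8715eee62d40e33596ef5e8f0f48dbbccab
        with:
          token: ${{ secrets.CODECOV_TOKEN }}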

View File

@@ -53,7 +53,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -63,7 +63,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -94,11 +94,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -117,11 +117,11 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "PBS benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "PBS benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -2,8 +2,31 @@
name: Core crypto GPU benchmarks
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
inputs:
instance_id:
description: "Instance ID"
type: string
instance_image_id:
description: "Instance AMI ID"
type: string
instance_type:
description: "Instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: "Slab request ID"
type: string
# This input is not used in this workflow but is still mandatory, since a calling workflow could
# use it. If a triggering command includes a user_inputs field, then the triggered workflow
# must declare this same input, otherwise the workflow won't be called.
# See start_full_benchmarks.yml as an example.
user_inputs:
description: "Type of benchmarks to run"
type: string
default: "weekly_benchmarks"
env:
CARGO_TERM_COLOR: always
@@ -11,27 +34,10 @@ env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
jobs:
setup-ec2:
name: Setup EC2 instance (cuda-benchmarks)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: gpu-bench
core-crypto-benchmarks:
name: CUDA core crypto benchmarks
needs: setup-ec2
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
run-core-crypto-benchmarks:
name: Execute GPU core crypto benchmarks in EC2
runs-on: ${{ github.event.inputs.runner_name }}
if: ${{ !cancelled() }}
strategy:
fail-fast: false
# explicit include-based build matrix of known valid options
@@ -39,29 +45,23 @@ jobs:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
gcc: 9
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.1
steps:
- name: Install dependencies
- name: Instance configuration used
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
echo "IDs: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
- name: Get benchmark date
run: |
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -71,7 +71,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -124,11 +124,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -144,39 +144,14 @@ jobs:
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}
# FIXME This action needs docker to be installed on the machine beforehand.
# - name: Slack Notification
# if: ${{ failure() }}
# continue-on-error: true
# uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
# env:
# SLACK_COLOR: ${{ job.status }}
# SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
# SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
# SLACK_MESSAGE: "PBS GPU benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
# SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
# SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
teardown-ec2:
name: Teardown EC2 instance (cuda-benchmarks)
if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
needs: [ setup-ec2, core-crypto-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (cuda-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "PBS GPU benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
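
This rewrite inverts the provisioning flow: instead of the workflow starting and stopping its own instance, Slab provisions a Hyperstack machine, registers a runner, and dispatches the workflow with the runner name and instance metadata as inputs. Reduced to its essentials, the new shape is:

on:
  workflow_dispatch:
    inputs:
      runner_name:
        description: "Action runner name"
        type: string
      instance_type:
        description: "Instance product type"
        type: string

jobs:
  run-benchmarks:
    # Lands directly on the runner Slab registered for this request;
    # no setup-ec2 / teardown-ec2 jobs remain in the workflow itself.
    runs-on: ${{ github.event.inputs.runner_name }}
    if: ${{ !cancelled() }}
    steps:
      - name: Instance configuration used
        run: |
          echo "Type: ${{ inputs.instance_type }}"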

View File

@@ -25,16 +25,17 @@ jobs:
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
instance-id: ${{ steps.start-instance.outputs.ec2-instance-id }}
aws-region: ${{ steps.start-instance.outputs.aws-region }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-small
csprng-randomness-tests:
@@ -46,14 +47,14 @@ jobs:
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
@@ -64,7 +65,7 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "concrete-csprng randomness check finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
@@ -77,18 +78,19 @@ jobs:
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
uses: zama-ai/slab-github-runner@8562abbdc96b3619bd5debe1fb934db298f9a044
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
region: ${{ needs.setup-ec2.outputs.aws-region }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (csprng-randomness-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "EC2 teardown (csprng-randomness-tests) failed. (${{ env.ACTION_RUN_URL }})"

View File

@@ -39,7 +39,7 @@ jobs:
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -52,16 +52,16 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Run integer benchmarks
run: |
@@ -103,10 +103,10 @@ jobs:
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Integer RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Integer RTX 4090 full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
cuda-core-crypto-benchmarks:
name: Cuda core crypto benchmarks (RTX 4090)
@@ -120,7 +120,7 @@ jobs:
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -133,16 +133,16 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Run integer benchmarks
run: |
@@ -185,14 +185,14 @@ jobs:
- name: Slack Notification
if: ${{ !success() && !cancelled() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Core crypto RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Core crypto RTX 4090 full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
remove_github_label:
name: Remove 4090 bench label
if: ${{ always() && github.event_name == 'pull_request' }}
if: ${{ github.event_name == 'pull_request' }}
needs: [cuda-integer-benchmarks, cuda-core-crypto-benchmarks]
runs-on: ["self-hosted", "4090-desktop"]
steps:

View File

@@ -46,7 +46,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -56,7 +56,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -97,11 +97,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -120,11 +120,11 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Integer benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -74,7 +74,7 @@ jobs:
echo "Request ID: ${{ inputs.request_id }}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -92,16 +92,16 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Run benchmarks with AVX512
run: |
@@ -148,11 +148,11 @@ jobs:
steps:
- name: Notify
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Integer full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Integer full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -2,9 +2,23 @@
name: Integer GPU benchmarks
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
inputs:
instance_id:
description: "Instance ID"
type: string
instance_image_id:
description: "Instance AMI ID"
type: string
instance_type:
description: "Instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: "Slab request ID"
type: string
env:
CARGO_TERM_COLOR: always
@@ -15,27 +29,10 @@ env:
RUST_MIN_STACK: "8388608"
jobs:
setup-ec2:
name: Setup EC2 instance (cuda-benchmarks)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: gpu-bench
cuda-integer-benchmarks:
name: CUDA integer benchmarks
needs: setup-ec2
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
run-integer-benchmarks:
name: Execute integer benchmarks in EC2
runs-on: ${{ github.event.inputs.runner_name }}
if: ${{ !cancelled() }}
strategy:
fail-fast: false
# explicit include-based build matrix of known valid options
@@ -43,29 +40,23 @@ jobs:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
gcc: 9
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.1
steps:
- name: Install dependencies
- name: Instance configuration used
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
echo "IDs: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
- name: Get benchmark date
run: |
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -75,7 +66,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -120,7 +111,7 @@ jobs:
COMMIT_HASH="$(git describe --tags --dirty)"
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n2-H100x1" \
--hardware ${{ inputs.instance_type }} \
--backend gpu \
--project-version "${COMMIT_HASH}" \
--branch ${{ github.ref_name }} \
@@ -137,11 +128,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -157,39 +148,14 @@ jobs:
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}
# FIXME This action needs docker to be installed on the machine beforehand.
# - name: Slack Notification
# if: ${{ !success() && !cancelled() }}
# continue-on-error: true
# uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
# env:
# SLACK_COLOR: ${{ job.status }}
# SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
# SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
# SLACK_MESSAGE: "Integer GPU benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
# SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
# SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
teardown-ec2:
name: Teardown EC2 instance (cuda-benchmarks)
if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
needs: [ setup-ec2, cuda-integer-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
if: ${{ !success() && !cancelled() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (cuda-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Integer GPU benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
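
Note the knock-on change in the parser step above: the hardware label is now taken from the dispatched instance_type instead of the hard-coded "n2-H100x1". A defensive version of that invocation might quote the expansions (the quoting is my addition; the diff leaves them bare):

      - name: Parse results
        run: |
          COMMIT_HASH="$(git describe --tags --dirty)"
          python3 ./ci/benchmark_parser.py target/criterion "${{ env.RESULTS_FILENAME }}" \
            --database tfhe_rs \
            --hardware "${{ inputs.instance_type }}" \
            --backend gpu \
            --project-version "${COMMIT_HASH}" \
            --branch "${{ github.ref_name }}"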

View File

@@ -2,9 +2,31 @@
name: Integer GPU full benchmarks
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
inputs:
instance_id:
description: "Instance ID"
type: string
instance_image_id:
description: "Instance AMI ID"
type: string
instance_type:
description: "Instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: "Slab request ID"
type: string
# This input is not used in this workflow but is still mandatory, since a calling workflow could
# use it. If a triggering command includes a user_inputs field, then the triggered workflow
# must declare this same input, otherwise the workflow won't be called.
# See start_full_benchmarks.yml as an example.
user_inputs:
description: "Type of benchmarks to run"
type: string
default: "weekly_benchmarks"
env:
CARGO_TERM_COLOR: always
@@ -14,28 +36,11 @@ env:
RUST_MIN_STACK: "8388608"
jobs:
setup-ec2:
name: Setup EC2 instance (cuda-full-benchmarks)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: gpu-bench
cuda-integer-full-benchmarks:
name: CUDA integer full benchmarks
needs: setup-ec2
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
integer-benchmarks:
name: Execute integer benchmarks for all operation flavors
runs-on: ${{ github.event.inputs.runner_name }}
timeout-minutes: 1440 # 24 hours
if: ${{ !cancelled() }}
continue-on-error: true
strategy:
fail-fast: false
@@ -47,25 +52,19 @@ jobs:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
gcc: 9
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.1
steps:
- name: Install dependencies
- name: Instance configuration used
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
echo "IDs: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -83,7 +82,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -108,11 +107,11 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Run benchmarks with AVX512
run: |
@@ -122,7 +121,7 @@ jobs:
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n2-H100x1" \
--hardware ${{ inputs.instance_type }} \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
@@ -152,39 +151,19 @@ jobs:
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}
# FIXME This action needs docker to be installed on the machine beforehand.
# - name: Slack Notification
# if: ${{ !success() && !cancelled() }}
# continue-on-error: true
# uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
# env:
# SLACK_COLOR: ${{ job.status }}
# SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
# SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
# SLACK_MESSAGE: "Integer GPU full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
# SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
# SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
teardown-ec2:
name: Teardown EC2 instance (cuda-full-benchmarks)
if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
needs: [ setup-ec2, cuda-integer-full-benchmarks ]
runs-on: ubuntu-latest
slack-notification:
name: Slack Notification
runs-on: ${{ github.event.inputs.runner_name }}
if: ${{ !success() && !cancelled() }}
needs: integer-benchmarks
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
- name: Notify
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (cuda-full-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Integer GPU full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
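
The user_inputs comment above encodes a dispatch rule worth spelling out: if a workflow_dispatch request carries an input that the target workflow does not declare, the dispatch is rejected, so every workflow reachable from the same triggering command must declare user_inputs even when it ignores it. A minimal callee-side sketch:

on:
  workflow_dispatch:
    inputs:
      user_inputs:
        description: "Type of benchmarks to run"
        type: string
        default: "weekly_benchmarks"

jobs:
  show-request:
    runs-on: ubuntu-latest
    steps:
      # Unused inputs are still accessible; declaring them is what matters.
      - run: echo "Requested benchmark set: ${{ inputs.user_inputs }}"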

View File

@@ -46,7 +46,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -56,7 +56,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -97,11 +97,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -120,11 +120,11 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Integer benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -2,9 +2,23 @@
name: Integer GPU Multi-bit benchmarks
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
inputs:
instance_id:
description: "Instance ID"
type: string
instance_image_id:
description: "Instance AMI ID"
type: string
instance_type:
description: "Instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: "Slab request ID"
type: string
env:
CARGO_TERM_COLOR: always
@@ -15,28 +29,11 @@ env:
RUST_MIN_STACK: "8388608"
jobs:
setup-ec2:
name: Setup EC2 instance (cuda-multi-bit-benchmarks)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: gpu-bench
cuda-integer-multi-bit-benchmarks:
name: CUDA integer multi-bit benchmarks
needs: setup-ec2
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
cuda-integer-benchmarks:
name: Execute integer multi-bit benchmarks in EC2
runs-on: ${{ github.event.inputs.runner_name }}
timeout-minutes: 1440 # 24 hours
if: ${{ !cancelled() }}
strategy:
fail-fast: false
# explicit include-based build matrix of known valid options
@@ -44,29 +41,23 @@ jobs:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
gcc: 9
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.1
steps:
- name: Install dependencies
- name: Instance configuration used
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
echo "IDs: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
- name: Get benchmark date
run: |
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -76,7 +67,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -121,7 +112,7 @@ jobs:
COMMIT_HASH="$(git describe --tags --dirty)"
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n2-H100x1" \
--hardware ${{ inputs.instance_type }} \
--backend gpu \
--project-version "${COMMIT_HASH}" \
--branch ${{ github.ref_name }} \
@@ -138,11 +129,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -158,39 +149,14 @@ jobs:
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}
# FIXME This action needs docker to be installed on the machine beforehand.
# - name: Slack Notification
# if: ${{ !success() && !cancelled() }}
# continue-on-error: true
# uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
# env:
# SLACK_COLOR: ${{ job.status }}
# SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
# SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
# SLACK_MESSAGE: "Integer GPU benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
# SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
# SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
teardown-ec2:
name: Teardown EC2 instance (cuda-multi-bit-benchmarks)
if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
needs: [ setup-ec2, cuda-integer-multi-bit-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL_PRE_PROD }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
if: ${{ !success() && !cancelled() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (cuda-multi-bit-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Integer GPU benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -31,10 +31,10 @@ jobs:
timeout-minutes: 720
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: stable
@@ -74,10 +74,6 @@ jobs:
run: |
make test_concrete_csprng
- name: Run tfhe-zk-pok tests
run: |
make test_zk_pok
- name: Run core tests
run: |
make test_core_crypto
@@ -137,7 +133,7 @@ jobs:
- name: Slack Notification
if: ${{ needs.cargo-builds.result != 'skipped' }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ needs.cargo-builds.result }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}

View File

@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -49,7 +49,7 @@ jobs:
- name: Publish web package
if: ${{ inputs.push_web_package }}
uses: JS-DevTools/npm-publish@19c28f1ef146469e409470805ea4279d47c3d35c
uses: JS-DevTools/npm-publish@4b07b26a2f6e0a51846e1870223e545bae91c552
with:
token: ${{ secrets.NPM_TOKEN }}
package: tfhe/pkg/package.json
@@ -65,7 +65,7 @@ jobs:
- name: Publish Node package
if: ${{ inputs.push_node_package }}
uses: JS-DevTools/npm-publish@19c28f1ef146469e409470805ea4279d47c3d35c
uses: JS-DevTools/npm-publish@4b07b26a2f6e0a51846e1870223e545bae91c552
with:
token: ${{ secrets.NPM_TOKEN }}
package: tfhe/pkg/package.json
@@ -74,7 +74,7 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}

View File

@@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -32,7 +32,7 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}

View File

@@ -1,129 +0,0 @@
# Publish new release of tfhe-cuda-backend on crates.io.
name: Publish CUDA release
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
push_to_crates:
description: "Push to crate"
type: boolean
default: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-ec2:
name: Setup EC2 instance (publish-cuda-release)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: gpu-test
publish-cuda-release:
name: Publish CUDA Release
needs: setup-ec2
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 9
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
steps:
- name: Checkout
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
with:
fetch-depth: 0
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Publish crates.io package
if: ${{ inputs.push_to_crates }}
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
cargo publish -p tfhe-cuda-backend --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-cuda-backend release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-ec2:
name: Teardown EC2 instance (publish-cuda-release)
if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
needs: [ setup-ec2, publish-cuda-release ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@1dced74825027fe3d481392163ed8fc56813fb5d
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (publish-cuda-release) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -17,10 +17,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
- name: Checkout lattice-estimator
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: malb/lattice-estimator
path: lattice_estimator
@@ -42,7 +42,7 @@ jobs:
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}

View File

@@ -45,7 +45,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -55,7 +55,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -95,11 +95,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -118,11 +118,11 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Shortint benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Shortint benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -53,7 +53,7 @@ jobs:
echo "Request ID: ${{ inputs.request_id }}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -71,16 +71,16 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Run benchmarks with AVX512
run: |
@@ -142,11 +142,11 @@ jobs:
steps:
- name: Notify
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Shortint full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Shortint full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -46,7 +46,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -56,7 +56,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -97,11 +97,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -120,11 +120,11 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Signed integer benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Signed integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -52,7 +52,7 @@ jobs:
echo "Request ID: ${{ inputs.request_id }}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -70,16 +70,16 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Run benchmarks with AVX512
run: |
@@ -126,11 +126,11 @@ jobs:
steps:
- name: Notify
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Signed integer full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Signed integer full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -46,7 +46,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -56,7 +56,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -97,11 +97,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -120,11 +120,11 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Signed integer benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Signed integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -58,13 +58,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@2d756ea4c53f7f6b397767d8723b3a10a9f35bf2
uses: tj-actions/changed-files@aa08304bd477b800d468db44fe10f6c61f7f7b11
with:
files_yaml: |
common_benches:
@@ -111,11 +111,11 @@ jobs:
- .github/workflows/wasm_client_benchmark.yml
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Start AWS job in Slab
# If manually triggered, check that the current bench has been requested

View File

@@ -30,16 +30,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Set benchmarks type as weekly
if: (github.event_name == 'workflow_dispatch' && inputs.benchmark_type == 'weekly') || github.event.schedule == '0 1 * * 6'

View File

@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
- name: Save repo
@@ -26,12 +26,12 @@ jobs:
with:
source_repo: "zama-ai/tfhe-rs"
source_branch: "main"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.FHE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.CONCRETE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_branch: "main"
- name: git-sync tags
uses: wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83
with:
source_repo: "zama-ai/tfhe-rs"
source_branch: "refs/tags/*"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.FHE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.CONCRETE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_branch: "refs/tags/*"

View File

@@ -53,7 +53,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
fetch-depth: 0
@@ -63,7 +63,7 @@ jobs:
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@dc6353516c68da0f06325f42ad880f76a5e77ec9
uses: dtolnay/rust-toolchain@be73d7920c329f220ce78e0234b8f96b7ae60248
with:
toolchain: nightly
@@ -104,11 +104,11 @@ jobs:
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
@@ -127,11 +127,11 @@ jobs:
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "WASM benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "WASM benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -1,13 +1,6 @@
[workspace]
resolver = "2"
members = [
"tfhe",
"tfhe-zk-pok",
"tasks",
"apps/trivium",
"concrete-csprng",
"backends/tfhe-cuda-backend",
]
members = ["tfhe", "tasks", "apps/trivium", "concrete-csprng", "backends/tfhe-cuda-backend"]
[profile.bench]
lto = "fat"
@@ -24,4 +17,3 @@ lto = "off"
inherits = "dev"
opt-level = 3
lto = "off"
debug-assertions = false

View File

@@ -3,7 +3,6 @@ OS:=$(shell uname)
RS_CHECK_TOOLCHAIN:=$(shell cat toolchain.txt | tr -d '\n')
CARGO_RS_CHECK_TOOLCHAIN:=+$(RS_CHECK_TOOLCHAIN)
TARGET_ARCH_FEATURE:=$(shell ./scripts/get_arch_feature.sh)
CPU_COUNT=$(shell ./scripts/cpu_count.sh)
RS_BUILD_TOOLCHAIN:=stable
CARGO_RS_BUILD_TOOLCHAIN:=+$(RS_BUILD_TOOLCHAIN)
CARGO_PROFILE?=release
@@ -120,12 +119,7 @@ install_wasm_pack: install_rs_build_toolchain
.PHONY: install_node # Install last version of NodeJS via nvm
install_node:
curl -o nvm_install.sh https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh
@echo "2ed5e94ba12434370f0358800deb69f514e8bce90f13beb0e1b241d42c6abafd nvm_install.sh" > nvm_checksum
@sha256sum -c nvm_checksum
@rm nvm_checksum
$(SHELL) nvm_install.sh
@rm nvm_install.sh
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | $(SHELL)
source ~/.bashrc
$(SHELL) -i -c 'nvm install $(NODE_VERSION)' || \
( echo "Unable to install node, unknown error." && exit 1 )
@@ -155,51 +149,24 @@ check_actionlint_installed:
@actionlint --version > /dev/null 2>&1 || \
( echo "Unable to locate actionlint. Try installing it: https://github.com/rhysd/actionlint/releases" && exit 1 )
.PHONY: check_nvm_installed # Check if Node Version Manager is installed
check_nvm_installed:
@source ~/.nvm/nvm.sh && nvm --version > /dev/null 2>&1 || \
( echo "Unable to locate Node. Run 'make install_node'" && exit 1 )
.PHONY: fmt # Format rust code
fmt: install_rs_check_toolchain
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt
.PHONY: fmt_js # Format javascript code
fmt_js: check_nvm_installed
source ~/.nvm/nvm.sh && \
nvm install $(NODE_VERSION) && \
nvm use $(NODE_VERSION) && \
$(MAKE) -C tfhe/web_wasm_parallel_tests fmt
.PHONY: fmt_gpu # Format rust and cuda code
fmt_gpu: install_rs_check_toolchain
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt
cd "$(TFHECUDA_SRC)" && ./format_tfhe_cuda_backend.sh
.PHONY: fmt_c_tests # Format c tests
fmt_c_tests:
find tfhe/c_api_tests/ -regex '.*\.\(cpp\|hpp\|cu\|c\|h\)' -exec clang-format -style=file -i {} \;
.PHONY: check_fmt # Check rust code format
check_fmt: install_rs_check_toolchain
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt --check
.PHONY: check_fmt_c_tests # Check C tests format
check_fmt_c_tests:
find tfhe/c_api_tests/ -regex '.*\.\(cpp\|hpp\|cu\|c\|h\)' -exec clang-format --dry-run --Werror -style=file {} \;
.PHONY: check_fmt_gpu # Check rust and cuda code format
check_fmt_gpu: install_rs_check_toolchain
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt --check
cd "$(TFHECUDA_SRC)" && ./format_tfhe_cuda_backend.sh -c
.PHONY: check_fmt_js # Check javascript code format
check_fmt_js: check_nvm_installed
source ~/.nvm/nvm.sh && \
nvm install $(NODE_VERSION) && \
nvm use $(NODE_VERSION) && \
$(MAKE) -C tfhe/web_wasm_parallel_tests check_fmt
.PHONY: clippy_gpu # Run clippy lints on tfhe with "gpu" enabled
clippy_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
@@ -261,7 +228,7 @@ clippy: install_rs_check_toolchain
.PHONY: clippy_c_api # Run clippy lints enabling the boolean, shortint and the C API
clippy_c_api: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: clippy_js_wasm_api # Run clippy lints enabling the boolean, shortint, integer and the js wasm API
@@ -277,13 +244,13 @@ clippy_tasks:
.PHONY: clippy_trivium # Run clippy lints on Trivium app
clippy_trivium: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
-p tfhe-trivium -- --no-deps -D warnings
.PHONY: clippy_all_targets # Run clippy lints on all targets (benches, examples, etc.)
clippy_all_targets:
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,zk-pok-experimental \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: clippy_concrete_csprng # Run clippy lints on concrete-csprng
@@ -292,14 +259,9 @@ clippy_concrete_csprng:
--features=$(TARGET_ARCH_FEATURE) \
-p concrete-csprng -- --no-deps -D warnings
.PHONY: clippy_zk_pok # Run clippy lints on tfhe-zk-pok
clippy_zk_pok:
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
-p tfhe-zk-pok -- --no-deps -D warnings
.PHONY: clippy_all # Run all clippy targets
clippy_all: clippy clippy_boolean clippy_shortint clippy_integer clippy_all_targets clippy_c_api \
clippy_js_wasm_api clippy_tasks clippy_core clippy_concrete_csprng clippy_zk_pok clippy_trivium
clippy_js_wasm_api clippy_tasks clippy_core clippy_concrete_csprng clippy_trivium
.PHONY: clippy_fast # Run main clippy targets
clippy_fast: clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core \
@@ -362,14 +324,14 @@ symlink_c_libs_without_fingerprint:
.PHONY: build_c_api # Build the C API for boolean, shortint and integer
build_c_api: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,zk-pok-experimental,$(FORWARD_COMPAT_FEATURE) \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,$(FORWARD_COMPAT_FEATURE) \
-p $(TFHE_SPEC)
@"$(MAKE)" symlink_c_libs_without_fingerprint
.PHONY: build_c_api_gpu # Build the C API for boolean, shortint and integer
build_c_api_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,zk-pok-experimental,gpu \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,gpu \
-p $(TFHE_SPEC)
@"$(MAKE)" symlink_c_libs_without_fingerprint
@@ -385,7 +347,7 @@ build_web_js_api: install_rs_build_toolchain install_wasm_pack
cd tfhe && \
RUSTFLAGS="$(WASM_RUSTFLAGS)" rustup run "$(RS_BUILD_TOOLCHAIN)" \
wasm-pack build --release --target=web \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,zk-pok-experimental
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api
.PHONY: build_web_js_api_parallel # Build the js API targeting the web browser with parallelism support
build_web_js_api_parallel: install_rs_check_toolchain install_wasm_pack
@@ -393,7 +355,7 @@ build_web_js_api_parallel: install_rs_check_toolchain install_wasm_pack
rustup component add rust-src --toolchain $(RS_CHECK_TOOLCHAIN) && \
RUSTFLAGS="$(WASM_RUSTFLAGS) -C target-feature=+atomics,+bulk-memory,+mutable-globals" rustup run $(RS_CHECK_TOOLCHAIN) \
wasm-pack build --release --target=web \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,parallel-wasm-api,zk-pok-experimental \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,parallel-wasm-api \
-Z build-std=panic_abort,std
.PHONY: build_node_js_api # Build the js API targeting nodejs
@@ -401,7 +363,7 @@ build_node_js_api: install_rs_build_toolchain install_wasm_pack
cd tfhe && \
RUSTFLAGS="$(WASM_RUSTFLAGS)" rustup run "$(RS_BUILD_TOOLCHAIN)" \
wasm-pack build --release --target=nodejs \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,zk-pok-experimental
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api
.PHONY: build_concrete_csprng # Build concrete_csprng
build_concrete_csprng: install_rs_build_toolchain
@@ -411,10 +373,10 @@ build_concrete_csprng: install_rs_build_toolchain
.PHONY: test_core_crypto # Run the tests of the core_crypto module including experimental ones
test_core_crypto: install_rs_build_toolchain install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental,zk-pok-experimental -p $(TFHE_SPEC) -- core_crypto::
--features=$(TARGET_ARCH_FEATURE),experimental -p $(TFHE_SPEC) -- core_crypto::
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental,zk-pok-experimental,$(AVX512_FEATURE) -p $(TFHE_SPEC) -- core_crypto::; \
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p $(TFHE_SPEC) -- core_crypto::; \
fi
.PHONY: test_core_crypto_cov # Run the tests of the core_crypto module with code coverage
@@ -437,7 +399,7 @@ test_cuda_backend:
mkdir -p "$(TFHECUDA_BUILD)" && \
cd "$(TFHECUDA_BUILD)" && \
cmake .. -DCMAKE_BUILD_TYPE=Release -DTFHE_CUDA_BACKEND_BUILD_TESTS=ON && \
make -j "$(CPU_COUNT)" && \
make -j && \
make test
.PHONY: test_gpu # Run the tests of the core_crypto module including experimental on the gpu backend
@@ -585,7 +547,7 @@ test_integer_cov: install_rs_check_toolchain install_tarpaulin
.PHONY: test_high_level_api # Run all the tests for high_level_api
test_high_level_api: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,zk-pok-experimental -p $(TFHE_SPEC) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p $(TFHE_SPEC) \
-- high_level_api::
test_high_level_api_gpu: install_rs_build_toolchain install_cargo_nextest
@@ -596,14 +558,13 @@ test_high_level_api_gpu: install_rs_build_toolchain install_cargo_nextest
.PHONY: test_user_doc # Run tests from the .md documentation
test_user_doc: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) --doc \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,pbs-stats,zk-pok-experimental \
-p $(TFHE_SPEC) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p $(TFHE_SPEC) \
-- test_user_docs::
.PHONY: test_user_doc_gpu # Run tests for GPU from the .md documentation
test_user_doc_gpu: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) --doc \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,gpu,zk-pok-experimental -p $(TFHE_SPEC) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,gpu -p $(TFHE_SPEC) \
-- test_user_docs::
.PHONY: test_fhe_strings # Run tests for fhe_strings example
@@ -642,46 +603,33 @@ test_concrete_csprng:
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE) -p concrete-csprng
.PHONY: test_zk_pok # Run tfhe-zk-pok-experimental tests
test_zk_pok:
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
-p tfhe-zk-pok
.PHONY: doc # Build rust doc
doc: install_rs_check_toolchain
@# Even though we are not in docs.rs, this allows us to "just" build the doc
DOCS_RS=1 \
RUSTDOCFLAGS="--html-in-header katex-header.html" \
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" doc \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,gpu,internal-keycache,experimental --no-deps -p $(TFHE_SPEC)
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer --no-deps -p $(TFHE_SPEC)
.PHONY: docs # Build rust doc alias for doc
docs: doc
.PHONY: lint_doc # Build rust doc with linting enabled
lint_doc: install_rs_check_toolchain
@# Even though we are not in docs.rs, this allows us to "just" build the doc
DOCS_RS=1 \
RUSTDOCFLAGS="--html-in-header katex-header.html -Dwarnings" \
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" doc \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,gpu,internal-keycache,experimental -p $(TFHE_SPEC) --no-deps
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer -p $(TFHE_SPEC) --no-deps
.PHONY: lint_docs # Build rust doc with linting enabled alias for lint_doc
lint_docs: lint_doc
.PHONY: format_doc_latex # Format the documentation latex equations to avoid broken rendering.
format_doc_latex:
RUSTFLAGS="" cargo xtask format_latex_doc
cargo xtask format_latex_doc
@"$(MAKE)" --no-print-directory fmt
@printf "\n===============================\n\n"
@printf "Please manually inspect changes made by format_latex_doc, rustfmt can break equations \
if the line length is exceeded\n"
@printf "\n===============================\n"
.PHONY: check_md_docs_are_tested # Checks that the rust codeblocks in our .md files are tested
check_md_docs_are_tested:
RUSTFLAGS="" cargo xtask check_tfhe_docs_are_tested
.PHONY: check_compile_tests # Build tests in debug without running them
check_compile_tests:
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --no-run \
@@ -701,7 +649,7 @@ check_compile_tests_benches_gpu: install_rs_build_toolchain
mkdir -p "$(TFHECUDA_BUILD)" && \
cd "$(TFHECUDA_BUILD)" && \
cmake .. -DCMAKE_BUILD_TYPE=Debug -DTFHE_CUDA_BACKEND_BUILD_TESTS=ON -DTFHE_CUDA_BACKEND_BUILD_BENCHMARKS=ON && \
make -j "$(CPU_COUNT)"
make -j
.PHONY: build_nodejs_test_docker # Build a docker image with tools to run nodejs tests for wasm API
build_nodejs_test_docker:
@@ -942,15 +890,13 @@ sha256_bool: install_rs_check_toolchain
--features=$(TARGET_ARCH_FEATURE),boolean
.PHONY: pcc # pcc stands for pre commit checks (except GPU)
pcc: no_tfhe_typo no_dbg_log check_fmt lint_doc check_md_docs_are_tested clippy_all \
check_compile_tests
pcc: no_tfhe_typo no_dbg_log check_fmt lint_doc clippy_all check_compile_tests
.PHONY: pcc_gpu # pcc stands for pre commit checks for GPU compilation
pcc_gpu: clippy_gpu clippy_cuda_backend check_compile_tests_benches_gpu
.PHONY: fpcc # pcc stands for pre commit checks, the f stands for fast
fpcc: no_tfhe_typo no_dbg_log check_fmt lint_doc check_md_docs_are_tested clippy_fast \
check_compile_tests
fpcc: no_tfhe_typo no_dbg_log check_fmt lint_doc clippy_fast check_compile_tests
.PHONY: conformance # Automatically fix problems that can be fixed
conformance: fix_newline fmt

View File

@@ -1,10 +1,6 @@
<p align="center">
<!-- product name logo -->
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/zama-ai/tfhe-rs/assets/157474013/5283e0ba-da1e-43af-9f2a-c5221367a12b">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/zama-ai/tfhe-rs/assets/157474013/b94a8c96-7595-400b-9311-70765c706955">
<img width=600 alt="Zama TFHE-rs">
</picture>
<img width=600 src="https://user-images.githubusercontent.com/5758427/231206749-8f146b97-3c5a-4201-8388-3ffa88580415.png">
</p>
<hr/>
@@ -131,13 +127,13 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
// Clear equivalent computations: 1344 * 5 = 6720
let encrypted_res_mul = &encrypted_a * &encrypted_b;
// Clear equivalent computations: 6720 >> 5 = 210
// Clear equivalent computations: 1344 >> 5 = 42
encrypted_a = &encrypted_res_mul >> &encrypted_b;
// Clear equivalent computations: let casted_a = a as u8;
let casted_a: FheUint8 = encrypted_a.cast_into();
// Clear equivalent computations: min(210, 7) = 7
// Clear equivalent computations: min(42, 7) = 7
let encrypted_res_min = &casted_a.min(&encrypted_c);
// Operation between clear and encrypted data:
@@ -177,12 +173,12 @@ to run in release mode with cargo's `--release` flag to have the best performanc
<br></br>
### Tutorials
- [[Video tutorial] Implement signed integers using TFHE-rs](https://www.zama.ai/post/video-tutorial-implement-signed-integers-ssing-tfhe-rs)
- [Homomorphic parity bit](https://docs.zama.ai/tfhe-rs/tutorials/parity_bit)
- [Homomorphic case changing on Ascii string](https://docs.zama.ai/tfhe-rs/tutorials/ascii_fhe_string)
- [Homomorphic Parity Bit](https://docs.zama.ai/tfhe-rs/tutorials/parity_bit)
- [Homomorphic Case Changing on Ascii String](https://docs.zama.ai/tfhe-rs/tutorials/ascii_fhe_string)
- [Boolean SHA256 with TFHE-rs](https://www.zama.ai/post/boolean-sha256-tfhe-rs)
- [Dark market with TFHE-rs](https://www.zama.ai/post/dark-market-tfhe-rs)
- [Regular expression engine with TFHE-rs](https://www.zama.ai/post/regex-engine-tfhe-rs)
- [Dark Market with TFHE-rs](https://www.zama.ai/post/dark-market-tfhe-rs)
- [Regular Expression Engine with TFHE-rs](https://www.zama.ai/post/regex-engine-tfhe-rs)
*Explore more useful resources in [TFHE-rs tutorials](https://docs.zama.ai/tfhe-rs/tutorials) and [Awesome Zama repo](https://github.com/zama-ai/awesome-zama)*
<br></br>
@@ -206,12 +202,6 @@ with `red_cost_model = reduction.RC.BDGL16`.
When a new update is published in the Lattice Estimator, we update parameters accordingly.
### Security Model
The default parameters for the TFHE-rs library are chosen considering the IND-CPA security model, and are selected with a bootstrapping failure probability fixed at p_error = $2^{-40}$. In particular, it is assumed that the results of decrypted computations are not shared by the secret key owner with any third parties, as such an action can lead to leakage of the secret encryption key. If you are designing an application where decryptions must be shared, you will need to craft custom encryption parameters which are chosen in consideration of the IND-CPA^D security model [1].
[1] Li, Baiyu, et al. "Securing approximate homomorphic encryption using differential privacy." Annual International Cryptology Conference. Cham: Springer Nature Switzerland, 2022. https://eprint.iacr.org/2022/816.pdf
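For intuition only (this union-bound estimate is illustrative, not part of the parameter-selection procedure described above): with a per-bootstrap failure probability of $p_{error} = 2^{-40}$, a computation performing $k$ bootstraps fails with probability at most

```latex
\Pr[\text{any bootstrap fails}] \;\le\; k \cdot p_{\mathrm{error}} = k \cdot 2^{-40},
\qquad \text{e.g. } k = 2^{20} \text{ bootstraps} \;\implies\; \Pr \le 2^{-20}.
```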
#### Side-Channel Attacks
Mitigation for side-channel attacks has not yet been implemented in TFHE-rs,
@@ -250,11 +240,7 @@ This software is distributed under the **BSD-3-Clause-Clear** license. If you ha
## Support
<a target="_blank" href="https://community.zama.ai">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/zama-ai/tfhe-rs/assets/157474013/08656d0a-3f44-4126-b8b6-8c601dff5380">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/zama-ai/tfhe-rs/assets/157474013/1c9c9308-50ac-4aab-a4b9-469bb8c536a4">
<img alt="Support">
</picture>
<img src="https://github.com/zama-ai/tfhe-rs/assets/157474013/8da6cf5b-51a0-4c86-9e75-fd0e4a4c64a4">
</a>
🌟 If you find this project helpful or interesting, please consider giving it a star on GitHub! Your support helps to grow the community and motivates further development.

View File

@@ -15,6 +15,7 @@ Example of a Rust main below:
```rust
use tfhe::{ConfigBuilder, generate_keys, FheBool};
use tfhe::prelude::*;
use tfhe_trivium::TriviumStream;
fn get_hexadecimal_string_from_lsb_first_stream(a: Vec<bool>) -> String {
@@ -138,8 +139,10 @@ Example code:
```rust
use tfhe::shortint::prelude::*;
use tfhe::shortint::CastingKey;
use tfhe::{ConfigBuilder, generate_keys, FheUint64};
use tfhe::prelude::*;
use tfhe_trivium::TriviumStreamShortint;
fn test_shortint() {

View File

@@ -1,8 +1,10 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool};
use tfhe_trivium::KreyviumStream;
use criterion::Criterion;
pub fn kreyvium_bool_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);

View File

@@ -1,8 +1,10 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64, FheUint8};
use tfhe_trivium::{KreyviumStreamByte, TransCiphering};
use criterion::Criterion;
pub fn kreyvium_byte_gen(c: &mut Criterion) {
let config = ConfigBuilder::default()
.enable_function_evaluation()

View File

@@ -1,9 +1,12 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::prelude::*;
use tfhe::shortint::KeySwitchingKey;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
use tfhe_trivium::{KreyviumStreamShortint, TransCiphering};
use criterion::Criterion;
pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);

View File

@@ -1,8 +1,10 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool};
use tfhe_trivium::TriviumStream;
use criterion::Criterion;
pub fn trivium_bool_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);

View File

@@ -1,8 +1,10 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64, FheUint8};
use tfhe_trivium::{TransCiphering, TriviumStreamByte};
use criterion::Criterion;
pub fn trivium_byte_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);

View File

@@ -1,9 +1,12 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::prelude::*;
use tfhe::shortint::KeySwitchingKey;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
use tfhe_trivium::{TransCiphering, TriviumStreamShortint};
use criterion::Criterion;
pub fn trivium_shortint_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);

View File

@@ -2,10 +2,12 @@
//! for the representation of the inner bits.
use crate::static_deque::StaticDeque;
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheBool, ServerKey};
use rayon::prelude::*;
/// Internal trait specifying which operations are necessary for the KreyviumStream generic type
pub trait KreyviumBoolInput<OpOutput>:
Sized

View File

@@ -2,10 +2,12 @@
//! for the representation of the inner bits.
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheUint8, ServerKey};
use rayon::prelude::*;
/// Internal trait specifying which operations are necessary for the KreyviumStreamByte generic type
pub trait KreyviumByteInput<OpOutput>:
Sized

View File

@@ -1,7 +1,9 @@
use crate::static_deque::StaticDeque;
use rayon::prelude::*;
use tfhe::shortint::prelude::*;
use rayon::prelude::*;
/// KreyviumStreamShortint: a struct implementing the Kreyvium stream cipher, using a generic
/// Ciphertext for the internal representation of bits (intended to represent a single bit). To be
/// able to compute FHE operations, it also owns a ServerKey.
@@ -34,7 +36,7 @@ impl KreyviumStreamShortint {
let mut c_register: [Ciphertext; 111] = [0; 111].map(|x| sk.create_trivial(x));
for i in 0..93 {
a_register[i].clone_from(&key[128 - 93 + i]);
a_register[i] = key[128 - 93 + i].clone();
}
for i in 0..84 {
b_register[i] = sk.create_trivial(iv[128 - 84 + i]);

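The `clone_from` lines in this hunk are a small allocation-reuse optimization over `x = y.clone()`: `clone_from` may overwrite the destination's existing buffer instead of allocating a fresh value and dropping the old one. A self-contained sketch of the difference, using `Vec<u8>` as a stand-in for `Ciphertext` (the stand-in type is an assumption for illustration):

```rust
fn main() {
    let key = vec![vec![1u8; 1024]; 4]; // stand-in for ciphertext data

    let mut register: Vec<Vec<u8>> = vec![vec![0u8; 1024]; 4];
    for i in 0..4 {
        // Allocates a new Vec, then drops the old register[i].
        register[i] = key[i].clone();
    }
    for i in 0..4 {
        // Reuses register[i]'s existing buffer, since the lengths match.
        register[i].clone_from(&key[i]);
    }
    assert_eq!(register, key);
}
```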
View File

@@ -1,7 +1,8 @@
use crate::{KreyviumStream, KreyviumStreamByte, KreyviumStreamShortint, TransCiphering};
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
use crate::{KreyviumStream, KreyviumStreamByte, KreyviumStreamShortint, TransCiphering};
// Values for these tests come from the github repo renaud1239/Kreyvium,
// commit fd6828f68711276c25f55e605935028f5e843f43

View File

@@ -1,6 +1,5 @@
#[allow(clippy::module_inception)]
mod static_deque;
pub use static_deque::StaticDeque;
mod static_byte_deque;
pub use static_byte_deque::{StaticByteDeque, StaticByteDequeInput};

View File

@@ -4,6 +4,7 @@
//! This pretends to store bits, and allows accessing them in chunks of 8 consecutive bits.
use crate::static_deque::StaticDeque;
use tfhe::FheUint8;
/// Internal trait specifying which operations are needed by StaticByteDeque

View File

@@ -2,11 +2,13 @@
//! when trans ciphering is available to them.
use crate::{KreyviumStreamByte, KreyviumStreamShortint, TriviumStreamByte, TriviumStreamShortint};
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::shortint::Ciphertext;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheUint64, FheUint8, ServerKey};
use rayon::prelude::*;
/// Trait specifying the interface for trans ciphering a FheUint64 object. Since it is meant
/// to be used with stream ciphers, encryption and decryption are by default the same.
pub trait TransCiphering {

View File

@@ -1,7 +1,8 @@
use crate::{TransCiphering, TriviumStream, TriviumStreamByte, TriviumStreamShortint};
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
use crate::{TransCiphering, TriviumStream, TriviumStreamByte, TriviumStreamShortint};
// Values for these tests come from the github repo cantora/avr-crypto-lib, commit 2a5b018,
// file testvectors/trivium-80.80.test-vectors

View File

@@ -2,10 +2,12 @@
//! for the representation of the inner bits.
use crate::static_deque::StaticDeque;
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheBool, ServerKey};
use rayon::prelude::*;
/// Internal trait specifying which operations are necessary for the TriviumStream generic type
pub trait TriviumBoolInput<OpOutput>:
Sized

View File

@@ -2,10 +2,12 @@
//! for the representation of the inner bits.
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheUint8, ServerKey};
use rayon::prelude::*;
/// Internal trait specifying which operations are necessary for the TriviumStreamByte generic type
pub trait TriviumByteInput<OpOutput>:
Sized

View File

@@ -1,7 +1,9 @@
use crate::static_deque::StaticDeque;
use rayon::prelude::*;
use tfhe::shortint::prelude::*;
use rayon::prelude::*;
/// TriviumStreamShortint: a struct implementing the Trivium stream cipher, using a generic
/// Ciphertext for the internal representation of bits (intended to represent a single bit). To be
/// able to compute FHE operations, it also owns a ServerKey.
@@ -32,7 +34,7 @@ impl TriviumStreamShortint {
let mut c_register: [Ciphertext; 111] = [0; 111].map(|x| sk.create_trivial(x));
for i in 0..80 {
a_register[93 - 80 + i].clone_from(&key[i]);
a_register[93 - 80 + i] = key[i].clone();
b_register[84 - 80 + i] = sk.create_trivial(iv[i]);
}

View File

@@ -2,12 +2,6 @@ use std::env;
use std::process::Command;
fn main() {
if let Ok(val) = env::var("DOCS_RS") {
if val.parse::<u32>() == Ok(1) {
return;
}
}
println!("Build tfhe-cuda-backend");
if env::consts::OS == "linux" {
let output = Command::new("./get_os_name.sh").output().unwrap();

View File

@@ -1,2 +1 @@
/build/
include/cuda_config.h

View File

@@ -58,15 +58,10 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler ${OpenMP_CXX_FLAGS}")
if(${CUDA_SUCCESS})
set(CMAKE_CUDA_ARCHITECTURES native)
string(REPLACE "-arch=sm_" "" CUDA_ARCH "${ARCH}")
set(CUDA_ARCH "${CUDA_ARCH}0")
else()
set(CMAKE_CUDA_ARCHITECTURES 70)
set(CUDA_ARCH "700")
endif()
add_compile_definitions(CUDA_ARCH=${CUDA_ARCH})
# in production, should use -arch=sm_70 --ptxas-options=-v to see register spills -lineinfo for better debugging
set(CMAKE_CUDA_FLAGS
"${CMAKE_CUDA_FLAGS} -ccbin ${CMAKE_CXX_COMPILER} -O3 \

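The non-native branch of this hunk falls back to `CMAKE_CUDA_ARCHITECTURES 70` with `CUDA_ARCH "700"`, while the native branch derives the same numeric form from the detected `-arch=sm_XX` flag. The string manipulation is easy to sanity-check (Rust is used here only for consistency with this document's other sketches; the flag value is hypothetical):

```rust
fn main() {
    // Mirrors: string(REPLACE "-arch=sm_" "" CUDA_ARCH "${ARCH}")
    //          set(CUDA_ARCH "${CUDA_ARCH}0")
    let arch_flag = "-arch=sm_70"; // hypothetical value detected by CMake
    let cuda_arch = format!("{}0", arch_flag.replace("-arch=sm_", ""));
    assert_eq!(cuda_arch, "700"); // same numeric form as the fallback
}
```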
View File

@@ -6,14 +6,14 @@ while getopts ":c" option; do
case $option in
c)
# code to execute when the -c flag is provided
find ./{include,src,tests_and_benchmarks/include,tests_and_benchmarks/tests,tests_and_benchmarks/benchmarks} -iregex '^.*\.\(cpp\|cu\|h\|cuh\)$' -print | xargs clang-format-15 -i -style='file' --dry-run --Werror
find ./{include,src,tests_and_benchmarks/tests,tests_and_benchmarks/benchmarks} -iregex '^.*\.\(cpp\|cu\|h\|cuh\)$' -print | xargs clang-format-15 -i -style='file' --dry-run --Werror
cmake-format -i CMakeLists.txt -c .cmake-format-config.py
find ./{include,src,tests_and_benchmarks/include,tests_and_benchmarks/tests,tests_and_benchmarks/benchmarks} -type f -name "CMakeLists.txt" | xargs -I % sh -c 'cmake-format -i % -c .cmake-format-config.py'
find ./{include,src,tests_and_benchmarks/tests,tests_and_benchmarks/benchmarks} -type f -name "CMakeLists.txt" | xargs -I % sh -c 'cmake-format -i % -c .cmake-format-config.py'
git diff --exit-code
exit
;;
esac
done
find ./{include,src,tests_and_benchmarks/include,tests_and_benchmarks/tests,tests_and_benchmarks/benchmarks} -iregex '^.*\.\(cpp\|cu\|h\|cuh\)$' -print | xargs clang-format-15 -i -style='file'
find ./{include,src,tests_and_benchmarks/tests,tests_and_benchmarks/benchmarks} -iregex '^.*\.\(cpp\|cu\|h\|cuh\)$' -print | xargs clang-format-15 -i -style='file'
cmake-format -i CMakeLists.txt -c .cmake-format-config.py
find ./{include,src,tests_and_benchmarks/include,tests_and_benchmarks/tests,tests_and_benchmarks/benchmarks} -type f -name "CMakeLists.txt" | xargs -I % sh -c 'cmake-format -i % -c .cmake-format-config.py'
find ./{include,src,tests_and_benchmarks/tests,tests_and_benchmarks/benchmarks} -type f -name "CMakeLists.txt" | xargs -I % sh -c 'cmake-format -i % -c .cmake-format-config.py'

View File

@@ -4,8 +4,8 @@
#include "device.h"
#include <cstdint>
enum PBS_TYPE { MULTI_BIT = 0, CLASSICAL = 1 };
enum PBS_VARIANT { DEFAULT = 0, CG = 1 };
enum PBS_TYPE { MULTI_BIT = 0, LOW_LAT = 1, AMORTIZED = 2 };
enum PBS_VARIANT { DEFAULT = 0, FAST = 1 };
extern "C" {
void cuda_fourier_polynomial_mul(void *input1, void *input2, void *output,
@@ -13,25 +13,29 @@ void cuda_fourier_polynomial_mul(void *input1, void *input2, void *output,
uint32_t polynomial_size,
uint32_t total_polynomials);
void cuda_convert_lwe_programmable_bootstrap_key_32(
void *dest, void *src, cuda_stream_t *stream, uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count, uint32_t polynomial_size);
void cuda_convert_lwe_bootstrap_key_32(void *dest, void *src,
cuda_stream_t *stream,
uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size);
void cuda_convert_lwe_programmable_bootstrap_key_64(
void *dest, void *src, cuda_stream_t *stream, uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count, uint32_t polynomial_size);
void cuda_convert_lwe_bootstrap_key_64(void *dest, void *src,
cuda_stream_t *stream,
uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size);
void scratch_cuda_programmable_bootstrap_amortized_32(
void scratch_cuda_bootstrap_amortized_32(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory);
void scratch_cuda_programmable_bootstrap_amortized_64(
void scratch_cuda_bootstrap_amortized_64(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory);
void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
void cuda_bootstrap_amortized_lwe_ciphertext_vector_32(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *pbs_buffer,
@@ -39,7 +43,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory);
void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
void cuda_bootstrap_amortized_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *pbs_buffer,
@@ -47,22 +51,22 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory);
void cleanup_cuda_programmable_bootstrap_amortized(cuda_stream_t *stream,
int8_t **pbs_buffer);
void cleanup_cuda_bootstrap_amortized(cuda_stream_t *stream,
int8_t **pbs_buffer);
void scratch_cuda_programmable_bootstrap_32(
void scratch_cuda_bootstrap_low_latency_32(
cuda_stream_t *stream, int8_t **buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory);
void scratch_cuda_programmable_bootstrap_64(
void scratch_cuda_bootstrap_low_latency_64(
cuda_stream_t *stream, int8_t **buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory);
void cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
void cuda_bootstrap_low_latency_lwe_ciphertext_vector_32(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *buffer,
@@ -70,7 +74,7 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory);
void cuda_programmable_bootstrap_lwe_ciphertext_vector_64(
void cuda_bootstrap_low_latency_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *buffer,
@@ -78,28 +82,31 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_64(
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory);
void cleanup_cuda_programmable_bootstrap(cuda_stream_t *stream,
int8_t **pbs_buffer);
void cleanup_cuda_bootstrap_low_latency_32(cuda_stream_t *stream,
int8_t **pbs_buffer);
uint64_t get_buffer_size_programmable_bootstrap_amortized_64(
void cleanup_cuda_bootstrap_low_latency_64(cuda_stream_t *stream,
int8_t **pbs_buffer);
uint64_t get_buffer_size_bootstrap_amortized_64(
uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory);
uint64_t get_buffer_size_programmable_bootstrap_64(
uint64_t get_buffer_size_bootstrap_low_latency_64(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory);
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_programmable_bootstrap_step_one(
get_buffer_size_full_sm_bootstrap_low_latency_step_one(
uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size + // accumulator_rotated
sizeof(double2) * polynomial_size / 2; // accumulator fft
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_programmable_bootstrap_step_two(
get_buffer_size_full_sm_bootstrap_low_latency_step_two(
uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size + // accumulator
sizeof(double2) * polynomial_size / 2; // accumulator fft
@@ -107,13 +114,13 @@ get_buffer_size_full_sm_programmable_bootstrap_step_two(
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_partial_sm_programmable_bootstrap(uint32_t polynomial_size) {
get_buffer_size_partial_sm_bootstrap_low_latency(uint32_t polynomial_size) {
return sizeof(double2) * polynomial_size / 2; // accumulator fft
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_programmable_bootstrap_cg(uint32_t polynomial_size) {
get_buffer_size_full_sm_bootstrap_fast_low_latency(uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size + // accumulator_rotated
sizeof(Torus) * polynomial_size + // accumulator
sizeof(double2) * polynomial_size / 2; // accumulator fft
@@ -121,13 +128,14 @@ get_buffer_size_full_sm_programmable_bootstrap_cg(uint32_t polynomial_size) {
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_partial_sm_programmable_bootstrap_cg(uint32_t polynomial_size) {
get_buffer_size_partial_sm_bootstrap_fast_low_latency(
uint32_t polynomial_size) {
return sizeof(double2) * polynomial_size / 2; // accumulator fft mask & body
}
template <typename Torus, PBS_TYPE pbs_type> struct pbs_buffer;
template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::CLASSICAL> {
template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::LOW_LAT> {
int8_t *d_mem;
Torus *global_accumulator;
@@ -147,13 +155,13 @@ template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::CLASSICAL> {
switch (pbs_variant) {
case PBS_VARIANT::DEFAULT: {
uint64_t full_sm_step_one =
get_buffer_size_full_sm_programmable_bootstrap_step_one<Torus>(
get_buffer_size_full_sm_bootstrap_low_latency_step_one<Torus>(
polynomial_size);
uint64_t full_sm_step_two =
get_buffer_size_full_sm_programmable_bootstrap_step_two<Torus>(
get_buffer_size_full_sm_bootstrap_low_latency_step_two<Torus>(
polynomial_size);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap<Torus>(
get_buffer_size_partial_sm_bootstrap_low_latency<Torus>(
polynomial_size);
uint64_t partial_dm_step_one = full_sm_step_one - partial_sm;
@@ -185,12 +193,12 @@ template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::CLASSICAL> {
polynomial_size * sizeof(Torus),
stream);
} break;
case PBS_VARIANT::CG: {
case PBS_VARIANT::FAST: {
uint64_t full_sm =
get_buffer_size_full_sm_programmable_bootstrap_cg<Torus>(
get_buffer_size_full_sm_bootstrap_fast_low_latency<Torus>(
polynomial_size);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap_cg<Torus>(
get_buffer_size_partial_sm_bootstrap_fast_low_latency<Torus>(
polynomial_size);
uint64_t partial_dm = full_sm - partial_sm;
@@ -229,14 +237,14 @@ template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::CLASSICAL> {
};
template <typename Torus>
__host__ __device__ uint64_t get_buffer_size_programmable_bootstrap_cg(
__host__ __device__ uint64_t get_buffer_size_bootstrap_fast_low_latency(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory) {
uint64_t full_sm =
get_buffer_size_full_sm_programmable_bootstrap_cg<Torus>(polynomial_size);
uint64_t full_sm = get_buffer_size_full_sm_bootstrap_fast_low_latency<Torus>(
polynomial_size);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap_cg<Torus>(
get_buffer_size_partial_sm_bootstrap_fast_low_latency<Torus>(
polynomial_size);
uint64_t partial_dm = full_sm - partial_sm;
uint64_t full_dm = full_sm;
@@ -255,42 +263,42 @@ __host__ __device__ uint64_t get_buffer_size_programmable_bootstrap_cg(
}
template <typename Torus>
bool has_support_to_cuda_programmable_bootstrap_cg(uint32_t glwe_dimension,
uint32_t polynomial_size,
uint32_t level_count,
uint32_t num_samples,
uint32_t max_shared_memory);
bool has_support_to_cuda_bootstrap_fast_low_latency(uint32_t glwe_dimension,
uint32_t polynomial_size,
uint32_t level_count,
uint32_t num_samples,
uint32_t max_shared_memory);
template <typename Torus>
void cuda_programmable_bootstrap_cg_lwe_ciphertext_vector(
void cuda_bootstrap_fast_low_latency_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<Torus, CLASSICAL> *buffer, uint32_t lwe_dimension,
pbs_buffer<Torus, LOW_LAT> *buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_luts,
uint32_t lwe_idx, uint32_t max_shared_memory);
template <typename Torus>
void cuda_programmable_bootstrap_lwe_ciphertext_vector(
void cuda_bootstrap_low_latency_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<Torus, CLASSICAL> *buffer, uint32_t lwe_dimension,
pbs_buffer<Torus, LOW_LAT> *buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_luts,
uint32_t lwe_idx, uint32_t max_shared_memory);
template <typename Torus, typename STorus>
void scratch_cuda_programmable_bootstrap_cg(
cuda_stream_t *stream, pbs_buffer<Torus, CLASSICAL> **pbs_buffer,
void scratch_cuda_fast_bootstrap_low_latency(
cuda_stream_t *stream, pbs_buffer<Torus, LOW_LAT> **pbs_buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory);
template <typename Torus, typename STorus>
void scratch_cuda_programmable_bootstrap(
cuda_stream_t *stream, pbs_buffer<Torus, CLASSICAL> **buffer,
void scratch_cuda_bootstrap_low_latency(
cuda_stream_t *stream, pbs_buffer<Torus, LOW_LAT> **buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory);
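
Taken together, the declarations above form a three-step lifecycle: scratch sizes and allocates the temporaries, the bootstrap consumes them, release frees them. A minimal sketch of that call order for a 64-bit torus follows; the wrapper name and every numeric parameter are illustrative placeholders, not a recommended parameter set, and the final delete assumes the scratch call heap-allocates the buffer object.

void run_low_latency_pbs_once(cuda_stream_t *stream, uint64_t *lwe_out,
                              uint64_t *out_idx, uint64_t *lut,
                              uint64_t *lut_idx, uint64_t *lwe_in,
                              uint64_t *in_idx, double2 *bsk,
                              uint32_t max_shared_memory) {
  pbs_buffer<uint64_t, LOW_LAT> *buffer = nullptr;
  // Step 1: size and allocate the temporary device buffers.
  scratch_cuda_bootstrap_low_latency<uint64_t, int64_t>(
      stream, &buffer, /*glwe_dimension=*/1, /*polynomial_size=*/2048,
      /*level_count=*/1, /*input_lwe_ciphertext_count=*/1, max_shared_memory,
      /*allocate_gpu_memory=*/true);
  // Step 2: bootstrap a single sample with a single LUT.
  cuda_bootstrap_low_latency_lwe_ciphertext_vector<uint64_t>(
      stream, lwe_out, out_idx, lut, lut_idx, lwe_in, in_idx, bsk, buffer,
      /*lwe_dimension=*/742, /*glwe_dimension=*/1, /*polynomial_size=*/2048,
      /*base_log=*/23, /*level_count=*/1, /*num_samples=*/1, /*num_luts=*/1,
      /*lwe_idx=*/0, max_shared_memory);
  // Step 3: free the scratch buffers, then the buffer object itself.
  buffer->release(stream);
  delete buffer;
}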

View File

@@ -0,0 +1,155 @@
#ifndef CUDA_MULTI_BIT_H
#define CUDA_MULTI_BIT_H
#include "bootstrap.h"
#include <cstdint>
extern "C" {
bool has_support_to_cuda_bootstrap_fast_multi_bit(uint32_t glwe_dimension,
uint32_t polynomial_size,
uint32_t level_count,
uint32_t num_samples,
uint32_t max_shared_memory);
void cuda_convert_lwe_multi_bit_bootstrap_key_64(
void *dest, void *src, cuda_stream_t *stream, uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count, uint32_t polynomial_size,
uint32_t grouping_factor);
void scratch_cuda_multi_bit_pbs_64(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t grouping_factor, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory,
uint32_t chunk_size = 0);
void cuda_multi_bit_pbs_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t grouping_factor, uint32_t base_log, uint32_t level_count,
uint32_t num_samples, uint32_t num_luts, uint32_t lwe_idx,
uint32_t max_shared_memory, uint32_t lwe_chunk_size = 0);
void scratch_cuda_generic_multi_bit_pbs_64(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t grouping_factor, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory,
uint32_t lwe_chunk_size = 0);
void cuda_generic_multi_bit_pbs_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *pbs_buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t grouping_factor, uint32_t base_log, uint32_t level_count,
uint32_t num_samples, uint32_t num_luts, uint32_t lwe_idx,
uint32_t max_shared_memory, uint32_t lwe_chunk_size = 0);
void cleanup_cuda_multi_bit_pbs_32(cuda_stream_t *stream, int8_t **pbs_buffer);
void cleanup_cuda_multi_bit_pbs_64(cuda_stream_t *stream, int8_t **pbs_buffer);
}
template <typename Torus, typename STorus>
void scratch_cuda_fast_multi_bit_pbs(
cuda_stream_t *stream, pbs_buffer<Torus, MULTI_BIT> **pbs_buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory, uint32_t lwe_chunk_size = 0);
template <typename Torus>
void cuda_fast_multi_bit_pbs_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, Torus *bootstrapping_key,
pbs_buffer<Torus, MULTI_BIT> *pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory,
uint32_t lwe_chunk_size = 0);
template <typename Torus, typename STorus>
void scratch_cuda_multi_bit_pbs(
cuda_stream_t *stream, pbs_buffer<Torus, MULTI_BIT> **pbs_buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory, uint32_t lwe_chunk_size = 0);
template <typename Torus>
void cuda_multi_bit_pbs_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, Torus *bootstrapping_key,
pbs_buffer<Torus, MULTI_BIT> *pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory,
uint32_t lwe_chunk_size = 0);
template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::MULTI_BIT> {
double2 *keybundle_fft;
Torus *global_accumulator;
double2 *global_accumulator_fft;
PBS_VARIANT pbs_variant;
pbs_buffer(cuda_stream_t *stream, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t lwe_chunk_size,
PBS_VARIANT pbs_variant, bool allocate_gpu_memory) {
this->pbs_variant = pbs_variant;
auto max_shared_memory = cuda_get_max_shared_memory(stream->gpu_index);
if (allocate_gpu_memory) {
switch (pbs_variant) {
case DEFAULT:
case FAST:
keybundle_fft = (double2 *)cuda_malloc_async(
input_lwe_ciphertext_count * lwe_chunk_size * level_count *
(glwe_dimension + 1) * (glwe_dimension + 1) *
(polynomial_size / 2) * sizeof(double2),
stream);
global_accumulator = (Torus *)cuda_malloc_async(
input_lwe_ciphertext_count * (glwe_dimension + 1) *
polynomial_size * sizeof(Torus),
stream);
global_accumulator_fft = (double2 *)cuda_malloc_async(
input_lwe_ciphertext_count * (glwe_dimension + 1) * level_count *
(polynomial_size / 2) * sizeof(double2),
stream);
break;
default:
PANIC("Cuda error (PBS): unsupported implementation variant.")
}
}
}
void release(cuda_stream_t *stream) {
cuda_drop_async(keybundle_fft, stream);
cuda_drop_async(global_accumulator, stream);
cuda_drop_async(global_accumulator_fft, stream);
}
};
#ifdef __CUDACC__
__host__ uint32_t get_lwe_chunk_size(uint32_t lwe_dimension,
uint32_t level_count,
uint32_t glwe_dimension,
uint32_t num_samples);
__host__ uint32_t get_average_lwe_chunk_size(uint32_t lwe_dimension,
uint32_t level_count,
uint32_t glwe_dimension,
uint32_t ct_count);
__host__ uint64_t get_max_buffer_size_multibit_bootstrap(
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t max_input_lwe_ciphertext_count);
#endif
#endif // CUDA_MULTI_BIT_H
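
For orientation, a hypothetical end-to-end call order for the C API above: the bootstrapping key is converted to the device layout once, then scratch, bootstrap, and cleanup follow per batch. All dimensions below are placeholders, and every pointer is assumed to be a valid allocation of the right size.

void run_multi_bit_pbs_once(cuda_stream_t *stream, void *d_bsk, void *src_bsk,
                            void *lwe_out, void *out_idx, void *lut,
                            void *lut_idx, void *lwe_in, void *in_idx,
                            uint32_t max_shared_memory) {
  const uint32_t lwe_dim = 742, glwe_dim = 1, poly_size = 2048;
  const uint32_t level_count = 1, grouping_factor = 2, num_samples = 1;
  // One-time conversion of the bootstrapping key to the device layout.
  cuda_convert_lwe_multi_bit_bootstrap_key_64(d_bsk, src_bsk, stream, lwe_dim,
                                              glwe_dim, level_count, poly_size,
                                              grouping_factor);
  int8_t *buffer = nullptr;
  scratch_cuda_multi_bit_pbs_64(stream, &buffer, lwe_dim, glwe_dim, poly_size,
                                level_count, grouping_factor, num_samples,
                                max_shared_memory,
                                /*allocate_gpu_memory=*/true);
  cuda_multi_bit_pbs_lwe_ciphertext_vector_64(
      stream, lwe_out, out_idx, lut, lut_idx, lwe_in, in_idx, d_bsk, buffer,
      lwe_dim, glwe_dim, poly_size, grouping_factor, /*base_log=*/23,
      level_count, num_samples, /*num_luts=*/1, /*lwe_idx=*/0,
      max_shared_memory);
  cleanup_cuda_multi_bit_pbs_64(stream, &buffer);
}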

File diff suppressed because it is too large

View File

@@ -1,7 +1,7 @@
#ifndef CUDA_LINALG_H_
#define CUDA_LINALG_H_
#include "programmable_bootstrap.h"
#include "bootstrap.h"
#include <cstdint>
#include <device.h>

View File

@@ -1,241 +0,0 @@
#ifndef CUDA_MULTI_BIT_H
#define CUDA_MULTI_BIT_H
#include "programmable_bootstrap.h"
#include <cstdint>
extern "C" {
bool has_support_to_cuda_programmable_bootstrap_cg_multi_bit(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t num_samples, uint32_t max_shared_memory);
void cuda_convert_lwe_multi_bit_programmable_bootstrap_key_64(
void *dest, void *src, cuda_stream_t *stream, uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count, uint32_t polynomial_size,
uint32_t grouping_factor);
void scratch_cuda_multi_bit_programmable_bootstrap_64(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t grouping_factor, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory,
uint32_t chunk_size = 0);
void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t grouping_factor, uint32_t base_log, uint32_t level_count,
uint32_t num_samples, uint32_t num_luts, uint32_t lwe_idx,
uint32_t max_shared_memory, uint32_t lwe_chunk_size = 0);
void scratch_cuda_generic_multi_bit_programmable_bootstrap_64(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t grouping_factor, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory,
uint32_t lwe_chunk_size = 0);
void cuda_generic_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *pbs_buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t grouping_factor, uint32_t base_log, uint32_t level_count,
uint32_t num_samples, uint32_t num_luts, uint32_t lwe_idx,
uint32_t max_shared_memory, uint32_t lwe_chunk_size = 0);
void cleanup_cuda_multi_bit_programmable_bootstrap(cuda_stream_t *stream,
int8_t **pbs_buffer);
}
template <typename Torus, typename STorus>
void scratch_cuda_cg_multi_bit_programmable_bootstrap(
cuda_stream_t *stream, pbs_buffer<Torus, MULTI_BIT> **pbs_buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory, uint32_t lwe_chunk_size = 0);
template <typename Torus>
void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, Torus *bootstrapping_key,
pbs_buffer<Torus, MULTI_BIT> *pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory,
uint32_t lwe_chunk_size = 0);
template <typename Torus, typename STorus>
void scratch_cuda_multi_bit_programmable_bootstrap(
cuda_stream_t *stream, pbs_buffer<Torus, MULTI_BIT> **pbs_buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory, uint32_t lwe_chunk_size = 0);
template <typename Torus>
void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, Torus *bootstrapping_key,
pbs_buffer<Torus, MULTI_BIT> *pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory,
uint32_t lwe_chunk_size = 0);
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_multibit_programmable_bootstrap_keybundle(
uint32_t polynomial_size);
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_one(
uint32_t polynomial_size);
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_two(
uint32_t polynomial_size);
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_partial_sm_multibit_programmable_bootstrap_step_one(
uint32_t polynomial_size);
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_cg_multibit_programmable_bootstrap(
uint32_t polynomial_size);
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_partial_sm_cg_multibit_programmable_bootstrap(
uint32_t polynomial_size);
template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::MULTI_BIT> {
int8_t *d_mem_keybundle = NULL;
int8_t *d_mem_acc_step_one = NULL;
int8_t *d_mem_acc_step_two = NULL;
int8_t *d_mem_acc_cg = NULL;
double2 *keybundle_fft;
Torus *global_accumulator;
double2 *global_accumulator_fft;
PBS_VARIANT pbs_variant;
pbs_buffer(cuda_stream_t *stream, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t lwe_chunk_size,
PBS_VARIANT pbs_variant, bool allocate_gpu_memory) {
this->pbs_variant = pbs_variant;
auto max_shared_memory = cuda_get_max_shared_memory(stream->gpu_index);
uint64_t full_sm_keybundle =
get_buffer_size_full_sm_multibit_programmable_bootstrap_keybundle<
Torus>(polynomial_size);
uint64_t full_sm_accumulate_step_one =
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_one<Torus>(
polynomial_size);
uint64_t partial_sm_accumulate_step_one =
get_buffer_size_partial_sm_multibit_programmable_bootstrap_step_one<
Torus>(polynomial_size);
uint64_t full_sm_accumulate_step_two =
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_two<Torus>(
polynomial_size);
uint64_t full_sm_cg_accumulate =
get_buffer_size_full_sm_cg_multibit_programmable_bootstrap<Torus>(
polynomial_size);
uint64_t partial_sm_cg_accumulate =
get_buffer_size_partial_sm_cg_multibit_programmable_bootstrap<Torus>(
polynomial_size);
auto num_blocks_keybundle = input_lwe_ciphertext_count * lwe_chunk_size *
(glwe_dimension + 1) * (glwe_dimension + 1) *
level_count;
auto num_blocks_acc_step_one =
level_count * (glwe_dimension + 1) * input_lwe_ciphertext_count;
auto num_blocks_acc_step_two =
input_lwe_ciphertext_count * (glwe_dimension + 1);
auto num_blocks_acc_cg =
level_count * (glwe_dimension + 1) * input_lwe_ciphertext_count;
if (allocate_gpu_memory) {
// Keybundle
if (max_shared_memory < full_sm_keybundle)
d_mem_keybundle = (int8_t *)cuda_malloc_async(
num_blocks_keybundle * full_sm_keybundle, stream);
switch (pbs_variant) {
case DEFAULT:
// Accumulator step one
if (max_shared_memory < partial_sm_accumulate_step_one)
d_mem_acc_step_one = (int8_t *)cuda_malloc_async(
num_blocks_acc_step_one * full_sm_accumulate_step_one, stream);
else if (max_shared_memory < full_sm_accumulate_step_one)
d_mem_acc_step_one = (int8_t *)cuda_malloc_async(
num_blocks_acc_step_one * partial_sm_accumulate_step_one, stream);
// Accumulator step two
if (max_shared_memory < full_sm_accumulate_step_two)
d_mem_acc_step_two = (int8_t *)cuda_malloc_async(
num_blocks_acc_step_two * full_sm_accumulate_step_two, stream);
break;
case CG:
// Accumulator CG
if (max_shared_memory < partial_sm_cg_accumulate)
d_mem_acc_cg = (int8_t *)cuda_malloc_async(
num_blocks_acc_cg * full_sm_cg_accumulate, stream);
else if (max_shared_memory < full_sm_cg_accumulate)
d_mem_acc_cg = (int8_t *)cuda_malloc_async(
num_blocks_acc_cg * partial_sm_cg_accumulate, stream);
break;
default:
PANIC("Cuda error (PBS): unsupported implementation variant.")
}
keybundle_fft = (double2 *)cuda_malloc_async(
num_blocks_keybundle * (polynomial_size / 2) * sizeof(double2),
stream);
global_accumulator = (Torus *)cuda_malloc_async(
num_blocks_acc_step_two * polynomial_size * sizeof(Torus), stream);
global_accumulator_fft = (double2 *)cuda_malloc_async(
num_blocks_acc_step_one * (polynomial_size / 2) * sizeof(double2),
stream);
}
}
void release(cuda_stream_t *stream) {
if (d_mem_keybundle)
cuda_drop_async(d_mem_keybundle, stream);
switch (pbs_variant) {
case DEFAULT:
if (d_mem_acc_step_one)
cuda_drop_async(d_mem_acc_step_one, stream);
if (d_mem_acc_step_two)
cuda_drop_async(d_mem_acc_step_two, stream);
break;
case CG:
if (d_mem_acc_cg)
cuda_drop_async(d_mem_acc_cg, stream);
break;
default:
PANIC("Cuda error (PBS): unsupported implementation variant.")
}
cuda_drop_async(keybundle_fft, stream);
cuda_drop_async(global_accumulator, stream);
cuda_drop_async(global_accumulator_fft, stream);
}
};
#ifdef __CUDACC__
__host__ uint32_t get_lwe_chunk_size(uint32_t ct_count);
#endif
#endif // CUDA_MULTI_BIT_H
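
The buffer constructors above all apply one three-way decision: if the full per-block scratch fits in shared memory, nothing extra is allocated; if only the partial footprint fits, a smaller device buffer backs the rest; otherwise the whole footprint goes to global memory. A standalone sketch of that decision (the names are ours, not the backend's):

#include <cstdint>

enum class ScratchTier { FullSM, PartialSM, NoSM };

ScratchTier pick_scratch_tier(uint64_t max_shared_memory, uint64_t full_sm,
                              uint64_t partial_sm) {
  if (max_shared_memory < partial_sm)
    return ScratchTier::NoSM;      // spill full_sm bytes per block to global
  if (max_shared_memory < full_sm)
    return ScratchTier::PartialSM; // spill only the part that does not fit
  return ScratchTier::FullSM;      // kernel runs entirely in shared memory
}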

View File

@@ -216,10 +216,14 @@ void cuda_drop_async(void *ptr, cuda_stream_t *stream) {
/// Get the maximum size for the shared memory
int cuda_get_max_shared_memory(uint32_t gpu_index) {
check_cuda_error(cudaSetDevice(gpu_index));
cudaDeviceProp prop;
check_cuda_error(cudaGetDeviceProperties(&prop, gpu_index));
int max_shared_memory = 0;
cudaDeviceGetAttribute(&max_shared_memory, cudaDevAttrMaxSharedMemoryPerBlock,
gpu_index);
check_cuda_error(cudaGetLastError());
if (prop.major >= 6) {
max_shared_memory = prop.sharedMemPerMultiprocessor;
} else {
max_shared_memory = prop.sharedMemPerBlock;
}
return max_shared_memory;
}
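
The new version derives the budget from the device properties, preferring the per-multiprocessor capacity on compute capability 6.0 and higher. A sketch contrasting the two queries, using only standard CUDA runtime calls; note that using more than the default per-block limit also requires an opt-in via cudaFuncSetAttribute, which the kernels are assumed to perform elsewhere.

#include <cuda_runtime.h>

int shared_memory_budget_sketch(int gpu_index) {
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, gpu_index);
  // Default per-block limit, i.e. what the attribute query reports.
  int per_block = 0;
  cudaDeviceGetAttribute(&per_block, cudaDevAttrMaxSharedMemoryPerBlock,
                         gpu_index);
  // The per-SM capacity is larger on newer parts, but a kernel can only use
  // it after cudaFuncSetAttribute(...,
  // cudaFuncAttributeMaxDynamicSharedMemorySize, ...).
  return prop.major >= 6 ? (int)prop.sharedMemPerMultiprocessor : per_block;
}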

View File

@@ -181,7 +181,7 @@ template <class params> __device__ void NSMFFT_direct(double2 *A) {
// from level 8, we need to check size of params degree, because we support
// minimum actual polynomial size = 256, when compressed size is halved and
// minimum supported compressed size is 128, so we always need first 7
// levels of butterfly operation, since butterfly levels are hardcoded
// levels of butterfly operation, since butterfly levels are hardcoded
// we need to check if polynomial size is big enough to require specific level
// of butterfly.
if constexpr (params::degree >= 256) {
@@ -353,7 +353,7 @@ template <class params> __device__ void NSMFFT_inverse(double2 *A) {
// compressed size = 8192 is actual polynomial size = 16384.
// twiddles for this size can't fit in constant memory so
// butterfly operation for this level access device memory to fetch
// butterfly operation for this level accesses device memory to fetch
// twiddles
if constexpr (params::degree >= 8192) {
// level 13
@@ -484,7 +484,7 @@ template <class params> __device__ void NSMFFT_inverse(double2 *A) {
// below level 8, we don't need to check size of params degree, because we
// support minimum actual polynomial size = 256, when compressed size is
// halved and minimum supported compressed size is 128, so we always need
// last 7 levels of butterfly operation, since butterfly levels are hardcoded
// last 7 levels of butterfly operation, since butterfly levels are hardcoded
// we don't need to check if polynomial size is big enough to require
// specific level of butterfly.
// level 7
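
The comments above describe compile-time level selection: the first and last seven butterfly levels are always emitted (the minimum compressed size is 128), and each further level is compiled in only when params::degree is large enough. A minimal sketch of the shape of that gating, with the real butterfly bodies elided:

template <class params> __device__ void nsmfft_levels_sketch(double2 *A) {
  // Seven unconditional levels for compressed size >= 128 would sit here.
  if constexpr (params::degree >= 256) {
    // level 8 butterflies, compiled only for large enough polynomials
  }
  if constexpr (params::degree >= 512) {
    // level 9 butterflies
  }
  // ... and so on up to the largest supported degree; from 8192 upward the
  // twiddles are fetched from device memory instead of constant memory.
}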

View File

@@ -3,7 +3,7 @@
/*
* 'negtwiddles' are stored in constant memory for faster access times
* because of it's limited size, only twiddles for up to 2^12 polynomial size
* because of its limited size, only twiddles for up to 2^12 polynomial size
* can be stored there, twiddles for 2^13 are stored in device memory
* 'negtwiddles13'
*/
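
A sketch of the storage split this comment describes, with illustrative names and lengths: the small tables live in fast but scarce constant memory, and the 2^13 table falls back to ordinary device memory.

#include <cuda_runtime.h>

__constant__ double2 negtwiddles_sketch[4096]; // tables up to 2^12 fit here
__device__ double2 negtwiddles13_sketch[8192]; // the 2^13 table spills here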

View File

@@ -5,8 +5,8 @@
#include "device.h"
#include "integer.cuh"
#include "integer.h"
#include "pbs/programmable_bootstrap_classic.cuh"
#include "pbs/programmable_bootstrap_multibit.cuh"
#include "pbs/bootstrap_low_latency.cuh"
#include "pbs/bootstrap_multibit.cuh"
#include "polynomial/functions.cuh"
#include "utils/kernel_dimensions.cuh"
#include <omp.h>

View File

@@ -29,8 +29,8 @@ __host__ void zero_out_if(cuda_stream_t *stream, Torus *lwe_array_out,
device_pack_bivariate_blocks<<<num_blocks, num_threads, 0,
stream->stream>>>(
lwe_array_out_block, predicate->lwe_indexes_in, lwe_array_input_block,
lwe_condition, predicate->lwe_indexes_in, params.big_lwe_dimension,
lwe_array_out_block, lwe_array_input_block, lwe_condition,
predicate->lwe_indexes, params.big_lwe_dimension,
params.message_modulus, 1);
check_cuda_error(cudaGetLastError());
}

View File

@@ -5,8 +5,8 @@ void scratch_cuda_integer_radix_comparison_kb_64(
uint32_t polynomial_size, uint32_t big_lwe_dimension,
uint32_t small_lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_radix_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, COMPARISON_TYPE op_type, bool is_signed,
uint32_t lwe_ciphertext_count, uint32_t message_modulus,
uint32_t carry_modulus, PBS_TYPE pbs_type, COMPARISON_TYPE op_type,
bool allocate_gpu_memory) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
@@ -17,9 +17,9 @@ void scratch_cuda_integer_radix_comparison_kb_64(
switch (op_type) {
case EQ:
case NE:
scratch_cuda_integer_radix_comparison_check_kb<uint64_t>(
stream, (int_comparison_buffer<uint64_t> **)mem_ptr, num_radix_blocks,
params, op_type, false, allocate_gpu_memory);
scratch_cuda_integer_radix_equality_check_kb<uint64_t>(
stream, (int_comparison_buffer<uint64_t> **)mem_ptr,
lwe_ciphertext_count, params, op_type, allocate_gpu_memory);
break;
case GT:
case GE:
@@ -27,9 +27,9 @@ void scratch_cuda_integer_radix_comparison_kb_64(
case LE:
case MAX:
case MIN:
scratch_cuda_integer_radix_comparison_check_kb<uint64_t>(
stream, (int_comparison_buffer<uint64_t> **)mem_ptr, num_radix_blocks,
params, op_type, is_signed, allocate_gpu_memory);
scratch_cuda_integer_radix_difference_check_kb<uint64_t>(
stream, (int_comparison_buffer<uint64_t> **)mem_ptr,
lwe_ciphertext_count, params, op_type, allocate_gpu_memory);
break;
}
}
@@ -37,7 +37,7 @@ void scratch_cuda_integer_radix_comparison_kb_64(
void cuda_comparison_integer_radix_ciphertext_kb_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_array_1,
void *lwe_array_2, int8_t *mem_ptr, void *bsk, void *ksk,
uint32_t num_radix_blocks) {
uint32_t lwe_ciphertext_count) {
int_comparison_buffer<uint64_t> *buffer =
(int_comparison_buffer<uint64_t> *)mem_ptr;
@@ -48,7 +48,7 @@ void cuda_comparison_integer_radix_ciphertext_kb_64(
stream, static_cast<uint64_t *>(lwe_array_out),
static_cast<uint64_t *>(lwe_array_1),
static_cast<uint64_t *>(lwe_array_2), buffer, bsk,
static_cast<uint64_t *>(ksk), num_radix_blocks);
static_cast<uint64_t *>(ksk), lwe_ciphertext_count);
break;
case GT:
case GE:
@@ -59,7 +59,7 @@ void cuda_comparison_integer_radix_ciphertext_kb_64(
static_cast<uint64_t *>(lwe_array_1),
static_cast<uint64_t *>(lwe_array_2), buffer,
buffer->diff_buffer->operator_f, bsk, static_cast<uint64_t *>(ksk),
num_radix_blocks);
lwe_ciphertext_count);
break;
case MAX:
case MIN:
@@ -67,7 +67,7 @@ void cuda_comparison_integer_radix_ciphertext_kb_64(
stream, static_cast<uint64_t *>(lwe_array_out),
static_cast<uint64_t *>(lwe_array_1),
static_cast<uint64_t *>(lwe_array_2), buffer, bsk,
static_cast<uint64_t *>(ksk), num_radix_blocks);
static_cast<uint64_t *>(ksk), lwe_ciphertext_count);
break;
default:
PANIC("Cuda error: integer operation not supported")
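
The difference check dispatched above halves its workload by packing adjacent radix digits whenever the carry space is wide enough: a packed block holds low + message_modulus * high, which stays below message_modulus * carry_modulus. A plaintext analogue of that packing, mirroring the device kernel's layout; the last digit is carried over when the count is odd.

#include <cstdint>
#include <vector>

std::vector<uint32_t> pack_digits_sketch(const std::vector<uint32_t> &digits,
                                         uint32_t message_modulus) {
  std::vector<uint32_t> packed;
  for (size_t i = 0; i + 1 < digits.size(); i += 2)
    packed.push_back(digits[i] + message_modulus * digits[i + 1]);
  if (digits.size() % 2 != 0)
    packed.push_back(digits.back()); // odd digit left unpacked
  return packed;
}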

View File

@@ -8,8 +8,8 @@
#include "integer/cmux.cuh"
#include "integer/negation.cuh"
#include "integer/scalar_addition.cuh"
#include "pbs/programmable_bootstrap_classic.cuh"
#include "pbs/programmable_bootstrap_multibit.cuh"
#include "pbs/bootstrap_low_latency.cuh"
#include "pbs/bootstrap_multibit.cuh"
#include "types/complex/operations.cuh"
#include "utils/kernel_dimensions.cuh"
@@ -71,25 +71,24 @@ are_all_comparisons_block_true(cuda_stream_t *stream, Torus *lwe_array_out,
auto are_all_block_true_buffer =
mem_ptr->eq_buffer->are_all_block_true_buffer;
auto tmp_out = are_all_block_true_buffer->tmp_out;
uint32_t total_modulus = message_modulus * carry_modulus;
uint32_t max_value = total_modulus - 1;
cuda_memcpy_async_gpu_to_gpu(
tmp_out, lwe_array_in,
lwe_array_out, lwe_array_in,
num_radix_blocks * (big_lwe_dimension + 1) * sizeof(Torus), stream);
int lut_num_blocks = 0;
uint32_t remaining_blocks = num_radix_blocks;
while (remaining_blocks > 0) {
while (remaining_blocks > 1) {
// Split in max_value chunks
uint32_t chunk_length = std::min(max_value, remaining_blocks);
int num_chunks = remaining_blocks / chunk_length;
// Since all blocks encrypt either 0 or 1, we can sum max_value of them
// as in the worst case we will be adding `max_value` ones
auto input_blocks = tmp_out;
auto input_blocks = lwe_array_out;
auto accumulator = are_all_block_true_buffer->tmp_block_accumulated;
for (int i = 0; i < num_chunks; i++) {
accumulate_all_blocks(stream, accumulator, input_blocks,
@@ -132,15 +131,8 @@ are_all_comparisons_block_true(cuda_stream_t *stream, Torus *lwe_array_out,
}
// Applies the LUT
if (remaining_blocks == 1) {
// In the last iteration we copy the output to the final address
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, lwe_array_out, accumulator, bsk, ksk, 1, lut);
return;
} else {
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, tmp_out, accumulator, bsk, ksk, num_chunks, lut);
}
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, lwe_array_out, accumulator, bsk, ksk, num_chunks, lut);
}
}
@@ -166,18 +158,18 @@ __host__ void is_at_least_one_comparisons_block_true(
uint32_t max_value = total_modulus - 1;
cuda_memcpy_async_gpu_to_gpu(
mem_ptr->tmp_lwe_array_out, lwe_array_in,
lwe_array_out, lwe_array_in,
num_radix_blocks * (big_lwe_dimension + 1) * sizeof(Torus), stream);
uint32_t remaining_blocks = num_radix_blocks;
while (remaining_blocks > 0) {
while (remaining_blocks > 1) {
// Split in max_value chunks
uint32_t chunk_length = std::min(max_value, remaining_blocks);
int num_chunks = remaining_blocks / chunk_length;
// Since all blocks encrypt either 0 or 1, we can sum max_value of them
// as in the worst case we will be adding `max_value` ones
auto input_blocks = mem_ptr->tmp_lwe_array_out;
auto input_blocks = lwe_array_out;
auto accumulator = buffer->tmp_block_accumulated;
for (int i = 0; i < num_chunks; i++) {
accumulate_all_blocks(stream, accumulator, input_blocks,
@@ -193,16 +185,8 @@ __host__ void is_at_least_one_comparisons_block_true(
int_radix_lut<Torus> *lut = mem_ptr->eq_buffer->is_non_zero_lut;
// Applies the LUT
if (remaining_blocks == 1) {
// In the last iteration we copy the output to the final address
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, lwe_array_out, accumulator, bsk, ksk, 1, lut);
return;
} else {
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, mem_ptr->tmp_lwe_array_out, accumulator, bsk, ksk, num_chunks,
lut);
}
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, lwe_array_out, accumulator, bsk, ksk, num_chunks, lut);
}
}
@@ -273,7 +257,7 @@ __host__ void host_compare_with_zero_equality(
remainder_blocks -= (chunk_size - 1);
// Update operands
chunk += (chunk_size - 1) * big_lwe_size;
chunk += chunk_size * big_lwe_size;
sum_i += big_lwe_size;
}
}
@@ -282,6 +266,11 @@ __host__ void host_compare_with_zero_equality(
stream, sum, sum, bsk, ksk, num_sum_blocks, zero_comparison);
are_all_comparisons_block_true(stream, lwe_array_out, sum, mem_ptr, bsk, ksk,
num_sum_blocks);
// The result will be in the first block. Everything else is
// garbage.
cuda_memset_async(lwe_array_out + big_lwe_size, 0,
big_lwe_size_bytes * (num_radix_blocks - 1), stream);
}
template <typename Torus>
@@ -290,9 +279,11 @@ __host__ void host_integer_radix_equality_check_kb(
Torus *lwe_array_2, int_comparison_buffer<Torus> *mem_ptr, void *bsk,
Torus *ksk, uint32_t num_radix_blocks) {
cudaSetDevice(stream->gpu_index);
auto eq_buffer = mem_ptr->eq_buffer;
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
// Applies the LUT for the comparison operation
auto comparisons = mem_ptr->tmp_block_comparisons;
integer_radix_apply_bivariate_lookup_table_kb(
@@ -301,10 +292,27 @@ __host__ void host_integer_radix_equality_check_kb(
// This takes a Vec of blocks, where each block is either 0 or 1.
//
// It returns a block encrypting 1 if all input blocks are 1
// It returns a block encrypting 1 if all input blocks are 1
// otherwise the block encrypts 0
are_all_comparisons_block_true(stream, lwe_array_out, comparisons, mem_ptr,
bsk, ksk, num_radix_blocks);
// Zero all blocks but the first
size_t big_lwe_size = big_lwe_dimension + 1;
size_t big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
cuda_memset_async(lwe_array_out + big_lwe_size, 0,
big_lwe_size_bytes * (num_radix_blocks - 1), stream);
}
template <typename Torus>
__host__ void scratch_cuda_integer_radix_equality_check_kb(
cuda_stream_t *stream, int_comparison_buffer<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params, COMPARISON_TYPE op,
bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
*mem_ptr = new int_comparison_buffer<Torus>(
stream, op, params, num_radix_blocks, allocate_gpu_memory);
}
template <typename Torus>
@@ -439,45 +447,38 @@ __host__ void host_integer_radix_difference_check_kb(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_array_left,
Torus *lwe_array_right, int_comparison_buffer<Torus> *mem_ptr,
std::function<Torus(Torus)> reduction_lut_f, void *bsk, Torus *ksk,
uint32_t num_radix_blocks) {
uint32_t total_num_radix_blocks) {
cudaSetDevice(stream->gpu_index);
auto diff_buffer = mem_ptr->diff_buffer;
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
auto big_lwe_size = big_lwe_dimension + 1;
auto message_modulus = params.message_modulus;
auto carry_modulus = params.carry_modulus;
uint32_t packed_num_radix_blocks = num_radix_blocks;
uint32_t num_radix_blocks = total_num_radix_blocks;
auto lhs = lwe_array_left;
auto rhs = lwe_array_right;
if (carry_modulus >= message_modulus) {
if (carry_modulus == message_modulus) {
// Packing is possible
// Pack inputs
Torus *packed_left = diff_buffer->tmp_packed_left;
Torus *packed_right = diff_buffer->tmp_packed_right;
// In case the ciphertext is signed, the sign block and the one before it
// are handled separately
if (mem_ptr->is_signed) {
packed_num_radix_blocks -= 2;
}
pack_blocks(stream, packed_left, lwe_array_left, big_lwe_dimension,
packed_num_radix_blocks, message_modulus);
num_radix_blocks, message_modulus);
pack_blocks(stream, packed_right, lwe_array_right, big_lwe_dimension,
packed_num_radix_blocks, message_modulus);
num_radix_blocks, message_modulus);
// From this point we have half number of blocks
packed_num_radix_blocks /= 2;
num_radix_blocks /= 2;
// Clean noise
auto identity_lut = mem_ptr->identity_lut;
auto cleaning_lut = mem_ptr->cleaning_lut;
integer_radix_apply_univariate_lookup_table_kb(
stream, packed_left, packed_left, bsk, ksk, packed_num_radix_blocks,
identity_lut);
stream, packed_left, packed_left, bsk, ksk, num_radix_blocks,
cleaning_lut);
integer_radix_apply_univariate_lookup_table_kb(
stream, packed_right, packed_right, bsk, ksk, packed_num_radix_blocks,
identity_lut);
stream, packed_right, packed_right, bsk, ksk, num_radix_blocks,
cleaning_lut);
lhs = packed_left;
rhs = packed_right;
@@ -488,78 +489,31 @@ __host__ void host_integer_radix_difference_check_kb(
// - 1 if lhs == rhs
// - 2 if lhs > rhs
auto comparisons = mem_ptr->tmp_block_comparisons;
auto num_comparisons = 0;
if (!mem_ptr->is_signed) {
// Compare packed blocks, or simply the total number of radix blocks in the
// inputs
compare_radix_blocks_kb(stream, comparisons, lhs, rhs, mem_ptr, bsk, ksk,
packed_num_radix_blocks);
num_comparisons = packed_num_radix_blocks;
} else {
// Packing is possible
if (carry_modulus >= message_modulus) {
// Compare (num_radix_blocks - 2) / 2 packed blocks
compare_radix_blocks_kb(stream, comparisons, lhs, rhs, mem_ptr, bsk, ksk,
packed_num_radix_blocks);
// Compare the last block before the sign block separately
auto identity_lut = mem_ptr->identity_lut;
Torus *last_left_block_before_sign_block =
diff_buffer->tmp_packed_left + packed_num_radix_blocks * big_lwe_size;
Torus *last_right_block_before_sign_block =
diff_buffer->tmp_packed_right +
packed_num_radix_blocks * big_lwe_size;
integer_radix_apply_univariate_lookup_table_kb(
stream, last_left_block_before_sign_block,
lwe_array_left + (num_radix_blocks - 2) * big_lwe_size, bsk, ksk, 1,
identity_lut);
integer_radix_apply_univariate_lookup_table_kb(
stream, last_right_block_before_sign_block,
lwe_array_right + (num_radix_blocks - 2) * big_lwe_size, bsk, ksk, 1,
identity_lut);
compare_radix_blocks_kb(
stream, comparisons + packed_num_radix_blocks * big_lwe_size,
last_left_block_before_sign_block, last_right_block_before_sign_block,
mem_ptr, bsk, ksk, 1);
// Compare the sign block separately
integer_radix_apply_bivariate_lookup_table_kb(
stream, comparisons + (packed_num_radix_blocks + 1) * big_lwe_size,
lwe_array_left + (num_radix_blocks - 1) * big_lwe_size,
lwe_array_right + (num_radix_blocks - 1) * big_lwe_size, bsk, ksk, 1,
mem_ptr->signed_lut);
num_comparisons = packed_num_radix_blocks + 2;
} else {
compare_radix_blocks_kb(stream, comparisons, lwe_array_left,
lwe_array_right, mem_ptr, bsk, ksk,
num_radix_blocks - 1);
// Compare the sign block separately
integer_radix_apply_bivariate_lookup_table_kb(
stream, comparisons + (num_radix_blocks - 1) * big_lwe_size,
lwe_array_left + (num_radix_blocks - 1) * big_lwe_size,
lwe_array_right + (num_radix_blocks - 1) * big_lwe_size, bsk, ksk, 1,
mem_ptr->signed_lut);
num_comparisons = num_radix_blocks;
}
}
compare_radix_blocks_kb(stream, comparisons, lhs, rhs, mem_ptr, bsk, ksk,
num_radix_blocks);
// Reduces a vec containing radix blocks that encrypt a sign
// (inferior, equal, superior) to one single radix block containing the
// final sign
tree_sign_reduction(stream, lwe_array_out, comparisons,
mem_ptr->diff_buffer->tree_buffer, reduction_lut_f, bsk,
ksk, num_comparisons);
ksk, num_radix_blocks);
// The result will be in the first block. Everything else is garbage.
size_t big_lwe_size = big_lwe_dimension + 1;
size_t big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
cuda_memset_async(lwe_array_out + big_lwe_size, 0,
(total_num_radix_blocks - 1) * big_lwe_size_bytes, stream);
}
template <typename Torus>
__host__ void scratch_cuda_integer_radix_comparison_check_kb(
__host__ void scratch_cuda_integer_radix_difference_check_kb(
cuda_stream_t *stream, int_comparison_buffer<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params, COMPARISON_TYPE op,
bool is_signed, bool allocate_gpu_memory) {
bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
*mem_ptr = new int_comparison_buffer<Torus>(
stream, op, params, num_radix_blocks, is_signed, allocate_gpu_memory);
stream, op, params, num_radix_blocks, allocate_gpu_memory);
}
template <typename Torus>
@@ -569,11 +523,10 @@ host_integer_radix_maxmin_kb(cuda_stream_t *stream, Torus *lwe_array_out,
int_comparison_buffer<Torus> *mem_ptr, void *bsk,
Torus *ksk, uint32_t total_num_radix_blocks) {
cudaSetDevice(stream->gpu_index);
// Compute the sign
host_integer_radix_difference_check_kb(
stream, mem_ptr->tmp_lwe_array_out, lwe_array_left, lwe_array_right,
mem_ptr, mem_ptr->identity_lut_f, bsk, ksk, total_num_radix_blocks);
mem_ptr, mem_ptr->cleaning_lut_f, bsk, ksk, total_num_radix_blocks);
// Selector
host_integer_radix_cmux_kb(
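
Both boolean reductions above rest on the same trick: every block encrypts 0 or 1, the carry space allows up to max_value = message_modulus * carry_modulus - 1 such blocks to be summed homomorphically, and one LUT maps each sum back to a single 0/1 block. A plaintext analogue of the shrinking loop for the all-true case, assuming max_value >= 2; the leftover-block handling is simplified relative to the CUDA code.

#include <algorithm>
#include <cstdint>
#include <vector>

uint32_t are_all_true_sketch(std::vector<uint32_t> blocks,
                             uint32_t max_value) {
  while (blocks.size() > 1) {
    size_t chunk_length = std::min<size_t>(max_value, blocks.size());
    std::vector<uint32_t> next;
    for (size_t i = 0; i < blocks.size(); i += chunk_length) {
      size_t len = std::min<size_t>(chunk_length, blocks.size() - i);
      uint32_t sum = 0;
      for (size_t j = 0; j < len; j++)
        sum += blocks[i + j];             // homomorphic additions on device
      next.push_back(sum == len ? 1 : 0); // the LUT applied to each sum
    }
    blocks.swap(next);
  }
  return blocks.empty() ? 1 : blocks.front();
}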

View File

@@ -88,14 +88,12 @@ void cleanup_cuda_full_propagation(cuda_stream_t *stream,
cuda_drop_async(mem_ptr->lut_buffer, stream);
cuda_drop_async(mem_ptr->lut_indexes, stream);
cuda_drop_async(mem_ptr->lwe_indexes, stream);
cuda_drop_async(mem_ptr->tmp_small_lwe_vector, stream);
cuda_drop_async(mem_ptr->tmp_big_lwe_vector, stream);
switch (mem_ptr->pbs_type) {
case CLASSICAL: {
auto x = (pbs_buffer<uint64_t, CLASSICAL> *)(mem_ptr->pbs_buffer);
case LOW_LAT: {
auto x = (pbs_buffer<uint64_t, LOW_LAT> *)(mem_ptr->pbs_buffer);
x->release(stream);
} break;
case MULTI_BIT: {
@@ -107,7 +105,7 @@ void cleanup_cuda_full_propagation(cuda_stream_t *stream,
}
}
void scratch_cuda_propagate_single_carry_kb_64_inplace(
void scratch_cuda_propagate_single_carry_low_latency_kb_64_inplace(
cuda_stream_t *stream, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t big_lwe_dimension,
uint32_t small_lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
@@ -120,23 +118,22 @@ void scratch_cuda_propagate_single_carry_kb_64_inplace(
ks_base_log, pbs_level, pbs_base_log, grouping_factor,
message_modulus, carry_modulus);
scratch_cuda_propagate_single_carry_kb_inplace(
scratch_cuda_propagate_single_carry_low_latency_kb_inplace(
stream, (int_sc_prop_memory<uint64_t> **)mem_ptr, num_blocks, params,
allocate_gpu_memory);
}
void cuda_propagate_single_carry_kb_64_inplace(cuda_stream_t *stream,
void *lwe_array, int8_t *mem_ptr,
void *bsk, void *ksk,
uint32_t num_blocks) {
host_propagate_single_carry<uint64_t>(
void cuda_propagate_single_carry_low_latency_kb_64_inplace(
cuda_stream_t *stream, void *lwe_array, int8_t *mem_ptr, void *bsk,
void *ksk, uint32_t num_blocks) {
host_propagate_single_carry_low_latency<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array),
(int_sc_prop_memory<uint64_t> *)mem_ptr, bsk,
static_cast<uint64_t *>(ksk), num_blocks);
}
void cleanup_cuda_propagate_single_carry(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
void cleanup_cuda_propagate_single_carry_low_latency(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
int_sc_prop_memory<uint64_t> *mem_ptr =
(int_sc_prop_memory<uint64_t> *)(*mem_ptr_void);
mem_ptr->release(stream);
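
The deleted borrow-propagation routine and the carry version that replaces it both use a Hillis & Steele inclusive scan: after ceil(log2(n)) rounds of combining each block with the neighbour space positions back, block i has absorbed the generate/propagate state of every block up to i. A sequential sketch of that round structure, with combine() standing in for the bivariate LUT:

#include <cmath>
#include <vector>

template <typename State, typename Combine>
void hillis_steele_scan_sketch(std::vector<State> &blocks, Combine combine) {
  if (blocks.size() < 2)
    return;
  int num_steps = (int)std::ceil(std::log2((double)blocks.size()));
  int space = 1;
  for (int step = 0; step < num_steps; step++) {
    // Walking from the end emulates the data-parallel update sequentially
    // without clobbering inputs; on device this is one bivariate LUT pass
    // followed by a device-to-device copy.
    for (int i = (int)blocks.size() - 1; i >= space; i--)
      blocks[i] = combine(blocks[i], blocks[i - space]);
    space *= 2;
  }
}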

View File

@@ -1,6 +1,7 @@
#ifndef CUDA_INTEGER_CUH
#define CUDA_INTEGER_CUH
#include "bootstrap.h"
#include "crypto/keyswitch.cuh"
#include "device.h"
#include "integer.h"
@@ -8,8 +9,6 @@
#include "linear_algebra.h"
#include "linearalgebra/addition.cuh"
#include "polynomial/functions.cuh"
#include "programmable_bootstrap.h"
#include "utils/helper.cuh"
#include "utils/kernel_dimensions.cuh"
#include <functional>
@@ -62,30 +61,26 @@ __global__ void radix_blocks_rotate_left(Torus *dst, Torus *src, uint32_t value,
// polynomial_size threads
template <typename Torus>
__global__ void
device_pack_bivariate_blocks(Torus *lwe_array_out, Torus *lwe_indexes_out,
Torus *lwe_array_1, Torus *lwe_array_2,
Torus *lwe_indexes_in, uint32_t lwe_dimension,
uint32_t shift, uint32_t num_blocks) {
device_pack_bivariate_blocks(Torus *lwe_array_out, Torus *lwe_array_1,
Torus *lwe_array_2, Torus *lwe_indexes,
uint32_t lwe_dimension, uint32_t message_modulus,
uint32_t num_blocks) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < num_blocks * (lwe_dimension + 1)) {
int block_id = tid / (lwe_dimension + 1);
int coeff_id = tid % (lwe_dimension + 1);
int pos_in = lwe_indexes_in[block_id] * (lwe_dimension + 1) + coeff_id;
int pos_out = lwe_indexes_out[block_id] * (lwe_dimension + 1) + coeff_id;
lwe_array_out[pos_out] = lwe_array_1[pos_in] * shift + lwe_array_2[pos_in];
int pos = lwe_indexes[block_id] * (lwe_dimension + 1) + coeff_id;
lwe_array_out[pos] = lwe_array_1[pos] * message_modulus + lwe_array_2[pos];
}
}
/* Combine lwe_array_1 and lwe_array_2 so that each block m1 and m2
* becomes out = m1 * shift + m2
*/
template <typename Torus>
__host__ void pack_bivariate_blocks(cuda_stream_t *stream, Torus *lwe_array_out,
Torus *lwe_indexes_out, Torus *lwe_array_1,
Torus *lwe_array_2, Torus *lwe_indexes_in,
uint32_t lwe_dimension, uint32_t shift,
Torus *lwe_array_1, Torus *lwe_array_2,
Torus *lwe_indexes, uint32_t lwe_dimension,
uint32_t message_modulus,
uint32_t num_radix_blocks) {
cudaSetDevice(stream->gpu_index);
@@ -94,8 +89,8 @@ __host__ void pack_bivariate_blocks(cuda_stream_t *stream, Torus *lwe_array_out,
int num_entries = num_radix_blocks * (lwe_dimension + 1);
getNumBlocksAndThreads(num_entries, 512, num_blocks, num_threads);
device_pack_bivariate_blocks<<<num_blocks, num_threads, 0, stream->stream>>>(
lwe_array_out, lwe_indexes_out, lwe_array_1, lwe_array_2, lwe_indexes_in,
lwe_dimension, shift, num_radix_blocks);
lwe_array_out, lwe_array_1, lwe_array_2, lwe_indexes, lwe_dimension,
message_modulus, num_radix_blocks);
check_cuda_error(cudaGetLastError());
}
@@ -119,15 +114,15 @@ __host__ void integer_radix_apply_univariate_lookup_table_kb(
// Compute Keyswitch-PBS
cuda_keyswitch_lwe_ciphertext_vector(
stream, lut->tmp_lwe_after_ks, lut->lwe_trivial_indexes, lwe_array_in,
lut->lwe_indexes_in, ksk, big_lwe_dimension, small_lwe_dimension,
stream, lut->tmp_lwe_after_ks, lut->lwe_indexes, lwe_array_in,
lut->lwe_indexes, ksk, big_lwe_dimension, small_lwe_dimension,
ks_base_log, ks_level, num_radix_blocks);
execute_pbs<Torus>(stream, lwe_array_out, lut->lwe_indexes_out, lut->lut,
lut->lut_indexes, lut->tmp_lwe_after_ks,
lut->lwe_trivial_indexes, bsk, lut->buffer, glwe_dimension,
small_lwe_dimension, polynomial_size, pbs_base_log,
pbs_level, grouping_factor, num_radix_blocks, 1, 0,
execute_pbs<Torus>(stream, lwe_array_out, lut->lwe_indexes, lut->lut,
lut->lut_indexes, lut->tmp_lwe_after_ks, lut->lwe_indexes,
bsk, lut->buffer, glwe_dimension, small_lwe_dimension,
polynomial_size, pbs_base_log, pbs_level, grouping_factor,
num_radix_blocks, 1, 0,
cuda_get_max_shared_memory(stream->gpu_index), pbs_type);
}
@@ -138,38 +133,21 @@ __host__ void integer_radix_apply_bivariate_lookup_table_kb(
int_radix_lut<Torus> *lut) {
cudaSetDevice(stream->gpu_index);
// apply_lookup_table_bivariate
auto params = lut->params;
auto pbs_type = params.pbs_type;
auto big_lwe_dimension = params.big_lwe_dimension;
auto small_lwe_dimension = params.small_lwe_dimension;
auto ks_level = params.ks_level;
auto ks_base_log = params.ks_base_log;
auto pbs_level = params.pbs_level;
auto pbs_base_log = params.pbs_base_log;
auto glwe_dimension = params.glwe_dimension;
auto polynomial_size = params.polynomial_size;
auto grouping_factor = params.grouping_factor;
auto message_modulus = params.message_modulus;
// Left message is shifted
auto lwe_array_pbs_in = lut->tmp_lwe_before_ks;
pack_bivariate_blocks(stream, lwe_array_pbs_in, lut->lwe_trivial_indexes,
lwe_array_1, lwe_array_2, lut->lwe_indexes_in,
big_lwe_dimension, message_modulus, num_radix_blocks);
pack_bivariate_blocks(stream, lut->tmp_lwe_before_ks, lwe_array_1,
lwe_array_2, lut->lwe_indexes, big_lwe_dimension,
message_modulus, num_radix_blocks);
check_cuda_error(cudaGetLastError());
// Apply LUT
cuda_keyswitch_lwe_ciphertext_vector(
stream, lut->tmp_lwe_after_ks, lut->lwe_trivial_indexes, lwe_array_pbs_in,
lut->lwe_trivial_indexes, ksk, big_lwe_dimension, small_lwe_dimension,
ks_base_log, ks_level, num_radix_blocks);
execute_pbs<Torus>(stream, lwe_array_out, lut->lwe_indexes_out, lut->lut,
lut->lut_indexes, lut->tmp_lwe_after_ks,
lut->lwe_trivial_indexes, bsk, lut->buffer, glwe_dimension,
small_lwe_dimension, polynomial_size, pbs_base_log,
pbs_level, grouping_factor, num_radix_blocks, 1, 0,
cuda_get_max_shared_memory(stream->gpu_index), pbs_type);
integer_radix_apply_univariate_lookup_table_kb(stream, lwe_array_out,
lut->tmp_lwe_before_ks, bsk,
ksk, num_radix_blocks, lut);
}
// Rotates the slice in-place such that the first mid elements of the slice move
@@ -298,7 +276,7 @@ void generate_device_accumulator(cuda_stream_t *stream, Torus *acc,
}
template <typename Torus>
void scratch_cuda_propagate_single_carry_kb_inplace(
void scratch_cuda_propagate_single_carry_low_latency_kb_inplace(
cuda_stream_t *stream, int_sc_prop_memory<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params,
bool allocate_gpu_memory) {
@@ -308,9 +286,11 @@ void scratch_cuda_propagate_single_carry_kb_inplace(
}
template <typename Torus>
void host_propagate_single_carry(cuda_stream_t *stream, Torus *lwe_array,
int_sc_prop_memory<Torus> *mem, void *bsk,
Torus *ksk, uint32_t num_blocks) {
void host_propagate_single_carry_low_latency(cuda_stream_t *stream,
Torus *lwe_array,
int_sc_prop_memory<Torus> *mem,
void *bsk, Torus *ksk,
uint32_t num_blocks) {
auto params = mem->params;
auto glwe_dimension = params.glwe_dimension;
auto polynomial_size = params.polynomial_size;
@@ -361,65 +341,6 @@ void host_propagate_single_carry(cuda_stream_t *stream, Torus *lwe_array,
stream, lwe_array, lwe_array, bsk, ksk, num_blocks, message_acc);
}
template <typename Torus>
void host_propagate_single_sub_borrow(cuda_stream_t *stream, Torus *overflowed,
Torus *lwe_array,
int_single_borrow_prop_memory<Torus> *mem,
void *bsk, Torus *ksk,
uint32_t num_blocks) {
auto params = mem->params;
auto glwe_dimension = params.glwe_dimension;
auto polynomial_size = params.polynomial_size;
auto big_lwe_size = glwe_dimension * polynomial_size + 1;
auto big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
auto generates_or_propagates = mem->generates_or_propagates;
auto step_output = mem->step_output;
auto luts_array = mem->luts_array;
auto luts_carry_propagation_sum = mem->luts_borrow_propagation_sum;
auto message_acc = mem->message_acc;
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, generates_or_propagates, lwe_array, bsk, ksk, num_blocks,
luts_array);
// compute prefix sum with Hillis & Steele
int num_steps = ceil(log2((double)num_blocks));
int space = 1;
cuda_memcpy_async_gpu_to_gpu(step_output, generates_or_propagates,
big_lwe_size_bytes * num_blocks, stream);
for (int step = 0; step < num_steps; step++) {
auto cur_blocks = &step_output[space * big_lwe_size];
auto prev_blocks = generates_or_propagates;
int cur_total_blocks = num_blocks - space;
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
stream, cur_blocks, cur_blocks, prev_blocks, bsk, ksk, cur_total_blocks,
luts_carry_propagation_sum);
cuda_memcpy_async_gpu_to_gpu(&generates_or_propagates[space * big_lwe_size],
cur_blocks,
big_lwe_size_bytes * cur_total_blocks, stream);
space *= 2;
}
cuda_memcpy_async_gpu_to_gpu(
overflowed, &generates_or_propagates[big_lwe_size * (num_blocks - 1)],
big_lwe_size_bytes, stream);
radix_blocks_rotate_right<<<num_blocks, 256, 0, stream->stream>>>(
step_output, generates_or_propagates, 1, num_blocks, big_lwe_size);
cuda_memset_async(step_output, 0, big_lwe_size_bytes, stream);
host_subtraction(stream, lwe_array, lwe_array, step_output,
glwe_dimension * polynomial_size, num_blocks);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, lwe_array, lwe_array, bsk, ksk, num_blocks, message_acc);
}
/*
* input_blocks: input radix ciphertext; propagation will happen in place
* acc_message_carry: list of two LUTs, [(message_acc), (carry_acc)]
@@ -587,7 +508,7 @@ __global__ void device_pack_blocks(Torus *lwe_array_out, Torus *lwe_array_in,
packed_block[tid] = lsb_block[tid] + factor * msb_block[tid];
}
if (num_radix_blocks % 2 == 1) {
if (num_radix_blocks % 2 != 0) {
// We couldn't pack the last block, so we just copy it
Torus *lsb_block =
lwe_array_in + (num_radix_blocks - 1) * (lwe_dimension + 1);
@@ -668,107 +589,4 @@ create_trivial_radix(cuda_stream_t *stream, Torus *lwe_array_out,
check_cuda_error(cudaGetLastError());
}
/**
* Each bit in lwe_array_in becomes a lwe ciphertext in lwe_array_out
* Thus, lwe_array_out must be allocated with num_radix_blocks * bits_per_block
* * (lwe_dimension+1) * sizeof(Torus) bytes
*/
template <typename Torus>
__host__ void extract_n_bits(cuda_stream_t *stream, Torus *lwe_array_out,
Torus *lwe_array_in, void *bsk, Torus *ksk,
uint32_t num_radix_blocks, uint32_t bits_per_block,
int_bit_extract_luts_buffer<Torus> *bit_extract) {
integer_radix_apply_univariate_lookup_table_kb(
stream, lwe_array_out, lwe_array_in, bsk, ksk,
num_radix_blocks * bits_per_block, bit_extract->lut);
}
template <typename Torus>
__host__ void reduce_signs(cuda_stream_t *stream, Torus *signs_array_out,
Torus *signs_array_in,
int_comparison_buffer<Torus> *mem_ptr,
std::function<Torus(Torus)> sign_handler_f,
void *bsk, Torus *ksk, uint32_t num_sign_blocks) {
auto diff_buffer = mem_ptr->diff_buffer;
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
auto glwe_dimension = params.glwe_dimension;
auto polynomial_size = params.polynomial_size;
auto message_modulus = params.message_modulus;
auto carry_modulus = params.carry_modulus;
std::function<Torus(Torus)> reduce_two_orderings_function =
[diff_buffer, sign_handler_f](Torus x) -> Torus {
int msb = (x >> 2) & 3;
int lsb = x & 3;
return diff_buffer->tree_buffer->block_selector_f(msb, lsb);
};
auto signs_a = diff_buffer->tmp_signs_a;
auto signs_b = diff_buffer->tmp_signs_b;
cuda_memcpy_async_gpu_to_gpu(
signs_a, signs_array_in,
(big_lwe_dimension + 1) * num_sign_blocks * sizeof(Torus), stream);
if (num_sign_blocks > 2) {
auto lut = diff_buffer->reduce_signs_lut;
generate_device_accumulator<Torus>(
stream, lut->lut, glwe_dimension, polynomial_size, message_modulus,
carry_modulus, reduce_two_orderings_function);
while (num_sign_blocks > 2) {
pack_blocks(stream, signs_b, signs_a, big_lwe_dimension, num_sign_blocks,
4);
integer_radix_apply_univariate_lookup_table_kb(
stream, signs_a, signs_b, bsk, ksk, num_sign_blocks / 2, lut);
auto last_block_signs_b =
signs_b + (num_sign_blocks / 2) * (big_lwe_dimension + 1);
auto last_block_signs_a =
signs_a + (num_sign_blocks / 2) * (big_lwe_dimension + 1);
if (num_sign_blocks % 2 == 1)
cuda_memcpy_async_gpu_to_gpu(last_block_signs_a, last_block_signs_b,
(big_lwe_dimension + 1) * sizeof(Torus),
stream);
num_sign_blocks = (num_sign_blocks / 2) + (num_sign_blocks % 2);
}
}
if (num_sign_blocks == 2) {
std::function<Torus(Torus)> final_lut_f =
[reduce_two_orderings_function, sign_handler_f](Torus x) -> Torus {
Torus final_sign = reduce_two_orderings_function(x);
return sign_handler_f(final_sign);
};
auto lut = diff_buffer->reduce_signs_lut;
generate_device_accumulator<Torus>(stream, lut->lut, glwe_dimension,
polynomial_size, message_modulus,
carry_modulus, final_lut_f);
pack_blocks(stream, signs_b, signs_a, big_lwe_dimension, 2, 4);
integer_radix_apply_univariate_lookup_table_kb(stream, signs_array_out,
signs_b, bsk, ksk, 1, lut);
} else {
std::function<Torus(Torus)> final_lut_f =
[mem_ptr, sign_handler_f](Torus x) -> Torus {
return sign_handler_f(x & 3);
};
auto lut = mem_ptr->diff_buffer->reduce_signs_lut;
generate_device_accumulator<Torus>(stream, lut->lut, glwe_dimension,
polynomial_size, message_modulus,
carry_modulus, final_lut_f);
integer_radix_apply_univariate_lookup_table_kb(stream, signs_array_out,
signs_a, bsk, ksk, 1, lut);
}
}
#endif // TFHE_RS_INTERNAL_INTEGER_CUH
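
reduce_signs above keeps halving the number of 2-bit ordering blocks (0 = inferior, 1 = equal, 2 = superior): pairs are packed as low + 4 * high and a LUT combines them. A plaintext analogue follows; since this snippet never defines block_selector_f, our combine function is only a plausible stand-in (the more significant ordering wins unless it is "equal").

#include <cstdint>
#include <vector>

uint32_t combine_orderings_sketch(uint32_t packed) {
  uint32_t msb = (packed >> 2) & 3, lsb = packed & 3;
  return msb == 1 ? lsb : msb; // assumed behaviour of block_selector_f
}

uint32_t reduce_signs_sketch(std::vector<uint32_t> signs) {
  while (signs.size() > 1) {
    std::vector<uint32_t> next;
    for (size_t i = 0; i + 1 < signs.size(); i += 2)
      next.push_back(combine_orderings_sketch(signs[i] + 4 * signs[i + 1]));
    if (signs.size() % 2 != 0)
      next.push_back(signs.back()); // odd block carried to the next round
    signs.swap(next);
  }
  return signs.front();
}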

View File

@@ -1,66 +1,5 @@
#include "integer/multiplication.cuh"
/*
 * When adding chunk_size terms together, there might be some blocks where
 * addition has not happened or the degree is zero; in that case we don't
 * need to apply the lookup table, so we find the indexes of the blocks where
 * addition happened and store them inside h_lwe_idx_in. From the same block
 * both a message and a carry (if it is not the last block) might be
 * extracted, so one block id might have two output ids, which we store in
 * h_lwe_idx_out. Blocks that do not require applying the lookup table might
 * be copied on both the message and carry side or be replaced with zero
 * ciphertexts; the indexes of such blocks are stored inside h_smart_copy_in
 * as input ids and h_smart_copy_out as output ids. A -1 input id means that
 * a zero ciphertext will be copied to the output index.
 */
void generate_ids_update_degrees(int *terms_degree, size_t *h_lwe_idx_in,
size_t *h_lwe_idx_out,
int32_t *h_smart_copy_in,
int32_t *h_smart_copy_out, size_t ch_amount,
uint32_t num_radix, uint32_t num_blocks,
size_t chunk_size, size_t message_max,
size_t &total_count, size_t &message_count,
size_t &carry_count, size_t &sm_copy_count) {
for (size_t c_id = 0; c_id < ch_amount; c_id++) {
auto cur_chunk = &terms_degree[c_id * chunk_size * num_blocks];
for (size_t r_id = 0; r_id < num_blocks; r_id++) {
size_t new_degree = 0;
for (size_t chunk_id = 0; chunk_id < chunk_size; chunk_id++) {
new_degree += cur_chunk[chunk_id * num_blocks + r_id];
}
if (new_degree > message_max) {
h_lwe_idx_in[message_count] = c_id * num_blocks + r_id;
h_lwe_idx_out[message_count] = c_id * num_blocks + r_id;
message_count++;
} else {
h_smart_copy_in[sm_copy_count] = c_id * num_blocks + r_id;
h_smart_copy_out[sm_copy_count] = c_id * num_blocks + r_id;
sm_copy_count++;
}
}
}
for (size_t i = 0; i < sm_copy_count; i++) {
h_smart_copy_in[i] = -1;
h_smart_copy_out[i] = h_smart_copy_out[i] + ch_amount * num_blocks + 1;
}
for (size_t i = 0; i < message_count; i++) {
if (h_lwe_idx_in[i] % num_blocks != num_blocks - 1) {
h_lwe_idx_in[message_count + carry_count] = h_lwe_idx_in[i];
h_lwe_idx_out[message_count + carry_count] =
ch_amount * num_blocks + h_lwe_idx_in[i] + 1;
carry_count++;
} else {
h_smart_copy_in[sm_copy_count] = -1;
h_smart_copy_out[sm_copy_count] =
h_lwe_idx_in[i] - (num_blocks - 1) + ch_amount * num_blocks;
sm_copy_count++;
}
}
total_count = message_count + carry_count;
}
/*
* This scratch function allocates the necessary amount of data on the GPU for
* the integer radix multiplication in keyswitch->bootstrap order.
@@ -74,9 +13,9 @@ void scratch_cuda_integer_mult_radix_ciphertext_kb_64(
bool allocate_gpu_memory) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
polynomial_size * glwe_dimension, lwe_dimension,
ks_level, ks_base_log, pbs_level, pbs_base_log,
grouping_factor, message_modulus, carry_modulus);
polynomial_size, lwe_dimension, ks_level, ks_base_log,
pbs_level, pbs_base_log, grouping_factor,
message_modulus, carry_modulus);
switch (polynomial_size) {
case 2048:
@@ -150,92 +89,21 @@ void cleanup_cuda_integer_mult(cuda_stream_t *stream, int8_t **mem_ptr_void) {
mem_ptr->release(stream);
}
void scratch_cuda_integer_radix_sum_ciphertexts_vec_kb_64(
cuda_stream_t *stream, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t lwe_dimension, uint32_t ks_level,
uint32_t ks_base_log, uint32_t pbs_level, uint32_t pbs_base_log,
uint32_t grouping_factor, uint32_t num_blocks_in_radix,
uint32_t max_num_radix_in_vec, uint32_t message_modulus,
uint32_t carry_modulus, PBS_TYPE pbs_type, bool allocate_gpu_memory) {
void cuda_small_scalar_multiplication_integer_radix_ciphertext_64_inplace(
cuda_stream_t *stream, void *lwe_array, uint64_t scalar,
uint32_t lwe_dimension, uint32_t lwe_ciphertext_count) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
glwe_dimension * polynomial_size, lwe_dimension,
ks_level, ks_base_log, pbs_level, pbs_base_log,
grouping_factor, message_modulus, carry_modulus);
scratch_cuda_integer_sum_ciphertexts_vec_kb<uint64_t>(
stream, (int_sum_ciphertexts_vec_memory<uint64_t> **)mem_ptr,
num_blocks_in_radix, max_num_radix_in_vec, params, allocate_gpu_memory);
cuda_small_scalar_multiplication_integer_radix_ciphertext_64(
stream, lwe_array, lwe_array, scalar, lwe_dimension,
lwe_ciphertext_count);
}
void cuda_integer_radix_sum_ciphertexts_vec_kb_64(
cuda_stream_t *stream, void *radix_lwe_out, void *radix_lwe_vec,
uint32_t num_radix_in_vec, int8_t *mem_ptr, void *bsk, void *ksk,
uint32_t num_blocks_in_radix) {
void cuda_small_scalar_multiplication_integer_radix_ciphertext_64(
cuda_stream_t *stream, void *output_lwe_array, void *input_lwe_array,
uint64_t scalar, uint32_t lwe_dimension, uint32_t lwe_ciphertext_count) {
auto mem = (int_sum_ciphertexts_vec_memory<uint64_t> *)mem_ptr;
int *terms_degree =
(int *)malloc(num_blocks_in_radix * num_radix_in_vec * sizeof(int));
for (int i = 0; i < num_radix_in_vec * num_blocks_in_radix; i++) {
terms_degree[i] = mem->params.message_modulus - 1;
}
switch (mem->params.polynomial_size) {
case 512:
host_integer_sum_ciphertexts_vec_kb<uint64_t, AmortizedDegree<512>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks_in_radix,
num_radix_in_vec);
break;
case 1024:
host_integer_sum_ciphertexts_vec_kb<uint64_t, AmortizedDegree<1024>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks_in_radix,
num_radix_in_vec);
break;
case 2048:
host_integer_sum_ciphertexts_vec_kb<uint64_t, AmortizedDegree<2048>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks_in_radix,
num_radix_in_vec);
break;
case 4096:
host_integer_sum_ciphertexts_vec_kb<uint64_t, AmortizedDegree<4096>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks_in_radix,
num_radix_in_vec);
break;
case 8192:
host_integer_sum_ciphertexts_vec_kb<uint64_t, AmortizedDegree<8192>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks_in_radix,
num_radix_in_vec);
break;
case 16384:
host_integer_sum_ciphertexts_vec_kb<uint64_t, AmortizedDegree<16384>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks_in_radix,
num_radix_in_vec);
break;
default:
PANIC("Cuda error (integer sum ciphertexts): unsupported polynomial size. "
"Only N = 512, 1024, 2048, 4096, 8192, 16384 is supported")
}
free(terms_degree);
}
void cleanup_cuda_integer_radix_sum_ciphertexts_vec(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
int_sum_ciphertexts_vec_memory<uint64_t> *mem_ptr =
(int_sum_ciphertexts_vec_memory<uint64_t> *)(*mem_ptr_void);
mem_ptr->release(stream);
host_integer_small_scalar_mult_radix(
stream, static_cast<uint64_t *>(output_lwe_array),
static_cast<uint64_t *>(input_lwe_array), scalar, lwe_dimension,
lwe_ciphertext_count);
}

View File

@@ -6,12 +6,12 @@
#include <cuda_runtime.h>
#endif
#include "bootstrap.h"
#include "crypto/keyswitch.cuh"
#include "device.h"
#include "integer.h"
#include "integer/integer.cuh"
#include "linear_algebra.h"
#include "programmable_bootstrap.h"
#include "utils/helper.cuh"
#include "utils/kernel_dimensions.cuh"
#include <fstream>
@@ -21,24 +21,6 @@
#include <string>
#include <vector>
template <typename Torus>
__global__ void smart_copy(Torus *dst, Torus *src, int32_t *id_out,
int32_t *id_in, size_t lwe_size) {
size_t tid = threadIdx.x;
size_t b_id = blockIdx.x;
size_t stride = blockDim.x;
auto input_id = id_in[b_id];
auto output_id = id_out[b_id];
auto cur_src = (input_id >= 0) ? &src[input_id * lwe_size] : nullptr;
auto cur_dst = &dst[output_id * lwe_size];
for (int i = tid; i < lwe_size; i += stride) {
cur_dst[i] = (input_id >= 0) ? cur_src[i] : 0;
}
}
template <typename Torus, class params>
__global__ void
all_shifted_lhs_rhs(Torus *radix_lwe_left, Torus *lsb_ciphertext,
@@ -92,37 +74,100 @@ all_shifted_lhs_rhs(Torus *radix_lwe_left, Torus *lsb_ciphertext,
}
template <typename Torus>
void compress_device_array_with_map(cuda_stream_t *stream, Torus *src,
Torus *dst, int *S, int *F, int num_blocks,
uint32_t map_size, uint32_t unit_size,
int &total_copied, bool is_message) {
cudaSetDevice(stream->gpu_index);
for (int i = 0; i < map_size; i++) {
int s_index = i * num_blocks + S[i];
int number_of_unit = F[i] - S[i] + is_message;
auto cur_dst = &dst[total_copied * unit_size];
auto cur_src = &src[s_index * unit_size];
size_t copy_size = unit_size * number_of_unit * sizeof(Torus);
cuda_memcpy_async_gpu_to_gpu(cur_dst, cur_src, copy_size, stream);
total_copied += number_of_unit;
}
}
template <typename Torus>
void extract_message_carry_to_full_radix(cuda_stream_t *stream, Torus *src,
Torus *dst, int *S, int *F,
uint32_t map_size, uint32_t unit_size,
int &total_copied,
int &total_radix_copied,
int num_blocks, bool is_message) {
cudaSetDevice(stream->gpu_index);
size_t radix_size = unit_size * num_blocks;
for (int i = 0; i < map_size; i++) {
auto cur_dst_radix = &dst[total_radix_copied * radix_size];
int s_index = S[i];
int number_of_unit = F[i] - s_index + is_message;
if (!is_message) {
int zero_block_count = num_blocks - number_of_unit;
cuda_memset_async(cur_dst_radix, 0,
zero_block_count * unit_size * sizeof(Torus), stream);
s_index = zero_block_count;
}
auto cur_dst = &cur_dst_radix[s_index * unit_size];
auto cur_src = &src[total_copied * unit_size];
size_t copy_size = unit_size * number_of_unit * sizeof(Torus);
cuda_memcpy_async_gpu_to_gpu(cur_dst, cur_src, copy_size, stream);
total_copied += number_of_unit;
++total_radix_copied;
}
}
template <typename Torus, class params>
__global__ void tree_add_chunks(Torus *result_blocks, Torus *input_blocks,
uint32_t chunk_size, uint32_t block_size,
uint32_t num_blocks) {
uint32_t chunk_size, uint32_t num_blocks) {
extern __shared__ Torus result[];
size_t stride = blockDim.x;
size_t chunk_id = blockIdx.x;
size_t chunk_elem_size = chunk_size * num_blocks * block_size;
size_t radix_elem_size = num_blocks * block_size;
size_t chunk_elem_size = chunk_size * num_blocks * (params::degree + 1);
size_t radix_elem_size = num_blocks * (params::degree + 1);
auto src_chunk = &input_blocks[chunk_id * chunk_elem_size];
auto dst_radix = &result_blocks[chunk_id * radix_elem_size];
size_t block_stride = blockIdx.y * block_size;
size_t block_stride = blockIdx.y * (params::degree + 1);
auto dst_block = &dst_radix[block_stride];
// init shared mem with first radix of chunk
size_t tid = threadIdx.x;
for (int i = tid; i < block_size; i += stride) {
result[i] = src_chunk[block_stride + i];
for (int i = 0; i < params::opt; i++) {
result[tid] = src_chunk[block_stride + tid];
tid += params::degree / params::opt;
}
if (threadIdx.x == 0) {
result[params::degree] = src_chunk[block_stride + params::degree];
}
// accumulate rest of the radixes
for (int r_id = 1; r_id < chunk_size; r_id++) {
auto cur_src_radix = &src_chunk[r_id * radix_elem_size];
for (int i = tid; i < block_size; i += stride) {
result[i] += cur_src_radix[block_stride + i];
tid = threadIdx.x;
for (int i = 0; i < params::opt; i++) {
result[tid] += cur_src_radix[block_stride + tid];
tid += params::degree / params::opt;
}
if (threadIdx.x == 0) {
result[params::degree] += cur_src_radix[block_stride + params::degree];
}
}
// put result from shared mem to global mem
for (int i = tid; i < block_size; i += stride) {
dst_block[i] = result[i];
tid = threadIdx.x;
for (int i = 0; i < params::opt; i++) {
dst_block[tid] = result[tid];
tid += params::degree / params::opt;
}
if (threadIdx.x == 0) {
dst_block[params::degree] = result[params::degree];
}
}
@@ -173,142 +218,6 @@ __global__ void fill_radix_from_lsb_msb(Torus *result_blocks, Torus *lsb_blocks,
(process_msb) ? cur_msb_ct[params::degree] : 0;
}
}
template <typename Torus>
__host__ void scratch_cuda_integer_sum_ciphertexts_vec_kb(
cuda_stream_t *stream, int_sum_ciphertexts_vec_memory<Torus> **mem_ptr,
uint32_t num_blocks_in_radix, uint32_t max_num_radix_in_vec,
int_radix_params params, bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
size_t sm_size = (params.big_lwe_dimension + 1) * sizeof(Torus);
check_cuda_error(cudaFuncSetAttribute(
tree_add_chunks<Torus>, cudaFuncAttributeMaxDynamicSharedMemorySize,
sm_size));
cudaFuncSetCacheConfig(tree_add_chunks<Torus>, cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
*mem_ptr = new int_sum_ciphertexts_vec_memory<Torus>(
stream, params, num_blocks_in_radix, max_num_radix_in_vec,
allocate_gpu_memory);
}
template <typename Torus, class params>
__host__ void host_integer_sum_ciphertexts_vec_kb(
cuda_stream_t *stream, Torus *radix_lwe_out, Torus *terms,
int *terms_degree, void *bsk, uint64_t *ksk,
int_sum_ciphertexts_vec_memory<uint64_t> *mem_ptr,
uint32_t num_blocks_in_radix, uint32_t num_radix_in_vec) {
cudaSetDevice(stream->gpu_index);
auto new_blocks = mem_ptr->new_blocks;
auto old_blocks = mem_ptr->old_blocks;
auto small_lwe_vector = mem_ptr->small_lwe_vector;
auto luts_message_carry = mem_ptr->luts_message_carry;
auto lwe_indexes_in = luts_message_carry->lwe_indexes_in;
auto lwe_indexes_out = luts_message_carry->lwe_indexes_out;
auto d_smart_copy_in = mem_ptr->d_smart_copy_in;
auto d_smart_copy_out = mem_ptr->d_smart_copy_out;
auto message_modulus = mem_ptr->params.message_modulus;
auto carry_modulus = mem_ptr->params.carry_modulus;
auto num_blocks = num_blocks_in_radix;
auto big_lwe_size = mem_ptr->params.big_lwe_dimension + 1;
auto glwe_dimension = mem_ptr->params.glwe_dimension;
auto polynomial_size = mem_ptr->params.polynomial_size;
auto lwe_dimension = mem_ptr->params.small_lwe_dimension;
auto big_lwe_dimension = mem_ptr->params.big_lwe_dimension;
if (old_blocks != terms) {
cuda_memcpy_async_gpu_to_gpu(old_blocks, terms,
num_blocks_in_radix * num_radix_in_vec *
big_lwe_size * sizeof(Torus),
stream);
}
size_t r = num_radix_in_vec;
size_t total_modulus = message_modulus * carry_modulus;
size_t message_max = message_modulus - 1;
size_t chunk_size = (total_modulus - 1) / message_max;
size_t h_lwe_idx_in[r * num_blocks];
size_t h_lwe_idx_out[r * num_blocks];
int32_t h_smart_copy_in[r * num_blocks];
int32_t h_smart_copy_out[r * num_blocks];
auto max_shared_memory = cuda_get_max_shared_memory(stream->gpu_index);
while (r > 2) {
size_t cur_total_blocks = r * num_blocks;
size_t ch_amount = r / chunk_size;
if (!ch_amount)
ch_amount++;
dim3 add_grid(ch_amount, num_blocks, 1);
size_t sm_size = big_lwe_size * sizeof(Torus);
tree_add_chunks<Torus><<<add_grid, 512, sm_size, stream->stream>>>(
new_blocks, old_blocks, min(r, chunk_size), big_lwe_size, num_blocks);
size_t total_count = 0;
size_t message_count = 0;
size_t carry_count = 0;
size_t sm_copy_count = 0;
generate_ids_update_degrees(
terms_degree, h_lwe_idx_in, h_lwe_idx_out, h_smart_copy_in,
h_smart_copy_out, ch_amount, r, num_blocks, chunk_size, message_max,
total_count, message_count, carry_count, sm_copy_count);
size_t copy_size = total_count * sizeof(Torus);
cuda_memcpy_async_to_gpu(lwe_indexes_in, h_lwe_idx_in, copy_size, stream);
cuda_memcpy_async_to_gpu(lwe_indexes_out, h_lwe_idx_out, copy_size, stream);
copy_size = sm_copy_count * sizeof(int32_t);
cuda_memcpy_async_to_gpu(d_smart_copy_in, h_smart_copy_in, copy_size,
stream);
cuda_memcpy_async_to_gpu(d_smart_copy_out, h_smart_copy_out, copy_size,
stream);
smart_copy<<<sm_copy_count, 256, 0, stream->stream>>>(
new_blocks, new_blocks, d_smart_copy_out, d_smart_copy_in,
big_lwe_size);
if (carry_count > 0)
cuda_set_value_async<Torus>(
&(stream->stream), luts_message_carry->get_lut_indexes(message_count),
1, carry_count);
cuda_keyswitch_lwe_ciphertext_vector(
stream, small_lwe_vector, lwe_indexes_in, new_blocks, lwe_indexes_in,
ksk, polynomial_size * glwe_dimension, lwe_dimension,
mem_ptr->params.ks_base_log, mem_ptr->params.ks_level, message_count);
execute_pbs<Torus>(
stream, new_blocks, lwe_indexes_out, luts_message_carry->lut,
luts_message_carry->lut_indexes, small_lwe_vector, lwe_indexes_in, bsk,
luts_message_carry->buffer, glwe_dimension, lwe_dimension,
polynomial_size, mem_ptr->params.pbs_base_log,
mem_ptr->params.pbs_level, mem_ptr->params.grouping_factor, total_count,
2, 0, max_shared_memory, mem_ptr->params.pbs_type);
int rem_blocks = (r > chunk_size) ? r % chunk_size * num_blocks : 0;
int new_blocks_created = 2 * ch_amount * num_blocks;
copy_size = rem_blocks * big_lwe_size * sizeof(Torus);
auto cur_dst = &new_blocks[new_blocks_created * big_lwe_size];
auto cur_src = &old_blocks[(cur_total_blocks - rem_blocks) * big_lwe_size];
cuda_memcpy_async_gpu_to_gpu(cur_dst, cur_src, copy_size, stream);
std::swap(new_blocks, old_blocks);
r = (new_blocks_created + rem_blocks) / num_blocks;
}
host_addition(stream, radix_lwe_out, old_blocks,
&old_blocks[num_blocks * big_lwe_size], big_lwe_dimension,
num_blocks);
host_propagate_single_carry<Torus>(stream, radix_lwe_out, mem_ptr->scp_mem,
bsk, ksk, num_blocks);
}
template <typename Torus, typename STorus, class params>
__host__ void host_integer_mult_radix_kb(
@@ -324,6 +233,7 @@ __host__ void host_integer_mult_radix_kb(
auto carry_modulus = mem_ptr->params.carry_modulus;
int big_lwe_dimension = glwe_dimension * polynomial_size;
int big_lwe_size = big_lwe_dimension + 1;
// 'vector_result_lsb' contains blocks from all possible right shifts of
// radix_lwe_left; only the nonzero blocks are kept
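// As a rough count (the degree initialization below reflects this): the
// shift by 0 keeps num_blocks nonzero blocks, the shift by 1 keeps
// num_blocks - 1, and so on, so about num_blocks * (num_blocks + 1) / 2
// lsb blocks survive in total.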
@@ -371,6 +281,17 @@ __host__ void host_integer_mult_radix_kb(
// 2 * (glwe_dimension + 1) * polynomial_size
auto luts_array = mem_ptr->luts_array;
// accumulator to extract message
// with length (glwe_dimension + 1) * polynomial_size
auto luts_message = mem_ptr->luts_message;
// accumulator to extract carry
// with length (glwe_dimension + 1) * polynomial_size
auto luts_carry = mem_ptr->luts_carry;
// to be used as default indexing
auto lwe_indexes = luts_array->lwe_indexes;
auto vector_result_lsb = &vector_result_sb[0];
auto vector_result_msb =
&vector_result_sb[lsb_vector_block_count *
@@ -402,22 +323,144 @@ __host__ void host_integer_mult_radix_kb(
lsb_vector_block_count, msb_vector_block_count,
num_blocks);
int terms_degree[2 * num_blocks * num_blocks];
auto new_blocks = block_mul_res;
auto old_blocks = vector_result_sb;
// number of radixes remaining after block_mul
size_t r = 2 * num_blocks;
size_t total_modulus = message_modulus * carry_modulus;
size_t message_max = message_modulus - 1;
size_t chunk_size = (total_modulus - 1) / message_max;
size_t ch_amount = r / chunk_size;
int terms_degree[r * num_blocks];
int f_b[ch_amount];
int l_b[ch_amount];
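// Illustrative numbers, assuming the usual 2_2 parameters: with
// message_modulus = carry_modulus = 4 we get total_modulus = 16 and
// message_max = 3, hence chunk_size = 15 / 3 = 5 radixes can be added
// before the carry space is exhausted.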
for (int i = 0; i < num_blocks * num_blocks; i++) {
size_t r_id = i / num_blocks;
size_t b_id = i % num_blocks;
terms_degree[i] = (b_id >= r_id) ? message_modulus - 1 : 0;
terms_degree[i] = (b_id >= r_id) ? 3 : 0;
}
auto terms_degree_msb = &terms_degree[num_blocks * num_blocks];
for (int i = 0; i < num_blocks * num_blocks; i++) {
size_t r_id = i / num_blocks;
size_t b_id = i % num_blocks;
terms_degree_msb[i] = (b_id > r_id) ? message_modulus - 2 : 0;
terms_degree_msb[i] = (b_id > r_id) ? 2 : 0;
}
host_integer_sum_ciphertexts_vec_kb<Torus, params>(
stream, radix_lwe_out, vector_result_sb, terms_degree, bsk, ksk,
mem_ptr->sum_ciphertexts_mem, num_blocks, 2 * num_blocks);
auto max_shared_memory = cuda_get_max_shared_memory(stream->gpu_index);
while (r > chunk_size) {
int cur_total_blocks = r * num_blocks;
ch_amount = r / chunk_size;
dim3 add_grid(ch_amount, num_blocks, 1);
size_t sm_size = big_lwe_size * sizeof(Torus);
cuda_memset_async(new_blocks, 0,
ch_amount * num_blocks * big_lwe_size * sizeof(Torus),
stream);
tree_add_chunks<Torus, params><<<add_grid, 256, sm_size, stream->stream>>>(
new_blocks, old_blocks, chunk_size, num_blocks);
for (int c_id = 0; c_id < ch_amount; c_id++) {
auto cur_chunk = &terms_degree[c_id * chunk_size * num_blocks];
int mx = 0;
int mn = num_blocks;
for (int r_id = 1; r_id < chunk_size; r_id++) {
auto cur_radix = &cur_chunk[r_id * num_blocks];
for (int i = 0; i < num_blocks; i++) {
if (cur_radix[i]) {
mn = min(mn, i);
mx = max(mx, i);
}
}
}
f_b[c_id] = mn;
l_b[c_id] = mx;
}
int total_copied = 0;
int message_count = 0;
int carry_count = 0;
compress_device_array_with_map<Torus>(stream, new_blocks, old_blocks, f_b,
l_b, num_blocks, ch_amount,
big_lwe_size, total_copied, true);
message_count = total_copied;
compress_device_array_with_map<Torus>(stream, new_blocks, old_blocks, f_b,
l_b, num_blocks, ch_amount,
big_lwe_size, total_copied, false);
carry_count = total_copied - message_count;
auto message_blocks_vector = old_blocks;
auto carry_blocks_vector =
&old_blocks[message_count * (glwe_dimension * polynomial_size + 1)];
cuda_keyswitch_lwe_ciphertext_vector(
stream, small_lwe_vector, lwe_indexes, old_blocks, lwe_indexes, ksk,
polynomial_size * glwe_dimension, lwe_dimension,
mem_ptr->params.ks_base_log, mem_ptr->params.ks_level, total_copied);
execute_pbs<Torus>(stream, message_blocks_vector, lwe_indexes,
luts_message->lut, luts_message->lut_indexes,
small_lwe_vector, lwe_indexes, bsk, luts_message->buffer,
glwe_dimension, lwe_dimension, polynomial_size,
mem_ptr->params.pbs_base_log, mem_ptr->params.pbs_level,
mem_ptr->params.grouping_factor, message_count, 1, 0,
max_shared_memory, mem_ptr->params.pbs_type);
execute_pbs<Torus>(stream, carry_blocks_vector, lwe_indexes,
luts_carry->lut, luts_carry->lut_indexes,
&small_lwe_vector[message_count * (lwe_dimension + 1)],
lwe_indexes, bsk, luts_carry->buffer, glwe_dimension,
lwe_dimension, polynomial_size,
mem_ptr->params.pbs_base_log, mem_ptr->params.pbs_level,
mem_ptr->params.grouping_factor, carry_count, 1, 0,
max_shared_memory, mem_ptr->params.pbs_type);
int rem_blocks = r % chunk_size * num_blocks;
int new_blocks_created = 2 * ch_amount * num_blocks;
int copy_size = rem_blocks * big_lwe_size * sizeof(Torus);
auto cur_dst = &new_blocks[new_blocks_created * big_lwe_size];
auto cur_src = &old_blocks[(cur_total_blocks - rem_blocks) * big_lwe_size];
cuda_memcpy_async_gpu_to_gpu(cur_dst, cur_src, copy_size, stream);
total_copied = 0;
int total_radix_copied = 0;
extract_message_carry_to_full_radix<Torus>(
stream, old_blocks, new_blocks, f_b, l_b, ch_amount, big_lwe_size,
total_copied, total_radix_copied, num_blocks, true);
extract_message_carry_to_full_radix<Torus>(
stream, old_blocks, new_blocks, f_b, l_b, ch_amount, big_lwe_size,
total_copied, total_radix_copied, num_blocks, false);
std::swap(new_blocks, old_blocks);
r = (new_blocks_created + rem_blocks) / num_blocks;
}
dim3 add_grid(1, num_blocks, 1);
size_t sm_size = big_lwe_size * sizeof(Torus);
cuda_memset_async(radix_lwe_out, 0, num_blocks * big_lwe_size * sizeof(Torus),
stream);
tree_add_chunks<Torus, params><<<add_grid, 256, sm_size, stream->stream>>>(
radix_lwe_out, old_blocks, r, num_blocks);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, vector_result_sb, radix_lwe_out, bsk, ksk, num_blocks,
luts_message);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, &block_mul_res[big_lwe_size], radix_lwe_out, bsk, ksk, num_blocks,
luts_carry);
cuda_memset_async(block_mul_res, 0, big_lwe_size * sizeof(Torus), stream);
host_addition(stream, radix_lwe_out, vector_result_sb, block_mul_res,
big_lwe_dimension, num_blocks);
host_propagate_single_carry_low_latency<Torus>(
stream, radix_lwe_out, mem_ptr->scp_mem, bsk, ksk, num_blocks);
}
template <typename Torus>
@@ -426,15 +469,166 @@ __host__ void scratch_cuda_integer_mult_radix_ciphertext_kb(
uint32_t num_radix_blocks, int_radix_params params,
bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
size_t sm_size = (params.big_lwe_dimension + 1) * sizeof(Torus);
check_cuda_error(cudaFuncSetAttribute(
tree_add_chunks<Torus>, cudaFuncAttributeMaxDynamicSharedMemorySize,
sm_size));
cudaFuncSetCacheConfig(tree_add_chunks<Torus>, cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
*mem_ptr = new int_mul_memory<Torus>(stream, params, num_radix_blocks,
allocate_gpu_memory);
}
// Function to apply a lookup table.
// It has two modes:
// lsb_msb_mode == true - extracts lsb and msb
// lsb_msb_mode == false - extracts message and carry
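// Illustration with hypothetical block counts: in lsb_msb_mode with
// lsb_message_blocks_count = 4 and msb_carry_blocks_count = 3, roughly the
// first 4 blocks go through the lsb accumulator and the last 3 through the
// msb accumulator, all in a single keyswitch + PBS batch spread over the
// available GPUs.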
template <typename Torus, typename STorus, class params>
void apply_lookup_table(Torus *input_ciphertexts, Torus *output_ciphertexts,
int_mul_memory<Torus> *mem_ptr, uint32_t glwe_dimension,
uint32_t lwe_dimension, uint32_t polynomial_size,
uint32_t pbs_base_log, uint32_t pbs_level,
uint32_t ks_base_log, uint32_t ks_level,
uint32_t grouping_factor,
uint32_t lsb_message_blocks_count,
uint32_t msb_carry_blocks_count,
uint32_t max_shared_memory, bool lsb_msb_mode) {
int total_blocks_count = lsb_message_blocks_count + msb_carry_blocks_count;
int gpu_n = mem_ptr->p2p_gpu_count;
if (total_blocks_count < gpu_n)
gpu_n = total_blocks_count;
int gpu_blocks_count = total_blocks_count / gpu_n;
int big_lwe_size = glwe_dimension * polynomial_size + 1;
// int small_lwe_size = lwe_dimension + 1;
#pragma omp parallel for num_threads(gpu_n)
for (int i = 0; i < gpu_n; i++) {
cudaSetDevice(i);
auto this_stream = mem_ptr->streams[i];
// Index where input and output blocks start for current gpu
int big_lwe_start_index = i * gpu_blocks_count * big_lwe_size;
// The last gpu might have extra blocks to process if the total block count
// is not divisible by gpu_n
if (i == gpu_n - 1) {
gpu_blocks_count += total_blocks_count % gpu_n;
}
int can_access_peer;
cudaDeviceCanAccessPeer(&can_access_peer, i, 0);
if (i == 0) {
check_cuda_error(
cudaMemcpyAsync(mem_ptr->pbs_output_multi_gpu[i],
&input_ciphertexts[big_lwe_start_index],
gpu_blocks_count * big_lwe_size * sizeof(Torus),
cudaMemcpyDeviceToDevice, *this_stream));
} else if (can_access_peer) {
check_cuda_error(cudaMemcpyPeerAsync(
mem_ptr->pbs_output_multi_gpu[i], i,
&input_ciphertexts[big_lwe_start_index], 0,
gpu_blocks_count * big_lwe_size * sizeof(Torus), *this_stream));
} else {
// Uses host memory as an intermediate buffer
cuda_memcpy_async_to_cpu(mem_ptr->device_to_device_buffer[i],
&input_ciphertexts[big_lwe_start_index],
gpu_blocks_count * big_lwe_size * sizeof(Torus),
this_stream, i);
cuda_memcpy_async_to_gpu(
mem_ptr->pbs_output_multi_gpu[i], mem_ptr->device_to_device_buffer[i],
gpu_blocks_count * big_lwe_size * sizeof(Torus), this_stream, i);
}
// when lsb and msb have to be extracted
// for first lsb_count blocks we need lsb_acc
// for last msb_count blocks we need msb_acc
// when message and carry have to be extracted
// for first message_count blocks we need message_acc
// for last carry_count blocks we need carry_acc
Torus *cur_lut_indexes;
if (lsb_msb_mode) {
cur_lut_indexes = (big_lwe_start_index < lsb_message_blocks_count)
? mem_ptr->lut_indexes_lsb_multi_gpu[i]
: mem_ptr->lut_indexes_msb_multi_gpu[i];
} else {
cur_lut_indexes = (big_lwe_start_index < lsb_message_blocks_count)
? mem_ptr->lut_indexes_message_multi_gpu[i]
: mem_ptr->lut_indexes_carry_multi_gpu[i];
}
// execute keyswitch on the current gpu with the corresponding input and
// output blocks: pbs_output_multi_gpu[i] is the input of the keyswitch and
// pbs_input_multi_gpu[i] is its output
cuda_keyswitch_lwe_ciphertext_vector(
this_stream, i, mem_ptr->pbs_input_multi_gpu[i],
mem_ptr->pbs_output_multi_gpu[i], mem_ptr->ksk_multi_gpu[i],
polynomial_size * glwe_dimension, lwe_dimension, ks_base_log, ks_level,
gpu_blocks_count);
// execute pbs on the current gpu with the corresponding input and output
cuda_multi_bit_pbs_lwe_ciphertext_vector_64(
this_stream, i, mem_ptr->pbs_output_multi_gpu[i],
mem_ptr->lut_multi_gpu[i], cur_lut_indexes,
mem_ptr->pbs_input_multi_gpu[i], mem_ptr->bsk_multi_gpu[i],
mem_ptr->pbs_buffer_multi_gpu[i], lwe_dimension, glwe_dimension,
polynomial_size, grouping_factor, pbs_base_log, pbs_level,
grouping_factor, gpu_blocks_count, 2, 0, max_shared_memory);
// the lookup table has been applied; now the data from the current gpu has
// to be copied back to gpu_0 into the 'output_ciphertexts' buffer
if (i == 0) {
check_cuda_error(
cudaMemcpyAsync(&output_ciphertexts[big_lwe_start_index],
mem_ptr->pbs_output_multi_gpu[i],
gpu_blocks_count * big_lwe_size * sizeof(Torus),
cudaMemcpyDeviceToDevice, *this_stream));
} else if (can_access_peer) {
check_cuda_error(cudaMemcpyPeerAsync(
&output_ciphertexts[big_lwe_start_index], 0,
mem_ptr->pbs_output_multi_gpu[i], i,
gpu_blocks_count * big_lwe_size * sizeof(Torus), *this_stream));
} else {
// Uses host memory as an intermediate buffer
cuda_memcpy_async_to_cpu(
mem_ptr->device_to_device_buffer[i], mem_ptr->pbs_output_multi_gpu[i],
gpu_blocks_count * big_lwe_size * sizeof(Torus), this_stream, i);
cuda_memcpy_async_to_gpu(&output_ciphertexts[big_lwe_start_index],
mem_ptr->device_to_device_buffer[i],
gpu_blocks_count * big_lwe_size * sizeof(Torus),
this_stream, i);
}
}
}
template <typename T>
__global__ void device_small_scalar_radix_multiplication(T *output_lwe_array,
T *input_lwe_array,
T scalar,
uint32_t lwe_dimension,
uint32_t num_blocks) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int lwe_size = lwe_dimension + 1;
if (index < num_blocks * lwe_size) {
// Here we take advantage of the wrapping behaviour of uint
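// e.g. with T = uint64_t the product is implicitly reduced mod 2^64,
// which is exactly the native torus arithmetic of these 64-bit ciphertexts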
output_lwe_array[index] = input_lwe_array[index] * scalar;
}
}
template <typename T>
__host__ void host_integer_small_scalar_mult_radix(
cuda_stream_t *stream, T *output_lwe_array, T *input_lwe_array, T scalar,
uint32_t input_lwe_dimension, uint32_t input_lwe_ciphertext_count) {
cudaSetDevice(stream->gpu_index);
// lwe_size includes the presence of the body
// whereas lwe_dimension is the number of elements in the mask
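// i.e. an LWE ciphertext is (a_0, ..., a_{n-1}, b): n mask elements plus
// one body, hence lwe_size = lwe_dimension + 1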
int lwe_size = input_lwe_dimension + 1;
// Create a 1-dimensional grid of threads
int num_blocks = 0, num_threads = 0;
int num_entries = input_lwe_ciphertext_count * lwe_size;
getNumBlocksAndThreads(num_entries, 512, num_blocks, num_threads);
dim3 grid(num_blocks, 1, 1);
dim3 thds(num_threads, 1, 1);
device_small_scalar_radix_multiplication<<<grid, thds, 0, stream->stream>>>(
output_lwe_array, input_lwe_array, scalar, input_lwe_dimension,
input_lwe_ciphertext_count);
check_cuda_error(cudaGetLastError());
}
#endif

View File

@@ -10,91 +10,3 @@ void cuda_negate_integer_radix_ciphertext_64_inplace(
lwe_ciphertext_count, message_modulus,
carry_modulus);
}
void scratch_cuda_integer_radix_overflowing_sub_kb_64(
cuda_stream_t *stream, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t big_lwe_dimension,
uint32_t small_lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, bool allocate_gpu_memory) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
big_lwe_dimension, small_lwe_dimension, ks_level,
ks_base_log, pbs_level, pbs_base_log, grouping_factor,
message_modulus, carry_modulus);
scratch_cuda_integer_overflowing_sub_kb<uint64_t>(
stream, (int_overflowing_sub_memory<uint64_t> **)mem_ptr, num_blocks,
params, allocate_gpu_memory);
}
void cuda_integer_radix_overflowing_sub_kb_64(
cuda_stream_t *stream, void *radix_lwe_out, void *radix_lwe_overflowed,
void *radix_lwe_left, void *radix_lwe_right, int8_t *mem_ptr, void *bsk,
void *ksk, uint32_t num_blocks) {
auto mem = (int_overflowing_sub_memory<uint64_t> *)mem_ptr;
switch (mem->params.polynomial_size) {
case 512:
host_integer_overflowing_sub_kb<uint64_t, AmortizedDegree<512>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_overflowed),
static_cast<uint64_t *>(radix_lwe_left),
static_cast<uint64_t *>(radix_lwe_right), bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks);
break;
case 1024:
host_integer_overflowing_sub_kb<uint64_t, AmortizedDegree<1024>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_overflowed),
static_cast<uint64_t *>(radix_lwe_left),
static_cast<uint64_t *>(radix_lwe_right), bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks);
break;
case 2048:
host_integer_overflowing_sub_kb<uint64_t, AmortizedDegree<2048>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_overflowed),
static_cast<uint64_t *>(radix_lwe_left),
static_cast<uint64_t *>(radix_lwe_right), bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks);
break;
case 4096:
host_integer_overflowing_sub_kb<uint64_t, AmortizedDegree<4096>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_overflowed),
static_cast<uint64_t *>(radix_lwe_left),
static_cast<uint64_t *>(radix_lwe_right), bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks);
break;
case 8192:
host_integer_overflowing_sub_kb<uint64_t, AmortizedDegree<8192>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_overflowed),
static_cast<uint64_t *>(radix_lwe_left),
static_cast<uint64_t *>(radix_lwe_right), bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks);
break;
case 16384:
host_integer_overflowing_sub_kb<uint64_t, AmortizedDegree<16384>>(
stream, static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_overflowed),
static_cast<uint64_t *>(radix_lwe_left),
static_cast<uint64_t *>(radix_lwe_right), bsk,
static_cast<uint64_t *>(ksk), mem, num_blocks);
break;
default:
PANIC("Cuda error (integer overflowing sub): unsupported polynomial size. "
"Only N = 512, 1024, 2048, 4096, 8192, 16384 is supported")
}
}
void cleanup_cuda_integer_radix_overflowing_sub(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
int_overflowing_sub_memory<uint64_t> *mem_ptr =
(int_overflowing_sub_memory<uint64_t> *)(*mem_ptr_void);
mem_ptr->release(stream);
}

View File

@@ -6,20 +6,9 @@
#include <cuda_runtime.h>
#endif
#include "crypto/keyswitch.cuh"
#include "device.h"
#include "integer.h"
#include "integer/integer.cuh"
#include "linear_algebra.h"
#include "programmable_bootstrap.h"
#include "utils/helper.cuh"
#include "utils/kernel_dimensions.cuh"
#include <fstream>
#include <iostream>
#include <omp.h>
#include <sstream>
#include <string>
#include <vector>
template <typename Torus>
__global__ void
@@ -87,32 +76,4 @@ __host__ void host_integer_radix_negation(cuda_stream_t *stream, Torus *output,
check_cuda_error(cudaGetLastError());
}
template <typename Torus>
__host__ void scratch_cuda_integer_overflowing_sub_kb(
cuda_stream_t *stream, int_overflowing_sub_memory<Torus> **mem_ptr,
uint32_t num_blocks, int_radix_params params, bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
*mem_ptr = new int_overflowing_sub_memory<Torus>(stream, params, num_blocks,
allocate_gpu_memory);
}
template <typename Torus, class params>
__host__ void host_integer_overflowing_sub_kb(
cuda_stream_t *stream, Torus *radix_lwe_out, Torus *radix_lwe_overflowed,
Torus *radix_lwe_left, Torus *radix_lwe_right, void *bsk, uint64_t *ksk,
int_overflowing_sub_memory<uint64_t> *mem_ptr, uint32_t num_blocks) {
auto radix_params = mem_ptr->params;
host_unchecked_sub_with_correcting_term(
stream, radix_lwe_out, radix_lwe_left, radix_lwe_right,
radix_params.big_lwe_dimension, num_blocks, radix_params.message_modulus,
radix_params.carry_modulus, radix_params.message_modulus - 1);
host_propagate_single_sub_borrow<Torus>(
stream, radix_lwe_overflowed, radix_lwe_out, mem_ptr->borrow_prop_mem,
bsk, ksk, num_blocks);
}
#endif

View File

@@ -5,7 +5,7 @@
#include <omp.h>
template <typename Torus>
__host__ void integer_radix_unsigned_scalar_difference_check_kb(
__host__ void host_integer_radix_scalar_difference_check_kb(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_array_in,
Torus *scalar_blocks, int_comparison_buffer<Torus> *mem_ptr,
std::function<Torus(Torus)> sign_handler_f, void *bsk, Torus *ksk,
@@ -22,6 +22,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
auto diff_buffer = mem_ptr->diff_buffer;
size_t big_lwe_size = big_lwe_dimension + 1;
size_t big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
// Reducing the signs is the bottleneck of the comparison algorithms,
// however in the scalar case there is an improvement:
@@ -64,6 +65,12 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
integer_radix_apply_univariate_lookup_table_kb<Torus>(
stream, lwe_array_out, mem_ptr->tmp_lwe_array_out, bsk, ksk, 1, lut);
// The result will be in the first block. Everything else is
// garbage.
cuda_memset_async(lwe_array_out + big_lwe_size, 0,
big_lwe_size_bytes * (total_num_radix_blocks - 1),
stream);
} else if (total_num_scalar_blocks < total_num_radix_blocks) {
// We have to handle both part of the work described above
@@ -71,6 +78,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
uint32_t num_msb_radix_blocks =
total_num_radix_blocks - num_lsb_radix_blocks;
auto lsb = lwe_array_in;
auto msb = lwe_array_in + num_lsb_radix_blocks * big_lwe_size;
auto lwe_array_lsb_out = mem_ptr->tmp_lwe_array_out;
@@ -113,7 +121,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
// final sign
tree_sign_reduction(lsb_stream, lwe_array_lsb_out, comparisons,
mem_ptr->diff_buffer->tree_buffer,
mem_ptr->identity_lut_f, bsk, ksk,
mem_ptr->cleaning_lut_f, bsk, ksk,
num_lsb_radix_blocks);
}
#pragma omp section
@@ -148,6 +156,10 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
stream, lwe_array_out, lwe_array_lsb_out, lwe_array_msb_out, bsk, ksk,
1, lut);
// The result will be in the first block. Everything else is garbage.
cuda_memset_async(lwe_array_out + big_lwe_size, 0,
(total_num_radix_blocks - 1) * big_lwe_size_bytes,
stream);
} else {
// We only have to do the regular comparison
// And not the part where we compare most significant blocks with zeros
@@ -155,6 +167,8 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
uint32_t num_lsb_radix_blocks = total_num_radix_blocks;
uint32_t num_scalar_blocks = total_num_scalar_blocks;
auto lsb = lwe_array_in;
Torus *lhs = diff_buffer->tmp_packed_left;
Torus *rhs = diff_buffer->tmp_packed_right;
@@ -181,344 +195,11 @@ __host__ void integer_radix_unsigned_scalar_difference_check_kb(
tree_sign_reduction(stream, lwe_array_out, comparisons,
mem_ptr->diff_buffer->tree_buffer, sign_handler_f, bsk,
ksk, num_lsb_radix_blocks);
}
}
template <typename Torus>
__host__ void integer_radix_signed_scalar_difference_check_kb(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_array_in,
Torus *scalar_blocks, int_comparison_buffer<Torus> *mem_ptr,
std::function<Torus(Torus)> sign_handler_f, void *bsk, Torus *ksk,
uint32_t total_num_radix_blocks, uint32_t total_num_scalar_blocks) {
cudaSetDevice(stream->gpu_index);
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
auto glwe_dimension = params.glwe_dimension;
auto polynomial_size = params.polynomial_size;
auto message_modulus = params.message_modulus;
auto carry_modulus = params.carry_modulus;
auto diff_buffer = mem_ptr->diff_buffer;
size_t big_lwe_size = big_lwe_dimension + 1;
// Reducing the signs is the bottleneck of the comparison algorithms,
// however in the scalar case there is an improvement:
//
// The idea is to reduce the number of sign blocks we have to
// reduce. We can do that by splitting the comparison problem in two parts.
//
// - One part where we compute the sign blocks between the scalar and just
//   enough blocks from the ciphertext to represent the scalar value
//
// - The other part is to compare the ciphertext blocks not considered for
//   the sign computation with zero, and create a single sign block from
//   that
//
// The smaller the scalar value is compared to the number of bits encrypted
// in the ciphertext, the more comparisons with zero we have to do, and the
// fewer sign blocks we will have to reduce.
//
// This creates a speedup, as comparing a bunch of blocks with 0
// is faster
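//
// Worked example with hypothetical sizes: comparing a 32-block ciphertext
// (a 64-bit integer split into 2-bit message blocks) against a scalar that
// fits in 4 blocks means only 4 blocks enter the sign computation, while
// the remaining 28 blocks are merely compared with zero.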
if (total_num_scalar_blocks == 0) {
// We only have to compare blocks with zero
// means scalar is zero
Torus *are_all_msb_zeros = mem_ptr->tmp_lwe_array_out;
host_compare_with_zero_equality(stream, are_all_msb_zeros, lwe_array_in,
mem_ptr, bsk, ksk, total_num_radix_blocks,
mem_ptr->is_zero_lut);
Torus *sign_block =
lwe_array_in + (total_num_radix_blocks - 1) * big_lwe_size;
auto sign_bit_pos = (int)std::log2(message_modulus) - 1;
auto scalar_last_leaf_with_respect_to_zero_lut_f =
[sign_handler_f, sign_bit_pos,
message_modulus](Torus sign_block) -> Torus {
sign_block %= message_modulus;
int sign_bit_is_set = (sign_block >> sign_bit_pos) == 1;
CMP_ORDERING sign_block_ordering;
if (sign_bit_is_set) {
sign_block_ordering = CMP_ORDERING::IS_INFERIOR;
} else if (sign_block != 0) {
sign_block_ordering = CMP_ORDERING::IS_SUPERIOR;
} else {
sign_block_ordering = CMP_ORDERING::IS_EQUAL;
}
return sign_block_ordering;
};
auto block_selector_f = mem_ptr->diff_buffer->tree_buffer->block_selector_f;
auto scalar_bivariate_last_leaf_lut_f =
[scalar_last_leaf_with_respect_to_zero_lut_f, sign_handler_f,
block_selector_f](Torus are_all_zeros, Torus sign_block) -> Torus {
// "re-code" are_all_zeros as an ordering value
if (are_all_zeros == 1) {
are_all_zeros = CMP_ORDERING::IS_EQUAL;
} else {
are_all_zeros = CMP_ORDERING::IS_SUPERIOR;
}
return sign_handler_f(block_selector_f(
scalar_last_leaf_with_respect_to_zero_lut_f(sign_block),
are_all_zeros));
};
auto lut = mem_ptr->diff_buffer->tree_buffer->tree_last_leaf_scalar_lut;
generate_device_accumulator_bivariate<Torus>(
stream, lut->lut, glwe_dimension, polynomial_size, message_modulus,
carry_modulus, scalar_bivariate_last_leaf_lut_f);
integer_radix_apply_bivariate_lookup_table_kb(
stream, lwe_array_out, are_all_msb_zeros, sign_block, bsk, ksk, 1, lut);
} else if (total_num_scalar_blocks < total_num_radix_blocks) {
// We have to handle both part of the work described above
// And the sign bit is located in the most_significant_blocks
uint32_t num_lsb_radix_blocks = total_num_scalar_blocks;
uint32_t num_msb_radix_blocks =
total_num_radix_blocks - num_lsb_radix_blocks;
auto msb = lwe_array_in + num_lsb_radix_blocks * big_lwe_size;
auto lwe_array_lsb_out = mem_ptr->tmp_lwe_array_out;
auto lwe_array_msb_out = lwe_array_lsb_out + big_lwe_size;
cuda_synchronize_stream(stream);
auto lsb_stream = mem_ptr->lsb_stream;
auto msb_stream = mem_ptr->msb_stream;
#pragma omp parallel sections
{
// Both sections may be executed in parallel
#pragma omp section
{
//////////////
// lsb
Torus *lhs = diff_buffer->tmp_packed_left;
Torus *rhs = diff_buffer->tmp_packed_right;
pack_blocks(lsb_stream, lhs, lwe_array_in, big_lwe_dimension,
num_lsb_radix_blocks, message_modulus);
pack_blocks(lsb_stream, rhs, scalar_blocks, 0, total_num_scalar_blocks,
message_modulus);
// From this point on we have half the number of blocks
num_lsb_radix_blocks /= 2;
num_lsb_radix_blocks += (total_num_scalar_blocks % 2);
// comparisons will be assigned
// - 0 if lhs < rhs
// - 1 if lhs == rhs
// - 2 if lhs > rhs
auto comparisons = mem_ptr->tmp_block_comparisons;
scalar_compare_radix_blocks_kb(lsb_stream, comparisons, lhs, rhs,
mem_ptr, bsk, ksk, num_lsb_radix_blocks);
// Reduces a vec containing radix blocks that encrypts a sign
// (inferior, equal, superior) to one single radix block containing the
// final sign
tree_sign_reduction(lsb_stream, lwe_array_lsb_out, comparisons,
mem_ptr->diff_buffer->tree_buffer,
mem_ptr->identity_lut_f, bsk, ksk,
num_lsb_radix_blocks);
}
#pragma omp section
{
//////////////
// msb
// We remove the last block (which is the sign)
Torus *are_all_msb_zeros = lwe_array_msb_out;
host_compare_with_zero_equality(msb_stream, are_all_msb_zeros, msb,
mem_ptr, bsk, ksk, num_msb_radix_blocks,
mem_ptr->is_zero_lut);
auto sign_bit_pos = (int)log2(message_modulus) - 1;
auto lut_f = [mem_ptr, sign_bit_pos](Torus sign_block,
Torus msb_are_zeros) {
bool sign_bit_is_set = (sign_block >> sign_bit_pos) == 1;
CMP_ORDERING sign_block_ordering;
if (sign_bit_is_set) {
sign_block_ordering = CMP_ORDERING::IS_INFERIOR;
} else if (sign_block != 0) {
sign_block_ordering = CMP_ORDERING::IS_SUPERIOR;
} else {
sign_block_ordering = CMP_ORDERING::IS_EQUAL;
}
CMP_ORDERING msb_ordering;
if (msb_are_zeros == 1)
msb_ordering = CMP_ORDERING::IS_EQUAL;
else
msb_ordering = CMP_ORDERING::IS_SUPERIOR;
return mem_ptr->diff_buffer->tree_buffer->block_selector_f(
sign_block_ordering, msb_ordering);
};
auto signed_msb_lut = mem_ptr->signed_msb_lut;
generate_device_accumulator_bivariate<Torus>(
msb_stream, signed_msb_lut->lut, params.glwe_dimension,
params.polynomial_size, params.message_modulus,
params.carry_modulus, lut_f);
Torus *sign_block = msb + (num_msb_radix_blocks - 1) * big_lwe_size;
integer_radix_apply_bivariate_lookup_table_kb(
msb_stream, lwe_array_msb_out, sign_block, are_all_msb_zeros, bsk,
ksk, 1, signed_msb_lut);
}
}
cuda_synchronize_stream(lsb_stream);
cuda_synchronize_stream(msb_stream);
//////////////
// Reduce the two blocks into one final
reduce_signs(stream, lwe_array_out, lwe_array_lsb_out, mem_ptr,
sign_handler_f, bsk, ksk, 2);
} else {
// We only have to do the regular comparison
// And not the part where we compare most significant blocks with zeros
// total_num_radix_blocks == total_num_scalar_blocks
uint32_t num_lsb_radix_blocks = total_num_radix_blocks;
cuda_synchronize_stream(stream);
auto lsb_stream = mem_ptr->lsb_stream;
auto msb_stream = mem_ptr->msb_stream;
auto lwe_array_ct_out = mem_ptr->tmp_lwe_array_out;
auto lwe_array_sign_out =
lwe_array_ct_out + (num_lsb_radix_blocks / 2) * big_lwe_size;
#pragma omp parallel sections
{
// Both sections may be executed in parallel
#pragma omp section
{
Torus *lhs = diff_buffer->tmp_packed_left;
Torus *rhs = diff_buffer->tmp_packed_right;
pack_blocks(lsb_stream, lhs, lwe_array_in, big_lwe_dimension,
num_lsb_radix_blocks - 1, message_modulus);
pack_blocks(lsb_stream, rhs, scalar_blocks, 0, num_lsb_radix_blocks - 1,
message_modulus);
// From this point on we have half the number of blocks
num_lsb_radix_blocks /= 2;
// comparisons will be assigned
// - 0 if lhs < rhs
// - 1 if lhs == rhs
// - 2 if lhs > rhs
scalar_compare_radix_blocks_kb(lsb_stream, lwe_array_ct_out, lhs, rhs,
mem_ptr, bsk, ksk, num_lsb_radix_blocks);
}
#pragma omp section
{
Torus *encrypted_sign_block =
lwe_array_in + (total_num_radix_blocks - 1) * big_lwe_size;
Torus *scalar_sign_block =
scalar_blocks + (total_num_scalar_blocks - 1);
auto trivial_sign_block = mem_ptr->tmp_trivial_sign_block;
create_trivial_radix(msb_stream, trivial_sign_block, scalar_sign_block,
big_lwe_dimension, 1, 1, message_modulus,
carry_modulus);
integer_radix_apply_bivariate_lookup_table_kb(
msb_stream, lwe_array_sign_out, encrypted_sign_block,
trivial_sign_block, bsk, ksk, 1, mem_ptr->signed_lut);
}
}
cuda_synchronize_stream(lsb_stream);
cuda_synchronize_stream(msb_stream);
// Reduces a vec containing radix blocks that encrypts a sign
// (inferior, equal, superior) to one single radix block containing the
// final sign
reduce_signs(stream, lwe_array_out, lwe_array_ct_out, mem_ptr,
sign_handler_f, bsk, ksk, num_lsb_radix_blocks + 1);
}
}
template <typename Torus>
__host__ void integer_radix_signed_scalar_maxmin_kb(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_array_in,
Torus *scalar_blocks, int_comparison_buffer<Torus> *mem_ptr, void *bsk,
Torus *ksk, uint32_t total_num_radix_blocks,
uint32_t total_num_scalar_blocks) {
cudaSetDevice(stream->gpu_index);
auto params = mem_ptr->params;
// Calculates the difference sign between the ciphertext and the scalar
// - 0 if lhs < rhs
// - 1 if lhs == rhs
// - 2 if lhs > rhs
auto sign = mem_ptr->tmp_lwe_array_out;
integer_radix_signed_scalar_difference_check_kb(
stream, sign, lwe_array_in, scalar_blocks, mem_ptr,
mem_ptr->identity_lut_f, bsk, ksk, total_num_radix_blocks,
total_num_scalar_blocks);
// There is no optimized CMUX for scalars, so we convert to a trivial
// ciphertext
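// (a trivial LWE ciphertext has an all-zero mask and the plaintext encoded
// in the body, so the regular ciphertext/ciphertext CMUX path applies
// unchanged)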
auto lwe_array_left = lwe_array_in;
auto lwe_array_right = mem_ptr->tmp_block_comparisons;
create_trivial_radix(stream, lwe_array_right, scalar_blocks,
params.big_lwe_dimension, total_num_radix_blocks,
total_num_scalar_blocks, params.message_modulus,
params.carry_modulus);
// Selector
// CMUX for Max or Min
host_integer_radix_cmux_kb(stream, lwe_array_out, sign, lwe_array_left,
lwe_array_right, mem_ptr->cmux_buffer, bsk, ksk,
total_num_radix_blocks);
}
template <typename Torus>
__host__ void host_integer_radix_scalar_difference_check_kb(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_array_in,
Torus *scalar_blocks, int_comparison_buffer<Torus> *mem_ptr,
std::function<Torus(Torus)> sign_handler_f, void *bsk, Torus *ksk,
uint32_t total_num_radix_blocks, uint32_t total_num_scalar_blocks) {
if (mem_ptr->is_signed) {
// is signed and scalar is positive
integer_radix_signed_scalar_difference_check_kb(
stream, lwe_array_out, lwe_array_in, scalar_blocks, mem_ptr,
sign_handler_f, bsk, ksk, total_num_radix_blocks,
total_num_scalar_blocks);
} else {
integer_radix_unsigned_scalar_difference_check_kb(
stream, lwe_array_out, lwe_array_in, scalar_blocks, mem_ptr,
sign_handler_f, bsk, ksk, total_num_radix_blocks,
total_num_scalar_blocks);
}
}
template <typename Torus>
__host__ void host_integer_radix_signed_scalar_maxmin_kb(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_array_in,
Torus *scalar_blocks, int_comparison_buffer<Torus> *mem_ptr, void *bsk,
Torus *ksk, uint32_t total_num_radix_blocks,
uint32_t total_num_scalar_blocks) {
if (mem_ptr->is_signed) {
// is signed and scalar is positive
integer_radix_signed_scalar_maxmin_kb(
stream, lwe_array_out, lwe_array_in, scalar_blocks, mem_ptr, bsk, ksk,
total_num_radix_blocks, total_num_scalar_blocks);
} else {
integer_radix_unsigned_scalar_maxmin_kb(
stream, lwe_array_out, lwe_array_in, scalar_blocks, mem_ptr, bsk, ksk,
total_num_radix_blocks, total_num_scalar_blocks);
// The result will be in the first block. Everything else is garbage.
cuda_memset_async(lwe_array_out + big_lwe_size, 0,
(total_num_radix_blocks - 1) * big_lwe_size_bytes,
stream);
}
}
@@ -589,7 +270,7 @@ __host__ void host_integer_radix_scalar_maxmin_kb(
auto sign = mem_ptr->tmp_lwe_array_out;
host_integer_radix_scalar_difference_check_kb(
stream, sign, lwe_array_in, scalar_blocks, mem_ptr,
mem_ptr->identity_lut_f, bsk, ksk, total_num_radix_blocks,
mem_ptr->cleaning_lut_f, bsk, ksk, total_num_radix_blocks,
total_num_scalar_blocks);
// There is no optimized CMUX for scalars, so we convert to a trivial
@@ -622,6 +303,7 @@ __host__ void host_integer_radix_scalar_equality_check_kb(
auto eq_buffer = mem_ptr->eq_buffer;
size_t big_lwe_size = big_lwe_dimension + 1;
size_t big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
auto scalar_comparison_luts = eq_buffer->scalar_comparison_luts;
@@ -711,5 +393,11 @@ __host__ void host_integer_radix_scalar_equality_check_kb(
default:
PANIC("Cuda error: integer operation not supported")
}
// The result will be in the first block. Everything else is
// garbage.
if (num_radix_blocks > 1)
cuda_memset_async(lwe_array_out + big_lwe_size, 0,
big_lwe_size_bytes * (num_radix_blocks - 1), stream);
}
#endif

View File

@@ -1,89 +0,0 @@
#include "integer/scalar_mul.cuh"
void scratch_cuda_integer_scalar_mul_kb_64(
cuda_stream_t *stream, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t lwe_dimension, uint32_t ks_level,
uint32_t ks_base_log, uint32_t pbs_level, uint32_t pbs_base_log,
uint32_t grouping_factor, uint32_t num_blocks, uint32_t message_modulus,
uint32_t carry_modulus, PBS_TYPE pbs_type, bool allocate_gpu_memory) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
glwe_dimension * polynomial_size, lwe_dimension,
ks_level, ks_base_log, pbs_level, pbs_base_log,
grouping_factor, message_modulus, carry_modulus);
scratch_cuda_integer_radix_scalar_mul_kb<uint64_t>(
stream, (int_scalar_mul_buffer<uint64_t> **)mem_ptr, num_blocks, params,
allocate_gpu_memory);
}
void cuda_scalar_multiplication_integer_radix_ciphertext_64_inplace(
cuda_stream_t *stream, void *lwe_array, uint64_t *decomposed_scalar,
uint64_t *has_at_least_one_set, int8_t *mem, void *bsk, void *ksk,
uint32_t lwe_dimension, uint32_t polynomial_size, uint32_t message_modulus,
uint32_t num_blocks, uint32_t num_scalars) {
switch (polynomial_size) {
case 512:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<512>>(
stream, static_cast<uint64_t *>(lwe_array), decomposed_scalar,
has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsk,
static_cast<uint64_t *>(ksk), lwe_dimension, message_modulus,
num_blocks, num_scalars);
break;
case 1024:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<1024>>(
stream, static_cast<uint64_t *>(lwe_array), decomposed_scalar,
has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsk,
static_cast<uint64_t *>(ksk), lwe_dimension, message_modulus,
num_blocks, num_scalars);
break;
case 2048:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<2048>>(
stream, static_cast<uint64_t *>(lwe_array), decomposed_scalar,
has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsk,
static_cast<uint64_t *>(ksk), lwe_dimension, message_modulus,
num_blocks, num_scalars);
break;
case 4096:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<4096>>(
stream, static_cast<uint64_t *>(lwe_array), decomposed_scalar,
has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsk,
static_cast<uint64_t *>(ksk), lwe_dimension, message_modulus,
num_blocks, num_scalars);
break;
case 8192:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<8192>>(
stream, static_cast<uint64_t *>(lwe_array), decomposed_scalar,
has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsk,
static_cast<uint64_t *>(ksk), lwe_dimension, message_modulus,
num_blocks, num_scalars);
break;
case 16384:
host_integer_scalar_mul_radix<uint64_t, AmortizedDegree<16384>>(
stream, static_cast<uint64_t *>(lwe_array), decomposed_scalar,
has_at_least_one_set,
reinterpret_cast<int_scalar_mul_buffer<uint64_t> *>(mem), bsk,
static_cast<uint64_t *>(ksk), lwe_dimension, message_modulus,
num_blocks, num_scalars);
break;
default:
PANIC("Cuda error (scalar multiplication): unsupported polynomial size. "
"Only N = 512, 1024, 2048, 4096, 8192, 16384 are supported.")
}
}
void cleanup_cuda_integer_radix_scalar_mul(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
cudaSetDevice(stream->gpu_index);
int_scalar_mul_buffer<uint64_t> *mem_ptr =
(int_scalar_mul_buffer<uint64_t> *)(*mem_ptr_void);
mem_ptr->release(stream);
}

View File

@@ -1,136 +0,0 @@
#ifndef CUDA_INTEGER_SCALAR_MUL_CUH
#define CUDA_INTEGER_SCALAR_MUL_CUH
#ifdef __CDT_PARSER__
#undef __CUDA_RUNTIME_H__
#include <cuda_runtime.h>
#endif
#include "device.h"
#include "integer.h"
#include "multiplication.cuh"
#include "scalar_shifts.cuh"
#include "utils/kernel_dimensions.cuh"
#include <stdio.h>
template <typename T>
__global__ void device_small_scalar_radix_multiplication(T *output_lwe_array,
T *input_lwe_array,
T scalar,
uint32_t lwe_dimension,
uint32_t num_blocks) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int lwe_size = lwe_dimension + 1;
if (index < num_blocks * lwe_size) {
// Here we take advantage of the wrapping behaviour of uint
output_lwe_array[index] = input_lwe_array[index] * scalar;
}
}
template <typename T>
__host__ void scratch_cuda_integer_radix_scalar_mul_kb(
cuda_stream_t *stream, int_scalar_mul_buffer<T> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params,
bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
size_t sm_size = (params.big_lwe_dimension + 1) * sizeof(T);
check_cuda_error(cudaFuncSetAttribute(
tree_add_chunks<T>, cudaFuncAttributeMaxDynamicSharedMemorySize,
sm_size));
cudaFuncSetCacheConfig(tree_add_chunks<T>, cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
*mem_ptr = new int_scalar_mul_buffer<T>(stream, params, num_radix_blocks,
allocate_gpu_memory);
}
template <typename T, class params>
__host__ void host_integer_scalar_mul_radix(
cuda_stream_t *stream, T *lwe_array, T *decomposed_scalar,
T *has_at_least_one_set, int_scalar_mul_buffer<T> *mem, void *bsk, T *ksk,
uint32_t input_lwe_dimension, uint32_t message_modulus,
uint32_t num_radix_blocks, uint32_t num_scalars) {
if (num_radix_blocks == 0 || num_scalars == 0)
return;
cudaSetDevice(stream->gpu_index);
// lwe_size includes the presence of the body
// whereas lwe_dimension is the number of elements in the mask
uint32_t lwe_size = input_lwe_dimension + 1;
uint32_t lwe_size_bytes = lwe_size * sizeof(T);
uint32_t msg_bits = (uint32_t)std::log2(message_modulus);
uint32_t num_ciphertext_bits = msg_bits * num_radix_blocks;
T *preshifted_buffer = mem->preshifted_buffer;
T *all_shifted_buffer = mem->all_shifted_buffer;
for (size_t shift_amount = 0; shift_amount < msg_bits; shift_amount++) {
T *ptr = preshifted_buffer + shift_amount * lwe_size * num_radix_blocks;
if (has_at_least_one_set[shift_amount] == 1) {
cuda_memcpy_async_gpu_to_gpu(ptr, lwe_array,
lwe_size_bytes * num_radix_blocks, stream);
host_integer_radix_logical_scalar_shift_kb_inplace(
stream, ptr, shift_amount, mem->logical_scalar_shift_buffer, bsk, ksk,
num_radix_blocks);
} else {
// create trivial assign for value = 0
cuda_memset_async(ptr, 0, num_radix_blocks * lwe_size_bytes, stream);
}
}
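// Illustration, assuming 2-bit message blocks (msg_bits = 2): in the loop
// below, set bit i of the scalar selects the copy preshifted by
// i % msg_bits bits, and a block rotation handles the remaining shift of
// i / msg_bits whole blocks.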
size_t j = 0;
for (size_t i = 0; i < min(num_scalars, num_ciphertext_bits); i++) {
if (decomposed_scalar[i] == 1) {
// Perform a block shift
T *preshifted_radix_ct =
preshifted_buffer + (i % msg_bits) * num_radix_blocks * lwe_size;
T *block_shift_buffer =
all_shifted_buffer + j * num_radix_blocks * lwe_size;
radix_blocks_rotate_right<<<num_radix_blocks, 256, 0, stream->stream>>>(
block_shift_buffer, preshifted_radix_ct, i / msg_bits,
num_radix_blocks, lwe_size);
// create trivial assign for value = 0
cuda_memset_async(block_shift_buffer, 0, (i / msg_bits) * lwe_size_bytes,
stream);
j++;
}
}
if (j == 0) {
// lwe array = 0
cuda_memset_async(lwe_array, 0, num_radix_blocks * lwe_size_bytes, stream);
} else {
int terms_degree[j * num_radix_blocks];
for (int i = 0; i < j * num_radix_blocks; i++) {
terms_degree[i] = message_modulus - 1;
}
host_integer_sum_ciphertexts_vec_kb<T, params>(
stream, lwe_array, all_shifted_buffer, terms_degree, bsk, ksk,
mem->sum_ciphertexts_vec_mem, num_radix_blocks, j);
}
}
// Small scalar_mul is used in shift/rotate
template <typename T>
__host__ void host_integer_small_scalar_mul_radix(
cuda_stream_t *stream, T *output_lwe_array, T *input_lwe_array, T scalar,
uint32_t input_lwe_dimension, uint32_t input_lwe_ciphertext_count) {
cudaSetDevice(stream->gpu_index);
// lwe_size includes the presence of the body
// whereas lwe_dimension is the number of elements in the mask
int lwe_size = input_lwe_dimension + 1;
// Create a 1-dimensional grid of threads
int num_blocks = 0, num_threads = 0;
int num_entries = input_lwe_ciphertext_count * lwe_size;
getNumBlocksAndThreads(num_entries, 512, num_blocks, num_threads);
dim3 grid(num_blocks, 1, 1);
dim3 thds(num_threads, 1, 1);
device_small_scalar_radix_multiplication<<<grid, thds, 0, stream->stream>>>(
output_lwe_array, input_lwe_array, scalar, input_lwe_dimension,
input_lwe_ciphertext_count);
check_cuda_error(cudaGetLastError());
}
#endif

View File

@@ -6,8 +6,7 @@ void scratch_cuda_integer_radix_scalar_rotate_kb_64(
uint32_t small_lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, SHIFT_OR_ROTATE_TYPE shift_type,
bool allocate_gpu_memory) {
PBS_TYPE pbs_type, SHIFT_TYPE shift_type, bool allocate_gpu_memory) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
big_lwe_dimension, small_lwe_dimension, ks_level,
@@ -15,8 +14,8 @@ void scratch_cuda_integer_radix_scalar_rotate_kb_64(
message_modulus, carry_modulus);
scratch_cuda_integer_radix_scalar_rotate_kb<uint64_t>(
stream, (int_logical_scalar_shift_buffer<uint64_t> **)mem_ptr, num_blocks,
params, shift_type, allocate_gpu_memory);
stream, (int_shift_buffer<uint64_t> **)mem_ptr, num_blocks, params,
shift_type, allocate_gpu_memory);
}
void cuda_integer_radix_scalar_rotate_kb_64_inplace(cuda_stream_t *stream,
@@ -27,15 +26,15 @@ void cuda_integer_radix_scalar_rotate_kb_64_inplace(cuda_stream_t *stream,
host_integer_radix_scalar_rotate_kb_inplace<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array), n,
(int_logical_scalar_shift_buffer<uint64_t> *)mem_ptr, bsk,
static_cast<uint64_t *>(ksk), num_blocks);
(int_shift_buffer<uint64_t> *)mem_ptr, bsk, static_cast<uint64_t *>(ksk),
num_blocks);
}
void cleanup_cuda_integer_radix_scalar_rotate(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
int_logical_scalar_shift_buffer<uint64_t> *mem_ptr =
(int_logical_scalar_shift_buffer<uint64_t> *)(*mem_ptr_void);
int_shift_buffer<uint64_t> *mem_ptr =
(int_shift_buffer<uint64_t> *)(*mem_ptr_void);
mem_ptr->release(stream);
}

View File

@@ -5,28 +5,40 @@
#include "device.h"
#include "integer.cuh"
#include "integer.h"
#include "pbs/programmable_bootstrap_classic.cuh"
#include "pbs/programmable_bootstrap_multibit.cuh"
#include "pbs/bootstrap_low_latency.cuh"
#include "pbs/bootstrap_multibit.cuh"
#include "types/complex/operations.cuh"
#include "utils/helper.cuh"
#include "utils/kernel_dimensions.cuh"
#ifndef CUDA_INTEGER_SHIFT_OPS_CUH
#define CUDA_INTEGER_SHIFT_OPS_CUH
#include "crypto/keyswitch.cuh"
#include "device.h"
#include "integer.cuh"
#include "integer.h"
#include "pbs/bootstrap_low_latency.cuh"
#include "pbs/bootstrap_multibit.cuh"
#include "types/complex/operations.cuh"
#include "utils/helper.cuh"
#include "utils/kernel_dimensions.cuh"
template <typename Torus>
__host__ void scratch_cuda_integer_radix_scalar_rotate_kb(
cuda_stream_t *stream, int_logical_scalar_shift_buffer<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params,
SHIFT_OR_ROTATE_TYPE shift_type, bool allocate_gpu_memory) {
cuda_stream_t *stream, int_shift_buffer<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params, SHIFT_TYPE shift_type,
bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
*mem_ptr = new int_logical_scalar_shift_buffer<Torus>(
stream, shift_type, params, num_radix_blocks, allocate_gpu_memory);
*mem_ptr = new int_shift_buffer<Torus>(stream, shift_type, params,
num_radix_blocks, allocate_gpu_memory);
}
template <typename Torus>
__host__ void host_integer_radix_scalar_rotate_kb_inplace(
cuda_stream_t *stream, Torus *lwe_array, uint32_t n,
int_logical_scalar_shift_buffer<Torus> *mem, void *bsk, Torus *ksk,
uint32_t num_blocks) {
int_shift_buffer<Torus> *mem, void *bsk, Torus *ksk, uint32_t num_blocks) {
cudaSetDevice(stream->gpu_index);
auto params = mem->params;
@@ -99,4 +111,6 @@ __host__ void host_integer_radix_scalar_rotate_kb_inplace(
}
}
#endif // CUDA_SCALAR_OPS_CUH
#endif // CUDA_INTEGER_SCALAR_ROTATE_OPS_CUH

View File

@@ -1,90 +1,38 @@
#include "scalar_shifts.cuh"
void scratch_cuda_integer_radix_logical_scalar_shift_kb_64(
void scratch_cuda_integer_radix_scalar_shift_kb_64(
cuda_stream_t *stream, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t big_lwe_dimension,
uint32_t small_lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, SHIFT_OR_ROTATE_TYPE shift_type,
bool allocate_gpu_memory) {
PBS_TYPE pbs_type, SHIFT_TYPE shift_type, bool allocate_gpu_memory) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
big_lwe_dimension, small_lwe_dimension, ks_level,
ks_base_log, pbs_level, pbs_base_log, grouping_factor,
message_modulus, carry_modulus);
scratch_cuda_integer_radix_logical_scalar_shift_kb<uint64_t>(
stream, (int_logical_scalar_shift_buffer<uint64_t> **)mem_ptr, num_blocks,
params, shift_type, allocate_gpu_memory);
scratch_cuda_integer_radix_scalar_shift_kb<uint64_t>(
stream, (int_shift_buffer<uint64_t> **)mem_ptr, num_blocks, params,
shift_type, allocate_gpu_memory);
}
/// The logical scalar shift is the one used for unsigned integers and
/// for the left scalar shift. It consists of a rotation, followed by
/// the application of a PBS to the rotated blocks up to num_blocks -
/// rotations - 1. The remaining blocks are padded with zeros.
void cuda_integer_radix_logical_scalar_shift_kb_64_inplace(
void cuda_integer_radix_scalar_shift_kb_64_inplace(
cuda_stream_t *stream, void *lwe_array, uint32_t shift, int8_t *mem_ptr,
void *bsk, void *ksk, uint32_t num_blocks) {
host_integer_radix_logical_scalar_shift_kb_inplace<uint64_t>(
host_integer_radix_scalar_shift_kb_inplace<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array), shift,
(int_logical_scalar_shift_buffer<uint64_t> *)mem_ptr, bsk,
static_cast<uint64_t *>(ksk), num_blocks);
(int_shift_buffer<uint64_t> *)mem_ptr, bsk, static_cast<uint64_t *>(ksk),
num_blocks);
}
void scratch_cuda_integer_radix_arithmetic_scalar_shift_kb_64(
cuda_stream_t *stream, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t big_lwe_dimension,
uint32_t small_lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, SHIFT_OR_ROTATE_TYPE shift_type,
bool allocate_gpu_memory) {
void cleanup_cuda_integer_radix_scalar_shift(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
big_lwe_dimension, small_lwe_dimension, ks_level,
ks_base_log, pbs_level, pbs_base_log, grouping_factor,
message_modulus, carry_modulus);
scratch_cuda_integer_radix_arithmetic_scalar_shift_kb<uint64_t>(
stream, (int_arithmetic_scalar_shift_buffer<uint64_t> **)mem_ptr,
num_blocks, params, shift_type, allocate_gpu_memory);
}
/// The arithmetic scalar shift is the one used for the signed right shift.
/// It consists of a rotation, followed by the application of a PBS to the
/// rotated blocks up to num_blocks - rotations - 2. The last rotated block
/// is the sign block: one PBS shifts it, and a second PBS computes the
/// padding block, which is copied onto all remaining blocks instead of the
/// zero padding used in the logical shift.
void cuda_integer_radix_arithmetic_scalar_shift_kb_64_inplace(
cuda_stream_t *stream, void *lwe_array, uint32_t shift, int8_t *mem_ptr,
void *bsk, void *ksk, uint32_t num_blocks) {
host_integer_radix_arithmetic_scalar_shift_kb_inplace<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array), shift,
(int_arithmetic_scalar_shift_buffer<uint64_t> *)mem_ptr, bsk,
static_cast<uint64_t *>(ksk), num_blocks);
}
void cleanup_cuda_integer_radix_logical_scalar_shift(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
cudaSetDevice(stream->gpu_index);
int_logical_scalar_shift_buffer<uint64_t> *mem_ptr =
(int_logical_scalar_shift_buffer<uint64_t> *)(*mem_ptr_void);
mem_ptr->release(stream);
}
void cleanup_cuda_integer_radix_arithmetic_scalar_shift(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
cudaSetDevice(stream->gpu_index);
int_arithmetic_scalar_shift_buffer<uint64_t> *mem_ptr =
(int_arithmetic_scalar_shift_buffer<uint64_t> *)(*mem_ptr_void);
int_shift_buffer<uint64_t> *mem_ptr =
(int_shift_buffer<uint64_t> *)(*mem_ptr_void);
mem_ptr->release(stream);
}
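
As a plaintext reference for the two behaviours documented above, the following small host-side snippet (my illustration, not code from this file) shows the zero padding of a logical right shift versus the sign-bit padding of an arithmetic one:

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t u = 0x96;        // 0b10010110
  int8_t s = (int8_t)0x96; // same bits, interpreted as a negative value

  // Logical right shift: vacated high bits are filled with zeros.
  uint8_t logical = u >> 2; // 0b00100101 = 0x25

  // Arithmetic right shift: vacated high bits are filled with copies of
  // the sign bit, which is what the padding-block PBS reproduces.
  // (Arithmetic behaviour for signed >> is guaranteed from C++20 on and
  //  is what mainstream compilers do in practice.)
  int8_t arithmetic = s >> 2; // 0b11100101 = 0xe5

  printf("logical=0x%02x arithmetic=0x%02x\n", (unsigned)logical,
         (unsigned)(uint8_t)arithmetic);
  return 0;
}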

View File

@@ -1,33 +1,31 @@
#ifndef CUDA_INTEGER_SCALAR_SHIFT_OPS_CUH
#define CUDA_INTEGER_SCALAR_SHIFT_OPS_CUH
#ifndef CUDA_INTEGER_SHIFT_OPS_CUH
#define CUDA_INTEGER_SHIFT_OPS_CUH
#include "crypto/keyswitch.cuh"
#include "device.h"
#include "integer.cuh"
#include "integer.h"
#include "pbs/programmable_bootstrap_classic.cuh"
#include "pbs/programmable_bootstrap_multibit.cuh"
#include "pbs/bootstrap_low_latency.cuh"
#include "pbs/bootstrap_multibit.cuh"
#include "types/complex/operations.cuh"
#include "utils/helper.cuh"
#include "utils/kernel_dimensions.cuh"
#include <omp.h>
template <typename Torus>
__host__ void scratch_cuda_integer_radix_logical_scalar_shift_kb(
cuda_stream_t *stream, int_logical_scalar_shift_buffer<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params,
SHIFT_OR_ROTATE_TYPE shift_type, bool allocate_gpu_memory) {
__host__ void scratch_cuda_integer_radix_scalar_shift_kb(
cuda_stream_t *stream, int_shift_buffer<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params, SHIFT_TYPE shift_type,
bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
*mem_ptr = new int_logical_scalar_shift_buffer<Torus>(
stream, shift_type, params, num_radix_blocks, allocate_gpu_memory);
*mem_ptr = new int_shift_buffer<Torus>(stream, shift_type, params,
num_radix_blocks, allocate_gpu_memory);
}
template <typename Torus>
__host__ void host_integer_radix_logical_scalar_shift_kb_inplace(
__host__ void host_integer_radix_scalar_shift_kb_inplace(
cuda_stream_t *stream, Torus *lwe_array, uint32_t shift,
int_logical_scalar_shift_buffer<Torus> *mem, void *bsk, Torus *ksk,
uint32_t num_blocks) {
int_shift_buffer<Torus> *mem, void *bsk, Torus *ksk, uint32_t num_blocks) {
cudaSetDevice(stream->gpu_index);
auto params = mem->params;
@@ -109,128 +107,4 @@ __host__ void host_integer_radix_logical_scalar_shift_kb_inplace(
}
}
template <typename Torus>
__host__ void scratch_cuda_integer_radix_arithmetic_scalar_shift_kb(
cuda_stream_t *stream, int_arithmetic_scalar_shift_buffer<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params,
SHIFT_OR_ROTATE_TYPE shift_type, bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
*mem_ptr = new int_arithmetic_scalar_shift_buffer<Torus>(
stream, shift_type, params, num_radix_blocks, allocate_gpu_memory);
}
template <typename Torus>
__host__ void host_integer_radix_arithmetic_scalar_shift_kb_inplace(
cuda_stream_t *stream, Torus *lwe_array, uint32_t shift,
int_arithmetic_scalar_shift_buffer<Torus> *mem, void *bsk, Torus *ksk,
uint32_t num_blocks) {
cudaSetDevice(stream->gpu_index);
auto params = mem->params;
auto glwe_dimension = params.glwe_dimension;
auto polynomial_size = params.polynomial_size;
auto message_modulus = params.message_modulus;
size_t big_lwe_size = glwe_dimension * polynomial_size + 1;
size_t big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
size_t num_bits_in_block = (size_t)log2(message_modulus);
size_t total_num_bits = num_bits_in_block * num_blocks;
shift = shift % total_num_bits;
if (shift == 0) {
return;
}
size_t rotations = std::min(shift / num_bits_in_block, (size_t)num_blocks);
size_t shift_within_block = shift % num_bits_in_block;
Torus *rotated_buffer = mem->tmp_rotated;
Torus *padding_block = &rotated_buffer[num_blocks * big_lwe_size];
Torus *last_block_copy = &padding_block[big_lwe_size];
auto lut_univariate_shift_last_block =
mem->lut_buffers_univariate[shift_within_block - 1];
auto lut_univariate_padding_block =
mem->lut_buffers_univariate[num_bits_in_block - 1];
auto lut_bivariate = mem->lut_buffers_bivariate[shift_within_block - 1];
if (mem->shift_type == RIGHT_SHIFT) {
radix_blocks_rotate_left<<<num_blocks, 256, 0, stream->stream>>>(
rotated_buffer, lwe_array, rotations, num_blocks, big_lwe_size);
cuda_memcpy_async_gpu_to_gpu(lwe_array, rotated_buffer,
num_blocks * big_lwe_size_bytes, stream);
if (num_bits_in_block == 1) {
// If there is only 1 bit in the message part, shift_within_block is
// necessarily 0, so only rotations are required.
// We still need to pad with the value of the sign bit, but since a block
// holds a single message bit we can skip the PBS that would otherwise
// extract that sign bit.
Torus *block_src =
rotated_buffer + (num_blocks - rotations - 1) * big_lwe_size;
Torus *block_dest =
rotated_buffer + (num_blocks - rotations) * big_lwe_size;
for (uint i = 0; i < num_blocks; i++) {
cuda_memcpy_async_gpu_to_gpu(block_dest, block_src, big_lwe_size_bytes,
stream);
block_dest += big_lwe_size;
}
return;
}
// In the arithmetic shift case we have to pad with the value of the sign
// bit. This creates the need for a different shifting lut than in the
// logical shift case. We also need another PBS to create the padding block.
Torus *last_block = lwe_array + (num_blocks - rotations - 1) * big_lwe_size;
cuda_memcpy_async_gpu_to_gpu(last_block_copy,
rotated_buffer + (num_blocks - rotations - 1) *
big_lwe_size,
big_lwe_size_bytes, stream);
auto partial_current_blocks = lwe_array;
auto partial_next_blocks = &rotated_buffer[big_lwe_size];
size_t partial_block_count = num_blocks - rotations;
if (shift_within_block != 0 && rotations != num_blocks) {
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
stream, partial_current_blocks, partial_current_blocks,
partial_next_blocks, bsk, ksk, partial_block_count, lut_bivariate);
}
// Since our CPU threads will be working on different streams, we must
// first make sure the work in the main stream is completed
stream->synchronize();
#pragma omp parallel sections
{
// All sections may be executed in parallel
#pragma omp section
{
integer_radix_apply_univariate_lookup_table_kb(
mem->local_stream_1, padding_block, last_block_copy, bsk, ksk, 1,
lut_univariate_padding_block);
// Replace blocks 'pulled' from the left with the correct padding block
for (uint i = 0; i < rotations; i++) {
cuda_memcpy_async_gpu_to_gpu(
lwe_array + (num_blocks - rotations + i) * big_lwe_size,
padding_block, big_lwe_size_bytes, mem->local_stream_1);
}
}
#pragma omp section
{
if (shift_within_block != 0 && rotations != num_blocks) {
integer_radix_apply_univariate_lookup_table_kb(
mem->local_stream_2, last_block, last_block_copy, bsk, ksk, 1,
lut_univariate_shift_last_block);
}
}
}
cuda_synchronize_stream(mem->local_stream_1);
cuda_synchronize_stream(mem->local_stream_2);
} else {
PANIC("Cuda error (scalar shift): left scalar shift is never of the "
"arithmetic type")
}
}
#endif // CUDA_SCALAR_OPS_CUH
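
The decomposition at the top of both host functions splits a bit shift into whole-block rotations plus an intra-block shift. A quick CPU check of that arithmetic under illustrative parameters (message_modulus = 4, i.e. 2 message bits per block):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t message_modulus = 4;
  size_t num_blocks = 8; // a 16-bit radix integer
  size_t num_bits_in_block = (size_t)log2((double)message_modulus); // 2
  size_t total_num_bits = num_bits_in_block * num_blocks;           // 16

  size_t shift = 5 % total_num_bits;
  // 5 bits = 2 whole blocks of 2 bits, plus 1 remaining bit
  size_t rotations = std::min(shift / num_bits_in_block, num_blocks); // 2
  size_t shift_within_block = shift % num_bits_in_block;              // 1

  printf("rotations=%zu shift_within_block=%zu\n", rotations,
         shift_within_block);
  return 0;
}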

View File

@@ -1,40 +0,0 @@
#include "shift_and_rotate.cuh"
void scratch_cuda_integer_radix_shift_and_rotate_kb_64(
cuda_stream_t *stream, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t big_lwe_dimension,
uint32_t small_lwe_dimension, uint32_t ks_level, uint32_t ks_base_log,
uint32_t pbs_level, uint32_t pbs_base_log, uint32_t grouping_factor,
uint32_t num_blocks, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, SHIFT_OR_ROTATE_TYPE shift_type, bool is_signed,
bool allocate_gpu_memory) {
int_radix_params params(pbs_type, glwe_dimension, polynomial_size,
big_lwe_dimension, small_lwe_dimension, ks_level,
ks_base_log, pbs_level, pbs_base_log, grouping_factor,
message_modulus, carry_modulus);
scratch_cuda_integer_radix_shift_and_rotate_kb<uint64_t>(
stream, (int_shift_and_rotate_buffer<uint64_t> **)mem_ptr, num_blocks,
params, shift_type, is_signed, allocate_gpu_memory);
}
void cuda_integer_radix_shift_and_rotate_kb_64_inplace(
cuda_stream_t *stream, void *lwe_array, void *lwe_shift, int8_t *mem_ptr,
void *bsk, void *ksk, uint32_t num_blocks) {
host_integer_radix_shift_and_rotate_kb_inplace<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array),
static_cast<uint64_t *>(lwe_shift),
(int_shift_and_rotate_buffer<uint64_t> *)mem_ptr, bsk,
static_cast<uint64_t *>(ksk), num_blocks);
}
void cleanup_cuda_integer_radix_shift_and_rotate(cuda_stream_t *stream,
int8_t **mem_ptr_void) {
int_shift_and_rotate_buffer<uint64_t> *mem_ptr =
(int_shift_and_rotate_buffer<uint64_t> *)(*mem_ptr_void);
mem_ptr->release(stream);
}

View File

@@ -1,181 +0,0 @@
#ifndef CUDA_INTEGER_SHIFT_OPS_CUH
#define CUDA_INTEGER_SHIFT_OPS_CUH
#include "crypto/keyswitch.cuh"
#include "device.h"
#include "integer.cuh"
#include "integer.h"
#include "pbs/programmable_bootstrap_classic.cuh"
#include "pbs/programmable_bootstrap_multibit.cuh"
#include "scalar_mul.cuh"
#include "types/complex/operations.cuh"
#include "utils/helper.cuh"
#include "utils/kernel_dimensions.cuh"
template <typename Torus>
__host__ void scratch_cuda_integer_radix_shift_and_rotate_kb(
cuda_stream_t *stream, int_shift_and_rotate_buffer<Torus> **mem_ptr,
uint32_t num_radix_blocks, int_radix_params params,
SHIFT_OR_ROTATE_TYPE shift_type, bool is_signed, bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
*mem_ptr = new int_shift_and_rotate_buffer<Torus>(
stream, shift_type, is_signed, params, num_radix_blocks,
allocate_gpu_memory);
}
template <typename Torus>
__host__ void host_integer_radix_shift_and_rotate_kb_inplace(
cuda_stream_t *stream, Torus *lwe_array, Torus *lwe_shift,
int_shift_and_rotate_buffer<Torus> *mem, void *bsk, Torus *ksk,
uint32_t num_radix_blocks) {
uint32_t bits_per_block = std::log2(mem->params.message_modulus);
uint32_t total_nb_bits = bits_per_block * num_radix_blocks;
auto big_lwe_dimension = mem->params.big_lwe_dimension;
auto big_lwe_size = big_lwe_dimension + 1;
auto big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
assert(total_nb_bits > 0);
// Extract all bits
auto bits = mem->tmp_bits;
extract_n_bits<Torus>(stream, bits, lwe_array, bsk, ksk, num_radix_blocks,
bits_per_block, mem->bit_extract_luts);
// Extract shift bits
auto shift_bits = mem->tmp_shift_bits;
auto is_power_of_two = [](uint32_t n) {
return (n > 0) && ((n & (n - 1)) == 0);
};
// This effectively means that if the block parameters give a
// total_nb_bits that is not a power of two, the behaviour of shifting
// when shift >= total_nb_bits differs from the power-of-two case,
// as we 'capture' more bits in `shift_bits`
uint32_t max_num_bits_that_tell_shift = std::log2(total_nb_bits);
if (!is_power_of_two(total_nb_bits))
max_num_bits_that_tell_shift += 1;
// Extract the bits and put them at bit index 2 (=> bit number 3)
// so that they are already aligned to the correct position of the cmux
// input, which also reduces noise growth
extract_n_bits<Torus>(stream, shift_bits, lwe_shift, bsk, ksk, 1,
max_num_bits_that_tell_shift,
mem->bit_extract_luts_with_offset_2);
// If signed, do an "arithmetic shift" by padding with the sign bit
auto last_bit = bits + (total_nb_bits - 1) * big_lwe_size;
// Apply op
auto rotated_input = mem->tmp_rotated;
auto input_bits_a = mem->tmp_input_bits_a;
auto input_bits_b = mem->tmp_input_bits_b;
auto mux_lut = mem->mux_lut;
auto mux_inputs = mem->tmp_mux_inputs;
cuda_memcpy_async_gpu_to_gpu(input_bits_a, bits,
total_nb_bits * big_lwe_size_bytes, stream);
for (int d = 0; d < max_num_bits_that_tell_shift; d++) {
auto shift_bit = shift_bits + d * big_lwe_size;
cuda_memcpy_async_gpu_to_gpu(input_bits_b, input_bits_a,
total_nb_bits * big_lwe_size_bytes, stream);
auto rotations = 1 << d;
switch (mem->shift_type) {
case LEFT_SHIFT:
radix_blocks_rotate_right<<<total_nb_bits, 256, 0, stream->stream>>>(
rotated_input, input_bits_b, rotations, total_nb_bits, big_lwe_size);
// A left shift always pads the vacated low blocks with zeros; the
// sign-bit padding only applies to the RIGHT_SHIFT case below, so the
// signed/unsigned distinction is irrelevant here.
cuda_memset_async(rotated_input, 0, rotations * big_lwe_size_bytes,
stream);
break;
case RIGHT_SHIFT:
radix_blocks_rotate_left<<<total_nb_bits, 256, 0, stream->stream>>>(
rotated_input, input_bits_b, rotations, total_nb_bits, big_lwe_size);
if (mem->is_signed)
for (int i = 0; i < rotations; i++)
cuda_memcpy_async_gpu_to_gpu(
rotated_input + (total_nb_bits - rotations + i) * big_lwe_size,
last_bit, big_lwe_size_bytes, stream);
else
cuda_memset_async(rotated_input +
(total_nb_bits - rotations) * big_lwe_size,
0, rotations * big_lwe_size_bytes, stream);
break;
case LEFT_ROTATE:
radix_blocks_rotate_right<<<total_nb_bits, 256, 0, stream->stream>>>(
rotated_input, input_bits_b, rotations, total_nb_bits, big_lwe_size);
break;
case RIGHT_ROTATE:
radix_blocks_rotate_left<<<total_nb_bits, 256, 0, stream->stream>>>(
rotated_input, input_bits_b, rotations, total_nb_bits, big_lwe_size);
break;
default:
PANIC("Unknown operation")
}
// pack bits into one block so that we have
// control_bit|b|a
cuda_memset_async(mux_inputs, 0, total_nb_bits * big_lwe_size_bytes,
stream); // Do we need this?
pack_bivariate_blocks(stream, mux_inputs, mux_lut->lwe_indexes_out,
rotated_input, input_bits_a, mux_lut->lwe_indexes_in,
big_lwe_dimension, 2, total_nb_bits);
// The shift bit is already properly aligned/positioned
for (int i = 0; i < total_nb_bits; i++)
host_addition(stream, mux_inputs + i * big_lwe_size,
mux_inputs + i * big_lwe_size, shift_bit,
mem->params.big_lwe_dimension, 1);
// we have
// control_bit|b|a
integer_radix_apply_univariate_lookup_table_kb(
stream, input_bits_a, mux_inputs, bsk, ksk, total_nb_bits, mux_lut);
}
// Initialize the output:
// copy the last (most significant) bit of each radix block
auto lwe_last_out = lwe_array;
last_bit = input_bits_a + (bits_per_block - 1) * big_lwe_size;
for (int i = 0; i < num_radix_blocks; i++) {
cuda_memcpy_async_gpu_to_gpu(lwe_last_out, last_bit, big_lwe_size_bytes,
stream);
lwe_last_out += big_lwe_size;
last_bit += bits_per_block * big_lwe_size;
}
// Bitshift and add the other bits
lwe_last_out = lwe_array;
for (int i = bits_per_block - 2; i >= 0; i--) {
host_integer_small_scalar_mul_radix<Torus>(
stream, lwe_last_out, lwe_last_out, 2, big_lwe_dimension,
num_radix_blocks);
auto block = lwe_last_out;
auto bit_to_add = input_bits_a + i * big_lwe_size;
for (int j = 0; j < num_radix_blocks; j++) {
host_addition(stream, block, block, bit_to_add, big_lwe_dimension, 1);
block += big_lwe_size;
bit_to_add += bits_per_block * big_lwe_size;
}
// To give back a clean ciphertext
auto cleaning_lut = mem->cleaning_lut;
integer_radix_apply_univariate_lookup_table_kb(
stream, lwe_last_out, lwe_last_out, bsk, ksk, num_radix_blocks,
cleaning_lut);
}
}
#endif
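
The main loop above is a barrel shifter: pass d rotates by 1 << d bit positions and a CMUX driven by shift bit d keeps either the rotated or the unrotated value. Note the bit-count subtlety documented in the code: for total_nb_bits = 12, log2 truncates to 3, so one extra shift bit is captured. A plaintext emulation of the same control flow (illustrative, left-shift case only):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t total_nb_bits = 16; // power of two, so log2 is exact
  const uint32_t max_num_bits_that_tell_shift = 4;

  uint16_t value = 0x000b;
  uint32_t shift = 6 % total_nb_bits;

  uint16_t acc = value;
  for (uint32_t d = 0; d < max_num_bits_that_tell_shift; d++) {
    uint16_t rotated = (uint16_t)(acc << (1u << d)); // LEFT_SHIFT branch
    bool shift_bit = (shift >> d) & 1; // homomorphically: the extracted bit
    acc = shift_bit ? rotated : acc;   // homomorphically: the mux LUT
  }
  printf("%u == %u\n", (unsigned)acc, (unsigned)(uint16_t)(value << shift));
  return 0;
}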

View File

@@ -151,49 +151,4 @@ __host__ void host_subtraction_plaintext(cuda_stream_t *stream, T *output,
output, plaintext_input, input_lwe_dimension, num_entries);
check_cuda_error(cudaGetLastError());
}
template <typename T>
__global__ void unchecked_sub_with_correcting_term(
T *output, T *input_1, T *input_2, uint32_t num_entries, uint32_t lwe_size,
uint32_t message_modulus, uint32_t carry_modulus, uint32_t degree) {
uint32_t msg_mod = message_modulus;
// Ceiling division: with integer operands, ceil(degree / msg_mod) would
// truncate before ceil could round up.
uint64_t z = max((uint64_t)((degree + msg_mod - 1) / msg_mod), (uint64_t)1);
z *= msg_mod;
uint64_t delta = (1ULL << 63) / (message_modulus * carry_modulus);
uint64_t w = z * delta;
int tid = threadIdx.x;
int index = blockIdx.x * blockDim.x + tid;
if (index < num_entries) {
// Here we take advantage of the wrapping behaviour of uint
output[index] = input_1[index] + ((0 - input_2[index]));
if (index % lwe_size == lwe_size - 1)
output[index] += w;
}
}
template <typename T>
__host__ void host_unchecked_sub_with_correcting_term(
cuda_stream_t *stream, T *output, T *input_1, T *input_2,
uint32_t input_lwe_dimension, uint32_t input_lwe_ciphertext_count,
uint32_t message_modulus, uint32_t carry_modulus, uint32_t degree) {
cudaSetDevice(stream->gpu_index);
// lwe_size includes the body,
// whereas lwe_dimension counts only the elements of the mask
int lwe_size = input_lwe_dimension + 1;
// Create a 1-dimensional grid of threads
int num_blocks = 0, num_threads = 0;
int num_entries = input_lwe_ciphertext_count * lwe_size;
getNumBlocksAndThreads(num_entries, 512, num_blocks, num_threads);
dim3 grid(num_blocks, 1, 1);
dim3 thds(num_threads, 1, 1);
unchecked_sub_with_correcting_term<<<grid, thds, 0, stream->stream>>>(
output, input_1, input_2, num_entries, lwe_size, message_modulus,
carry_modulus, degree);
check_cuda_error(cudaGetLastError());
}
#endif // CUDA_ADD_H
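
The correcting term w deserves a worked example. Take message_modulus = 4, carry_modulus = 4 and an input degree of 5: z must be the smallest multiple of the message modulus that dominates the degree, so z = 8 and w = z * delta. The subtraction a - b then becomes a + (-b) + w, whose plaintext a - b + z can never go negative, while z ≡ 0 mod message_modulus leaves the message unchanged. A CPU check of that arithmetic, mirroring the kernel with the ceiling division written out:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t message_modulus = 4, carry_modulus = 4, degree = 5;

  // Smallest multiple of message_modulus >= degree (ceiling division).
  uint64_t z = (degree + message_modulus - 1) / message_modulus; // 2
  if (z == 0)
    z = 1;
  z *= message_modulus; // 8

  uint64_t delta = (1ULL << 63) / (message_modulus * carry_modulus); // 2^59
  uint64_t w = z * delta;                                            // 2^62

  printf("z=%llu delta=2^59 w=%llu\n", (unsigned long long)z,
         (unsigned long long)w);
  return 0;
}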

View File

@@ -0,0 +1 @@
#include "bootstrap.cuh"

View File

@@ -1,8 +1,8 @@
#include "../../include/bootstrap.h"
#include "../../include/device.h"
#include "../../include/programmable_bootstrap.h"
#include "../include/device.h"
#include "programmable_bootstrap_classic.cuh"
#include "programmable_bootstrap_multibit.cuh"
#include "bootstrap_low_latency.cuh"
#include "bootstrap_multibit.cuh"
template <typename Torus>
void execute_pbs(cuda_stream_t *stream, Torus *lwe_array_out,
@@ -21,8 +21,16 @@ void execute_pbs(cuda_stream_t *stream, Torus *lwe_array_out,
switch (pbs_type) {
case MULTI_BIT:
PANIC("Error: 32-bit multibit PBS is not supported.\n")
case CLASSICAL:
cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
case LOW_LAT:
cuda_bootstrap_low_latency_lwe_ciphertext_vector_32(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, pbs_buffer, lwe_dimension, glwe_dimension,
polynomial_size, base_log, level_count, input_lwe_ciphertext_count,
num_luts, lwe_idx, max_shared_memory);
break;
case AMORTIZED:
cuda_bootstrap_amortized_lwe_ciphertext_vector_32(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, pbs_buffer, lwe_dimension, glwe_dimension,
@@ -37,17 +45,23 @@ void execute_pbs(cuda_stream_t *stream, Torus *lwe_array_out,
// 64 bits
switch (pbs_type) {
case MULTI_BIT:
if (grouping_factor == 0)
PANIC("Multi-bit PBS error: grouping factor should be > 0.")
cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_64(
cuda_multi_bit_pbs_lwe_ciphertext_vector_64(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, pbs_buffer, lwe_dimension, glwe_dimension,
polynomial_size, grouping_factor, base_log, level_count,
input_lwe_ciphertext_count, num_luts, lwe_idx, max_shared_memory);
break;
case CLASSICAL:
cuda_programmable_bootstrap_lwe_ciphertext_vector_64(
case LOW_LAT:
cuda_bootstrap_low_latency_lwe_ciphertext_vector_64(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, pbs_buffer, lwe_dimension, glwe_dimension,
polynomial_size, base_log, level_count, input_lwe_ciphertext_count,
num_luts, lwe_idx, max_shared_memory);
break;
case AMORTIZED:
cuda_bootstrap_amortized_lwe_ciphertext_vector_64(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, pbs_buffer, lwe_dimension, glwe_dimension,
@@ -78,11 +92,16 @@ void execute_scratch_pbs(cuda_stream_t *stream, int8_t **pbs_buffer,
switch (pbs_type) {
case MULTI_BIT:
PANIC("Error: 32-bit multibit PBS is not supported.\n")
case CLASSICAL:
scratch_cuda_programmable_bootstrap_32(
case LOW_LAT:
scratch_cuda_bootstrap_low_latency_32(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case AMORTIZED:
scratch_cuda_bootstrap_amortized_32(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
default:
PANIC("Error: unsupported cuda PBS type.")
}
@@ -91,18 +110,21 @@ void execute_scratch_pbs(cuda_stream_t *stream, int8_t **pbs_buffer,
// 64 bits
switch (pbs_type) {
case MULTI_BIT:
if (grouping_factor == 0)
PANIC("Multi-bit PBS error: grouping factor should be > 0.")
scratch_cuda_multi_bit_programmable_bootstrap_64(
scratch_cuda_multi_bit_pbs_64(
stream, pbs_buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, grouping_factor, input_lwe_ciphertext_count,
max_shared_memory, allocate_gpu_memory);
break;
case CLASSICAL:
scratch_cuda_programmable_bootstrap_64(
case LOW_LAT:
scratch_cuda_bootstrap_low_latency_64(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case AMORTIZED:
scratch_cuda_bootstrap_amortized_64(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
default:
PANIC("Error: unsupported cuda PBS type.")
}

View File

@@ -1,12 +1,12 @@
#include "programmable_bootstrap_amortized.cuh"
#include "bootstrap_amortized.cuh"
/*
* Returns the buffer size for 64 bits executions
*/
uint64_t get_buffer_size_programmable_bootstrap_amortized_64(
uint64_t get_buffer_size_bootstrap_amortized_64(
uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory) {
return get_buffer_size_programmable_bootstrap_amortized<uint64_t>(
return get_buffer_size_bootstrap_amortized<uint64_t>(
glwe_dimension, polynomial_size, input_lwe_ciphertext_count,
max_shared_memory);
}
@@ -17,51 +17,44 @@ uint64_t get_buffer_size_programmable_bootstrap_amortized_64(
* configures SM options on the GPU in case FULLSM or PARTIALSM mode is going to
* be used.
*/
void scratch_cuda_programmable_bootstrap_amortized_32(
void scratch_cuda_bootstrap_amortized_32(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory) {
switch (polynomial_size) {
case 256:
scratch_programmable_bootstrap_amortized<uint32_t, int32_t,
AmortizedDegree<256>>(
scratch_bootstrap_amortized<uint32_t, int32_t, AmortizedDegree<256>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 512:
scratch_programmable_bootstrap_amortized<uint32_t, int32_t,
AmortizedDegree<512>>(
scratch_bootstrap_amortized<uint32_t, int32_t, AmortizedDegree<512>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 1024:
scratch_programmable_bootstrap_amortized<uint32_t, int32_t,
AmortizedDegree<1024>>(
scratch_bootstrap_amortized<uint32_t, int32_t, AmortizedDegree<1024>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 2048:
scratch_programmable_bootstrap_amortized<uint32_t, int32_t,
AmortizedDegree<2048>>(
scratch_bootstrap_amortized<uint32_t, int32_t, AmortizedDegree<2048>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 4096:
scratch_programmable_bootstrap_amortized<uint32_t, int32_t,
AmortizedDegree<4096>>(
scratch_bootstrap_amortized<uint32_t, int32_t, AmortizedDegree<4096>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 8192:
scratch_programmable_bootstrap_amortized<uint32_t, int32_t,
AmortizedDegree<8192>>(
scratch_bootstrap_amortized<uint32_t, int32_t, AmortizedDegree<8192>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 16384:
scratch_programmable_bootstrap_amortized<uint32_t, int32_t,
AmortizedDegree<16384>>(
scratch_bootstrap_amortized<uint32_t, int32_t, AmortizedDegree<16384>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
@@ -78,51 +71,44 @@ void scratch_cuda_programmable_bootstrap_amortized_32(
* configures SM options on the GPU in case FULLSM or PARTIALSM mode is going to
* be used.
*/
void scratch_cuda_programmable_bootstrap_amortized_64(
void scratch_cuda_bootstrap_amortized_64(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory) {
switch (polynomial_size) {
case 256:
scratch_programmable_bootstrap_amortized<uint64_t, int64_t,
AmortizedDegree<256>>(
scratch_bootstrap_amortized<uint64_t, int64_t, AmortizedDegree<256>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 512:
scratch_programmable_bootstrap_amortized<uint64_t, int64_t,
AmortizedDegree<512>>(
scratch_bootstrap_amortized<uint64_t, int64_t, AmortizedDegree<512>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 1024:
scratch_programmable_bootstrap_amortized<uint64_t, int64_t,
AmortizedDegree<1024>>(
scratch_bootstrap_amortized<uint64_t, int64_t, AmortizedDegree<1024>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 2048:
scratch_programmable_bootstrap_amortized<uint64_t, int64_t,
AmortizedDegree<2048>>(
scratch_bootstrap_amortized<uint64_t, int64_t, AmortizedDegree<2048>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 4096:
scratch_programmable_bootstrap_amortized<uint64_t, int64_t,
AmortizedDegree<4096>>(
scratch_bootstrap_amortized<uint64_t, int64_t, AmortizedDegree<4096>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 8192:
scratch_programmable_bootstrap_amortized<uint64_t, int64_t,
AmortizedDegree<8192>>(
scratch_bootstrap_amortized<uint64_t, int64_t, AmortizedDegree<8192>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 16384:
scratch_programmable_bootstrap_amortized<uint64_t, int64_t,
AmortizedDegree<16384>>(
scratch_bootstrap_amortized<uint64_t, int64_t, AmortizedDegree<16384>>(
stream, pbs_buffer, glwe_dimension, polynomial_size,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
@@ -136,7 +122,7 @@ void scratch_cuda_programmable_bootstrap_amortized_64(
/* Perform the programmable bootstrapping on a batch of input u32 LWE
* ciphertexts. See the corresponding operation on 64 bits for more details.
*/
void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
void cuda_bootstrap_amortized_lwe_ciphertext_vector_32(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *pbs_buffer,
@@ -150,7 +136,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
switch (polynomial_size) {
case 256:
host_programmable_bootstrap_amortized<uint32_t, AmortizedDegree<256>>(
host_bootstrap_amortized<uint32_t, AmortizedDegree<256>>(
stream, (uint32_t *)lwe_array_out, (uint32_t *)lwe_output_indexes,
(uint32_t *)lut_vector, (uint32_t *)lut_vector_indexes,
(uint32_t *)lwe_array_in, (uint32_t *)lwe_input_indexes,
@@ -159,7 +145,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
max_shared_memory);
break;
case 512:
host_programmable_bootstrap_amortized<uint32_t, AmortizedDegree<512>>(
host_bootstrap_amortized<uint32_t, AmortizedDegree<512>>(
stream, (uint32_t *)lwe_array_out, (uint32_t *)lwe_output_indexes,
(uint32_t *)lut_vector, (uint32_t *)lut_vector_indexes,
(uint32_t *)lwe_array_in, (uint32_t *)lwe_input_indexes,
@@ -168,7 +154,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
max_shared_memory);
break;
case 1024:
host_programmable_bootstrap_amortized<uint32_t, AmortizedDegree<1024>>(
host_bootstrap_amortized<uint32_t, AmortizedDegree<1024>>(
stream, (uint32_t *)lwe_array_out, (uint32_t *)lwe_output_indexes,
(uint32_t *)lut_vector, (uint32_t *)lut_vector_indexes,
(uint32_t *)lwe_array_in, (uint32_t *)lwe_input_indexes,
@@ -177,7 +163,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
max_shared_memory);
break;
case 2048:
host_programmable_bootstrap_amortized<uint32_t, AmortizedDegree<2048>>(
host_bootstrap_amortized<uint32_t, AmortizedDegree<2048>>(
stream, (uint32_t *)lwe_array_out, (uint32_t *)lwe_output_indexes,
(uint32_t *)lut_vector, (uint32_t *)lut_vector_indexes,
(uint32_t *)lwe_array_in, (uint32_t *)lwe_input_indexes,
@@ -186,7 +172,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
max_shared_memory);
break;
case 4096:
host_programmable_bootstrap_amortized<uint32_t, AmortizedDegree<4096>>(
host_bootstrap_amortized<uint32_t, AmortizedDegree<4096>>(
stream, (uint32_t *)lwe_array_out, (uint32_t *)lwe_output_indexes,
(uint32_t *)lut_vector, (uint32_t *)lut_vector_indexes,
(uint32_t *)lwe_array_in, (uint32_t *)lwe_input_indexes,
@@ -195,7 +181,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
max_shared_memory);
break;
case 8192:
host_programmable_bootstrap_amortized<uint32_t, AmortizedDegree<8192>>(
host_bootstrap_amortized<uint32_t, AmortizedDegree<8192>>(
stream, (uint32_t *)lwe_array_out, (uint32_t *)lwe_output_indexes,
(uint32_t *)lut_vector, (uint32_t *)lut_vector_indexes,
(uint32_t *)lwe_array_in, (uint32_t *)lwe_input_indexes,
@@ -204,7 +190,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
max_shared_memory);
break;
case 16384:
host_programmable_bootstrap_amortized<uint32_t, AmortizedDegree<16384>>(
host_bootstrap_amortized<uint32_t, AmortizedDegree<16384>>(
stream, (uint32_t *)lwe_array_out, (uint32_t *)lwe_output_indexes,
(uint32_t *)lut_vector, (uint32_t *)lut_vector_indexes,
(uint32_t *)lwe_array_in, (uint32_t *)lwe_input_indexes,
@@ -284,7 +270,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_32(
* - the constant memory (64K) is used for storing the roots of unity
* values for the FFT
*/
void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
void cuda_bootstrap_amortized_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *pbs_buffer,
@@ -298,7 +284,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
switch (polynomial_size) {
case 256:
host_programmable_bootstrap_amortized<uint64_t, AmortizedDegree<256>>(
host_bootstrap_amortized<uint64_t, AmortizedDegree<256>>(
stream, (uint64_t *)lwe_array_out, (uint64_t *)lwe_output_indexes,
(uint64_t *)lut_vector, (uint64_t *)lut_vector_indexes,
(uint64_t *)lwe_array_in, (uint64_t *)lwe_input_indexes,
@@ -307,7 +293,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
max_shared_memory);
break;
case 512:
host_programmable_bootstrap_amortized<uint64_t, AmortizedDegree<512>>(
host_bootstrap_amortized<uint64_t, AmortizedDegree<512>>(
stream, (uint64_t *)lwe_array_out, (uint64_t *)lwe_output_indexes,
(uint64_t *)lut_vector, (uint64_t *)lut_vector_indexes,
(uint64_t *)lwe_array_in, (uint64_t *)lwe_input_indexes,
@@ -316,7 +302,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
max_shared_memory);
break;
case 1024:
host_programmable_bootstrap_amortized<uint64_t, AmortizedDegree<1024>>(
host_bootstrap_amortized<uint64_t, AmortizedDegree<1024>>(
stream, (uint64_t *)lwe_array_out, (uint64_t *)lwe_output_indexes,
(uint64_t *)lut_vector, (uint64_t *)lut_vector_indexes,
(uint64_t *)lwe_array_in, (uint64_t *)lwe_input_indexes,
@@ -325,7 +311,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
max_shared_memory);
break;
case 2048:
host_programmable_bootstrap_amortized<uint64_t, AmortizedDegree<2048>>(
host_bootstrap_amortized<uint64_t, AmortizedDegree<2048>>(
stream, (uint64_t *)lwe_array_out, (uint64_t *)lwe_output_indexes,
(uint64_t *)lut_vector, (uint64_t *)lut_vector_indexes,
(uint64_t *)lwe_array_in, (uint64_t *)lwe_input_indexes,
@@ -334,7 +320,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
max_shared_memory);
break;
case 4096:
host_programmable_bootstrap_amortized<uint64_t, AmortizedDegree<4096>>(
host_bootstrap_amortized<uint64_t, AmortizedDegree<4096>>(
stream, (uint64_t *)lwe_array_out, (uint64_t *)lwe_output_indexes,
(uint64_t *)lut_vector, (uint64_t *)lut_vector_indexes,
(uint64_t *)lwe_array_in, (uint64_t *)lwe_input_indexes,
@@ -343,7 +329,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
max_shared_memory);
break;
case 8192:
host_programmable_bootstrap_amortized<uint64_t, AmortizedDegree<8192>>(
host_bootstrap_amortized<uint64_t, AmortizedDegree<8192>>(
stream, (uint64_t *)lwe_array_out, (uint64_t *)lwe_output_indexes,
(uint64_t *)lut_vector, (uint64_t *)lut_vector_indexes,
(uint64_t *)lwe_array_in, (uint64_t *)lwe_input_indexes,
@@ -352,7 +338,7 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
max_shared_memory);
break;
case 16384:
host_programmable_bootstrap_amortized<uint64_t, AmortizedDegree<16384>>(
host_bootstrap_amortized<uint64_t, AmortizedDegree<16384>>(
stream, (uint64_t *)lwe_array_out, (uint64_t *)lwe_output_indexes,
(uint64_t *)lut_vector, (uint64_t *)lut_vector_indexes,
(uint64_t *)lwe_array_in, (uint64_t *)lwe_input_indexes,
@@ -371,8 +357,8 @@ void cuda_programmable_bootstrap_amortized_lwe_ciphertext_vector_64(
* This cleanup function frees the data for the amortized PBS on GPU in
* buffer for 32 or 64 bits inputs.
*/
void cleanup_cuda_programmable_bootstrap_amortized(cuda_stream_t *stream,
int8_t **pbs_buffer) {
void cleanup_cuda_bootstrap_amortized(cuda_stream_t *stream,
int8_t **pbs_buffer) {
// Free memory
cuda_drop_async(*pbs_buffer, stream);
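
Every switch in this file maps a runtime polynomial size onto a compile-time template parameter (AmortizedDegree<N>), so the kernels can fully unroll loops over the degree. A self-contained sketch of that dispatch pattern with a toy functor (not the repository's types):

#include <cstdint>
#include <cstdio>

template <int N> struct Degree {
  static constexpr int degree = N;
};

// Stand-in for a kernel launch specialized on the degree.
template <class params> void run_specialized() {
  printf("running kernel specialized for N=%d\n", params::degree);
}

void dispatch(uint32_t polynomial_size) {
  switch (polynomial_size) {
  case 256:
    run_specialized<Degree<256>>();
    break;
  case 512:
    run_specialized<Degree<512>>();
    break;
  case 1024:
    run_specialized<Degree<1024>>();
    break;
  default:
    printf("unsupported polynomial size\n");
  }
}

int main() { dispatch(1024); }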

View File

@@ -6,6 +6,7 @@
#include <cuda_runtime.h>
#endif
#include "bootstrap.h"
#include "crypto/gadget.cuh"
#include "crypto/torus.cuh"
#include "device.h"
@@ -14,12 +15,11 @@
#include "polynomial/functions.cuh"
#include "polynomial/parameters.cuh"
#include "polynomial/polynomial_math.cuh"
#include "programmable_bootstrap.h"
#include "types/complex/operations.cuh"
template <typename Torus, class params, sharedMemDegree SMD>
/*
* Kernel launched by host_programmable_bootstrap_amortized
* Kernel launched by host_bootstrap_amortized
*
* Uses shared memory to increase performance
* - lwe_array_out: output batch of num_samples bootstrapped ciphertexts c =
@@ -46,7 +46,7 @@ template <typename Torus, class params, sharedMemDegree SMD>
* - device_memory_size_per_sample: amount of global memory to allocate if SMD
* is not FULLSM
*/
__global__ void device_programmable_bootstrap_amortized(
__global__ void device_bootstrap_amortized(
Torus *lwe_array_out, Torus *lwe_output_indexes, Torus *lut_vector,
Torus *lut_vector_indexes, Torus *lwe_array_in, Torus *lwe_input_indexes,
double2 *bootstrapping_key, int8_t *device_mem, uint32_t glwe_dimension,
@@ -211,8 +211,7 @@ __global__ void device_programmable_bootstrap_amortized(
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_programmable_bootstrap_amortized(
__host__ __device__ uint64_t get_buffer_size_full_sm_bootstrap_amortized(
uint32_t polynomial_size, uint32_t glwe_dimension) {
return sizeof(Torus) * polynomial_size * (glwe_dimension + 1) + // accumulator
sizeof(Torus) * polynomial_size *
@@ -224,22 +223,19 @@ get_buffer_size_full_sm_programmable_bootstrap_amortized(
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_partial_sm_programmable_bootstrap_amortized(
uint32_t polynomial_size) {
get_buffer_size_partial_sm_bootstrap_amortized(uint32_t polynomial_size) {
return sizeof(double2) * polynomial_size / 2; // accumulator fft
}
template <typename Torus>
__host__ __device__ uint64_t get_buffer_size_programmable_bootstrap_amortized(
__host__ __device__ uint64_t get_buffer_size_bootstrap_amortized(
uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory) {
uint64_t full_sm =
get_buffer_size_full_sm_programmable_bootstrap_amortized<Torus>(
polynomial_size, glwe_dimension);
uint64_t full_sm = get_buffer_size_full_sm_bootstrap_amortized<Torus>(
polynomial_size, glwe_dimension);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap_amortized<Torus>(
polynomial_size);
get_buffer_size_partial_sm_bootstrap_amortized<Torus>(polynomial_size);
uint64_t partial_dm = full_sm - partial_sm;
uint64_t full_dm = full_sm;
uint64_t device_mem = 0;
@@ -252,45 +248,41 @@ __host__ __device__ uint64_t get_buffer_size_programmable_bootstrap_amortized(
}
template <typename Torus, typename STorus, typename params>
__host__ void scratch_programmable_bootstrap_amortized(
__host__ void scratch_bootstrap_amortized(
cuda_stream_t *stream, int8_t **pbs_buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
uint64_t full_sm =
get_buffer_size_full_sm_programmable_bootstrap_amortized<Torus>(
polynomial_size, glwe_dimension);
uint64_t full_sm = get_buffer_size_full_sm_bootstrap_amortized<Torus>(
polynomial_size, glwe_dimension);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap_amortized<Torus>(
polynomial_size);
get_buffer_size_partial_sm_bootstrap_amortized<Torus>(polynomial_size);
if (max_shared_memory >= partial_sm && max_shared_memory < full_sm) {
cudaFuncSetAttribute(
device_programmable_bootstrap_amortized<Torus, params, PARTIALSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, partial_sm);
cudaFuncSetCacheConfig(
device_programmable_bootstrap_amortized<Torus, params, PARTIALSM>,
cudaFuncCachePreferShared);
cudaFuncSetAttribute(device_bootstrap_amortized<Torus, params, PARTIALSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
partial_sm);
cudaFuncSetCacheConfig(device_bootstrap_amortized<Torus, params, PARTIALSM>,
cudaFuncCachePreferShared);
} else if (max_shared_memory >= partial_sm) {
check_cuda_error(cudaFuncSetAttribute(
device_programmable_bootstrap_amortized<Torus, params, FULLSM>,
device_bootstrap_amortized<Torus, params, FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm));
check_cuda_error(cudaFuncSetCacheConfig(
device_programmable_bootstrap_amortized<Torus, params, FULLSM>,
device_bootstrap_amortized<Torus, params, FULLSM>,
cudaFuncCachePreferShared));
}
if (allocate_gpu_memory) {
uint64_t buffer_size =
get_buffer_size_programmable_bootstrap_amortized<Torus>(
glwe_dimension, polynomial_size, input_lwe_ciphertext_count,
max_shared_memory);
uint64_t buffer_size = get_buffer_size_bootstrap_amortized<Torus>(
glwe_dimension, polynomial_size, input_lwe_ciphertext_count,
max_shared_memory);
*pbs_buffer = (int8_t *)cuda_malloc_async(buffer_size, stream);
check_cuda_error(cudaGetLastError());
}
}
template <typename Torus, class params>
__host__ void host_programmable_bootstrap_amortized(
__host__ void host_bootstrap_amortized(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, double2 *bootstrapping_key, int8_t *pbs_buffer,
@@ -300,13 +292,11 @@ __host__ void host_programmable_bootstrap_amortized(
uint32_t max_shared_memory) {
cudaSetDevice(stream->gpu_index);
uint64_t SM_FULL =
get_buffer_size_full_sm_programmable_bootstrap_amortized<Torus>(
polynomial_size, glwe_dimension);
uint64_t SM_FULL = get_buffer_size_full_sm_bootstrap_amortized<Torus>(
polynomial_size, glwe_dimension);
uint64_t SM_PART =
get_buffer_size_partial_sm_programmable_bootstrap_amortized<Torus>(
polynomial_size);
get_buffer_size_partial_sm_bootstrap_amortized<Torus>(polynomial_size);
uint64_t DM_PART = SM_FULL - SM_PART;
@@ -326,14 +316,14 @@ __host__ void host_programmable_bootstrap_amortized(
// from one of three templates (no use, partial use or full use
// of shared memory)
if (max_shared_memory < SM_PART) {
device_programmable_bootstrap_amortized<Torus, params, NOSM>
device_bootstrap_amortized<Torus, params, NOSM>
<<<grid, thds, 0, stream->stream>>>(
lwe_array_out, lwe_output_indexes, lut_vector, lut_vector_indexes,
lwe_array_in, lwe_input_indexes, bootstrapping_key, pbs_buffer,
glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, lwe_idx, DM_FULL);
} else if (max_shared_memory < SM_FULL) {
device_programmable_bootstrap_amortized<Torus, params, PARTIALSM>
device_bootstrap_amortized<Torus, params, PARTIALSM>
<<<grid, thds, SM_PART, stream->stream>>>(
lwe_array_out, lwe_output_indexes, lut_vector, lut_vector_indexes,
lwe_array_in, lwe_input_indexes, bootstrapping_key, pbs_buffer,
@@ -345,7 +335,7 @@ __host__ void host_programmable_bootstrap_amortized(
// device then has to be allocated dynamically.
// For lower compute capabilities, this call
// just does nothing and the amount of shared memory used is 48 KB
device_programmable_bootstrap_amortized<Torus, params, FULLSM>
device_bootstrap_amortized<Torus, params, FULLSM>
<<<grid, thds, SM_FULL, stream->stream>>>(
lwe_array_out, lwe_output_indexes, lut_vector, lut_vector_indexes,
lwe_array_in, lwe_input_indexes, bootstrapping_key, pbs_buffer,
@@ -364,8 +354,8 @@ int cuda_get_pbs_per_gpu(int polynomial_size) {
cudaDeviceProp device_properties;
cudaGetDeviceProperties(&device_properties, 0);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&blocks_per_sm, device_programmable_bootstrap_amortized<Torus, params>,
num_threads, 0);
&blocks_per_sm, device_bootstrap_amortized<Torus, params>, num_threads,
0);
return device_properties.multiProcessorCount * blocks_per_sm;
}
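
The NOSM / PARTIALSM / FULLSM launch paths above are chosen by comparing the device's shared-memory budget against two buffer sizes. The partial size is exactly the FFT accumulator; the hunk truncates the full-size formula, so the second term below is an assumption added for illustration. For N = 1024, k = 1 and 64-bit torus values this gives 8 KB partial and about 40 KB full, so a 48 KB budget selects FULLSM:

#include <cstdint>
#include <cstdio>

// From this header: only the FFT accumulator lives in shared memory.
uint64_t partial_sm(uint32_t N) { return 16ULL * N / 2; } // sizeof(double2)

// Assumed continuation of the truncated formula: two Torus accumulators
// plus the FFT buffer, with Torus = uint64_t.
uint64_t full_sm(uint32_t N, uint32_t k) {
  return 8ULL * N * (k + 1) + 8ULL * N * (k + 1) + 16ULL * N / 2;
}

int main() {
  uint32_t N = 1024, k = 1;
  uint64_t part = partial_sm(N);      // 8192 bytes
  uint64_t full = full_sm(N, k);      // 40960 bytes under the assumption
  uint64_t max_shared_memory = 49152; // 48 KB, a common per-block budget

  if (max_shared_memory < part)
    printf("NOSM: all working state in global memory\n");
  else if (max_shared_memory < full)
    printf("PARTIALSM: %llu bytes of dynamic shared memory\n",
           (unsigned long long)part);
  else
    printf("FULLSM: %llu bytes of dynamic shared memory\n",
           (unsigned long long)full);
  return 0;
}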

View File

@@ -1,5 +1,5 @@
#ifndef CUDA_CG_PBS_CUH
#define CUDA_CG_PBS_CUH
#ifndef CUDA_FAST_LOWLAT_PBS_CUH
#define CUDA_FAST_LOWLAT_PBS_CUH
#ifdef __CDT_PARSER__
#undef __CUDA_RUNTIME_H__
@@ -8,6 +8,7 @@
#include "cooperative_groups.h"
#include "bootstrap.h"
#include "crypto/gadget.cuh"
#include "crypto/torus.cuh"
#include "device.h"
@@ -15,10 +16,9 @@
#include "fft/twiddles.cuh"
#include "polynomial/parameters.cuh"
#include "polynomial/polynomial_math.cuh"
#include "programmable_bootstrap.h"
#include "types/complex/operations.cuh"
// Cooperative groups are used for this implementation
// Cooperative groups are used in the low latency PBS
using namespace cooperative_groups;
namespace cg = cooperative_groups;
@@ -114,7 +114,8 @@ __device__ void mul_ggsw_glwe(Torus *accumulator, double2 *fft,
}
/*
* Kernel that computes the classical PBS using cooperative groups
* Kernel launched by the low latency version of the
* bootstrap, which uses cooperative groups
*
* - lwe_array_out: vector of output lwe s, with length
* (glwe_dimension * polynomial_size+1)*num_samples
@@ -127,7 +128,7 @@ __device__ void mul_ggsw_glwe(Torus *accumulator, double2 *fft,
* Each y-block computes one element of the lwe_array_out.
*/
template <typename Torus, class params, sharedMemDegree SMD>
__global__ void device_programmable_bootstrap_cg(
__global__ void device_bootstrap_fast_low_latency(
Torus *lwe_array_out, Torus *lwe_output_indexes, Torus *lut_vector,
Torus *lut_vector_indexes, Torus *lwe_array_in, Torus *lwe_input_indexes,
double2 *bootstrapping_key, double2 *join_buffer, uint32_t lwe_dimension,
@@ -245,50 +246,51 @@ __global__ void device_programmable_bootstrap_cg(
}
template <typename Torus, typename STorus, typename params>
__host__ void scratch_programmable_bootstrap_cg(
cuda_stream_t *stream, pbs_buffer<Torus, CLASSICAL> **buffer,
__host__ void scratch_bootstrap_fast_low_latency(
cuda_stream_t *stream, pbs_buffer<Torus, LOW_LAT> **buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
uint64_t full_sm =
get_buffer_size_full_sm_programmable_bootstrap_cg<Torus>(polynomial_size);
uint64_t full_sm = get_buffer_size_full_sm_bootstrap_fast_low_latency<Torus>(
polynomial_size);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap_cg<Torus>(
get_buffer_size_partial_sm_bootstrap_fast_low_latency<Torus>(
polynomial_size);
if (max_shared_memory >= partial_sm && max_shared_memory < full_sm) {
check_cuda_error(cudaFuncSetAttribute(
device_programmable_bootstrap_cg<Torus, params, PARTIALSM>,
device_bootstrap_fast_low_latency<Torus, params, PARTIALSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, partial_sm));
cudaFuncSetCacheConfig(
device_programmable_bootstrap_cg<Torus, params, PARTIALSM>,
device_bootstrap_fast_low_latency<Torus, params, PARTIALSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else if (max_shared_memory >= partial_sm) {
check_cuda_error(cudaFuncSetAttribute(
device_programmable_bootstrap_cg<Torus, params, FULLSM>,
device_bootstrap_fast_low_latency<Torus, params, FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm));
cudaFuncSetCacheConfig(
device_programmable_bootstrap_cg<Torus, params, FULLSM>,
device_bootstrap_fast_low_latency<Torus, params, FULLSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
}
*buffer = new pbs_buffer<Torus, CLASSICAL>(
*buffer = new pbs_buffer<Torus, LOW_LAT>(
stream, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, PBS_VARIANT::CG, allocate_gpu_memory);
input_lwe_ciphertext_count, PBS_VARIANT::FAST, allocate_gpu_memory);
}
/*
* Host wrapper
* Host wrapper to the low latency version
* of bootstrapping
*/
template <typename Torus, class params>
__host__ void host_programmable_bootstrap_cg(
__host__ void host_bootstrap_fast_low_latency(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<Torus, CLASSICAL> *buffer, uint32_t glwe_dimension,
pbs_buffer<Torus, LOW_LAT> *buffer, uint32_t glwe_dimension,
uint32_t lwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t input_lwe_ciphertext_count,
uint32_t num_luts, uint32_t max_shared_memory) {
@@ -296,11 +298,11 @@ __host__ void host_programmable_bootstrap_cg(
// With SM each block corresponds to either the mask or body, no need to
// duplicate data for each
uint64_t full_sm =
get_buffer_size_full_sm_programmable_bootstrap_cg<Torus>(polynomial_size);
uint64_t full_sm = get_buffer_size_full_sm_bootstrap_fast_low_latency<Torus>(
polynomial_size);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap_cg<Torus>(
get_buffer_size_partial_sm_bootstrap_fast_low_latency<Torus>(
polynomial_size);
uint64_t full_dm = full_sm;
@@ -331,27 +333,28 @@ __host__ void host_programmable_bootstrap_cg(
if (max_shared_memory < partial_sm) {
kernel_args[13] = &full_dm;
check_cuda_error(cudaLaunchCooperativeKernel(
(void *)device_programmable_bootstrap_cg<Torus, params, NOSM>, grid,
(void *)device_bootstrap_fast_low_latency<Torus, params, NOSM>, grid,
thds, (void **)kernel_args, 0, stream->stream));
} else if (max_shared_memory < full_sm) {
kernel_args[13] = &partial_dm;
check_cuda_error(cudaLaunchCooperativeKernel(
(void *)device_programmable_bootstrap_cg<Torus, params, PARTIALSM>,
(void *)device_bootstrap_fast_low_latency<Torus, params, PARTIALSM>,
grid, thds, (void **)kernel_args, partial_sm, stream->stream));
} else {
int no_dm = 0;
kernel_args[13] = &no_dm;
check_cuda_error(cudaLaunchCooperativeKernel(
(void *)device_programmable_bootstrap_cg<Torus, params, FULLSM>, grid,
(void *)device_bootstrap_fast_low_latency<Torus, params, FULLSM>, grid,
thds, (void **)kernel_args, full_sm, stream->stream));
}
check_cuda_error(cudaGetLastError());
}
// Verify if the grid size satisfies the cooperative group constraints
// Verify if the grid size for the low latency kernel satisfies the cooperative
// group constraints
template <typename Torus, class params>
__host__ bool verify_cuda_programmable_bootstrap_cg_grid_size(
__host__ bool verify_cuda_bootstrap_fast_low_latency_grid_size(
int glwe_dimension, int level_count, int num_samples,
uint32_t max_shared_memory) {
@@ -361,10 +364,10 @@ __host__ bool verify_cuda_programmable_bootstrap_cg_grid_size(
// Calculate the dimension of the kernel
uint64_t full_sm =
get_buffer_size_full_sm_programmable_bootstrap_cg<Torus>(params::degree);
get_buffer_size_full_sm_bootstrap_fast_low_latency<Torus>(params::degree);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap_cg<Torus>(
get_buffer_size_partial_sm_bootstrap_fast_low_latency<Torus>(
params::degree);
int thds = params::degree / params::opt;
@@ -376,16 +379,17 @@ __host__ bool verify_cuda_programmable_bootstrap_cg_grid_size(
if (max_shared_memory < partial_sm) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks_per_sm,
(void *)device_programmable_bootstrap_cg<Torus, params, NOSM>, thds, 0);
(void *)device_bootstrap_fast_low_latency<Torus, params, NOSM>, thds,
0);
} else if (max_shared_memory < full_sm) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks_per_sm,
(void *)device_programmable_bootstrap_cg<Torus, params, PARTIALSM>,
(void *)device_bootstrap_fast_low_latency<Torus, params, PARTIALSM>,
thds, partial_sm);
} else {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks_per_sm,
(void *)device_programmable_bootstrap_cg<Torus, params, FULLSM>, thds,
(void *)device_bootstrap_fast_low_latency<Torus, params, FULLSM>, thds,
full_sm);
}
@@ -395,45 +399,46 @@ __host__ bool verify_cuda_programmable_bootstrap_cg_grid_size(
return number_of_blocks <= max_active_blocks_per_sm * number_of_sm;
}
// Verify if the grid size satisfies the cooperative group constraints
// Verify if the grid size for the low latency kernel satisfies the cooperative
// group constraints
template <typename Torus>
__host__ bool supports_cooperative_groups_on_programmable_bootstrap(
__host__ bool supports_cooperative_groups_on_lowlat_pbs(
int glwe_dimension, int polynomial_size, int level_count, int num_samples,
uint32_t max_shared_memory) {
switch (polynomial_size) {
case 256:
return verify_cuda_programmable_bootstrap_cg_grid_size<
return verify_cuda_bootstrap_fast_low_latency_grid_size<
Torus, AmortizedDegree<256>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 512:
return verify_cuda_programmable_bootstrap_cg_grid_size<
return verify_cuda_bootstrap_fast_low_latency_grid_size<
Torus, AmortizedDegree<512>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 1024:
return verify_cuda_programmable_bootstrap_cg_grid_size<
return verify_cuda_bootstrap_fast_low_latency_grid_size<
Torus, AmortizedDegree<1024>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 2048:
return verify_cuda_programmable_bootstrap_cg_grid_size<
return verify_cuda_bootstrap_fast_low_latency_grid_size<
Torus, AmortizedDegree<2048>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 4096:
return verify_cuda_programmable_bootstrap_cg_grid_size<
return verify_cuda_bootstrap_fast_low_latency_grid_size<
Torus, AmortizedDegree<4096>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 8192:
return verify_cuda_programmable_bootstrap_cg_grid_size<
return verify_cuda_bootstrap_fast_low_latency_grid_size<
Torus, AmortizedDegree<8192>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 16384:
return verify_cuda_programmable_bootstrap_cg_grid_size<
return verify_cuda_bootstrap_fast_low_latency_grid_size<
Torus, AmortizedDegree<16384>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
default:
PANIC("Cuda error (classical PBS): unsupported polynomial size. "
PANIC("Cuda error (low latency PBS): unsupported polynomial size. "
"Supported N's are powers of two"
" in the interval [256..16384].")
}
}
#endif // CUDA_CG_PBS_CUH
#endif // CUDA_FAST_LOWLAT_PBS_CUH
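
The grid-size verification above exists because cudaLaunchCooperativeKernel requires the whole grid to be resident on the device at once. A condensed sketch of that residency check against the CUDA occupancy API, with a dummy kernel standing in for the PBS kernel:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() {}

// Cooperative launches need every block resident simultaneously, hence:
// number_of_blocks <= max_active_blocks_per_sm * number_of_sm.
bool grid_fits(int number_of_blocks, int threads_per_block,
               size_t dynamic_shared_memory) {
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  int max_active_blocks_per_sm = 0;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(
      &max_active_blocks_per_sm, (void *)dummy_kernel, threads_per_block,
      dynamic_shared_memory);
  return number_of_blocks <=
         max_active_blocks_per_sm * prop.multiProcessorCount;
}

int main() {
  // e.g. grid = (glwe_dimension + 1) * level_count blocks per sample
  printf("fits: %d\n", grid_fits(2 * 1 * 100, 256, 0));
  return 0;
}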

View File

@@ -1,6 +1,8 @@
#ifndef CUDA_FAST_MULTIBIT_PBS_CUH
#define CUDA_FAST_MULTIBIT_PBS_CUH
#include "bootstrap.h"
#include "bootstrap_multibit.cuh"
#include "cooperative_groups.h"
#include "crypto/gadget.cuh"
#include "crypto/ggsw.cuh"
@@ -11,21 +13,18 @@
#include "polynomial/functions.cuh"
#include "polynomial/parameters.cuh"
#include "polynomial/polynomial_math.cuh"
#include "programmable_bootstrap.h"
#include "programmable_bootstrap_multibit.cuh"
#include "types/complex/operations.cuh"
#include <vector>
template <typename Torus, class params, sharedMemDegree SMD>
__global__ void device_multi_bit_programmable_bootstrap_cg_accumulate(
template <typename Torus, class params>
__global__ void device_multi_bit_bootstrap_fast_accumulate(
Torus *lwe_array_out, Torus *lwe_output_indexes, Torus *lut_vector,
Torus *lut_vector_indexes, Torus *lwe_array_in, Torus *lwe_input_indexes,
double2 *keybundle_array, double2 *join_buffer, Torus *global_accumulator,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t base_log, uint32_t level_count, uint32_t grouping_factor,
uint32_t lwe_offset, uint32_t lwe_chunk_size,
uint32_t keybundle_size_per_input, int8_t *device_mem,
uint64_t device_memory_size_per_block) {
uint32_t keybundle_size_per_input) {
grid_group grid = this_grid();
@@ -35,21 +34,14 @@ __global__ void device_multi_bit_programmable_bootstrap_cg_accumulate(
extern __shared__ int8_t sharedmem[];
int8_t *selected_memory;
if constexpr (SMD == FULLSM) {
selected_memory = sharedmem;
} else {
int block_index = blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y;
selected_memory = &device_mem[block_index * device_memory_size_per_block];
}
selected_memory = sharedmem;
Torus *accumulator = (Torus *)selected_memory;
double2 *accumulator_fft =
(double2 *)accumulator +
(ptrdiff_t)(sizeof(Torus) * polynomial_size / sizeof(double2));
if constexpr (SMD == PARTIALSM)
accumulator_fft = (double2 *)sharedmem;
// We always compute the pointer with most restrictive alignment to avoid
// alignment issues
double2 *accumulator_fft = (double2 *)selected_memory;
Torus *accumulator =
(Torus *)accumulator_fft +
(ptrdiff_t)(sizeof(double2) * polynomial_size / 2 / sizeof(Torus));
// The third dimension of the block is used to determine on which ciphertext
// this block is operating, in the case of batch bootstraps
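The pointer order above is deliberate: double2 carries a 16-byte alignment requirement while a 64-bit Torus needs only 8 bytes, so the FFT buffer sits at the base of the dynamic shared allocation and the accumulator is carved out behind it. A minimal sketch of the idiom, assuming Torus = uint64_t and a compile-time degree N (the helper name is illustrative):

template <int N>
__device__ void carve_shared_buffers(double2 **accumulator_fft,
                                     uint64_t **accumulator) {
  extern __shared__ int8_t sharedmem[];
  // Most restrictive alignment first: double2 (16 bytes) at the base...
  *accumulator_fft = (double2 *)sharedmem;
  // ...then the 8-byte accumulator, N / 2 double2 elements further on.
  *accumulator = (uint64_t *)(*accumulator_fft + N / 2);
}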
@@ -136,19 +128,12 @@ __global__ void device_multi_bit_programmable_bootstrap_cg_accumulate(
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_partial_sm_cg_multibit_programmable_bootstrap(
uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size; // accumulator
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_cg_multibit_programmable_bootstrap(
uint32_t polynomial_size) {
get_buffer_size_full_sm_fast_multibit_bootstrap(uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size * 2; // accumulator
}
template <typename Torus>
__host__ __device__ uint64_t get_buffer_size_cg_multibit_programmable_bootstrap(
__host__ __device__ uint64_t get_buffer_size_fast_multibit_bootstrap(
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t input_lwe_ciphertext_count,
uint32_t grouping_factor, uint32_t lwe_chunk_size,
@@ -168,7 +153,7 @@ __host__ __device__ uint64_t get_buffer_size_cg_multibit_programmable_bootstrap(
}
template <typename Torus, typename STorus, typename params>
__host__ void scratch_cg_multi_bit_programmable_bootstrap(
__host__ void scratch_fast_multi_bit_pbs(
cuda_stream_t *stream, pbs_buffer<uint64_t, MULTI_BIT> **buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t input_lwe_ciphertext_count,
@@ -178,106 +163,69 @@ __host__ void scratch_cg_multi_bit_programmable_bootstrap(
cudaSetDevice(stream->gpu_index);
uint64_t full_sm_keybundle =
get_buffer_size_full_sm_multibit_programmable_bootstrap_keybundle<Torus>(
polynomial_size);
uint64_t full_sm_cg_accumulate =
get_buffer_size_full_sm_cg_multibit_programmable_bootstrap<Torus>(
polynomial_size);
uint64_t partial_sm_cg_accumulate =
get_buffer_size_partial_sm_cg_multibit_programmable_bootstrap<Torus>(
get_buffer_size_full_sm_multibit_bootstrap_keybundle<Torus>(
polynomial_size);
uint64_t full_sm_accumulate =
get_buffer_size_full_sm_fast_multibit_bootstrap<Torus>(polynomial_size);
if (max_shared_memory < full_sm_keybundle) {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_keybundle<Torus, params, NOSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, 0));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_keybundle<Torus, params, NOSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_keybundle<Torus, params,
FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm_keybundle));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_keybundle<Torus, params,
FULLSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
}
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_bootstrap_keybundle<Torus, params>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm_keybundle));
cudaFuncSetCacheConfig(device_multi_bit_bootstrap_keybundle<Torus, params>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
if (max_shared_memory < partial_sm_cg_accumulate) {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_cg_accumulate<Torus, params,
NOSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, 0));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_cg_accumulate<Torus, params,
NOSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else if (max_shared_memory < full_sm_cg_accumulate) {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_cg_accumulate<Torus, params,
PARTIALSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, partial_sm_cg_accumulate));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_cg_accumulate<Torus, params,
PARTIALSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_cg_accumulate<Torus, params,
FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm_cg_accumulate));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_cg_accumulate<Torus, params,
FULLSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
}
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_bootstrap_fast_accumulate<Torus, params>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm_accumulate));
cudaFuncSetCacheConfig(
device_multi_bit_bootstrap_fast_accumulate<Torus, params>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
if (!lwe_chunk_size)
lwe_chunk_size = get_lwe_chunk_size(input_lwe_ciphertext_count);
lwe_chunk_size = get_average_lwe_chunk_size(
lwe_dimension, level_count, glwe_dimension, input_lwe_ciphertext_count);
*buffer = new pbs_buffer<uint64_t, MULTI_BIT>(
stream, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, lwe_chunk_size, PBS_VARIANT::CG,
input_lwe_ciphertext_count, lwe_chunk_size, PBS_VARIANT::FAST,
allocate_gpu_memory);
}
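The cudaFuncSetAttribute / cudaFuncSetCacheConfig pairs in this scratch function are the standard opt-in for dynamic shared memory beyond the default 48 KB carve-out. The pattern in isolation, as a minimal sketch (the helper name is illustrative; check_cuda_error is the macro already used throughout this backend):

static void enable_large_dynamic_shared_memory(const void *kernel, int bytes) {
  // Launches requesting more than 48 KB of dynamic shared memory fail
  // unless the kernel has opted in through this attribute.
  check_cuda_error(cudaFuncSetAttribute(
      kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, bytes));
  // Favor shared memory over L1 for this kernel.
  cudaFuncSetCacheConfig(kernel, cudaFuncCachePreferShared);
  check_cuda_error(cudaGetLastError());
}

With such a helper, each configuration block above would collapse to a single call, e.g. enable_large_dynamic_shared_memory((void *)device_multi_bit_bootstrap_fast_accumulate<Torus, params>, full_sm_accumulate).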
template <typename Torus, class params>
__host__ void execute_external_product_loop(
cuda_stream_t *stream, Torus *lut_vector, Torus *lut_vector_indexes,
Torus *lwe_array_in, Torus *lwe_input_indexes, Torus *lwe_array_out,
Torus *lwe_output_indexes, pbs_buffer<Torus, MULTI_BIT> *buffer,
uint32_t num_samples, uint32_t lwe_dimension, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t grouping_factor, uint32_t base_log,
uint32_t level_count, uint32_t lwe_chunk_size, uint32_t max_shared_memory,
int lwe_offset) {
template <typename Torus, typename STorus, class params>
__host__ void host_fast_multi_bit_pbs(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, uint64_t *bootstrapping_key,
pbs_buffer<Torus, MULTI_BIT> *pbs_buffer, uint32_t glwe_dimension,
uint32_t lwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory,
uint32_t lwe_chunk_size = 0) {
cudaSetDevice(stream->gpu_index);
uint64_t full_dm =
get_buffer_size_full_sm_cg_multibit_programmable_bootstrap<Torus>(
if (!lwe_chunk_size)
lwe_chunk_size = get_average_lwe_chunk_size(lwe_dimension, level_count,
glwe_dimension, num_samples);
//
double2 *keybundle_fft = pbs_buffer->keybundle_fft;
Torus *global_accumulator = pbs_buffer->global_accumulator;
double2 *buffer_fft = pbs_buffer->global_accumulator_fft;
//
uint64_t full_sm_keybundle =
get_buffer_size_full_sm_multibit_bootstrap_keybundle<Torus>(
polynomial_size);
uint64_t partial_dm =
get_buffer_size_partial_sm_cg_multibit_programmable_bootstrap<Torus>(
polynomial_size);
uint64_t no_dm = 0;
uint64_t full_sm_accumulate =
get_buffer_size_full_sm_fast_multibit_bootstrap<Torus>(polynomial_size);
uint32_t keybundle_size_per_input =
lwe_chunk_size * level_count * (glwe_dimension + 1) *
(glwe_dimension + 1) * (polynomial_size / 2);
uint32_t chunk_size =
std::min(lwe_chunk_size, (lwe_dimension / grouping_factor) - lwe_offset);
auto d_mem = buffer->d_mem_acc_cg;
auto keybundle_fft = buffer->keybundle_fft;
auto global_accumulator = buffer->global_accumulator;
auto buffer_fft = buffer->global_accumulator_fft;
void *kernel_args[20];
//
void *kernel_args[18];
kernel_args[0] = &lwe_array_out;
kernel_args[1] = &lwe_output_indexes;
kernel_args[2] = &lut_vector;
@@ -293,87 +241,55 @@ __host__ void execute_external_product_loop(
kernel_args[12] = &base_log;
kernel_args[13] = &level_count;
kernel_args[14] = &grouping_factor;
kernel_args[15] = &lwe_offset;
kernel_args[16] = &chunk_size;
kernel_args[17] = &keybundle_size_per_input;
kernel_args[18] = &d_mem;
//
dim3 grid_accumulate(level_count, glwe_dimension + 1, num_samples);
dim3 thds(polynomial_size / params::opt, 1, 1);
if (max_shared_memory < partial_dm) {
kernel_args[19] = &full_dm;
check_cuda_error(cudaLaunchCooperativeKernel(
(void *)device_multi_bit_programmable_bootstrap_cg_accumulate<
Torus, params, NOSM>,
grid_accumulate, thds, (void **)kernel_args, 0, stream->stream));
} else if (max_shared_memory < full_dm) {
kernel_args[19] = &partial_dm;
check_cuda_error(cudaLaunchCooperativeKernel(
(void *)device_multi_bit_programmable_bootstrap_cg_accumulate<
Torus, params, PARTIALSM>,
grid_accumulate, thds, (void **)kernel_args, partial_dm,
stream->stream));
} else {
kernel_args[19] = &no_dm;
check_cuda_error(cudaLaunchCooperativeKernel(
(void *)device_multi_bit_programmable_bootstrap_cg_accumulate<
Torus, params, FULLSM>,
grid_accumulate, thds, (void **)kernel_args, full_dm, stream->stream));
}
}
template <typename Torus, typename STorus, class params>
__host__ void host_cg_multi_bit_programmable_bootstrap(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, uint64_t *bootstrapping_key,
pbs_buffer<Torus, MULTI_BIT> *buffer, uint32_t glwe_dimension,
uint32_t lwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory,
uint32_t lwe_chunk_size = 0) {
cudaSetDevice(stream->gpu_index);
if (!lwe_chunk_size)
lwe_chunk_size = get_lwe_chunk_size(num_samples);
for (uint32_t lwe_offset = 0; lwe_offset < (lwe_dimension / grouping_factor);
lwe_offset += lwe_chunk_size) {
// Compute a keybundle
execute_compute_keybundle<Torus, params>(
stream, lwe_array_in, lwe_input_indexes, bootstrapping_key, buffer,
num_samples, lwe_dimension, glwe_dimension, polynomial_size,
grouping_factor, base_log, level_count, max_shared_memory,
lwe_chunk_size, lwe_offset);
uint32_t chunk_size = std::min(
lwe_chunk_size, (lwe_dimension / grouping_factor) - lwe_offset);
// Accumulate
execute_external_product_loop<Torus, params>(
stream, lut_vector, lut_vector_indexes, lwe_array_in, lwe_input_indexes,
lwe_array_out, lwe_output_indexes, buffer, num_samples, lwe_dimension,
glwe_dimension, polynomial_size, grouping_factor, base_log, level_count,
lwe_chunk_size, max_shared_memory, lwe_offset);
// Compute a keybundle
dim3 grid_keybundle(num_samples * chunk_size,
(glwe_dimension + 1) * (glwe_dimension + 1),
level_count);
device_multi_bit_bootstrap_keybundle<Torus, params>
<<<grid_keybundle, thds, full_sm_keybundle, stream->stream>>>(
lwe_array_in, lwe_input_indexes, keybundle_fft, bootstrapping_key,
lwe_dimension, glwe_dimension, polynomial_size, grouping_factor,
base_log, level_count, lwe_offset, chunk_size,
keybundle_size_per_input);
check_cuda_error(cudaGetLastError());
kernel_args[15] = &lwe_offset;
kernel_args[16] = &chunk_size;
check_cuda_error(cudaLaunchCooperativeKernel(
(void *)device_multi_bit_bootstrap_fast_accumulate<Torus, params>,
grid_accumulate, thds, (void **)kernel_args, full_sm_accumulate,
stream->stream));
}
}
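The accumulate step above cannot use the usual <<<...>>> launch syntax: grid.sync() inside the kernel is only legal for grids started through cudaLaunchCooperativeKernel, which receives its arguments as an array of pointers, one per kernel parameter, in declaration order. A minimal sketch of the marshalling idiom with a two-parameter placeholder kernel (cuda_stream_t is this backend's existing stream wrapper):

__global__ void my_coop_kernel(int *data, uint32_t n); // placeholder, defined elsewhere

void launch_cooperative(cuda_stream_t *stream, dim3 grid, dim3 block,
                        size_t shared_bytes, int *data, uint32_t n) {
  // Each entry points at the host-side copy of one kernel argument.
  void *kernel_args[2] = {&data, &n};
  check_cuda_error(cudaLaunchCooperativeKernel(
      (void *)my_coop_kernel, grid, block, kernel_args, shared_bytes,
      stream->stream));
}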
// Verify if the grid size satisfies the cooperative group constraints
// Verify if the grid size for the low latency kernel satisfies the cooperative
// group constraints
template <typename Torus, class params>
__host__ bool verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size(
int glwe_dimension, int level_count, int num_samples,
uint32_t max_shared_memory) {
__host__ bool
verify_cuda_bootstrap_fast_multi_bit_grid_size(int glwe_dimension,
int level_count, int num_samples,
uint32_t max_shared_memory) {
// If Cooperative Groups is not supported, no need to check anything else
if (!cuda_check_support_cooperative_groups())
return false;
// Calculate the dimension of the kernel
uint64_t full_sm_cg_accumulate =
get_buffer_size_full_sm_cg_multibit_programmable_bootstrap<Torus>(
params::degree);
uint64_t partial_sm_cg_accumulate =
get_buffer_size_partial_sm_cg_multibit_programmable_bootstrap<Torus>(
params::degree);
uint64_t full_sm =
get_buffer_size_full_sm_fast_multibit_bootstrap<Torus>(params::degree);
int thds = params::degree / params::opt;
@@ -381,25 +297,10 @@ __host__ bool verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size(
int number_of_blocks = level_count * (glwe_dimension + 1) * num_samples;
int max_active_blocks_per_sm;
if (max_shared_memory < partial_sm_cg_accumulate) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks_per_sm,
(void *)device_multi_bit_programmable_bootstrap_cg_accumulate<
Torus, params, NOSM>,
thds, 0);
} else if (max_shared_memory < full_sm_cg_accumulate) {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks_per_sm,
(void *)device_multi_bit_programmable_bootstrap_cg_accumulate<
Torus, params, PARTIALSM>,
thds, partial_sm_cg_accumulate);
} else {
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks_per_sm,
(void *)device_multi_bit_programmable_bootstrap_cg_accumulate<
Torus, params, FULLSM>,
thds, full_sm_cg_accumulate);
}
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks_per_sm,
(void *)device_multi_bit_bootstrap_fast_accumulate<Torus, params>, thds,
full_sm);
// Get the number of streaming multiprocessors
int number_of_sm = 0;
@@ -410,36 +311,36 @@ __host__ bool verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size(
// Verify if the grid size for the multi-bit kernel satisfies the cooperative
// group constraints
template <typename Torus>
__host__ bool supports_cooperative_groups_on_multibit_programmable_bootstrap(
__host__ bool supports_cooperative_groups_on_multibit_pbs(
int glwe_dimension, int polynomial_size, int level_count, int num_samples,
uint32_t max_shared_memory) {
switch (polynomial_size) {
case 256:
return verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size<
Torus, AmortizedDegree<256>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
return verify_cuda_bootstrap_fast_multi_bit_grid_size<Torus,
AmortizedDegree<256>>(
glwe_dimension, level_count, num_samples, max_shared_memory);
case 512:
return verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size<
Torus, AmortizedDegree<512>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
return verify_cuda_bootstrap_fast_multi_bit_grid_size<Torus,
AmortizedDegree<512>>(
glwe_dimension, level_count, num_samples, max_shared_memory);
case 1024:
return verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size<
return verify_cuda_bootstrap_fast_multi_bit_grid_size<
Torus, AmortizedDegree<1024>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 2048:
return verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size<
return verify_cuda_bootstrap_fast_multi_bit_grid_size<
Torus, AmortizedDegree<2048>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 4096:
return verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size<
return verify_cuda_bootstrap_fast_multi_bit_grid_size<
Torus, AmortizedDegree<4096>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 8192:
return verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size<
return verify_cuda_bootstrap_fast_multi_bit_grid_size<
Torus, AmortizedDegree<8192>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
case 16384:
return verify_cuda_programmable_bootstrap_cg_multi_bit_grid_size<
return verify_cuda_bootstrap_fast_multi_bit_grid_size<
Torus, AmortizedDegree<16384>>(glwe_dimension, level_count, num_samples,
max_shared_memory);
default:

View File

@@ -1,13 +1,11 @@
#include "programmable_bootstrap_cg_classic.cuh"
#include "programmable_bootstrap_classic.cuh"
#include "bootstrap_fast_low_latency.cuh"
#include "bootstrap_low_latency.cuh"
template <typename Torus>
bool has_support_to_cuda_programmable_bootstrap_cg(uint32_t glwe_dimension,
uint32_t polynomial_size,
uint32_t level_count,
uint32_t num_samples,
uint32_t max_shared_memory) {
return supports_cooperative_groups_on_programmable_bootstrap<Torus>(
bool has_support_to_cuda_bootstrap_fast_low_latency(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t num_samples, uint32_t max_shared_memory) {
return supports_cooperative_groups_on_lowlat_pbs<Torus>(
glwe_dimension, polynomial_size, level_count, num_samples,
max_shared_memory);
}
@@ -15,117 +13,117 @@ bool has_support_to_cuda_programmable_bootstrap_cg(uint32_t glwe_dimension,
/*
* Returns the buffer size for 64-bit executions
*/
uint64_t get_buffer_size_programmable_bootstrap_64(
uint64_t get_buffer_size_bootstrap_low_latency_64(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory) {
if (has_support_to_cuda_programmable_bootstrap_cg<uint64_t>(
if (has_support_to_cuda_bootstrap_fast_low_latency<uint64_t>(
glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory))
return get_buffer_size_programmable_bootstrap_cg<uint64_t>(
return get_buffer_size_bootstrap_fast_low_latency<uint64_t>(
glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory);
else
return get_buffer_size_programmable_bootstrap_cg<uint64_t>(
return get_buffer_size_bootstrap_low_latency<uint64_t>(
glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory);
}
template <typename Torus, typename STorus>
void scratch_cuda_programmable_bootstrap_cg(
cuda_stream_t *stream, pbs_buffer<Torus, CLASSICAL> **pbs_buffer,
void scratch_cuda_fast_bootstrap_low_latency(
cuda_stream_t *stream, pbs_buffer<Torus, LOW_LAT> **pbs_buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory) {
switch (polynomial_size) {
case 256:
scratch_programmable_bootstrap_cg<Torus, STorus, AmortizedDegree<256>>(
scratch_bootstrap_fast_low_latency<Torus, STorus, AmortizedDegree<256>>(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 512:
scratch_programmable_bootstrap_cg<Torus, STorus, AmortizedDegree<512>>(
scratch_bootstrap_fast_low_latency<Torus, STorus, AmortizedDegree<512>>(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 1024:
scratch_programmable_bootstrap_cg<Torus, STorus, AmortizedDegree<1024>>(
scratch_bootstrap_fast_low_latency<Torus, STorus, AmortizedDegree<1024>>(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 2048:
scratch_programmable_bootstrap_cg<Torus, STorus, AmortizedDegree<2048>>(
scratch_bootstrap_fast_low_latency<Torus, STorus, AmortizedDegree<2048>>(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 4096:
scratch_programmable_bootstrap_cg<Torus, STorus, AmortizedDegree<4096>>(
scratch_bootstrap_fast_low_latency<Torus, STorus, AmortizedDegree<4096>>(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 8192:
scratch_programmable_bootstrap_cg<Torus, STorus, AmortizedDegree<8192>>(
scratch_bootstrap_fast_low_latency<Torus, STorus, AmortizedDegree<8192>>(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 16384:
scratch_programmable_bootstrap_cg<Torus, STorus, AmortizedDegree<16384>>(
scratch_bootstrap_fast_low_latency<Torus, STorus, AmortizedDegree<16384>>(
stream, pbs_buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
default:
PANIC("Cuda error (classical PBS): unsupported polynomial size. "
PANIC("Cuda error (low latency PBS): unsupported polynomial size. "
"Supported N's are powers of two"
" in the interval [256..16384].")
}
}
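These switch statements recur because polynomial_size is only known at run time while the kernels consume it as a compile-time template parameter; the switch is the bridge between the two. A minimal sketch of the idiom, assuming AmortizedDegree<N> is default-constructible (the dispatcher name and the generic-lambda style are illustrative, not part of this diff):

// Map a runtime power-of-two N onto a compile-time template parameter.
template <typename Fn> void dispatch_polynomial_size(uint32_t n, Fn run) {
  switch (n) {
  case 256:   run(AmortizedDegree<256>{});   break;
  case 512:   run(AmortizedDegree<512>{});   break;
  case 1024:  run(AmortizedDegree<1024>{});  break;
  case 2048:  run(AmortizedDegree<2048>{});  break;
  case 4096:  run(AmortizedDegree<4096>{});  break;
  case 8192:  run(AmortizedDegree<8192>{});  break;
  case 16384: run(AmortizedDegree<16384>{}); break;
  default:
    PANIC("Cuda error (low latency PBS): unsupported polynomial size. "
          "Supported N's are powers of two in the interval [256..16384].")
  }
}

A caller would then pass a C++14 generic lambda, e.g. dispatch_polynomial_size(polynomial_size, [&](auto degree) { scratch_bootstrap_low_latency<Torus, STorus, decltype(degree)>(stream, buffer, ...); }).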
template <typename Torus, typename STorus>
void scratch_cuda_programmable_bootstrap(
cuda_stream_t *stream, pbs_buffer<Torus, CLASSICAL> **buffer,
void scratch_cuda_bootstrap_low_latency(
cuda_stream_t *stream, pbs_buffer<Torus, LOW_LAT> **buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory) {
switch (polynomial_size) {
case 256:
scratch_programmable_bootstrap<Torus, STorus, AmortizedDegree<256>>(
scratch_bootstrap_low_latency<Torus, STorus, AmortizedDegree<256>>(
stream, buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 512:
scratch_programmable_bootstrap<Torus, STorus, AmortizedDegree<512>>(
scratch_bootstrap_low_latency<Torus, STorus, AmortizedDegree<512>>(
stream, buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 1024:
scratch_programmable_bootstrap<Torus, STorus, AmortizedDegree<1024>>(
scratch_bootstrap_low_latency<Torus, STorus, AmortizedDegree<1024>>(
stream, buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 2048:
scratch_programmable_bootstrap<Torus, STorus, AmortizedDegree<2048>>(
scratch_bootstrap_low_latency<Torus, STorus, AmortizedDegree<2048>>(
stream, buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 4096:
scratch_programmable_bootstrap<Torus, STorus, AmortizedDegree<4096>>(
scratch_bootstrap_low_latency<Torus, STorus, AmortizedDegree<4096>>(
stream, buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 8192:
scratch_programmable_bootstrap<Torus, STorus, AmortizedDegree<8192>>(
scratch_bootstrap_low_latency<Torus, STorus, AmortizedDegree<8192>>(
stream, buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
case 16384:
scratch_programmable_bootstrap<Torus, STorus, AmortizedDegree<16384>>(
scratch_bootstrap_low_latency<Torus, STorus, AmortizedDegree<16384>>(
stream, buffer, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory);
break;
default:
PANIC("Cuda error (classical PBS): unsupported polynomial size. "
PANIC("Cuda error (low latency PBS): unsupported polynomial size. "
"Supported N's are powers of two"
" in the interval [256..16384].")
}
@@ -133,192 +131,198 @@ void scratch_cuda_programmable_bootstrap(
/*
* This scratch function allocates the necessary amount of data on the GPU for
* the classical PBS on 32 bits inputs, into `buffer`. It also
* the low latency PBS on 32-bit inputs, into `buffer`. It also
* configures SM options on the GPU in case FULLSM or PARTIALSM mode is going to
* be used.
*/
void scratch_cuda_programmable_bootstrap_32(
void scratch_cuda_bootstrap_low_latency_32(
cuda_stream_t *stream, int8_t **buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory) {
if (has_support_to_cuda_programmable_bootstrap_cg<uint32_t>(
if (has_support_to_cuda_bootstrap_fast_low_latency<uint32_t>(
glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory))
scratch_cuda_programmable_bootstrap_cg<uint32_t, int32_t>(
stream, (pbs_buffer<uint32_t, CLASSICAL> **)buffer, glwe_dimension,
scratch_cuda_fast_bootstrap_low_latency<uint32_t, int32_t>(
stream, (pbs_buffer<uint32_t, LOW_LAT> **)buffer, glwe_dimension,
polynomial_size, level_count, input_lwe_ciphertext_count,
max_shared_memory, allocate_gpu_memory);
else
scratch_cuda_programmable_bootstrap<uint32_t, int32_t>(
stream, (pbs_buffer<uint32_t, CLASSICAL> **)buffer, glwe_dimension,
scratch_cuda_bootstrap_low_latency<uint32_t, int32_t>(
stream, (pbs_buffer<uint32_t, LOW_LAT> **)buffer, glwe_dimension,
polynomial_size, level_count, input_lwe_ciphertext_count,
max_shared_memory, allocate_gpu_memory);
}
/*
* This scratch function allocates the necessary amount of data on the GPU for
* the PBS on 64 bits inputs, into `buffer`. It also configures SM options on
* the GPU in case FULLSM or PARTIALSM mode is going to be used.
* the low latency PBS on 64-bit inputs, into `buffer`. It also
* configures SM options on the GPU in case FULLSM or PARTIALSM mode is going to
* be used.
*/
void scratch_cuda_programmable_bootstrap_64(
void scratch_cuda_bootstrap_low_latency_64(
cuda_stream_t *stream, int8_t **buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory) {
if (has_support_to_cuda_programmable_bootstrap_cg<uint64_t>(
if (has_support_to_cuda_bootstrap_fast_low_latency<uint64_t>(
glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory))
scratch_cuda_programmable_bootstrap_cg<uint64_t, int64_t>(
stream, (pbs_buffer<uint64_t, CLASSICAL> **)buffer, glwe_dimension,
scratch_cuda_fast_bootstrap_low_latency<uint64_t, int64_t>(
stream, (pbs_buffer<uint64_t, LOW_LAT> **)buffer, glwe_dimension,
polynomial_size, level_count, input_lwe_ciphertext_count,
max_shared_memory, allocate_gpu_memory);
else
scratch_cuda_programmable_bootstrap<uint64_t, int64_t>(
stream, (pbs_buffer<uint64_t, CLASSICAL> **)buffer, glwe_dimension,
scratch_cuda_bootstrap_low_latency<uint64_t, int64_t>(
stream, (pbs_buffer<uint64_t, LOW_LAT> **)buffer, glwe_dimension,
polynomial_size, level_count, input_lwe_ciphertext_count,
max_shared_memory, allocate_gpu_memory);
}
template <typename Torus>
void cuda_programmable_bootstrap_cg_lwe_ciphertext_vector(
void cuda_bootstrap_fast_low_latency_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<Torus, CLASSICAL> *buffer, uint32_t lwe_dimension,
pbs_buffer<Torus, LOW_LAT> *buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_luts,
uint32_t lwe_idx, uint32_t max_shared_memory) {
switch (polynomial_size) {
case 256:
host_programmable_bootstrap_cg<Torus, AmortizedDegree<256>>(
host_bootstrap_fast_low_latency<Torus, AmortizedDegree<256>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 512:
host_programmable_bootstrap_cg<Torus, Degree<512>>(
host_bootstrap_fast_low_latency<Torus, Degree<512>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 1024:
host_programmable_bootstrap_cg<Torus, Degree<1024>>(
host_bootstrap_fast_low_latency<Torus, Degree<1024>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 2048:
host_programmable_bootstrap_cg<Torus, AmortizedDegree<2048>>(
host_bootstrap_fast_low_latency<Torus, AmortizedDegree<2048>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 4096:
host_programmable_bootstrap_cg<Torus, AmortizedDegree<4096>>(
host_bootstrap_fast_low_latency<Torus, AmortizedDegree<4096>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 8192:
host_programmable_bootstrap_cg<Torus, AmortizedDegree<8192>>(
host_bootstrap_fast_low_latency<Torus, AmortizedDegree<8192>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 16384:
host_programmable_bootstrap_cg<Torus, AmortizedDegree<16384>>(
host_bootstrap_fast_low_latency<Torus, AmortizedDegree<16384>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
default:
PANIC("Cuda error (classical PBS): unsupported polynomial size. "
PANIC("Cuda error (low latency PBS): unsupported polynomial size. "
"Supported N's are powers of two"
" in the interval [256..16384].")
}
}
template <typename Torus>
void cuda_programmable_bootstrap_lwe_ciphertext_vector(
void cuda_bootstrap_low_latency_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<Torus, CLASSICAL> *buffer, uint32_t lwe_dimension,
pbs_buffer<Torus, LOW_LAT> *buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_luts,
uint32_t lwe_idx, uint32_t max_shared_memory) {
switch (polynomial_size) {
case 256:
host_programmable_bootstrap<Torus, AmortizedDegree<256>>(
host_bootstrap_low_latency<Torus, AmortizedDegree<256>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 512:
host_programmable_bootstrap<Torus, Degree<512>>(
host_bootstrap_low_latency<Torus, Degree<512>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 1024:
host_programmable_bootstrap<Torus, Degree<1024>>(
host_bootstrap_low_latency<Torus, Degree<1024>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 2048:
host_programmable_bootstrap<Torus, AmortizedDegree<2048>>(
host_bootstrap_low_latency<Torus, AmortizedDegree<2048>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 4096:
host_programmable_bootstrap<Torus, AmortizedDegree<4096>>(
host_bootstrap_low_latency<Torus, AmortizedDegree<4096>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 8192:
host_programmable_bootstrap<Torus, AmortizedDegree<8192>>(
host_bootstrap_low_latency<Torus, AmortizedDegree<8192>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
case 16384:
host_programmable_bootstrap<Torus, AmortizedDegree<16384>>(
host_bootstrap_low_latency<Torus, AmortizedDegree<16384>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
buffer, glwe_dimension, lwe_dimension, polynomial_size, base_log,
level_count, num_samples, num_luts, max_shared_memory);
break;
default:
PANIC("Cuda error (classical PBS): unsupported polynomial size. "
PANIC("Cuda error (low latency PBS): unsupported polynomial size. "
"Supported N's are powers of two"
" in the interval [256..16384].")
}
}
/* Perform bootstrapping on a batch of input u32 LWE ciphertexts.
* This function performs best for small numbers of inputs. Beyond a certain
* number of inputs (the exact number depends on the cryptographic parameters),
* the kernel cannot be launched and it is necessary to split the kernel call
* into several calls on smaller batches of inputs. For more details on this
* operation, head over to the equivalent u64 operation.
*/
void cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
void cuda_bootstrap_low_latency_lwe_ciphertext_vector_32(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *buffer,
@@ -327,13 +331,13 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory) {
if (base_log > 32)
PANIC("Cuda error (classical PBS): base log should be > number of bits "
PANIC("Cuda error (low latency PBS): base log should be > number of bits "
"in the ciphertext representation (32)");
if (has_support_to_cuda_programmable_bootstrap_cg<uint32_t>(
if (has_support_to_cuda_bootstrap_fast_low_latency<uint32_t>(
glwe_dimension, polynomial_size, level_count, num_samples,
max_shared_memory))
cuda_programmable_bootstrap_cg_lwe_ciphertext_vector<uint32_t>(
cuda_bootstrap_fast_low_latency_lwe_ciphertext_vector<uint32_t>(
stream, static_cast<uint32_t *>(lwe_array_out),
static_cast<uint32_t *>(lwe_output_indexes),
static_cast<uint32_t *>(lut_vector),
@@ -341,11 +345,11 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
static_cast<uint32_t *>(lwe_array_in),
static_cast<uint32_t *>(lwe_input_indexes),
static_cast<double2 *>(bootstrapping_key),
(pbs_buffer<uint32_t, CLASSICAL> *)buffer, lwe_dimension,
glwe_dimension, polynomial_size, base_log, level_count, num_samples,
num_luts, lwe_idx, max_shared_memory);
(pbs_buffer<uint32_t, LOW_LAT> *)buffer, lwe_dimension, glwe_dimension,
polynomial_size, base_log, level_count, num_samples, num_luts, lwe_idx,
max_shared_memory);
else
cuda_programmable_bootstrap_lwe_ciphertext_vector<uint32_t>(
cuda_bootstrap_low_latency_lwe_ciphertext_vector<uint32_t>(
stream, static_cast<uint32_t *>(lwe_array_out),
static_cast<uint32_t *>(lwe_output_indexes),
static_cast<uint32_t *>(lut_vector),
@@ -353,12 +357,16 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
static_cast<uint32_t *>(lwe_array_in),
static_cast<uint32_t *>(lwe_input_indexes),
static_cast<double2 *>(bootstrapping_key),
(pbs_buffer<uint32_t, CLASSICAL> *)buffer, lwe_dimension,
glwe_dimension, polynomial_size, base_log, level_count, num_samples,
num_luts, lwe_idx, max_shared_memory);
(pbs_buffer<uint32_t, LOW_LAT> *)buffer, lwe_dimension, glwe_dimension,
polynomial_size, base_log, level_count, num_samples, num_luts, lwe_idx,
max_shared_memory);
}
/* Perform bootstrapping on a batch of input u64 LWE ciphertexts.
* This function performs best for small numbers of inputs. Beyond a certain
* number of inputs (the exact number depends on the cryptographic parameters),
* the kernel cannot be launched and it is necessary to split the kernel call
* into several calls on smaller batches of inputs.
*
* - `stream` is the Cuda stream to be used in the kernel launch
@@ -430,7 +438,7 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
* - the constant memory (64K) is used for storing the roots of identity
* values for the FFT
*/
void cuda_programmable_bootstrap_lwe_ciphertext_vector_64(
void cuda_bootstrap_low_latency_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *buffer,
@@ -438,13 +446,13 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_64(
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory) {
if (base_log > 64)
PANIC("Cuda error (classical PBS): base log should be > number of bits "
PANIC("Cuda error (low latency PBS): base log should be > number of bits "
"in the ciphertext representation (64)");
if (has_support_to_cuda_programmable_bootstrap_cg<uint64_t>(
if (has_support_to_cuda_bootstrap_fast_low_latency<uint64_t>(
glwe_dimension, polynomial_size, level_count, num_samples,
max_shared_memory))
cuda_programmable_bootstrap_cg_lwe_ciphertext_vector<uint64_t>(
cuda_bootstrap_fast_low_latency_lwe_ciphertext_vector<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array_out),
static_cast<uint64_t *>(lwe_output_indexes),
static_cast<uint64_t *>(lut_vector),
@@ -452,11 +460,11 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_64(
static_cast<uint64_t *>(lwe_array_in),
static_cast<uint64_t *>(lwe_input_indexes),
static_cast<double2 *>(bootstrapping_key),
(pbs_buffer<uint64_t, CLASSICAL> *)buffer, lwe_dimension,
glwe_dimension, polynomial_size, base_log, level_count, num_samples,
num_luts, lwe_idx, max_shared_memory);
(pbs_buffer<uint64_t, LOW_LAT> *)buffer, lwe_dimension, glwe_dimension,
polynomial_size, base_log, level_count, num_samples, num_luts, lwe_idx,
max_shared_memory);
else
cuda_programmable_bootstrap_lwe_ciphertext_vector<uint64_t>(
cuda_bootstrap_low_latency_lwe_ciphertext_vector<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array_out),
static_cast<uint64_t *>(lwe_output_indexes),
static_cast<uint64_t *>(lut_vector),
@@ -464,85 +472,90 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_64(
static_cast<uint64_t *>(lwe_array_in),
static_cast<uint64_t *>(lwe_input_indexes),
static_cast<double2 *>(bootstrapping_key),
(pbs_buffer<uint64_t, CLASSICAL> *)buffer, lwe_dimension,
glwe_dimension, polynomial_size, base_log, level_count, num_samples,
num_luts, lwe_idx, max_shared_memory);
(pbs_buffer<uint64_t, LOW_LAT> *)buffer, lwe_dimension, glwe_dimension,
polynomial_size, base_log, level_count, num_samples, num_luts, lwe_idx,
max_shared_memory);
}
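Taken together, the public entry points in this file follow a scratch → bootstrap → cleanup lifecycle. A typical 64-bit call sequence might look like the sketch below, assuming the device pointers and size parameters were prepared elsewhere (the middle of the 64-bit signature is elided by the hunk above; the order shown mirrors the 32-bit wrapper):

int8_t *buffer = nullptr;
scratch_cuda_bootstrap_low_latency_64(stream, &buffer, glwe_dimension,
                                      polynomial_size, level_count,
                                      num_samples, max_shared_memory,
                                      /*allocate_gpu_memory=*/true);
cuda_bootstrap_low_latency_lwe_ciphertext_vector_64(
    stream, lwe_array_out, lwe_output_indexes, lut_vector,
    lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
    buffer, lwe_dimension, glwe_dimension, polynomial_size, base_log,
    level_count, num_samples, num_luts, /*lwe_idx=*/0, max_shared_memory);
cleanup_cuda_bootstrap_low_latency_64(stream, &buffer);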
/*
* This cleanup function frees the data on GPU for the PBS buffer for 32 or 64
* bits inputs.
* This cleanup function frees the GPU data used by the low latency PBS, in
* `buffer`, for 32-bit or 64-bit inputs.
*/
void cleanup_cuda_programmable_bootstrap(cuda_stream_t *stream,
int8_t **buffer) {
auto x = (pbs_buffer<uint64_t, CLASSICAL> *)(*buffer);
void cleanup_cuda_bootstrap_low_latency_32(cuda_stream_t *stream,
int8_t **buffer) {
auto x = (pbs_buffer<uint32_t, LOW_LAT> *)(*buffer);
x->release(stream);
}
void cleanup_cuda_bootstrap_low_latency_64(cuda_stream_t *stream,
int8_t **buffer) {
auto x = (pbs_buffer<uint64_t, LOW_LAT> *)(*buffer);
x->release(stream);
}
template bool has_support_to_cuda_programmable_bootstrap_cg<uint64_t>(
template bool has_support_to_cuda_bootstrap_fast_low_latency<uint64_t>(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t num_samples, uint32_t max_shared_memory);
template void cuda_programmable_bootstrap_cg_lwe_ciphertext_vector<uint64_t>(
template void cuda_bootstrap_fast_low_latency_lwe_ciphertext_vector<uint64_t>(
cuda_stream_t *stream, uint64_t *lwe_array_out,
uint64_t *lwe_output_indexes, uint64_t *lut_vector,
uint64_t *lut_vector_indexes, uint64_t *lwe_array_in,
uint64_t *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<uint64_t, CLASSICAL> *pbs_buffer, uint32_t lwe_dimension,
pbs_buffer<uint64_t, LOW_LAT> *pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_luts,
uint32_t lwe_idx, uint32_t max_shared_memory);
template void cuda_programmable_bootstrap_lwe_ciphertext_vector<uint64_t>(
template void cuda_bootstrap_low_latency_lwe_ciphertext_vector<uint64_t>(
cuda_stream_t *stream, uint64_t *lwe_array_out,
uint64_t *lwe_output_indexes, uint64_t *lut_vector,
uint64_t *lut_vector_indexes, uint64_t *lwe_array_in,
uint64_t *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<uint64_t, CLASSICAL> *pbs_buffer, uint32_t lwe_dimension,
pbs_buffer<uint64_t, LOW_LAT> *pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_luts,
uint32_t lwe_idx, uint32_t max_shared_memory);
template void scratch_cuda_programmable_bootstrap_cg<uint64_t, int64_t>(
cuda_stream_t *stream, pbs_buffer<uint64_t, CLASSICAL> **pbs_buffer,
template void scratch_cuda_fast_bootstrap_low_latency<uint64_t, int64_t>(
cuda_stream_t *stream, pbs_buffer<uint64_t, LOW_LAT> **pbs_buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory);
template void scratch_cuda_programmable_bootstrap<uint64_t, int64_t>(
cuda_stream_t *stream, pbs_buffer<uint64_t, CLASSICAL> **buffer,
template void scratch_cuda_bootstrap_low_latency<uint64_t, int64_t>(
cuda_stream_t *stream, pbs_buffer<uint64_t, LOW_LAT> **buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory);
template void cuda_programmable_bootstrap_cg_lwe_ciphertext_vector<uint32_t>(
template void cuda_bootstrap_fast_low_latency_lwe_ciphertext_vector<uint32_t>(
cuda_stream_t *stream, uint32_t *lwe_array_out,
uint32_t *lwe_output_indexes, uint32_t *lut_vector,
uint32_t *lut_vector_indexes, uint32_t *lwe_array_in,
uint32_t *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<uint32_t, CLASSICAL> *pbs_buffer, uint32_t lwe_dimension,
pbs_buffer<uint32_t, LOW_LAT> *pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_luts,
uint32_t lwe_idx, uint32_t max_shared_memory);
template void cuda_programmable_bootstrap_lwe_ciphertext_vector<uint32_t>(
template void cuda_bootstrap_low_latency_lwe_ciphertext_vector<uint32_t>(
cuda_stream_t *stream, uint32_t *lwe_array_out,
uint32_t *lwe_output_indexes, uint32_t *lut_vector,
uint32_t *lut_vector_indexes, uint32_t *lwe_array_in,
uint32_t *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<uint32_t, CLASSICAL> *pbs_buffer, uint32_t lwe_dimension,
pbs_buffer<uint32_t, LOW_LAT> *pbs_buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples, uint32_t num_luts,
uint32_t lwe_idx, uint32_t max_shared_memory);
template void scratch_cuda_programmable_bootstrap_cg<uint32_t, int32_t>(
cuda_stream_t *stream, pbs_buffer<uint32_t, CLASSICAL> **pbs_buffer,
template void scratch_cuda_fast_bootstrap_low_latency<uint32_t, int32_t>(
cuda_stream_t *stream, pbs_buffer<uint32_t, LOW_LAT> **pbs_buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory);
template void scratch_cuda_programmable_bootstrap<uint32_t, int32_t>(
cuda_stream_t *stream, pbs_buffer<uint32_t, CLASSICAL> **buffer,
template void scratch_cuda_bootstrap_low_latency<uint32_t, int32_t>(
cuda_stream_t *stream, pbs_buffer<uint32_t, LOW_LAT> **buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory);

View File

@@ -1,11 +1,12 @@
#ifndef CUDA_PBS_CUH
#define CUDA_PBS_CUH
#ifndef CUDA_LOWLAT_PBS_CUH
#define CUDA_LOWLAT_PBS_CUH
#ifdef __CDT_PARSER__
#undef __CUDA_RUNTIME_H__
#include <cuda_runtime.h>
#endif
#include "bootstrap.h"
#include "crypto/gadget.cuh"
#include "crypto/torus.cuh"
#include "device.h"
@@ -13,11 +14,10 @@
#include "fft/twiddles.cuh"
#include "polynomial/parameters.cuh"
#include "polynomial/polynomial_math.cuh"
#include "programmable_bootstrap.h"
#include "types/complex/operations.cuh"
template <typename Torus, class params, sharedMemDegree SMD>
__global__ void device_programmable_bootstrap_step_one(
__global__ void device_bootstrap_low_latency_step_one(
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, double2 *bootstrapping_key,
Torus *global_accumulator, double2 *global_accumulator_fft,
@@ -127,7 +127,7 @@ __global__ void device_programmable_bootstrap_step_one(
}
template <typename Torus, class params, sharedMemDegree SMD>
__global__ void device_programmable_bootstrap_step_two(
__global__ void device_bootstrap_low_latency_step_two(
Torus *lwe_array_out, Torus *lwe_output_indexes, Torus *lut_vector,
Torus *lut_vector_indexes, double2 *bootstrapping_key,
Torus *global_accumulator, double2 *global_accumulator_fft,
@@ -222,18 +222,18 @@ __global__ void device_programmable_bootstrap_step_two(
}
template <typename Torus>
__host__ __device__ uint64_t get_buffer_size_programmable_bootstrap(
__host__ __device__ uint64_t get_buffer_size_bootstrap_low_latency(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory) {
uint64_t full_sm_step_one =
get_buffer_size_full_sm_programmable_bootstrap_step_one<Torus>(
get_buffer_size_full_sm_bootstrap_low_latency_step_one<Torus>(
polynomial_size);
uint64_t full_sm_step_two =
get_buffer_size_full_sm_programmable_bootstrap_step_two<Torus>(
get_buffer_size_full_sm_bootstrap_low_latency_step_two<Torus>(
polynomial_size);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap<Torus>(polynomial_size);
get_buffer_size_partial_sm_bootstrap_low_latency<Torus>(polynomial_size);
uint64_t partial_dm_step_one = full_sm_step_one - partial_sm;
uint64_t partial_dm_step_two = full_sm_step_two - partial_sm;
@@ -263,37 +263,37 @@ __host__ __device__ uint64_t get_buffer_size_programmable_bootstrap(
}
template <typename Torus, typename STorus, typename params>
__host__ void scratch_programmable_bootstrap(
cuda_stream_t *stream, pbs_buffer<Torus, CLASSICAL> **buffer,
__host__ void scratch_bootstrap_low_latency(
cuda_stream_t *stream, pbs_buffer<Torus, LOW_LAT> **buffer,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory) {
cudaSetDevice(stream->gpu_index);
uint64_t full_sm_step_one =
get_buffer_size_full_sm_programmable_bootstrap_step_one<Torus>(
get_buffer_size_full_sm_bootstrap_low_latency_step_one<Torus>(
polynomial_size);
uint64_t full_sm_step_two =
get_buffer_size_full_sm_programmable_bootstrap_step_two<Torus>(
get_buffer_size_full_sm_bootstrap_low_latency_step_two<Torus>(
polynomial_size);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap<Torus>(polynomial_size);
get_buffer_size_partial_sm_bootstrap_low_latency<Torus>(polynomial_size);
// Configure step one
if (max_shared_memory >= partial_sm && max_shared_memory < full_sm_step_one) {
check_cuda_error(cudaFuncSetAttribute(
device_programmable_bootstrap_step_one<Torus, params, PARTIALSM>,
device_bootstrap_low_latency_step_one<Torus, params, PARTIALSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, partial_sm));
cudaFuncSetCacheConfig(
device_programmable_bootstrap_step_one<Torus, params, PARTIALSM>,
device_bootstrap_low_latency_step_one<Torus, params, PARTIALSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else if (max_shared_memory >= partial_sm) {
check_cuda_error(cudaFuncSetAttribute(
device_programmable_bootstrap_step_one<Torus, params, FULLSM>,
device_bootstrap_low_latency_step_one<Torus, params, FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm_step_one));
cudaFuncSetCacheConfig(
device_programmable_bootstrap_step_one<Torus, params, FULLSM>,
device_bootstrap_low_latency_step_one<Torus, params, FULLSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
}
@@ -301,29 +301,29 @@ __host__ void scratch_programmable_bootstrap(
// Configure step two
if (max_shared_memory >= partial_sm && max_shared_memory < full_sm_step_two) {
check_cuda_error(cudaFuncSetAttribute(
device_programmable_bootstrap_step_two<Torus, params, PARTIALSM>,
device_bootstrap_low_latency_step_two<Torus, params, PARTIALSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, partial_sm));
cudaFuncSetCacheConfig(
device_programmable_bootstrap_step_two<Torus, params, PARTIALSM>,
device_bootstrap_low_latency_step_two<Torus, params, PARTIALSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else if (max_shared_memory >= partial_sm) {
check_cuda_error(cudaFuncSetAttribute(
device_programmable_bootstrap_step_two<Torus, params, FULLSM>,
device_bootstrap_low_latency_step_two<Torus, params, FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm_step_two));
cudaFuncSetCacheConfig(
device_programmable_bootstrap_step_two<Torus, params, FULLSM>,
device_bootstrap_low_latency_step_two<Torus, params, FULLSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
}
*buffer = new pbs_buffer<Torus, CLASSICAL>(
*buffer = new pbs_buffer<Torus, LOW_LAT>(
stream, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, PBS_VARIANT::DEFAULT, allocate_gpu_memory);
}
template <typename Torus, class params>
__host__ void execute_step_one(
__host__ void execute_low_latency_step_one(
cuda_stream_t *stream, Torus *lut_vector, Torus *lut_vector_indexes,
Torus *lwe_array_in, Torus *lwe_input_indexes, double2 *bootstrapping_key,
Torus *global_accumulator, double2 *global_accumulator_fft,
@@ -337,21 +337,21 @@ __host__ void execute_step_one(
dim3 grid(level_count, glwe_dimension + 1, input_lwe_ciphertext_count);
if (max_shared_memory < partial_sm) {
device_programmable_bootstrap_step_one<Torus, params, NOSM>
device_bootstrap_low_latency_step_one<Torus, params, NOSM>
<<<grid, thds, 0, stream->stream>>>(
lut_vector, lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, global_accumulator, global_accumulator_fft,
lwe_iteration, lwe_dimension, polynomial_size, base_log,
level_count, d_mem, full_dm);
} else if (max_shared_memory < full_sm) {
device_programmable_bootstrap_step_one<Torus, params, PARTIALSM>
device_bootstrap_low_latency_step_one<Torus, params, PARTIALSM>
<<<grid, thds, partial_sm, stream->stream>>>(
lut_vector, lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, global_accumulator, global_accumulator_fft,
lwe_iteration, lwe_dimension, polynomial_size, base_log,
level_count, d_mem, partial_dm);
} else {
device_programmable_bootstrap_step_one<Torus, params, FULLSM>
device_bootstrap_low_latency_step_one<Torus, params, FULLSM>
<<<grid, thds, full_sm, stream->stream>>>(
lut_vector, lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, global_accumulator, global_accumulator_fft,
@@ -362,7 +362,7 @@ __host__ void execute_step_one(
}
template <typename Torus, class params>
__host__ void execute_step_two(
__host__ void execute_low_latency_step_two(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, double2 *bootstrapping_key,
Torus *global_accumulator, double2 *global_accumulator_fft,
@@ -376,21 +376,21 @@ __host__ void execute_step_two(
dim3 grid(input_lwe_ciphertext_count, glwe_dimension + 1);
if (max_shared_memory < partial_sm) {
device_programmable_bootstrap_step_two<Torus, params, NOSM>
device_bootstrap_low_latency_step_two<Torus, params, NOSM>
<<<grid, thds, 0, stream->stream>>>(
lwe_array_out, lwe_output_indexes, lut_vector, lut_vector_indexes,
bootstrapping_key, global_accumulator, global_accumulator_fft,
lwe_iteration, lwe_dimension, polynomial_size, base_log,
level_count, d_mem, full_dm);
} else if (max_shared_memory < full_sm) {
device_programmable_bootstrap_step_two<Torus, params, PARTIALSM>
device_bootstrap_low_latency_step_two<Torus, params, PARTIALSM>
<<<grid, thds, partial_sm, stream->stream>>>(
lwe_array_out, lwe_output_indexes, lut_vector, lut_vector_indexes,
bootstrapping_key, global_accumulator, global_accumulator_fft,
lwe_iteration, lwe_dimension, polynomial_size, base_log,
level_count, d_mem, partial_dm);
} else {
device_programmable_bootstrap_step_two<Torus, params, FULLSM>
device_bootstrap_low_latency_step_two<Torus, params, FULLSM>
<<<grid, thds, full_sm, stream->stream>>>(
lwe_array_out, lwe_output_indexes, lut_vector, lut_vector_indexes,
bootstrapping_key, global_accumulator, global_accumulator_fft,
@@ -400,14 +400,15 @@ __host__ void execute_step_two(
check_cuda_error(cudaGetLastError());
}
/*
* Host wrapper to the programmable bootstrap
* Host wrapper to the low latency version
* of bootstrapping
*/
template <typename Torus, class params>
__host__ void host_programmable_bootstrap(
__host__ void host_bootstrap_low_latency(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, double2 *bootstrapping_key,
pbs_buffer<Torus, CLASSICAL> *pbs_buffer, uint32_t glwe_dimension,
pbs_buffer<Torus, LOW_LAT> *pbs_buffer, uint32_t glwe_dimension,
uint32_t lwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t input_lwe_ciphertext_count,
uint32_t num_luts, uint32_t max_shared_memory) {
@@ -416,14 +417,14 @@ __host__ void host_programmable_bootstrap(
// With SM each block corresponds to either the mask or body, no need to
// duplicate data for each
uint64_t full_sm_step_one =
get_buffer_size_full_sm_programmable_bootstrap_step_one<Torus>(
get_buffer_size_full_sm_bootstrap_low_latency_step_one<Torus>(
polynomial_size);
uint64_t full_sm_step_two =
get_buffer_size_full_sm_programmable_bootstrap_step_two<Torus>(
get_buffer_size_full_sm_bootstrap_low_latency_step_two<Torus>(
polynomial_size);
uint64_t partial_sm =
get_buffer_size_partial_sm_programmable_bootstrap<Torus>(polynomial_size);
get_buffer_size_partial_sm_bootstrap_low_latency<Torus>(polynomial_size);
uint64_t partial_dm_step_one = full_sm_step_one - partial_sm;
uint64_t partial_dm_step_two = full_sm_step_two - partial_sm;
@@ -435,13 +436,13 @@ __host__ void host_programmable_bootstrap(
int8_t *d_mem = pbs_buffer->d_mem;
for (int i = 0; i < lwe_dimension; i++) {
execute_step_one<Torus, params>(
execute_low_latency_step_one<Torus, params>(
stream, lut_vector, lut_vector_indexes, lwe_array_in, lwe_input_indexes,
bootstrapping_key, global_accumulator, global_accumulator_fft,
input_lwe_ciphertext_count, lwe_dimension, glwe_dimension,
polynomial_size, base_log, level_count, d_mem, max_shared_memory, i,
partial_sm, partial_dm_step_one, full_sm_step_one, full_dm_step_one);
execute_step_two<Torus, params>(
execute_low_latency_step_two<Torus, params>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, bootstrapping_key, global_accumulator,
global_accumulator_fft, input_lwe_ciphertext_count, lwe_dimension,
@@ -451,4 +452,4 @@ __host__ void host_programmable_bootstrap(
}
}
#endif // CUDA_PBS_CUH
#endif // LOWLAT_PBS_H
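For orientation, a hypothetical invocation of the wrapper above; the Degree<1024> params class and every argument value here are assumptions for illustration, not taken from this diff:
host_bootstrap_low_latency<uint64_t, Degree<1024>>(
    stream, lwe_array_out, lwe_output_indexes, lut_vector, lut_vector_indexes,
    lwe_array_in, lwe_input_indexes, bootstrapping_key, buffer,
    /*glwe_dimension=*/1, /*lwe_dimension=*/742, /*polynomial_size=*/1024,
    /*base_log=*/23, /*level_count=*/1, /*input_lwe_ciphertext_count=*/1,
    /*num_luts=*/1, max_shared_memory);
The wrapper then runs the step-one/step-two kernel pair once per LWE mask element, reusing global_accumulator and global_accumulator_fft across iterations.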

View File

@@ -1,18 +1,20 @@
#include "../polynomial/parameters.cuh"
#include "programmable_bootstrap_cg_multibit.cuh"
#include "programmable_bootstrap_multibit.cuh"
#include "programmable_bootstrap_multibit.h"
#include "bootstrap_fast_multibit.cuh"
#include "bootstrap_multibit.cuh"
#include "bootstrap_multibit.h"
bool has_support_to_cuda_programmable_bootstrap_cg_multi_bit(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t num_samples, uint32_t max_shared_memory) {
return supports_cooperative_groups_on_multibit_programmable_bootstrap<
uint64_t>(glwe_dimension, polynomial_size, level_count, num_samples,
max_shared_memory);
bool has_support_to_cuda_bootstrap_fast_multi_bit(uint32_t glwe_dimension,
uint32_t polynomial_size,
uint32_t level_count,
uint32_t num_samples,
uint32_t max_shared_memory) {
return supports_cooperative_groups_on_multibit_pbs<uint64_t>(
glwe_dimension, polynomial_size, level_count, num_samples,
max_shared_memory);
}
template <typename Torus>
void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
void cuda_fast_multi_bit_pbs_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, Torus *bootstrapping_key,
@@ -28,8 +30,7 @@ void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
switch (polynomial_size) {
case 256:
host_cg_multi_bit_programmable_bootstrap<uint64_t, int64_t,
AmortizedDegree<256>>(
host_fast_multi_bit_pbs<uint64_t, int64_t, AmortizedDegree<256>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -37,8 +38,7 @@ void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 512:
host_cg_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<512>>(
host_fast_multi_bit_pbs<Torus, int64_t, AmortizedDegree<512>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -46,8 +46,7 @@ void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 1024:
host_cg_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<1024>>(
host_fast_multi_bit_pbs<Torus, int64_t, AmortizedDegree<1024>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -55,8 +54,7 @@ void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 2048:
host_cg_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<2048>>(
host_fast_multi_bit_pbs<Torus, int64_t, AmortizedDegree<2048>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -64,8 +62,7 @@ void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 4096:
host_cg_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<4096>>(
host_fast_multi_bit_pbs<Torus, int64_t, AmortizedDegree<4096>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -73,8 +70,7 @@ void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 8192:
host_cg_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<8192>>(
host_fast_multi_bit_pbs<Torus, int64_t, AmortizedDegree<8192>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -82,8 +78,7 @@ void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 16384:
host_cg_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<16384>>(
host_fast_multi_bit_pbs<Torus, int64_t, AmortizedDegree<16384>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -98,7 +93,7 @@ void cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
}
template <typename Torus>
void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
void cuda_multi_bit_pbs_lwe_ciphertext_vector(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, Torus *bootstrapping_key,
@@ -114,8 +109,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
switch (polynomial_size) {
case 256:
host_multi_bit_programmable_bootstrap<uint64_t, int64_t,
AmortizedDegree<256>>(
host_multi_bit_pbs<uint64_t, int64_t, AmortizedDegree<256>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -123,7 +117,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 512:
host_multi_bit_programmable_bootstrap<Torus, int64_t, AmortizedDegree<512>>(
host_multi_bit_pbs<Torus, int64_t, AmortizedDegree<512>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -131,8 +125,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 1024:
host_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<1024>>(
host_multi_bit_pbs<Torus, int64_t, AmortizedDegree<1024>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -140,8 +133,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 2048:
host_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<2048>>(
host_multi_bit_pbs<Torus, int64_t, AmortizedDegree<2048>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -149,8 +141,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 4096:
host_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<4096>>(
host_multi_bit_pbs<Torus, int64_t, AmortizedDegree<4096>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -158,8 +149,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 8192:
host_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<8192>>(
host_multi_bit_pbs<Torus, int64_t, AmortizedDegree<8192>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -167,8 +157,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
max_shared_memory, lwe_chunk_size);
break;
case 16384:
host_multi_bit_programmable_bootstrap<Torus, int64_t,
AmortizedDegree<16384>>(
host_multi_bit_pbs<Torus, int64_t, AmortizedDegree<16384>>(
stream, lwe_array_out, lwe_output_indexes, lut_vector,
lut_vector_indexes, lwe_array_in, lwe_input_indexes, bootstrapping_key,
pbs_buffer, glwe_dimension, lwe_dimension, polynomial_size,
@@ -182,7 +171,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector(
}
}
void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_64(
void cuda_multi_bit_pbs_lwe_ciphertext_vector_64(
cuda_stream_t *stream, void *lwe_array_out, void *lwe_output_indexes,
void *lut_vector, void *lut_vector_indexes, void *lwe_array_in,
void *lwe_input_indexes, void *bootstrapping_key, int8_t *buffer,
@@ -191,10 +180,10 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_64(
uint32_t num_samples, uint32_t num_luts, uint32_t lwe_idx,
uint32_t max_shared_memory, uint32_t lwe_chunk_size) {
if (supports_cooperative_groups_on_multibit_programmable_bootstrap<uint64_t>(
if (supports_cooperative_groups_on_multibit_pbs<uint64_t>(
glwe_dimension, polynomial_size, level_count, num_samples,
max_shared_memory))
cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector<uint64_t>(
cuda_fast_multi_bit_pbs_lwe_ciphertext_vector<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array_out),
static_cast<uint64_t *>(lwe_output_indexes),
static_cast<uint64_t *>(lut_vector),
@@ -206,7 +195,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_64(
glwe_dimension, polynomial_size, grouping_factor, base_log, level_count,
num_samples, num_luts, lwe_idx, max_shared_memory, lwe_chunk_size);
else
cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector<uint64_t>(
cuda_multi_bit_pbs_lwe_ciphertext_vector<uint64_t>(
stream, static_cast<uint64_t *>(lwe_array_out),
static_cast<uint64_t *>(lwe_output_indexes),
static_cast<uint64_t *>(lut_vector),
@@ -220,7 +209,7 @@ void cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector_64(
}
template <typename Torus, typename STorus>
void scratch_cuda_cg_multi_bit_programmable_bootstrap(
void scratch_cuda_fast_multi_bit_pbs(
cuda_stream_t *stream, pbs_buffer<Torus, MULTI_BIT> **buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor,
@@ -229,50 +218,43 @@ void scratch_cuda_cg_multi_bit_programmable_bootstrap(
switch (polynomial_size) {
case 256:
scratch_cg_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<256>>(
scratch_fast_multi_bit_pbs<Torus, STorus, AmortizedDegree<256>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 512:
scratch_cg_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<512>>(
scratch_fast_multi_bit_pbs<Torus, STorus, AmortizedDegree<512>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 1024:
scratch_cg_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<1024>>(
scratch_fast_multi_bit_pbs<Torus, STorus, AmortizedDegree<1024>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 2048:
scratch_cg_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<2048>>(
scratch_fast_multi_bit_pbs<Torus, STorus, AmortizedDegree<2048>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 4096:
scratch_cg_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<4096>>(
scratch_fast_multi_bit_pbs<Torus, STorus, AmortizedDegree<4096>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 8192:
scratch_cg_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<8192>>(
scratch_fast_multi_bit_pbs<Torus, STorus, AmortizedDegree<8192>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 16384:
scratch_cg_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<16384>>(
scratch_fast_multi_bit_pbs<Torus, STorus, AmortizedDegree<16384>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
@@ -285,7 +267,7 @@ void scratch_cuda_cg_multi_bit_programmable_bootstrap(
}
template <typename Torus, typename STorus>
void scratch_cuda_multi_bit_programmable_bootstrap(
void scratch_cuda_multi_bit_pbs(
cuda_stream_t *stream, pbs_buffer<Torus, MULTI_BIT> **buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor,
@@ -294,50 +276,43 @@ void scratch_cuda_multi_bit_programmable_bootstrap(
switch (polynomial_size) {
case 256:
scratch_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<256>>(
scratch_multi_bit_pbs<Torus, STorus, AmortizedDegree<256>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 512:
scratch_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<512>>(
scratch_multi_bit_pbs<Torus, STorus, AmortizedDegree<512>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 1024:
scratch_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<1024>>(
scratch_multi_bit_pbs<Torus, STorus, AmortizedDegree<1024>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 2048:
scratch_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<2048>>(
scratch_multi_bit_pbs<Torus, STorus, AmortizedDegree<2048>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 4096:
scratch_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<4096>>(
scratch_multi_bit_pbs<Torus, STorus, AmortizedDegree<4096>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 8192:
scratch_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<8192>>(
scratch_multi_bit_pbs<Torus, STorus, AmortizedDegree<8192>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
break;
case 16384:
scratch_multi_bit_programmable_bootstrap<Torus, STorus,
AmortizedDegree<16384>>(
scratch_multi_bit_pbs<Torus, STorus, AmortizedDegree<16384>>(
stream, buffer, lwe_dimension, glwe_dimension, polynomial_size,
level_count, input_lwe_ciphertext_count, grouping_factor,
max_shared_memory, allocate_gpu_memory, lwe_chunk_size);
@@ -349,65 +324,173 @@ void scratch_cuda_multi_bit_programmable_bootstrap(
}
}
void scratch_cuda_multi_bit_programmable_bootstrap_64(
void scratch_cuda_multi_bit_pbs_64(
cuda_stream_t *stream, int8_t **buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t grouping_factor, uint32_t input_lwe_ciphertext_count,
uint32_t max_shared_memory, bool allocate_gpu_memory,
uint32_t lwe_chunk_size) {
if (supports_cooperative_groups_on_multibit_programmable_bootstrap<uint64_t>(
if (supports_cooperative_groups_on_multibit_pbs<uint64_t>(
glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, max_shared_memory))
scratch_cuda_cg_multi_bit_programmable_bootstrap<uint64_t, int64_t>(
scratch_cuda_fast_multi_bit_pbs<uint64_t, int64_t>(
stream, (pbs_buffer<uint64_t, MULTI_BIT> **)buffer, lwe_dimension,
glwe_dimension, polynomial_size, level_count, grouping_factor,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory,
lwe_chunk_size);
else
scratch_cuda_multi_bit_programmable_bootstrap<uint64_t, int64_t>(
scratch_cuda_multi_bit_pbs<uint64_t, int64_t>(
stream, (pbs_buffer<uint64_t, MULTI_BIT> **)buffer, lwe_dimension,
glwe_dimension, polynomial_size, level_count, grouping_factor,
input_lwe_ciphertext_count, max_shared_memory, allocate_gpu_memory,
lwe_chunk_size);
}
void cleanup_cuda_multi_bit_programmable_bootstrap(cuda_stream_t *stream,
int8_t **buffer) {
void cleanup_cuda_multi_bit_pbs_32(cuda_stream_t *stream, int8_t **buffer) {
auto x = (pbs_buffer<uint32_t, MULTI_BIT> *)(*buffer);
x->release(stream);
}
void cleanup_cuda_multi_bit_pbs_64(cuda_stream_t *stream, int8_t **buffer) {
auto x = (pbs_buffer<uint64_t, MULTI_BIT> *)(*buffer);
x->release(stream);
}
// Returns a chunk size that is not optimal but close to optimal
__host__ uint32_t get_lwe_chunk_size(uint32_t ct_count) {
// Pick the best possible chunk size for each GPU
__host__ uint32_t get_lwe_chunk_size(uint32_t lwe_dimension,
uint32_t level_count,
uint32_t glwe_dimension,
uint32_t num_samples) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0); // Assuming device 0
const char *v100Name = "V100"; // Known name of V100 GPU
const char *a100Name = "A100"; // Known name of A100 GPU
const char *h100Name = "H100"; // Known name of H100 GPU
if (std::strstr(deviceProp.name, v100Name) != nullptr) {
// Tesla V100
if (num_samples == 1)
return 60;
else if (num_samples == 2)
return 40;
else if (num_samples <= 4)
return 20;
else if (num_samples <= 8)
return 10;
else if (num_samples <= 16)
return 40;
else if (num_samples <= 32)
return 27;
else if (num_samples <= 64)
return 20;
else if (num_samples <= 128)
return 18;
else if (num_samples <= 256)
return 16;
else if (num_samples <= 512)
return 15;
else if (num_samples <= 1024)
return 15;
else
return 12;
} else if (std::strstr(deviceProp.name, a100Name) != nullptr) {
// Tesla A100
if (num_samples < 4)
return 11;
else if (num_samples < 8)
return 6;
else if (num_samples < 16)
return 13;
else if (num_samples < 64)
return 19;
else if (num_samples < 128)
return 1;
else if (num_samples < 512)
return 19;
else if (num_samples < 1024)
return 17;
else if (num_samples < 8192)
return 19;
else if (num_samples < 16384)
return 12;
else
return 9;
} else if (std::strstr(deviceProp.name, h100Name) != nullptr) {
// Tesla H100
if (num_samples < 1024)
return 128;
else if (num_samples < 4096)
return 64;
else
return 32;
}
#if CUDA_ARCH >= 900
// Tesla H100
return (ct_count > 10000) ? 30 : 64;
#elif CUDA_ARCH >= 890
// Tesla RTX4090
return 8;
#elif CUDA_ARCH >= 800
// Tesla A100
return (ct_count > 10000) ? 30 : 45;
#elif CUDA_ARCH >= 700
// Tesla V100
return (ct_count > 10000) ? 12 : 18;
#else
// Generic case
return (ct_count > 10000) ? 2 : 1;
#endif
return 1;
}
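A hypothetical call site for the tuned lookup above (parameter values are illustrative only):
uint32_t chunk = get_lwe_chunk_size(/*lwe_dimension=*/742, /*level_count=*/1,
                                    /*glwe_dimension=*/1, /*num_samples=*/64);
// On a V100 the table above yields 20 for 64 samples; unrecognized GPUs fall back to 1.
Note that lwe_dimension, level_count, and glwe_dimension are accepted but unused: the tables are currently keyed only on the device name and num_samples.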
template void scratch_cuda_multi_bit_programmable_bootstrap<uint64_t, int64_t>(
// Returns a chunk size that is not optimal but close to optimal
__host__ uint32_t get_average_lwe_chunk_size(uint32_t lwe_dimension,
uint32_t level_count,
uint32_t glwe_dimension,
uint32_t ct_count) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0); // Assuming device 0
const char *v100Name = "V100"; // Known name of V100 GPU
const char *a100Name = "A100"; // Known name of A100 GPU
const char *h100Name = "H100"; // Known name of H100 GPU
if (std::strstr(deviceProp.name, v100Name) != nullptr) {
// Tesla V100
return (ct_count > 10000) ? 12 : 18;
} else if (std::strstr(deviceProp.name, a100Name) != nullptr) {
// Tesla A100
return (ct_count > 10000) ? 30 : 45;
} else if (std::strstr(deviceProp.name, h100Name) != nullptr) {
// Tesla H100
return 64;
}
// Generic case
return (ct_count > 10000) ? 2 : 1;
}
// Returns the maximum buffer size required to execute batches up to
// max_input_lwe_ciphertext_count
// todo: Deprecate this function
__host__ uint64_t get_max_buffer_size_multibit_bootstrap(
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t max_input_lwe_ciphertext_count) {
uint64_t max_buffer_size = 0;
for (uint32_t input_lwe_ciphertext_count = 1;
input_lwe_ciphertext_count <= max_input_lwe_ciphertext_count;
input_lwe_ciphertext_count *= 2) {
max_buffer_size =
std::max(max_buffer_size,
get_buffer_size_multibit_bootstrap<uint64_t>(
glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count,
get_average_lwe_chunk_size(lwe_dimension, level_count,
glwe_dimension,
input_lwe_ciphertext_count)));
}
return max_buffer_size;
}
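A hypothetical usage of the helper above, sizing one scratch allocation that can serve any batch up to 4096 inputs (values illustrative only):
uint64_t max_bytes = get_max_buffer_size_multibit_bootstrap(
    /*lwe_dimension=*/742, /*glwe_dimension=*/1, /*polynomial_size=*/2048,
    /*level_count=*/1, /*max_input_lwe_ciphertext_count=*/4096);
Note the loop only samples power-of-two batch sizes (1, 2, 4, ...), so intermediate counts are covered only if the buffer size grows monotonically with the batch size, which the sizing formula is presumably expected to satisfy.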
template void scratch_cuda_multi_bit_pbs<uint64_t, int64_t>(
cuda_stream_t *stream, pbs_buffer<uint64_t, MULTI_BIT> **pbs_buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory, uint32_t lwe_chunk_size);
template void
cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector<uint64_t>(
template void cuda_multi_bit_pbs_lwe_ciphertext_vector<uint64_t>(
cuda_stream_t *stream, uint64_t *lwe_array_out,
uint64_t *lwe_output_indexes, uint64_t *lut_vector,
uint64_t *lut_vector_indexes, uint64_t *lwe_array_in,
@@ -418,16 +501,14 @@ cuda_multi_bit_programmable_bootstrap_lwe_ciphertext_vector<uint64_t>(
uint32_t num_luts, uint32_t lwe_idx, uint32_t max_shared_memory,
uint32_t lwe_chunk_size);
template void
scratch_cuda_cg_multi_bit_programmable_bootstrap<uint64_t, int64_t>(
template void scratch_cuda_fast_multi_bit_pbs<uint64_t, int64_t>(
cuda_stream_t *stream, pbs_buffer<uint64_t, MULTI_BIT> **pbs_buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor,
uint32_t input_lwe_ciphertext_count, uint32_t max_shared_memory,
bool allocate_gpu_memory, uint32_t lwe_chunk_size);
template void
cuda_cg_multi_bit_programmable_bootstrap_lwe_ciphertext_vector<uint64_t>(
template void cuda_fast_multi_bit_pbs_lwe_ciphertext_vector<uint64_t>(
cuda_stream_t *stream, uint64_t *lwe_array_out,
uint64_t *lwe_output_indexes, uint64_t *lut_vector,
uint64_t *lut_vector_indexes, uint64_t *lwe_array_in,

View File

@@ -1,6 +1,9 @@
#ifndef CUDA_MULTIBIT_PBS_CUH
#define CUDA_MULTIBIT_PBS_CUH
#include "bootstrap.h"
#include "bootstrap_fast_low_latency.cuh"
#include "bootstrap_multibit.h"
#include "cooperative_groups.h"
#include "crypto/gadget.cuh"
#include "crypto/ggsw.cuh"
@@ -11,9 +14,6 @@
#include "polynomial/functions.cuh"
#include "polynomial/parameters.cuh"
#include "polynomial/polynomial_math.cuh"
#include "programmable_bootstrap.h"
#include "programmable_bootstrap_cg_classic.cuh"
#include "programmable_bootstrap_multibit.h"
#include "types/complex/operations.cuh"
#include <vector>
@@ -32,26 +32,17 @@ __device__ Torus calculates_monomial_degree(Torus *lwe_array_group,
x, 2 * params::degree); // 2 * params::log2_degree + 1);
}
template <typename Torus, class params, sharedMemDegree SMD>
__global__ void device_multi_bit_programmable_bootstrap_keybundle(
template <typename Torus, class params>
__global__ void device_multi_bit_bootstrap_keybundle(
Torus *lwe_array_in, Torus *lwe_input_indexes, double2 *keybundle_array,
Torus *bootstrapping_key, uint32_t lwe_dimension, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t grouping_factor, uint32_t base_log,
uint32_t level_count, uint32_t lwe_offset, uint32_t lwe_chunk_size,
uint32_t keybundle_size_per_input, int8_t *device_mem,
uint64_t device_memory_size_per_block) {
uint32_t keybundle_size_per_input) {
extern __shared__ int8_t sharedmem[];
int8_t *selected_memory = sharedmem;
if constexpr (SMD == FULLSM) {
selected_memory = sharedmem;
} else {
int block_index = blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y;
selected_memory = &device_mem[block_index * device_memory_size_per_block];
}
// Ids
uint32_t level_id = blockIdx.z;
uint32_t glwe_id = blockIdx.y / (glwe_dimension + 1);
@@ -108,7 +99,7 @@ __global__ void device_multi_bit_programmable_bootstrap_keybundle(
synchronize_threads_in_block();
double2 *fft = (double2 *)selected_memory;
double2 *fft = (double2 *)sharedmem;
// Move accumulator to local memory
double2 temp[params::opt / 2];
@@ -145,14 +136,13 @@ __global__ void device_multi_bit_programmable_bootstrap_keybundle(
}
}
template <typename Torus, class params, sharedMemDegree SMD>
__global__ void device_multi_bit_programmable_bootstrap_accumulate_step_one(
template <typename Torus, class params>
__global__ void device_multi_bit_bootstrap_accumulate_step_one(
Torus *lwe_array_in, Torus *lwe_input_indexes, Torus *lut_vector,
Torus *lut_vector_indexes, Torus *global_accumulator,
double2 *global_accumulator_fft, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t lwe_iteration, int8_t *device_mem,
uint64_t device_memory_size_per_block) {
uint32_t level_count, uint32_t lwe_iteration) {
// We use shared memory for the polynomials that are used often during the
// bootstrap, since shared memory is kept in L1 cache and accessing it is
@@ -162,22 +152,11 @@ __global__ void device_multi_bit_programmable_bootstrap_accumulate_step_one(
selected_memory = sharedmem;
if constexpr (SMD == FULLSM) {
selected_memory = sharedmem;
} else {
int block_index = blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y;
selected_memory = &device_mem[block_index * device_memory_size_per_block];
}
Torus *accumulator = (Torus *)selected_memory;
double2 *accumulator_fft =
(double2 *)accumulator +
(ptrdiff_t)(sizeof(Torus) * polynomial_size / sizeof(double2));
if constexpr (SMD == PARTIALSM)
accumulator_fft = (double2 *)sharedmem;
Torus *block_lwe_array_in =
&lwe_array_in[lwe_input_indexes[blockIdx.z] * (lwe_dimension + 1)];
@@ -240,14 +219,13 @@ __global__ void device_multi_bit_programmable_bootstrap_accumulate_step_one(
accumulator_fft, global_fft_slice);
}
template <typename Torus, class params, sharedMemDegree SMD>
__global__ void device_multi_bit_programmable_bootstrap_accumulate_step_two(
template <typename Torus, class params>
__global__ void device_multi_bit_bootstrap_accumulate_step_two(
Torus *lwe_array_out, Torus *lwe_output_indexes, double2 *keybundle_array,
Torus *global_accumulator, double2 *global_accumulator_fft,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t grouping_factor, uint32_t iteration,
uint32_t lwe_offset, uint32_t lwe_chunk_size, int8_t *device_mem,
uint64_t device_memory_size_per_block) {
uint32_t lwe_offset, uint32_t lwe_chunk_size) {
// We use shared memory for the polynomials that are used often during the
// bootstrap, since shared memory is kept in L1 cache and accessing it is
// much faster than global memory
@@ -255,18 +233,8 @@ __global__ void device_multi_bit_programmable_bootstrap_accumulate_step_two(
int8_t *selected_memory;
selected_memory = sharedmem;
if constexpr (SMD == FULLSM) {
selected_memory = sharedmem;
} else {
int block_index = blockIdx.x + blockIdx.y * gridDim.x +
blockIdx.z * gridDim.x * gridDim.y;
selected_memory = &device_mem[block_index * device_memory_size_per_block];
}
double2 *accumulator_fft = (double2 *)selected_memory;
//
double2 *keybundle = keybundle_array +
// select the input
blockIdx.x * lwe_chunk_size * level_count *
@@ -327,31 +295,23 @@ __global__ void device_multi_bit_programmable_bootstrap_accumulate_step_two(
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_multibit_programmable_bootstrap_keybundle(
uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size; // accumulator
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_one(
uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size * 2; // accumulator
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_partial_sm_multibit_programmable_bootstrap_step_one(
uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size; // accumulator
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_two(
uint32_t polynomial_size) {
get_buffer_size_full_sm_multibit_bootstrap_keybundle(uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size; // accumulator
}
template <typename Torus>
__host__ __device__ uint64_t get_buffer_size_multibit_programmable_bootstrap(
__host__ __device__ uint64_t
get_buffer_size_full_sm_multibit_bootstrap_step_one(uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size * 2; // accumulator
}
template <typename Torus>
__host__ __device__ uint64_t
get_buffer_size_full_sm_multibit_bootstrap_step_two(uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size; // accumulator
}
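To make these shared-memory budgets concrete, for Torus = uint64_t and polynomial_size = 2048 the three formulas above give (values follow directly from the returns):
static_assert(sizeof(uint64_t) * 2048 == 16384, "keybundle / step two: 16 KiB of shared memory");
static_assert(sizeof(uint64_t) * 2048 * 2 == 32768, "step one: 32 KiB of shared memory");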
template <typename Torus>
__host__ __device__ uint64_t get_buffer_size_multibit_bootstrap(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, uint32_t lwe_chunk_size) {
@@ -369,7 +329,7 @@ __host__ __device__ uint64_t get_buffer_size_multibit_programmable_bootstrap(
}
template <typename Torus, typename STorus, typename params>
__host__ void scratch_multi_bit_programmable_bootstrap(
__host__ void scratch_multi_bit_pbs(
cuda_stream_t *stream, pbs_buffer<Torus, MULTI_BIT> **buffer,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, uint32_t input_lwe_ciphertext_count,
@@ -379,248 +339,54 @@ __host__ void scratch_multi_bit_programmable_bootstrap(
cudaSetDevice(stream->gpu_index);
uint64_t full_sm_keybundle =
get_buffer_size_full_sm_multibit_programmable_bootstrap_keybundle<Torus>(
get_buffer_size_full_sm_multibit_bootstrap_keybundle<Torus>(
polynomial_size);
uint64_t full_sm_accumulate_step_one =
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_one<Torus>(
get_buffer_size_full_sm_multibit_bootstrap_step_one<Torus>(
polynomial_size);
uint64_t full_sm_accumulate_step_two =
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_two<Torus>(
get_buffer_size_full_sm_multibit_bootstrap_step_two<Torus>(
polynomial_size);
uint64_t partial_sm_accumulate_step_one =
get_buffer_size_partial_sm_multibit_programmable_bootstrap_step_one<
Torus>(polynomial_size);
if (max_shared_memory < full_sm_keybundle) {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_keybundle<Torus, params, NOSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, 0));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_keybundle<Torus, params, NOSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_keybundle<Torus, params,
FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm_keybundle));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_keybundle<Torus, params,
FULLSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
}
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_bootstrap_keybundle<Torus, params>,
cudaFuncAttributeMaxDynamicSharedMemorySize, full_sm_keybundle));
cudaFuncSetCacheConfig(device_multi_bit_bootstrap_keybundle<Torus, params>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
if (max_shared_memory < partial_sm_accumulate_step_one) {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_accumulate_step_one<
Torus, params, NOSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, 0));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_accumulate_step_one<
Torus, params, NOSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else if (max_shared_memory < full_sm_accumulate_step_one) {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_accumulate_step_one<
Torus, params, PARTIALSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
partial_sm_accumulate_step_one));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_accumulate_step_one<
Torus, params, PARTIALSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_accumulate_step_one<
Torus, params, FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
full_sm_accumulate_step_one));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_accumulate_step_one<
Torus, params, FULLSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
}
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_bootstrap_accumulate_step_one<Torus, params>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
full_sm_accumulate_step_one));
cudaFuncSetCacheConfig(
device_multi_bit_bootstrap_accumulate_step_one<Torus, params>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
if (max_shared_memory < full_sm_accumulate_step_two) {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_accumulate_step_two<
Torus, params, NOSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize, 0));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_accumulate_step_two<
Torus, params, NOSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
} else {
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_programmable_bootstrap_accumulate_step_two<
Torus, params, FULLSM>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
full_sm_accumulate_step_two));
cudaFuncSetCacheConfig(
device_multi_bit_programmable_bootstrap_accumulate_step_two<
Torus, params, FULLSM>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
}
check_cuda_error(cudaFuncSetAttribute(
device_multi_bit_bootstrap_accumulate_step_two<Torus, params>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
full_sm_accumulate_step_two));
cudaFuncSetCacheConfig(
device_multi_bit_bootstrap_accumulate_step_two<Torus, params>,
cudaFuncCachePreferShared);
check_cuda_error(cudaGetLastError());
if (!lwe_chunk_size)
lwe_chunk_size = get_lwe_chunk_size(input_lwe_ciphertext_count);
lwe_chunk_size = get_average_lwe_chunk_size(
lwe_dimension, level_count, glwe_dimension, input_lwe_ciphertext_count);
*buffer = new pbs_buffer<Torus, MULTI_BIT>(
stream, glwe_dimension, polynomial_size, level_count,
input_lwe_ciphertext_count, lwe_chunk_size, PBS_VARIANT::DEFAULT,
allocate_gpu_memory);
}
template <typename Torus, class params>
__host__ void execute_compute_keybundle(
cuda_stream_t *stream, Torus *lwe_array_in, Torus *lwe_input_indexes,
Torus *bootstrapping_key, pbs_buffer<Torus, MULTI_BIT> *buffer,
uint32_t num_samples, uint32_t lwe_dimension, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t grouping_factor, uint32_t base_log,
uint32_t level_count, uint32_t max_shared_memory, uint32_t lwe_chunk_size,
int lwe_offset) {
uint32_t chunk_size =
std::min(lwe_chunk_size, (lwe_dimension / grouping_factor) - lwe_offset);
uint32_t keybundle_size_per_input =
lwe_chunk_size * level_count * (glwe_dimension + 1) *
(glwe_dimension + 1) * (polynomial_size / 2);
uint64_t full_sm_keybundle =
get_buffer_size_full_sm_multibit_programmable_bootstrap_keybundle<Torus>(
polynomial_size);
auto d_mem = buffer->d_mem_keybundle;
auto keybundle_fft = buffer->keybundle_fft;
// Compute a keybundle
dim3 grid_keybundle(num_samples * chunk_size,
(glwe_dimension + 1) * (glwe_dimension + 1), level_count);
dim3 thds(polynomial_size / params::opt, 1, 1);
if (max_shared_memory < full_sm_keybundle)
device_multi_bit_programmable_bootstrap_keybundle<Torus, params, NOSM>
<<<grid_keybundle, thds, 0, stream->stream>>>(
lwe_array_in, lwe_input_indexes, keybundle_fft, bootstrapping_key,
lwe_dimension, glwe_dimension, polynomial_size, grouping_factor,
base_log, level_count, lwe_offset, chunk_size,
keybundle_size_per_input, d_mem, full_sm_keybundle);
else
device_multi_bit_programmable_bootstrap_keybundle<Torus, params, FULLSM>
<<<grid_keybundle, thds, full_sm_keybundle, stream->stream>>>(
lwe_array_in, lwe_input_indexes, keybundle_fft, bootstrapping_key,
lwe_dimension, glwe_dimension, polynomial_size, grouping_factor,
base_log, level_count, lwe_offset, chunk_size,
keybundle_size_per_input, d_mem, 0);
check_cuda_error(cudaGetLastError());
}
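A worked size check for the keybundle_size_per_input expression above, under illustrative parameters (lwe_chunk_size = 32, level_count = 1, glwe_dimension = 1, polynomial_size = 2048; not values from this diff):
constexpr uint64_t kb_per_input = 32ull * 1 * (1 + 1) * (1 + 1) * (2048 / 2); // 131072 double2 values
static_assert(kb_per_input * 16 == (2ull << 20), "16 bytes per double2 -> 2 MiB of keybundle FFT data per input");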
template <typename Torus, class params>
__host__ void
execute_step_one(cuda_stream_t *stream, Torus *lut_vector,
Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, pbs_buffer<Torus, MULTI_BIT> *buffer,
uint32_t num_samples, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t base_log, uint32_t level_count,
uint32_t max_shared_memory, int j, int lwe_offset) {
uint64_t full_sm_accumulate_step_one =
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_one<Torus>(
polynomial_size);
uint64_t partial_sm_accumulate_step_one =
get_buffer_size_partial_sm_multibit_programmable_bootstrap_step_one<
Torus>(polynomial_size);
//
auto d_mem = buffer->d_mem_acc_step_one;
auto global_accumulator = buffer->global_accumulator;
auto global_accumulator_fft = buffer->global_accumulator_fft;
dim3 grid_accumulate_step_one(level_count, glwe_dimension + 1, num_samples);
dim3 thds(polynomial_size / params::opt, 1, 1);
if (max_shared_memory < partial_sm_accumulate_step_one)
device_multi_bit_programmable_bootstrap_accumulate_step_one<Torus, params,
NOSM>
<<<grid_accumulate_step_one, thds, 0, stream->stream>>>(
lwe_array_in, lwe_input_indexes, lut_vector, lut_vector_indexes,
global_accumulator, global_accumulator_fft, lwe_dimension,
glwe_dimension, polynomial_size, base_log, level_count,
j + lwe_offset, d_mem, full_sm_accumulate_step_one);
else if (max_shared_memory < full_sm_accumulate_step_one)
device_multi_bit_programmable_bootstrap_accumulate_step_one<Torus, params,
PARTIALSM>
<<<grid_accumulate_step_one, thds, partial_sm_accumulate_step_one,
stream->stream>>>(
lwe_array_in, lwe_input_indexes, lut_vector, lut_vector_indexes,
global_accumulator, global_accumulator_fft, lwe_dimension,
glwe_dimension, polynomial_size, base_log, level_count,
j + lwe_offset, d_mem, partial_sm_accumulate_step_one);
else
device_multi_bit_programmable_bootstrap_accumulate_step_one<Torus, params,
FULLSM>
<<<grid_accumulate_step_one, thds, full_sm_accumulate_step_one,
stream->stream>>>(lwe_array_in, lwe_input_indexes, lut_vector,
lut_vector_indexes, global_accumulator,
global_accumulator_fft, lwe_dimension,
glwe_dimension, polynomial_size, base_log,
level_count, j + lwe_offset, d_mem, 0);
check_cuda_error(cudaGetLastError());
}
template <typename Torus, class params>
__host__ void execute_step_two(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
pbs_buffer<Torus, MULTI_BIT> *buffer, uint32_t num_samples,
uint32_t lwe_dimension, uint32_t glwe_dimension, uint32_t polynomial_size,
int32_t grouping_factor, uint32_t level_count, uint32_t max_shared_memory,
int j, int lwe_offset, uint32_t lwe_chunk_size) {
uint64_t full_sm_accumulate_step_two =
get_buffer_size_full_sm_multibit_programmable_bootstrap_step_two<Torus>(
polynomial_size);
//
auto d_mem = buffer->d_mem_acc_step_two;
auto keybundle_fft = buffer->keybundle_fft;
auto global_accumulator = buffer->global_accumulator;
auto global_accumulator_fft = buffer->global_accumulator_fft;
dim3 grid_accumulate_step_two(num_samples, glwe_dimension + 1);
dim3 thds(polynomial_size / params::opt, 1, 1);
if (max_shared_memory < full_sm_accumulate_step_two)
device_multi_bit_programmable_bootstrap_accumulate_step_two<Torus, params,
NOSM>
<<<grid_accumulate_step_two, thds, 0, stream->stream>>>(
lwe_array_out, lwe_output_indexes, keybundle_fft,
global_accumulator, global_accumulator_fft, lwe_dimension,
glwe_dimension, polynomial_size, level_count, grouping_factor, j,
lwe_offset, lwe_chunk_size, d_mem, full_sm_accumulate_step_two);
else
device_multi_bit_programmable_bootstrap_accumulate_step_two<Torus, params,
FULLSM>
<<<grid_accumulate_step_two, thds, full_sm_accumulate_step_two,
stream->stream>>>(lwe_array_out, lwe_output_indexes, keybundle_fft,
global_accumulator, global_accumulator_fft,
lwe_dimension, glwe_dimension, polynomial_size,
level_count, grouping_factor, j, lwe_offset,
lwe_chunk_size, d_mem, 0);
check_cuda_error(cudaGetLastError());
}
template <typename Torus, typename STorus, class params>
__host__ void host_multi_bit_programmable_bootstrap(
__host__ void host_multi_bit_pbs(
cuda_stream_t *stream, Torus *lwe_array_out, Torus *lwe_output_indexes,
Torus *lut_vector, Torus *lut_vector_indexes, Torus *lwe_array_in,
Torus *lwe_input_indexes, Torus *bootstrapping_key,
Torus *lwe_input_indexes, uint64_t *bootstrapping_key,
pbs_buffer<Torus, MULTI_BIT> *buffer, uint32_t glwe_dimension,
uint32_t lwe_dimension, uint32_t polynomial_size, uint32_t grouping_factor,
uint32_t base_log, uint32_t level_count, uint32_t num_samples,
@@ -630,31 +396,70 @@ __host__ void host_multi_bit_programmable_bootstrap(
// If a chunk size is not passed to this function, select one.
if (!lwe_chunk_size)
lwe_chunk_size = get_lwe_chunk_size(num_samples);
lwe_chunk_size = get_average_lwe_chunk_size(lwe_dimension, level_count,
glwe_dimension, num_samples);
//
double2 *keybundle_fft = buffer->keybundle_fft;
Torus *global_accumulator = buffer->global_accumulator;
double2 *global_accumulator_fft = buffer->global_accumulator_fft;
//
uint64_t full_sm_keybundle =
get_buffer_size_full_sm_multibit_bootstrap_keybundle<Torus>(
polynomial_size);
uint64_t full_sm_accumulate_step_one =
get_buffer_size_full_sm_multibit_bootstrap_step_one<Torus>(
polynomial_size);
uint64_t full_sm_accumulate_step_two =
get_buffer_size_full_sm_multibit_bootstrap_step_two<Torus>(
polynomial_size);
uint32_t keybundle_size_per_input =
lwe_chunk_size * level_count * (glwe_dimension + 1) *
(glwe_dimension + 1) * (polynomial_size / 2);
//
dim3 grid_accumulate_step_one(level_count, glwe_dimension + 1, num_samples);
dim3 grid_accumulate_step_two(num_samples, glwe_dimension + 1);
dim3 thds(polynomial_size / params::opt, 1, 1);
for (uint32_t lwe_offset = 0; lwe_offset < (lwe_dimension / grouping_factor);
lwe_offset += lwe_chunk_size) {
// Compute a keybundle
execute_compute_keybundle<Torus, params>(
stream, lwe_array_in, lwe_input_indexes, bootstrapping_key, buffer,
num_samples, lwe_dimension, glwe_dimension, polynomial_size,
grouping_factor, base_log, level_count, max_shared_memory,
lwe_chunk_size, lwe_offset);
// Accumulate
uint32_t chunk_size = std::min(
lwe_chunk_size, (lwe_dimension / grouping_factor) - lwe_offset);
for (int j = 0; j < chunk_size; j++) {
execute_step_one<Torus, params>(
stream, lut_vector, lut_vector_indexes, lwe_array_in,
lwe_input_indexes, buffer, num_samples, lwe_dimension, glwe_dimension,
polynomial_size, base_log, level_count, max_shared_memory, j,
lwe_offset);
execute_step_two<Torus, params>(
stream, lwe_array_out, lwe_output_indexes, buffer, num_samples,
lwe_dimension, glwe_dimension, polynomial_size, grouping_factor,
level_count, max_shared_memory, j, lwe_offset, lwe_chunk_size);
// Compute a keybundle
dim3 grid_keybundle(num_samples * chunk_size,
(glwe_dimension + 1) * (glwe_dimension + 1),
level_count);
device_multi_bit_bootstrap_keybundle<Torus, params>
<<<grid_keybundle, thds, full_sm_keybundle, stream->stream>>>(
lwe_array_in, lwe_input_indexes, keybundle_fft, bootstrapping_key,
lwe_dimension, glwe_dimension, polynomial_size, grouping_factor,
base_log, level_count, lwe_offset, chunk_size,
keybundle_size_per_input);
check_cuda_error(cudaGetLastError());
// Accumulate
for (int j = 0; j < chunk_size; j++) {
device_multi_bit_bootstrap_accumulate_step_one<Torus, params>
<<<grid_accumulate_step_one, thds, full_sm_accumulate_step_one,
stream->stream>>>(lwe_array_in, lwe_input_indexes, lut_vector,
lut_vector_indexes, global_accumulator,
global_accumulator_fft, lwe_dimension,
glwe_dimension, polynomial_size, base_log,
level_count, j + lwe_offset);
check_cuda_error(cudaGetLastError());
device_multi_bit_bootstrap_accumulate_step_two<Torus, params>
<<<grid_accumulate_step_two, thds, full_sm_accumulate_step_two,
stream->stream>>>(lwe_array_out, lwe_output_indexes, keybundle_fft,
global_accumulator, global_accumulator_fft,
lwe_dimension, glwe_dimension, polynomial_size,
level_count, grouping_factor, j, lwe_offset,
lwe_chunk_size);
check_cuda_error(cudaGetLastError());
}
}
}
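The loop above walks the lwe_dimension / grouping_factor multi-bit iterations in chunks, recomputing a keybundle per chunk. A small sketch (assumed helper, not part of the file) of how many keybundle passes that makes:
uint32_t num_keybundle_passes(uint32_t lwe_dimension, uint32_t grouping_factor,
                              uint32_t lwe_chunk_size) {
  uint32_t total = lwe_dimension / grouping_factor;     // multi-bit iterations
  return (total + lwe_chunk_size - 1) / lwe_chunk_size; // ceiling division
}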

View File

@@ -1,26 +1,30 @@
#include "bootstrapping_key.cuh"
void cuda_convert_lwe_programmable_bootstrap_key_32(
void *dest, void *src, cuda_stream_t *stream, uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count, uint32_t polynomial_size) {
void cuda_convert_lwe_bootstrap_key_32(void *dest, void *src,
cuda_stream_t *stream,
uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size) {
uint32_t total_polynomials =
input_lwe_dim * (glwe_dim + 1) * (glwe_dim + 1) * level_count;
cuda_convert_lwe_programmable_bootstrap_key<uint32_t, int32_t>(
cuda_convert_lwe_bootstrap_key<uint32_t, int32_t>(
(double2 *)dest, (int32_t *)src, stream, input_lwe_dim, glwe_dim,
level_count, polynomial_size, total_polynomials);
}
void cuda_convert_lwe_programmable_bootstrap_key_64(
void *dest, void *src, cuda_stream_t *stream, uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count, uint32_t polynomial_size) {
void cuda_convert_lwe_bootstrap_key_64(void *dest, void *src,
cuda_stream_t *stream,
uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size) {
uint32_t total_polynomials =
input_lwe_dim * (glwe_dim + 1) * (glwe_dim + 1) * level_count;
cuda_convert_lwe_programmable_bootstrap_key<uint64_t, int64_t>(
cuda_convert_lwe_bootstrap_key<uint64_t, int64_t>(
(double2 *)dest, (int64_t *)src, stream, input_lwe_dim, glwe_dim,
level_count, polynomial_size, total_polynomials);
}
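As a concrete check of the total_polynomials computation above (illustrative parameters, not from this diff):
constexpr uint32_t total_polynomials = 840u * (1 + 1) * (1 + 1) * 1; // input_lwe_dim = 840, glwe_dim = 1, level_count = 1 -> 3360 polynomials converted per key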
void cuda_convert_lwe_multi_bit_programmable_bootstrap_key_64(
void cuda_convert_lwe_multi_bit_bootstrap_key_64(
void *dest, void *src, cuda_stream_t *stream, uint32_t input_lwe_dim,
uint32_t glwe_dim, uint32_t level_count, uint32_t polynomial_size,
uint32_t grouping_factor) {

Some files were not shown because too many files have changed in this diff.