Compare commits

1 commit

Author: sarah el kazdadi
SHA1: 69b37f3b06
Message: feat(multibit): implement deterministic multibit pbs
Date: 2023-05-15 15:42:12 +02:00
1326 changed files with 45575 additions and 273114 deletions

@@ -5,3 +5,13 @@ failure-output = "final"
fail-fast = false
retries = 0
slow-timeout = "5m"
[[profile.ci.overrides]]
filter = 'test(/^.*param_message_1_carry_[567]$/) or test(/^.*param_message_4_carry_4$/)'
retries = 3
[[profile.ci.overrides]]
filter = 'test(/^.*param_message_[23]_carry_[23]$/)'
retries = 1
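These overrides grant extra retries to the parameter sets that are most often flaky in CI. As a minimal sketch, assuming cargo-nextest is installed and this file is picked up as the project's nextest configuration, the same group of tests can be exercised locally with the `ci` profile so the retry counts above apply:

```bash
# Sketch only: run the retried high-carry parameter tests locally with the "ci" profile.
# Assumes cargo-nextest is installed and this nextest config is in effect.
cargo nextest run --profile ci \
  -E 'test(/^.*param_message_1_carry_[567]$/) or test(/^.*param_message_4_carry_4$/)'
```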

@@ -1,6 +1,6 @@
---
name: Bug report
about: Report a problem with TFHE-rs
about: Report a problem with concrete
title: ''
labels: triage_required
assignees: ''

@@ -1,6 +1,6 @@
---
name: Feature request
about: Suggest an idea for TFHE-rs
about: Suggest an idea for concrete
title: ''
labels: feature_request
assignees: ''

@@ -1,11 +0,0 @@
self-hosted-runner:
# Labels of self-hosted runner in array of strings.
labels:
- m1mac
- 4090-desktop
- large_windows_16_latest
- large_ubuntu_16
# Configuration variables in array of strings defined in your repository or
# organization. `null` means disabling configuration variables check.
# Empty array means no configuration variable is allowed.
config-variables: null

@@ -1,13 +0,0 @@
<!-- Feel free to delete the template if the PR (bumping a version e.g.) does not fit the template -->
closes: _please link all relevant issues_
### PR content/description
### Check-list:
* [ ] Tests for the changes have been added (for bug fixes / features)
* [ ] Docs have been added / updated (for bug fixes / features)
* [ ] Relevant issues are marked as resolved/closed, related issues are linked in the description
* [ ] Check for breaking changes (including serialization changes) and add them to commit message following the conventional commit [specification][conventional-breaking]
[conventional-breaking]: https://www.conventionalcommits.org/en/v1.0.0/#commit-message-with-description-and-breaking-change-footer
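The last check-list item points to the conventional commit specification for breaking changes. For illustration only, a hedged sketch of such a commit message; the scope and description below are hypothetical:

```bash
# Illustration only: a conventional commit carrying a breaking-change footer,
# per the linked specification. Scope and wording are hypothetical.
git commit -m "feat(shortint): change the serialized ciphertext layout" \
  -m "BREAKING CHANGE: ciphertexts serialized with previous versions can no longer be deserialized"
```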

@@ -1,36 +0,0 @@
# Add labels in pull request
name: PR label manager
on:
pull_request:
pull_request_review:
types: [submitted]
jobs:
trigger-tests:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- name: Get current labels
uses: snnaplab/get-labels-action@f426df40304808ace3b5282d4f036515f7609576
# Remove label if a push is performed after an approval
- name: Remove approved label
if: ${{ github.event_name == 'pull_request' && contains(fromJSON(env.LABELS), 'approved') }}
uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
with:
# We use a PAT to have the same user (zama-bot) for label deletion as for creation.
github_token: ${{ secrets.FHE_ACTIONS_TOKEN }}
labels: approved
# Add label only if the review is approved and if the label doesn't already exist
- name: Add approved label
uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf
if: ${{ github.event_name == 'pull_request_review'
&& github.event.review.state == 'approved'
&& !contains(fromJSON(env.LABELS), 'approved') }}
with:
# We need to use a PAT to be able to trigger `labeled` event for the other workflow.
github_token: ${{ secrets.FHE_ACTIONS_TOKEN }}
labels: approved

@@ -1,121 +0,0 @@
# Run backward compatibility tests
name: Backward compatibility Tests on CPU
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
jobs:
setup-instance:
name: Setup instance (backward-compat-tests)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-small
backward-compat-tests:
name: Backward compatibility tests
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Install git-lfs
run: |
sudo apt update && sudo apt -y install git-lfs
- name: Use specific data branch
if: ${{ contains(github.event.pull_request.labels.*.name, 'data_PR') }}
env:
PR_BRANCH: ${{ github.head_ref || github.ref_name }}
run: |
echo "BACKWARD_COMPAT_DATA_BRANCH=${PR_BRANCH}" >> "${GITHUB_ENV}"
- name: Get backward compat branch
id: backward_compat_branch
run: |
BRANCH="$(make backward_compat_branch)"
echo "branch=${BRANCH}" >> "${GITHUB_OUTPUT}"
- name: Clone test data
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
repository: zama-ai/tfhe-backward-compat-data
path: tfhe/tfhe-backward-compat-data
lfs: 'true'
ref: ${{ steps.backward_compat_branch.outputs.branch }}
- name: Run backward compatibility tests
run: |
make test_backward_compatibility_ci
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Backward compatibility tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (backward-compat-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, backward-compat-tests ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (backward-compat-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

@@ -1,258 +0,0 @@
# Run a small subset of tests to ensure quick feedback.
name: Fast AWS Tests on CPU
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
csprng_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.csprng_any_changed }}
zk_pok_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.zk_pok_any_changed }}
versionable_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.versionable_any_changed }}
core_crypto_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.core_crypto_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
boolean_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.boolean_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
shortint_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.shortint_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
integer_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.integer_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
wasm_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.wasm_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
high_level_api_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.high_level_api_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
user_docs_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.user_docs_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
any_file_changed: ${{ env.IS_PULL_REQUEST == 'false' || steps.aggregated-changes.outputs.any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
dependencies:
- tfhe/Cargo.toml
- concrete-csprng/**
- tfhe-zk-pok/**
- utils/tfhe-versionable/**
- utils/tfhe-versionable-derive/**
csprng:
- concrete-csprng/**
zk_pok:
- tfhe-zk-pok/**
versionable:
- utils/tfhe-versionable/**
- utils/tfhe-versionable-derive/**
core_crypto:
- tfhe/src/core_crypto/**
boolean:
- tfhe/src/core_crypto/**
- tfhe/src/boolean/**
shortint:
- tfhe/src/core_crypto/**
- tfhe/src/shortint/**
integer:
- tfhe/src/core_crypto/**
- tfhe/src/shortint/**
- tfhe/src/integer/**
wasm:
- tfhe/src/**
- tfhe/js_on_wasm_tests/**
- tfhe/web_wasm_parallel_tests/**
- '!tfhe/src/c_api/**'
- '!tfhe/src/boolean/**'
high_level_api:
- tfhe/src/**
- '!tfhe/src/c_api/**'
- '!tfhe/src/boolean/**'
- '!tfhe/src/c_api/**'
- '!tfhe/src/js_on_wasm_api/**'
user_docs:
- tfhe/src/**
- '!tfhe/src/c_api/**'
- 'tfhe/docs/**.md'
- README.md
- name: Aggregate file changes
id: aggregated-changes
if: ( steps.changed-files.outputs.dependencies_any_changed == 'true' ||
steps.changed-files.outputs.csprng_any_changed == 'true' ||
steps.changed-files.outputs.zk_pok_any_changed == 'true' ||
steps.changed-files.outputs.versionable_any_changed == 'true' ||
steps.changed-files.outputs.core_crypto_any_changed == 'true' ||
steps.changed-files.outputs.boolean_any_changed == 'true' ||
steps.changed-files.outputs.shortint_any_changed == 'true' ||
steps.changed-files.outputs.integer_any_changed == 'true' ||
steps.changed-files.outputs.wasm_any_changed == 'true' ||
steps.changed-files.outputs.high_level_api_any_changed == 'true' ||
steps.changed-files.outputs.user_docs_any_changed == 'true')
run: |
echo "any_changed=true" >> "$GITHUB_OUTPUT"
setup-instance:
name: Setup instance (fast-tests)
if: github.event_name != 'pull_request' ||
needs.should-run.outputs.any_file_changed == 'true'
needs: should-run
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
fast-tests:
name: Fast CPU tests
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
needs: [ should-run, setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Run concrete-csprng tests
if: needs.should-run.outputs.csprng_test == 'true'
run: |
make test_concrete_csprng
- name: Run tfhe-zk-pok tests
if: needs.should-run.outputs.zk_pok_test == 'true'
run: |
make test_zk_pok
- name: Run tfhe-versionable tests
if: needs.should-run.outputs.versionable_test == 'true'
run: |
make test_versionable
- name: Run core tests
if: needs.should-run.outputs.core_crypto_test == 'true'
run: |
AVX512_SUPPORT=ON make test_core_crypto
- name: Run boolean tests
if: needs.should-run.outputs.boolean_test == 'true'
run: |
make test_boolean
- name: Run user docs tests
if: needs.should-run.outputs.user_docs_test == 'true'
run: |
make test_user_doc
- name: Run js on wasm API tests
if: needs.should-run.outputs.wasm_test == 'true'
run: |
make test_nodejs_wasm_api_in_docker
- name: Gen Keys if required
if: needs.should-run.outputs.shortint_test == 'true' ||
needs.should-run.outputs.integer_test == 'true'
run: |
make gen_key_cache
- name: Run shortint tests
if: needs.should-run.outputs.shortint_test == 'true'
run: |
BIG_TESTS_INSTANCE=TRUE FAST_TESTS=TRUE make test_shortint_ci
- name: Run integer tests
if: needs.should-run.outputs.integer_test == 'true'
run: |
BIG_TESTS_INSTANCE=TRUE FAST_TESTS=TRUE make test_integer_ci
- name: Run high-level API tests
if: needs.should-run.outputs.high_level_api_test == 'true'
run: |
make test_high_level_api
- name: Run safe deserialization tests
run: |
make test_safe_deserialization
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Fast AWS tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (fast-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, fast-tests ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (fast-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

@@ -1,158 +1,77 @@
name: AWS Unsigned Integer Tests on CPU
name: AWS Integer Tests on CPU
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
# We clear the cache to reduce memory pressure because of the numerous processes of cargo
# nextest
TFHE_RS_CLEAR_IN_MEMORY_KEY_CACHE: "1"
NO_BIG_PARAMS: FALSE
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [labeled]
push:
branches:
- main
# All the inputs are provided by Slab
inputs:
instance_id:
description: "AWS instance ID"
type: string
instance_image_id:
description: "AWS instance AMI ID"
type: string
instance_type:
description: "AWS instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: 'Slab request ID'
type: string
matrix_item:
description: 'Build matrix item'
type: string
jobs:
should-run:
if:
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'pull_request' && contains(github.event.label.name, 'approved')) ||
github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
integer_test: ${{ github.event_name == 'workflow_dispatch' ||
steps.changed-files.outputs.integer_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
persist-credentials: "false"
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
integer:
- tfhe/Cargo.toml
- concrete-csprng/**
- tfhe-zk-pok/**
- tfhe/src/core_crypto/**
- tfhe/src/shortint/**
- tfhe/src/integer/**
setup-instance:
name: Setup instance (unsigned-integer-tests)
needs: should-run
if:
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.integer_test == 'true') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'pull_request' && contains(github.event.label.name, 'approved')) ||
github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
unsigned-integer-tests:
name: Unsigned integer tests
needs: setup-instance
integer-tests:
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
group: ${{ github.workflow }}_${{ github.ref }}_${{ github.event.inputs.instance_image_id }}_${{ github.event.inputs.instance_type }}
cancel-in-progress: true
runs-on: ${{ github.event.inputs.runner_name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: "false"
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
# Step used for log purpose.
- name: Instance configuration used
run: |
echo "ID: ${{ github.event.inputs.instance_id }}"
echo "AMI: ${{ github.event.inputs.instance_image_id }}"
echo "Type: ${{ github.event.inputs.instance_type }}"
echo "Request ID: ${{ github.event.inputs.request_id }}"
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: stable
- name: Should skip big parameters set
if: github.event_name == 'pull_request'
run: |
echo "NO_BIG_PARAMS=TRUE" >> "${GITHUB_ENV}"
- name: Gen Keys if required
run: |
make GEN_KEY_CACHE_MULTI_BIT_ONLY=TRUE gen_key_cache
- name: Run unsigned integer multi-bit tests
run: |
AVX512_SUPPORT=ON make test_unsigned_integer_multi_bit_ci
default: true
- name: Gen Keys if required
run: |
make gen_key_cache
- name: Run unsigned integer tests
- name: Run integer tests
run: |
AVX512_SUPPORT=ON NO_BIG_PARAMS=${{ env.NO_BIG_PARAMS }} BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_ci
BIG_TESTS_INSTANCE=TRUE make test_integer_ci
- name: Slack Notification
if: ${{ failure() }}
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Unsigned Integer tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (unsigned-integer-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [setup-instance, unsigned-integer-tests]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (unsigned-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Integer tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

@@ -1,162 +0,0 @@
name: AWS Signed Integer Tests on CPU
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
# We clear the cache to reduce memory pressure because of the numerous processes of cargo
# nextest
TFHE_RS_CLEAR_IN_MEMORY_KEY_CACHE: "1"
NO_BIG_PARAMS: FALSE
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [labeled]
push:
branches:
- main
jobs:
should-run:
if:
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'pull_request' && contains(github.event.label.name, 'approved')) ||
github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
integer_test: ${{ github.event_name == 'workflow_dispatch' ||
steps.changed-files.outputs.integer_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
persist-credentials: "false"
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
integer:
- tfhe/Cargo.toml
- concrete-csprng/**
- tfhe-zk-pok/**
- tfhe/src/core_crypto/**
- tfhe/src/shortint/**
- tfhe/src/integer/**
setup-instance:
name: Setup instance (unsigned-integer-tests)
needs: should-run
if:
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.integer_test == 'true') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'pull_request' && contains(github.event.label.name, 'approved')) ||
github.event_name == 'workflow_dispatch'
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
signed-integer-tests:
name: Signed integer tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: "false"
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Should skip big parameters set
if: github.event_name == 'pull_request'
run: |
echo "NO_BIG_PARAMS=TRUE" >> "${GITHUB_ENV}"
- name: Gen Keys if required
run: |
make GEN_KEY_CACHE_MULTI_BIT_ONLY=TRUE gen_key_cache
- name: Run shortint multi-bit tests
run: |
make test_shortint_multi_bit_ci
- name: Run signed integer multi-bit tests
run: |
AVX512_SUPPORT=ON make test_signed_integer_multi_bit_ci
- name: Gen Keys if required
run: |
make gen_key_cache
- name: Run signed integer tests
run: |
AVX512_SUPPORT=ON NO_BIG_PARAMS=${{ env.NO_BIG_PARAMS }} BIG_TESTS_INSTANCE=TRUE make test_signed_integer_ci
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Signed Integer tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (signed-integer-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [setup-instance, signed-integer-tests]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (signed-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

@@ -4,252 +4,98 @@ env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
schedule:
# Nightly tests @ 1AM after each work day
- cron: "0 1 * * MON-FRI"
# All the inputs are provided by Slab
inputs:
instance_id:
description: "AWS instance ID"
type: string
instance_image_id:
description: "AWS instance AMI ID"
type: string
instance_type:
description: "AWS instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: 'Slab request ID'
type: string
matrix_item:
description: 'Build matrix item'
type: string
jobs:
should-run:
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
permissions:
pull-requests: write
outputs:
csprng_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.csprng_any_changed }}
zk_pok_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.zk_pok_any_changed }}
core_crypto_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.core_crypto_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
boolean_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.boolean_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
shortint_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.shortint_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
high_level_api_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.high_level_api_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
c_api_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.c_api_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
examples_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.examples_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
apps_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.apps_any_changed || steps.changed-files.outputs.dependencies_any_changed }}
user_docs_test: ${{ env.IS_PULL_REQUEST == 'false' ||
steps.changed-files.outputs.user_docs_any_changed ||
steps.changed-files.outputs.dependencies_any_changed }}
any_file_changed: ${{ env.IS_PULL_REQUEST == 'false' || steps.aggregated-changes.outputs.any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
dependencies:
- tfhe/Cargo.toml
- concrete-csprng/**
- tfhe-zk-pok/**
csprng:
- concrete-csprng/**
zk_pok:
- tfhe-zk-pok/**
core_crypto:
- tfhe/src/core_crypto/**
boolean:
- tfhe/src/core_crypto/**
- tfhe/src/boolean/**
shortint:
- tfhe/src/core_crypto/**
- tfhe/src/shortint/**
high_level_api:
- tfhe/src/**
- '!tfhe/src/c_api/**'
- '!tfhe/src/boolean/**'
- '!tfhe/src/js_on_wasm_api/**'
c_api:
- tfhe/src/**
examples:
- tfhe/src/**
- '!tfhe/src/c_api/**'
- tfhe/examples/**
apps:
- tfhe/src/**
- '!tfhe/src/c_api/**'
- apps/trivium/src/**
user_docs:
- tfhe/src/**
- '!tfhe/src/c_api/**'
- 'tfhe/docs/**.md'
- README.md
- name: Aggregate file changes
id: aggregated-changes
if: ( steps.changed-files.outputs.dependencies_any_changed == 'true' ||
steps.changed-files.outputs.csprng_any_changed == 'true' ||
steps.changed-files.outputs.zk_pok_any_changed == 'true' ||
steps.changed-files.outputs.core_crypto_any_changed == 'true' ||
steps.changed-files.outputs.boolean_any_changed == 'true' ||
steps.changed-files.outputs.shortint_any_changed == 'true' ||
steps.changed-files.outputs.high_level_api_any_changed == 'true' ||
steps.changed-files.outputs.c_api_any_changed == 'true' ||
steps.changed-files.outputs.examples_any_changed == 'true' ||
steps.changed-files.outputs.apps_any_changed == 'true' ||
steps.changed-files.outputs.user_docs_any_changed == 'true')
run: |
echo "any_changed=true" >> "$GITHUB_OUTPUT"
setup-instance:
name: Setup instance (cpu-tests)
if: github.event_name != 'pull_request' ||
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.any_file_changed == 'true')
needs: should-run
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
cpu-tests:
name: CPU tests
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
needs: [ should-run, setup-instance ]
shortint-tests:
concurrency:
group: ${{ github.workflow }}_${{github.event_name}}_${{ github.ref }}
group: ${{ github.workflow }}_${{ github.ref }}_${{ github.event.inputs.instance_image_id }}_${{ github.event.inputs.instance_type }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
runs-on: ${{ github.event.inputs.runner_name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
# Step used for log purpose.
- name: Instance configuration used
run: |
echo "ID: ${{ github.event.inputs.instance_id }}"
echo "AMI: ${{ github.event.inputs.instance_image_id }}"
echo "Type: ${{ github.event.inputs.instance_type }}"
echo "Request ID: ${{ github.event.inputs.request_id }}"
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: stable
- name: Run concrete-csprng tests
if: needs.should-run.outputs.csprng_test == 'true'
run: |
make test_concrete_csprng
- name: Run tfhe-zk-pok tests
if: needs.should-run.outputs.zk_pok_test == 'true'
run: |
make test_zk_pok
default: true
- name: Run core tests
if: needs.should-run.outputs.core_crypto_test == 'true'
run: |
AVX512_SUPPORT=ON make test_core_crypto
- name: Run boolean tests
if: needs.should-run.outputs.boolean_test == 'true'
run: |
make test_boolean
- name: Run C API tests
if: needs.should-run.outputs.c_api_test == 'true'
run: |
make test_c_api
- name: Run user docs tests
if: needs.should-run.outputs.user_docs_test == 'true'
run: |
make test_user_doc
- name: Run js on wasm API tests
run: |
make test_nodejs_wasm_api_in_docker
- name: Gen Keys if required
if: needs.should-run.outputs.shortint_test == 'true'
run: |
make gen_key_cache
- name: Run shortint tests
if: needs.should-run.outputs.shortint_test == 'true'
run: |
BIG_TESTS_INSTANCE=TRUE make test_shortint_ci
- name: Run high-level API tests
if: needs.should-run.outputs.high_level_api_test == 'true'
run: |
BIG_TESTS_INSTANCE=TRUE make test_high_level_api
- name: Run example tests
if: needs.should-run.outputs.examples_test == 'true'
run: |
make test_examples
make dark_market
- name: Run apps tests
if: needs.should-run.outputs.apps_test == 'true'
run: |
make test_trivium
make test_kreyvium
- name: Slack Notification
if: ${{ failure() }}
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CPU tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cpu-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cpu-tests ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cpu-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Shortint tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

@@ -1,110 +0,0 @@
name: AWS WASM Tests on CPU
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
jobs:
setup-instance:
name: Setup instance (wasm-tests)
if: ${{ github.event_name == 'workflow_dispatch' || contains(github.event.label.name, 'approved') }}
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-small
wasm-tests:
name: WASM tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Install web resources
run: |
make install_node
make install_chrome_browser
make install_chrome_web_driver
- name: Run fmt checks
run: |
make check_fmt_js
- name: Run js on wasm API tests
run: |
make test_nodejs_wasm_api_in_docker
- name: Run parallel wasm tests
run: |
make test_web_js_api_parallel_chrome_ci
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "WASM tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (wasm-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, wasm-tests ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (wasm-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

@@ -1,150 +0,0 @@
# Run boolean benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Boolean benchmarks
on:
workflow_dispatch:
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (boolean-benchmarks)
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: bench
boolean-benchmarks:
name: Execute boolean benchmarks in EC2
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Run benchmarks with AVX512
run: |
make bench_boolean
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "hpc7a.96xlarge" \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Measure key sizes
run: |
make measure_boolean_key_sizes
- name: Parse key sizes results
run: |
python3 ./ci/benchmark_parser.py tfhe/boolean_key_sizes.csv ${{ env.RESULTS_FILENAME }} \
--key-sizes \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_boolean
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Boolean benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (boolean-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, boolean-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (boolean-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

@@ -1,138 +0,0 @@
# Run core crypto benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Core crypto benchmarks
on:
workflow_dispatch:
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (core-crypto-benchmarks)
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: bench
core-crypto-benchmarks:
name: Execute core crypto benchmarks in EC2
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Run benchmarks with AVX512
run: |
make bench_pbs
make bench_pbs128
make bench_ks
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "hpc7a.96xlarge" \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--name-suffix avx512 \
--walk-subdirs \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_core_crypto
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "PBS benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (core-crypto-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, core-crypto-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (core-crypto-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

@@ -1,196 +0,0 @@
# Run benchmarks on an RTX 4090 machine and return parsed results to Slab CI bot.
name: TFHE Cuda Backend - 4090 benchmarks
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
FAST_BENCH: TRUE
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [labeled]
schedule:
# Weekly benchmarks will be triggered each Friday at 9p.m.
- cron: "0 21 * * 5"
jobs:
cuda-integer-benchmarks:
name: Cuda integer benchmarks (RTX 4090)
if: ${{ github.event_name == 'workflow_dispatch' ||
github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs' ||
contains(github.event.label.name, '4090_bench') }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}_cuda_integer_bench
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ["self-hosted", "4090-desktop"]
timeout-minutes: 1440 # 24 hours
strategy:
fail-fast: false
max-parallel: 1
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
echo "FAST_BENCH=TRUE" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Run integer benchmarks
run: |
make BENCH_OP_FLAVOR=default bench_integer_multi_bit_gpu
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "rtx4090" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_integer_multi_bit_gpu_default
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Integer RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
cuda-core-crypto-benchmarks:
name: Cuda core crypto benchmarks (RTX 4090)
if: ${{ github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' || contains(github.event.label.name, '4090_bench') }}
needs: cuda-integer-benchmarks
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}_cuda_core_crypto_bench
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ["self-hosted", "4090-desktop"]
timeout-minutes: 1440 # 24 hours
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Run core crypto benchmarks
run: |
make bench_pbs_gpu
make bench_ks_gpu
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "rtx4090" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_core_crypto
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
echo "Computing HMac on results file"
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
echo "Sending results to Slab..."
curl -v -k \
-H "Content-Type: application/json" \
-H "X-Slab-Repository: ${{ github.repository }}" \
-H "X-Slab-Command: store_data_v2" \
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Core crypto RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
remove_github_label:
name: Remove 4090 bench label
if: ${{ always() && github.event_name == 'pull_request' }}
needs: [cuda-integer-benchmarks, cuda-core-crypto-benchmarks]
runs-on: ubuntu-latest
steps:
- uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
with:
labels: 4090_bench
github_token: ${{ secrets.GITHUB_TOKEN }}
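The core-crypto benchmark job above signs its results file before posting it to Slab. A minimal local sketch of that exchange, assuming `slab/scripts/hmac_calculator.sh` produces a hex-encoded SHA-256 HMAC of the file keyed with the job secret; the openssl line is an assumption, while the curl call mirrors the workflow step (repository and URL are placeholders here):

```bash
# Assumption: hmac_calculator.sh returns a hex-encoded SHA-256 HMAC of the payload
# keyed with the job secret. Headers mirror the "Send data to Slab" step above.
SIGNATURE="$(openssl dgst -sha256 -hmac "${JOB_SECRET}" "${RESULTS_FILENAME}" | awk '{print $NF}')"
curl -v -k \
  -H "Content-Type: application/json" \
  -H "X-Slab-Repository: ${GITHUB_REPOSITORY}" \
  -H "X-Slab-Command: store_data_v2" \
  -H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
  -d @"${RESULTS_FILENAME}" \
  "${SLAB_URL}"
```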

@@ -1,185 +0,0 @@
# Run core crypto benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Core crypto GPU benchmarks
on:
workflow_dispatch:
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (cuda-core-crypto-benchmarks)
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
cuda-core-crypto-benchmarks:
name: Execute GPU core crypto benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Run benchmarks with AVX512
run: |
make bench_pbs_gpu
make bench_ks_gpu
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n3-H100x1" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--name-suffix avx512 \
--walk-subdirs \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_core_crypto
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-core-crypto-benchmarks ]
runs-on: ubuntu-latest
if: ${{ !success() && !cancelled() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-core-crypto-benchmarks.result }}
SLACK_MESSAGE: "PBS GPU benchmarks finished with status: ${{ needs.cuda-core-crypto-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-core-crypto-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-core-crypto-benchmarks, slack-notify ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-core-crypto-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,201 +0,0 @@
# Run integer benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Integer GPU benchmarks
on:
workflow_dispatch:
push:
branches:
- main
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
PARSE_INTEGER_BENCH_CSV_FILE: tfhe_rs_integer_benches_${{ github.sha }}.csv
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (cuda-integer-benchmarks)
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
cuda-integer-benchmarks:
name: Execute GPU integer benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run benchmarks with AVX512
run: |
make FAST_BENCH=TRUE BENCH_OP_FLAVOR=default bench_integer_gpu
- name: Parse benchmarks to csv
run: |
make PARSE_INTEGER_BENCH_CSV_FILE=${{ env.PARSE_INTEGER_BENCH_CSV_FILE }} \
parse_integer_benches
- name: Upload csv results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_csv_integer
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n3-H100x1" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_integer
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-integer-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-integer-benchmarks.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-integer-benchmarks.result }}
SLACK_MESSAGE: "Integer GPU benchmarks finished with status: ${{ needs.cuda-integer-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-integer-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-integer-benchmarks, slack-notify ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-integer-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,194 +0,0 @@
# Run integer benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Integer 2xH100 benchmarks
on:
workflow_dispatch:
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (cuda-integer-full-2-gpu-benchmarks)
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: 2-h100
cuda-integer-full-2-gpu-benchmarks:
name: Execute 2xH100 integer benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440 # 24 hours
continue-on-error: true
strategy:
fail-fast: false
max-parallel: 1
matrix:
command: [integer_multi_bit]
op_flavor: [default]
# explicit include-based build matrix, of known valid options
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
} >> "${GITHUB_ENV}"
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run benchmarks with AVX512
run: |
make BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_${{ matrix.command }}_gpu
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n3-H100x2" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-integer-full-2-gpu-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-integer-full-2-gpu-benchmarks.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-integer-full-2-gpu-benchmarks.result }}
SLACK_MESSAGE: "Integer GPU 2xH100 benchmarks finished with status: ${{ needs.cuda-integer-full-2-gpu-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-integer-full-2-gpu-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-integer-full-2-gpu-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-integer-full-2-gpu-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,200 +0,0 @@
# Run all integer benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Integer GPU full benchmarks
on:
workflow_dispatch:
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (cuda-integer-full-benchmarks)
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
cuda-integer-full-benchmarks:
name: Execute GPU integer benchmarks for all operations flavor
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440 # 24 hours
continue-on-error: true
strategy:
fail-fast: false
max-parallel: 1
matrix:
command: [integer, integer_multi_bit]
op_flavor: [default]
# explicit include-based build matrix, of known valid options
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
} >> "${GITHUB_ENV}"
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run benchmarks with AVX512
run: |
make BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_${{ matrix.command }}_gpu
# Run these benchmarks only once
- name: Run compression benchmarks with AVX512
if: matrix.op_flavor == 'default' && matrix.command == 'integer'
run: |
make bench_integer_compression_gpu
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n3-H100x1" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-integer-full-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-integer-full-benchmarks.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-integer-full-benchmarks.result }}
SLACK_MESSAGE: "Integer GPU full benchmarks finished with status: ${{ needs.cuda-integer-full-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-integer-full-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-integer-full-benchmarks, slack-notify ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-integer-full-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,224 +0,0 @@
# Run integer benchmarks with multi-bit cryptographic parameters on an instance and return parsed results to Slab CI bot.
name: Integer GPU Multi-bit benchmarks
on:
workflow_dispatch:
inputs:
all_precisions:
description: "Run all precisions"
type: boolean
default: false
fast_default:
description: "Run only deduplicated default operations without scalar variants"
type: boolean
default: false
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
PARSE_INTEGER_BENCH_CSV_FILE: tfhe_rs_integer_benches_${{ github.sha }}.csv
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
FAST_BENCH: TRUE
BENCH_OP_FLAVOR: default
jobs:
setup-instance:
name: Setup instance (cuda-integer-multi-bit-benchmarks)
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
cuda-integer-multi-bit-benchmarks:
name: Execute GPU integer multi-bit benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440 # 24 hours
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
} >> "${GITHUB_ENV}"
- name: Should run benchmarks with all precisions
if: inputs.all_precisions
run: |
echo "FAST_BENCH=FALSE" >> "${GITHUB_ENV}"
- name: Should run fast subset benchmarks
if: inputs.fast_default
run: |
echo "BENCH_OP_FLAVOR=fast_default" >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run multi-bit benchmarks with AVX512
run: |
make bench_unsigned_integer_multi_bit_gpu
- name: Parse benchmarks to csv
run: |
make PARSE_INTEGER_BENCH_CSV_FILE=${{ env.PARSE_INTEGER_BENCH_CSV_FILE }} \
parse_integer_benches
- name: Upload csv results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_csv_integer
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n3-H100x1" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_integer
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-integer-multi-bit-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-integer-multi-bit-benchmarks.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-integer-multi-bit-benchmarks.result }}
SLACK_MESSAGE: "Integer GPU multi-bit benchmarks finished with status: ${{ needs.cuda-integer-multi-bit-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-integer-multi-bit-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-integer-multi-bit-benchmarks, slack-notify ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-integer-multi-bit-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,214 +0,0 @@
# Run 64-bit multi-bit integer benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Integer multi GPU Multi-bit benchmarks
on:
workflow_dispatch:
inputs:
all_precisions:
description: "Run all precisions"
type: boolean
default: false
fast_default:
description: "Run only deduplicated default operations without scalar variants"
type: boolean
default: false
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
FAST_BENCH: TRUE
BENCH_OP_FLAVOR: default
jobs:
setup-instance:
name: Setup instance (cuda-integer-multi-bit-multi-gpu-benchmarks)
runs-on: ubuntu-latest
if: ${{ (github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
github.event_name == 'workflow_dispatch' }}
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: multi-h100
cuda-integer-multi-bit-multi-gpu-benchmarks:
name: Execute multi GPU integer multi-bit benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440 # 24 hours
continue-on-error: true
strategy:
fail-fast: false
max-parallel: 1
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
} >> "${GITHUB_ENV}"
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Should run benchmarks with all precisions
if: inputs.all_precisions
run: |
echo "FAST_BENCH=FALSE" >> "${GITHUB_ENV}"
- name: Should run fast subset benchmarks
if: inputs.fast_default
run: |
echo "BENCH_OP_FLAVOR=fast_default" >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run multi-bit benchmarks with AVX512
run: |
make bench_unsigned_integer_multi_bit_gpu
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n3-H100x8" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_integer
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-integer-multi-bit-multi-gpu-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-integer-multi-bit-multi-gpu-benchmarks.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-integer-multi-bit-multi-gpu-benchmarks.result }}
SLACK_MESSAGE: "Integer multi GPU multi-bit benchmarks finished with status: ${{ needs.cuda-integer-multi-bit-multi-gpu-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-integer-multi-bit-multi-gpu-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-integer-multi-bit-multi-gpu-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-integer-multi-bit-multi-gpu-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,194 +0,0 @@
# Run all integer benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Integer multi GPU full benchmarks
on:
workflow_dispatch:
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (cuda-integer-full-multi-gpu-benchmarks)
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: multi-h100
cuda-integer-full-multi-gpu-benchmarks:
name: Execute multi GPU integer benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440 # 24 hours
continue-on-error: true
strategy:
fail-fast: false
max-parallel: 1
matrix:
command: [integer_multi_bit]
op_flavor: [default]
# explicit include-based build matrix, of known valid options
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
} >> "${GITHUB_ENV}"
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run benchmarks with AVX512
run: |
make BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_${{ matrix.command }}_gpu
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n3-H100x8" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-integer-full-multi-gpu-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-integer-full-multi-gpu-benchmarks.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-integer-full-multi-gpu-benchmarks.result }}
SLACK_MESSAGE: "Integer GPU full benchmarks finished with status: ${{ needs.cuda-integer-full-multi-gpu-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-integer-full-multi-gpu-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-integer-full-multi-gpu-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-integer-full-multi-gpu-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,206 +0,0 @@
# Run benchmarks on an L40 VM and return parsed results to Slab CI bot.
name: Cuda benchmarks (L40)
on:
workflow_dispatch:
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (cuda-l40-benchmarks)
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: l40
cuda-l40-benchmarks:
name: Cuda benchmarks (L40)
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440 # 24 hours
continue-on-error: true
strategy:
fail-fast: false
max-parallel: 1
matrix:
command: [integer_multi_bit]
op_flavor: [default]
# explicit include-based build matrix, of known valid options
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
} >> "${GITHUB_ENV}"
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run benchmarks with AVX512
run: |
make BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_${{ matrix.command }}_gpu
- name: Run compression benchmarks with AVX512
run: |
make bench_integer_compression_gpu
- name: Run PBS benchmarks
run: |
make bench_pbs_gpu
- name: Run KS benchmarks
run: |
make bench_ks_gpu
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "n3-L40x1" \
--backend gpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-l40-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-l40-benchmarks.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-l40-benchmarks.result }}
SLACK_MESSAGE: "Cuda benchmarks (L40) finished with status: ${{ needs.cuda-l40-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-l40-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-l40-benchmarks, slack-notify ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-l40-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,190 +0,0 @@
# Run all integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Integer benchmarks
on:
workflow_dispatch:
inputs:
all_precisions:
description: "Run all precisions"
type: boolean
default: false
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
# Quarterly benchmarks will be triggered right before the end of each quarter, on the 25th of the month at 4 a.m.
# These benchmarks take far longer to execute, hence they run only four times a year.
- cron: '0 4 25 MAR,JUN,SEP,DEC *'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
FAST_BENCH: TRUE
jobs:
prepare-matrix:
name: Prepare operations matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
op_flavor: ${{ steps.set_op_flavor.outputs.op_flavor }}
steps:
- name: Weekly benchmarks
if: github.event_name == 'workflow_dispatch' ||
github.event.schedule == '0 1 * * 6'
run: |
echo "OP_FLAVOR=[\"default\"]" >> "${GITHUB_ENV}"
- name: Quarterly benchmarks
if: github.event.schedule == '0 4 25 MAR,JUN,SEP,DEC *'
run: |
echo "OP_FLAVOR=[\"default\", \"smart\", \"unchecked\", \"misc\"]" >> "${GITHUB_ENV}"
- name: Set operation flavor output
id: set_op_flavor
run: |
echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (integer-benchmarks)
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: bench
integer-benchmarks:
name: Execute integer benchmarks for all operations flavor
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
timeout-minutes: 1440 # 24 hours
strategy:
max-parallel: 1
matrix:
command: [integer, integer_multi_bit]
op_flavor: ${{ fromJson(needs.prepare-matrix.outputs.op_flavor) }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Should run benchmarks with all precisions
if: inputs.all_precisions
run: |
echo "FAST_BENCH=FALSE" >> "${GITHUB_ENV}"
- name: Run benchmarks with AVX512
run: |
make BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_${{ matrix.command }}
# Run these benchmarks only once
- name: Run compression benchmarks with AVX512
if: matrix.op_flavor == 'default' && matrix.command == 'integer'
run: |
make bench_integer_compression
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "hpc7a.96xlarge" \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Integer full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (integer-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, integer-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (integer-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,186 +0,0 @@
# Run all shortint benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Shortint full benchmarks
on:
workflow_dispatch:
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
# Quarterly benchmarks will be triggered right before the end of each quarter, on the 25th of the month at 4 a.m.
# These benchmarks take far longer to execute, hence they run only four times a year.
- cron: '0 4 25 MAR,JUN,SEP,DEC *'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
prepare-matrix:
name: Prepare operations matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
op_flavor: ${{ steps.set_op_flavor.outputs.op_flavor }}
steps:
- name: Weekly benchmarks
if: github.event_name == 'workflow_dispatch' ||
github.event.schedule == '0 1 * * 6'
run: |
echo "OP_FLAVOR=[\"default\"]" >> "${GITHUB_ENV}"
- name: Quarterly benchmarks
if: github.event.schedule == '0 4 25 MAR,JUN,SEP,DEC *'
run: |
echo "OP_FLAVOR=[\"default\", \"smart\", \"unchecked\"]" >> "${GITHUB_ENV}"
- name: Set operation flavor output
id: set_op_flavor
run: |
echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (shortint-benchmarks)
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: bench
shortint-benchmarks:
name: Execute shortint benchmarks for all operations flavor
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
strategy:
max-parallel: 1
matrix:
op_flavor: ${{ fromJson(needs.prepare-matrix.outputs.op_flavor) }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Run benchmarks with AVX512
run: |
make BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_shortint
- name: Parse results
run: |
COMMIT_DATE="$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})"
COMMIT_HASH="$(git describe --tags --dirty)"
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "hpc7a.96xlarge" \
--project-version "${COMMIT_HASH}" \
--branch ${{ github.ref_name }} \
--commit-date "${COMMIT_DATE}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
# This small benchmark needs to be executed only once.
- name: Measure key sizes
if: matrix.op_flavor == 'default'
run: |
make measure_shortint_key_sizes
- name: Parse key sizes results
if: matrix.op_flavor == 'default'
run: |
python3 ./ci/benchmark_parser.py tfhe/shortint_key_sizes.csv ${{ env.RESULTS_FILENAME }} \
--key-sizes \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_shortint_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Shortint full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (shortint-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, shortint-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (shortint-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,184 +0,0 @@
# Run all signed integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Signed Integer full benchmarks
on:
workflow_dispatch:
inputs:
all_precisions:
description: "Run all precisions"
type: boolean
default: false
schedule:
# Weekly benchmarks will be triggered each Saturday at 1 a.m.
- cron: '0 1 * * 6'
# Quarterly benchmarks will be triggered right before the end of each quarter, on the 25th of the month at 4 a.m.
# These benchmarks take far longer to execute, which is why they only run four times a year.
- cron: '0 4 25 MAR,JUN,SEP,DEC *'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
FAST_BENCH: TRUE
jobs:
prepare-matrix:
name: Prepare operations matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
op_flavor: ${{ steps.set_op_flavor.outputs.op_flavor }}
steps:
- name: Weekly benchmarks
if: github.event_name == 'workflow_dispatch' ||
github.event.schedule == '0 1 * * 6'
run: |
echo "OP_FLAVOR=[\"default\"]" >> "${GITHUB_ENV}"
- name: Quarterly benchmarks
if: github.event.schedule == '0 4 25 MAR,JUN,SEP,DEC *'
run: |
echo "OP_FLAVOR=[\"default\", \"unchecked\"]" >> "${GITHUB_ENV}"
- name: Set operation flavor output
id: set_op_flavor
run: |
echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (signed-integer-benchmarks)
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: bench
signed-integer-benchmarks:
name: Execute signed integer benchmarks for all operation flavors
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
timeout-minutes: 1440 # 24 hours
strategy:
max-parallel: 1
matrix:
command: [ integer, integer_multi_bit ]
op_flavor: [ default, unchecked ]
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Should run benchmarks with all precisions
if: inputs.all_precisions
run: |
echo "FAST_BENCH=FALSE" >> "${GITHUB_ENV}"
- name: Run benchmarks with AVX512
run: |
make BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_signed_${{ matrix.command }}
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "hpc7a.96xlarge" \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Signed integer full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (signed-integer-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, signed-integer-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (signed-integer-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,187 +0,0 @@
# Run WASM client benchmarks on an instance and return parsed results to Slab CI bot.
name: WASM client benchmarks
on:
workflow_dispatch:
push:
branches:
- main
schedule:
# Weekly benchmarks will be triggered each Saturday at 1 a.m.
- cron: '0 1 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
should-run:
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs')
permissions:
pull-requests: write
outputs:
wasm_bench: ${{ steps.changed-files.outputs.wasm_bench_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
wasm_bench:
- tfhe/Cargo.toml
- concrete-csprng/**
- tfhe-zk-pok/**
- tfhe/src/**
- '!tfhe/src/c_api/**'
- tfhe/web_wasm_parallel_tests/**
- .github/workflows/wasm_client_benchmark.yml
setup-instance:
name: Setup instance (wasm-client-benchmarks)
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.wasm_bench == 'true')
needs: should-run
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-small
wasm-client-benchmarks:
name: Execute WASM client benchmarks
needs: setup-instance
if: needs.setup-instance.result != 'skipped'
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Install web resources
run: |
make install_node
make install_chrome_browser
make install_chrome_web_driver
- name: Run benchmarks
run: |
make bench_web_js_api_parallel_chrome_ci
- name: Parse results
run: |
make parse_wasm_benchmarks
python3 ./ci/benchmark_parser.py tfhe/wasm_pk_gen.csv ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "m6i.4xlarge" \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--key-gen
- name: Measure public key and ciphertext sizes in HL Api
run: |
make measure_hlapi_compact_pk_ct_sizes
- name: Parse key and ciphertext sizes results
run: |
python3 ./ci/benchmark_parser.py tfhe/hlapi_cpk_and_cctl_sizes.csv ${{ env.RESULTS_FILENAME }} \
--key-gen \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_wasm
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "WASM benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (wasm-client-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, wasm-client-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (wasm-client-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,190 +0,0 @@
# Run PKE Zero-Knowledge benchmarks on an instance and return parsed results to Slab CI bot.
name: PKE ZK benchmarks
on:
workflow_dispatch:
push:
branches:
- main
schedule:
# Weekly benchmarks will be triggered each Saturday at 3 a.m.
- cron: '0 3 * * 6'
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
PARSE_INTEGER_BENCH_CSV_FILE: tfhe_rs_integer_benches_${{ github.sha }}.csv
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
should-run:
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
((github.event_name == 'push' || github.event_name == 'schedule') && github.repository == 'zama-ai/tfhe-rs')
outputs:
zk_pok_changed: ${{ steps.changed-files.outputs.zk_pok_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
zk_pok:
- tfhe/Cargo.toml
- concrete-csprng/**
- tfhe-zk-pok/**
- tfhe/src/core_crypto/**
- tfhe/src/shortint/**
- tfhe/src/integer/**
- tfhe/src/zk.rs
- tfhe/benches/integer/zk_pke.rs
- .github/workflows/zk_pke_benchmark.yml
setup-instance:
name: Setup instance (pke-zk-benchmarks)
runs-on: ubuntu-latest
needs: should-run
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'push' &&
github.repository == 'zama-ai/tfhe-rs' &&
needs.should-run.outputs.zk_pok_changed == 'true')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: bench
pke-zk-benchmarks:
name: Execute PKE ZK benchmarks
if: needs.setup-instance.result != 'skipped'
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{github.event_name}}_${{ github.ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
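# On main the commit SHA is appended to the group name, so each push to main gets its own concurrency
# group and main runs neither queue behind nor cancel one another.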
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Get benchmark details
run: |
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Run benchmarks with AVX512
run: |
make bench_integer_zk
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware "hpc7a.96xlarge" \
--backend cpu \
--project-version "${{ env.COMMIT_HASH }}" \
--branch ${{ github.ref_name }} \
--commit-date "${{ env.COMMIT_DATE }}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Parse CRS sizes results
run: |
python3 ./ci/benchmark_parser.py tfhe/pke_zk_crs_sizes.csv ${{ env.RESULTS_FILENAME }} \
--key-sizes \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874
with:
name: ${{ github.sha }}_integer_zk
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py ${{ env.RESULTS_FILENAME }} "${{ secrets.JOB_SECRET }}" \
--slab-url "${{ secrets.SLAB_URL }}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "PKE ZK benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (pke-zk-benchmarks)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, pke-zk-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (pke-zk-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

.github/workflows/boolean_benchmark.yml vendored Normal file
View File

@@ -0,0 +1,114 @@
# Run boolean benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Boolean benchmarks
on:
workflow_dispatch:
inputs:
instance_id:
description: "Instance ID"
type: string
instance_image_id:
description: "Instance AMI ID"
type: string
instance_type:
description: "Instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: "Slab request ID"
type: string
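# These inputs are presumably filled in by the Slab CI bot when it dispatches the workflow on an
# instance it has just provisioned.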
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
jobs:
run-boolean-benchmarks:
name: Execute boolean benchmarks in EC2
runs-on: ${{ github.event.inputs.runner_name }}
if: ${{ !cancelled() }}
steps:
- name: Instance configuration used
run: |
echo "IDs: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
- name: Get benchmark date
run: |
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: nightly
override: true
- name: Run benchmarks with AVX512
run: |
make AVX512_SUPPORT=ON bench_boolean
- name: Parse results
run: |
COMMIT_DATE="$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})"
COMMIT_HASH="$(git describe --tags --dirty)"
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware ${{ inputs.instance_type }} \
--project-version "${COMMIT_HASH}" \
--branch ${{ github.ref_name }} \
--commit-date "${COMMIT_DATE}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Measure key sizes
run: |
make measure_boolean_key_sizes
- name: Parse key sizes results
run: |
python3 ./ci/benchmark_parser.py tfhe/boolean_key_sizes.csv ${{ env.RESULTS_FILENAME }} \
--key-sizes \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_boolean
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
echo "Computing HMac on results file"
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
echo "Sending results to Slab..."
curl -v -k \
-H "Content-Type: application/json" \
-H "X-Slab-Repository: ${{ github.repository }}" \
-H "X-Slab-Command: store_data_v2" \
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}
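# hmac_calculator.sh is assumed to compute an HMAC-SHA256 of the results file keyed with the job secret,
# which is what the X-Hub-Signature-256 header above carries for verification on the Slab side.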

View File

@@ -6,8 +6,6 @@ on:
env:
CARGO_TERM_COLOR: always
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
@@ -19,57 +17,30 @@ jobs:
strategy:
matrix:
# GitHub macos-latest runners are now M1 Macs, so use ours; we limit what runs so it stays fast
# even with a few PRs open
os: [large_ubuntu_16, macos-latest, windows-latest]
os: [ubuntu-latest, macos-latest, windows-latest]
fail-fast: false
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Install and run newline linter checks
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
wget https://github.com/fernandrone/linelint/releases/download/0.0.6/linelint-linux-amd64
echo "16b70fb7b471d6f95cbdc0b4e5dc2b0ac9e84ba9ecdc488f7bdf13df823aca4b linelint-linux-amd64" > checksum
sha256sum -c checksum || exit 1
chmod +x linelint-linux-amd64
mv linelint-linux-amd64 /usr/local/bin/linelint
make check_newline
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
- name: Run pcc checks
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make pcc
- name: Build concrete-csprng
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_concrete_csprng
- name: Build Release core
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_core AVX512_SUPPORT=ON
make build_core_experimental AVX512_SUPPORT=ON
- name: Build Release boolean
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_boolean
- name: Build Release shortint
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_shortint
- name: Build Release integer
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_integer
@@ -78,14 +49,8 @@ jobs:
make build_tfhe_full
- name: Build Release c_api
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_c_api
- name: Build coverage tests
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_tfhe_coverage
# The wasm build check is a bit annoying to set up here and is done during the tests in
# aws_tfhe_tests.yml

View File

@@ -10,7 +10,7 @@ jobs:
- name: Check first line
uses: gsactions/commit-message-checker@16fa2d5de096ae0d35626443bcd24f1e756cafee
with:
pattern: '^((feat|fix|chore|refactor|style|test|docs|doc)(\([\w\-_]+\))?\!?\:) .+$'
pattern: '^((feat|fix|chore|refactor|style|test|docs|doc)\(\w+\)\:) .+$'
flags: "gs"
error: 'Your first line has to contain a commit type and scope like "feat(my_feature): msg".'
excludeDescription: "true" # optional: this excludes the description body of a pull request
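# As an illustration, "feat(my_feature): msg" satisfies both patterns above, while the unscoped
# "feat: msg" (or "feat!: msg") only matches the first, optional-scope variant and "update readme"
# matches neither.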

View File

@@ -1,33 +0,0 @@
# Lint and check CI
name: CI Lint and Checks
on:
pull_request:
env:
ACTIONLINT_VERSION: 1.6.27
jobs:
lint-check:
name: Lint and checks
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
- name: Get actionlint
run: |
bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) ${{ env.ACTIONLINT_VERSION }}
echo "f2ee6d561ce00fa93aab62a7791c1a0396ec7e8876b2a8f2057475816c550782 actionlint" > checksum
sha256sum -c checksum
ln -s "$(pwd)/actionlint" /usr/local/bin/
- name: Lint workflows
run: |
make lint_workflow
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@0901cf7b71c7ea6261ec69a3dc2bd3f9264f893e # v3.0.12
with:
allowlist: |
slsa-framework/slsa-github-generator
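# Actions are referenced by full commit SHA throughout these workflows; this step fails the run if a
# mutable tag slips in, with the allowlisted slsa-framework generator as the only exception.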

View File

@@ -1,142 +0,0 @@
name: Code Coverage
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
# The code coverage workflow is only run via the workflow_dispatch event since its execution duration has not stabilized yet.
jobs:
setup-instance:
name: Setup instance (code-coverage)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-small
code-coverage:
name: Code coverage tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.event_name }}_${{ github.ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 5760 # 4 days
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
files_yaml: |
tfhe:
- tfhe/src/**
concrete_csprng:
- concrete-csprng/src/**
- name: Generate Keys
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
run: |
make GEN_KEY_CACHE_COVERAGE_ONLY=TRUE gen_key_cache
make gen_key_cache_core_crypto
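# GEN_KEY_CACHE_COVERAGE_ONLY=TRUE presumably limits key generation to the parameter sets the coverage
# runs actually exercise, keeping this step short.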
- name: Run coverage for core_crypto
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
run: |
make test_core_crypto_cov AVX512_SUPPORT=ON
- name: Run coverage for boolean
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
run: |
make test_boolean_cov
- name: Run coverage for shortint
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
run: |
make test_shortint_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
directory: ./coverage/
fail_ci_if_error: true
files: shortint/cobertura.xml,boolean/cobertura.xml,core_crypto/cobertura.xml,core_crypto_avx512/cobertura.xml
- name: Run integer coverage
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
run: |
make test_integer_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
directory: ./coverage/
fail_ci_if_error: true
files: integer/cobertura.xml
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Code coverage finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (code-coverage)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, code-coverage ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (code-coverage) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,96 +0,0 @@
name: CSPRNG randomness testing Workflow
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
jobs:
setup-instance:
name: Setup instance (csprng-randomness-tests)
if: ${{ github.event_name == 'workflow_dispatch' || contains(github.event.label.name, 'approved') }}
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-small
csprng-randomness-tests:
name: CSPRNG randomness tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Dieharder randomness test suite
run: |
make dieharder_csprng
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "concrete-csprng randomness check finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (csprng-randomness-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, csprng-randomness-tests ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (csprng-randomness-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,123 +0,0 @@
name: Close or Merge corresponding PR on the data repo
# When a PR with the data_PR tag is closed or merged, this will close the corresponding PR in the data repo.
env:
TARGET_REPO_API_URL: ${{ github.api_url }}/repos/zama-ai/tfhe-backward-compat-data
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
PR_BRANCH: ${{ github.head_ref || github.ref_name }}
CLOSE_TYPE: ${{ github.event.pull_request.merged && 'merge' || 'close' }}
# only trigger on pull request closed events
on:
pull_request:
types: [ closed ]
# The same pattern is used for every job that calls the GitHub API:
# - save the result of the API call in the env var "GH_API_RES". Since the var is multiline,
#   we use this trick: https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#example-of-a-multiline-string
# - "set +e" makes sure we reach the final "echo EOF" even if a command fails
# - "set -o pipefail" makes a piped command return the exit code of its first failing command
# - 'RES="$?"' and 'exit $RES' propagate the error code of a failed command. Without them, with "set +e",
#   the script would always return 0 because of the trailing "echo EOF".
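# Schematically, each step below therefore runs:
#   { set +e; set -o pipefail; echo 'VAR<<EOF'; curl ... | jq ...; RES="$?"; echo EOF; } >> "${GITHUB_ENV}"; exit $RES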
jobs:
auto_close_job:
if: ${{ contains(github.event.pull_request.labels.*.name, 'data_PR') }}
runs-on: ubuntu-latest
steps:
- name: Find corresponding Pull Request in the data repo
run: |
{
set +e
set -o pipefail
echo 'TARGET_REPO_PR<<EOF'
curl --fail-with-body --no-progress-meter -L -X GET \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
${{ env.TARGET_REPO_API_URL }}/pulls\?head=${{ github.repository_owner }}:${{ env.PR_BRANCH }} | jq -e '.[0]' | sed 's/null/{ "message": "corresponding PR not found" }/'
RES="$?"
echo EOF
} >> "${GITHUB_ENV}"
exit $RES
- name: Comment on the PR to indicate the reason of the close
run: |
{
set +e
set -o pipefail
echo 'GH_API_RES<<EOF'
curl --fail-with-body --no-progress-meter -L -X POST \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${{ secrets.FHE_ACTIONS_TOKEN }}" \
-H "X-GitHub-Api-Version: 2022-11-28" \
${{ fromJson(env.TARGET_REPO_PR).comments_url }} \
-d '{ "body": "PR ${{ env.CLOSE_TYPE }}d because the corresponding PR in main repo was ${{ env.CLOSE_TYPE }}d: ${{ github.repository }}#${{ github.event.number }}" }'
RES="$?"
echo EOF
} >> "${GITHUB_ENV}"
exit $RES
- name: Merge the Pull Request in the data repo
if: ${{ github.event.pull_request.merged }}
run: |
{
set +e
set -o pipefail
echo 'GH_API_RES<<EOF'
curl --fail-with-body --no-progress-meter -L -X PUT \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${{ secrets.FHE_ACTIONS_TOKEN }}" \
-H "X-GitHub-Api-Version: 2022-11-28" \
${{ fromJson(env.TARGET_REPO_PR).url }}/merge \
-d '{ "merge_method": "rebase" }'
RES="$?"
echo EOF
} >> "${GITHUB_ENV}"
exit $RES
- name: Close the Pull Request in the data repo
if: ${{ !github.event.pull_request.merged }}
run: |
{
set +e
set -o pipefail
echo 'GH_API_RES<<EOF'
curl --fail-with-body --no-progress-meter -L -X PATCH \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${{ secrets.FHE_ACTIONS_TOKEN }}" \
-H "X-GitHub-Api-Version: 2022-11-28" \
${{ fromJson(env.TARGET_REPO_PR).url }} \
-d '{ "state": "closed" }'
RES="$?"
echo EOF
} >> "${GITHUB_ENV}"
exit $RES
- name: Delete the associated branch in the data repo
run: |
{
set +e
set -o pipefail
echo 'GH_API_RES<<EOF'
curl --fail-with-body --no-progress-meter -L -X DELETE \
-H "Accept: application/vnd.github+json" \
-H "Authorization: Bearer ${{ secrets.FHE_ACTIONS_TOKEN }}" \
-H "X-GitHub-Api-Version: 2022-11-28" \
${{ env.TARGET_REPO_API_URL }}/git/refs/heads/${{ env.PR_BRANCH }}
RES="$?"
echo EOF
} >> "${GITHUB_ENV}"
exit $RES
- name: Slack Notification
if: ${{ always() && job.status == 'failure' }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Failed to auto-${{ env.CLOSE_TYPE }} PR on data repo: ${{ fromJson(env.GH_API_RES || env.TARGET_REPO_PR).message }}"

View File

@@ -1,83 +0,0 @@
# Compile and test tfhe-cuda-backend on an RTX 4090 machine
name: TFHE Cuda Backend - 4090 full tests
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
schedule:
# Nightly tests @ 1AM after each work day
- cron: "0 1 * * MON-FRI"
jobs:
cuda-tests-linux:
name: CUDA tests (RTX 4090)
if: github.event_name == 'workflow_dispatch' ||
contains(github.event.label.name, '4090_test') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: true
runs-on: ["self-hosted", "4090-desktop"]
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Run fmt checks
run: |
make check_fmt_gpu
- name: Run clippy checks
run: |
make pcc_gpu
- name: Run core crypto, integer and internal CUDA backend tests
run: |
make test_gpu
- name: Run user docs tests
run: |
make test_user_doc_gpu
- name: Test C API
run: |
make test_c_api_gpu
- name: Run High Level API Tests
run: |
make test_high_level_api_gpu
- uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
if: ${{ always() && github.event_name == 'pull_request' }}
with:
labels: 4090_test
github_token: ${{ secrets.GITHUB_TOKEN }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CUDA RTX 4090 tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,201 +0,0 @@
# Compile and test tfhe-cuda-backend on an H100 VM on hyperstack
name: TFHE Cuda Backend - Fast tests on H100
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
gpu:
- tfhe/Cargo.toml
- tfhe/build.rs
- backends/tfhe-cuda-backend/**
- tfhe/src/core_crypto/gpu/**
- tfhe/src/integer/gpu/**
- tfhe/src/shortint/parameters/**
- tfhe/src/high_level_api/**
- tfhe/src/c_api/**
- 'tfhe/docs/**.md'
- Makefile
- '.github/workflows/gpu_fast_h100_tests.yml'
- scripts/**
- ci/**
setup-instance:
name: Setup instance (cuda-h100-tests)
needs: should-run
if: github.event_name != 'pull_request' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.gpu_test == 'true')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
cuda-tests-linux:
name: CUDA H100 tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
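# nvcc only supports specific host compiler versions, so CC, CXX and CUDAHOSTCXX are pinned to the
# gcc release declared in the build matrix.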
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run core crypto and internal CUDA backend tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_core_crypto_gpu
BIG_TESTS_INSTANCE=TRUE make test_integer_compression_gpu
BIG_TESTS_INSTANCE=TRUE make test_cuda_backend
- name: Run user docs tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_user_doc_gpu
- name: Test C API
run: |
BIG_TESTS_INSTANCE=TRUE make test_c_api_gpu
- name: Run High Level API Tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_high_level_api_gpu
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Fast H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,199 +0,0 @@
# Compile and test tfhe-cuda-backend on an AWS instance
name: TFHE Cuda Backend - Fast tests
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
gpu:
- tfhe/Cargo.toml
- tfhe/build.rs
- backends/tfhe-cuda-backend/**
- tfhe/src/core_crypto/gpu/**
- tfhe/src/integer/gpu/**
- tfhe/src/shortint/parameters/**
- tfhe/src/high_level_api/**
- tfhe/src/c_api/**
- 'tfhe/docs/**.md'
- '.github/workflows/gpu_fast_tests.yml'
- Makefile
- scripts/**
- ci/**
setup-instance:
name: Setup instance (cuda-tests)
needs: should-run
if: github.event_name != 'pull_request' ||
needs.should-run.outputs.gpu_test == 'true'
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: gpu-test
cuda-tests-linux:
name: CUDA tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run core crypto and internal CUDA backend tests
run: |
make test_core_crypto_gpu
make test_integer_compression_gpu
make test_cuda_backend
- name: Run user docs tests
run: |
make test_user_doc_gpu
- name: Test C API
run: |
make test_c_api_gpu
- name: Run High Level API Tests
run: |
make test_high_level_api_gpu
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Base GPU tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,156 +0,0 @@
# Compile and test tfhe-cuda-backend on an H100 VM on hyperstack
name: TFHE Cuda Backend - Full tests on H100
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
workflow_dispatch:
jobs:
setup-instance:
name: Setup instance (cuda-h100-tests)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@447a2d0fd2d1a9d647aa0d0723a6e9255372f261
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
cuda-tests-linux:
name: CUDA H100 tests
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run core crypto, integer and internal CUDA backend tests
run: |
make test_gpu
- name: Run user docs tests
run: |
make test_user_doc_gpu
- name: Test C API
run: |
make test_c_api_gpu
- name: Run High Level API Tests
run: |
make test_high_level_api_gpu
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Full H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@447a2d0fd2d1a9d647aa0d0723a6e9255372f261
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,204 +0,0 @@
# Compile and test tfhe-cuda-backend on an AWS instance
name: TFHE Cuda Backend - Full tests multi-GPU
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
gpu:
- tfhe/Cargo.toml
- tfhe/build.rs
- backends/tfhe-cuda-backend/**
- tfhe/src/core_crypto/gpu/**
- tfhe/src/integer/gpu/**
- tfhe/src/shortint/parameters/**
- tfhe/src/high_level_api/**
- tfhe/src/c_api/**
- 'tfhe/docs/**.md'
- Makefile
- '.github/workflows/**_multi_gpu_tests.yml'
- scripts/**
- ci/**
setup-instance:
name: Setup instance (cuda-tests-multi-gpu)
needs: should-run
if: github.event_name != 'pull_request' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.gpu_test == 'true')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: multi-gpu-test
cuda-tests-linux:
name: CUDA multi-GPU tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
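    # Allow a single run per ref at a time; in-progress runs are cancelled unless they are on main.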
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run multi-bit CUDA integer compression tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_integer_compression_gpu
# No need to test core_crypto and classic PBS in integer since it's already tested on single GPU.
- name: Run multi-bit CUDA integer tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_integer_multi_bit_gpu_ci
- name: Run user docs tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_user_doc_gpu
- name: Test C API
run: |
BIG_TESTS_INSTANCE=TRUE make test_c_api_gpu
- name: Run High Level API Tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_high_level_api_gpu
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Multi-GPU tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-tests-multi-gpu)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-tests-multi-gpu) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,127 +0,0 @@
# Perform tfhe-cuda-backend post-commit checks on an AWS instance
name: TFHE Cuda Backend - Post-commit Checks
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
on:
pull_request:
jobs:
setup-instance:
name: Setup instance (cuda-pcc)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: gpu-build
cuda-pcc:
name: CUDA post-commit checks
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 9
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Run fmt checks
run: |
make check_fmt_gpu
- name: Run clippy checks
run: |
make pcc_gpu
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CUDA AWS post-commit checks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-pcc)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-pcc ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-pcc) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,189 +0,0 @@
# Signed integer GPU tests on an H100 VM on hyperstack
name: TFHE Cuda Backend - Signed integer tests on H100
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
gpu:
- tfhe/Cargo.toml
- tfhe/build.rs
- backends/tfhe-cuda-backend/**
- tfhe/src/core_crypto/gpu/**
- tfhe/src/integer/gpu/**
- tfhe/src/shortint/parameters/**
- tfhe/src/high_level_api/**
- tfhe/src/c_api/**
- 'tfhe/docs/**.md'
- Makefile
- '.github/workflows/gpu_signed_integer_h100_tests.yml'
- scripts/**
- ci/**
setup-instance:
name: Setup instance (cuda-h100-tests)
needs: should-run
if: github.event_name != 'pull_request' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.gpu_test == 'true')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
cuda-tests-linux:
name: CUDA H100 signed integer tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run signed integer tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_signed_integer_gpu_ci
- name: Run signed integer multi-bit tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_signed_integer_multi_bit_gpu_ci
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,203 +0,0 @@
# Compile and test tfhe-cuda-backend signed integer operations on an AWS instance
name: TFHE Cuda Backend - Signed integer tests
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
FAST_TESTS: TRUE
NIGHTLY_TESTS: FALSE
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types:
- opened
- synchronize
- labeled
schedule:
# Nightly tests @ 1AM after each work day
- cron: "0 1 * * MON-FRI"
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
gpu:
- tfhe/Cargo.toml
- tfhe/build.rs
- backends/tfhe-cuda-backend/**
- tfhe/src/core_crypto/gpu/**
- tfhe/src/integer/gpu/**
- tfhe/src/shortint/parameters/**
- tfhe/src/high_level_api/**
- tfhe/src/c_api/**
- 'tfhe/docs/**.md'
- '.github/workflows/gpu_signed_integer_tests.yml'
- Makefile
- scripts/**
- ci/**
setup-instance:
name: Setup instance (cuda-signed-integer-tests)
runs-on: ubuntu-latest
needs: should-run
if: (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
github.event_name == 'workflow_dispatch' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: gpu-test
cuda-signed-integer-tests:
name: CUDA signed integer tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Should run nightly tests
if: github.event_name == 'schedule'
run: |
{
echo "FAST_TESTS=FALSE";
echo "NIGHTLY_TESTS=TRUE";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run signed integer multi-bit tests
run: |
make test_signed_integer_multi_bit_gpu_ci
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-signed-integer-tests ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-signed-integer-tests.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-signed-integer-tests.result }}
SLACK_MESSAGE: "Base GPU tests finished with status: ${{ needs.cuda-signed-integer-tests.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-signed-integer-tests ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-signed-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,189 +0,0 @@
# Test unsigned integers on an H100 VM on hyperstack
name: TFHE Cuda Backend - Unsigned integer tests on H100
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
gpu:
- tfhe/Cargo.toml
- tfhe/build.rs
- backends/tfhe-cuda-backend/**
- tfhe/src/core_crypto/gpu/**
- tfhe/src/integer/gpu/**
- tfhe/src/shortint/parameters/**
- tfhe/src/high_level_api/**
- tfhe/src/c_api/**
- 'tfhe/docs/**.md'
- Makefile
- '.github/workflows/gpu_unsigned_integer_tests.yml'
- scripts/**
- ci/**
setup-instance:
name: Setup instance (cuda-h100-tests)
needs: should-run
if: github.event_name != 'pull_request' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.gpu_test == 'true')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
cuda-tests-linux:
name: CUDA H100 unsigned integer tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run unsigned integer tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_gpu_ci
- name: Run unsigned integer multi-bit tests
run: |
BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_multi_bit_gpu_ci
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Unsigned integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,200 +0,0 @@
# Compile and test tfhe-cuda-backend unsigned integer operations on an AWS instance
name: TFHE Cuda Backend - Unsigned integer tests
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
FAST_TESTS: TRUE
NIGHTLY_TESTS: FALSE
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types:
- opened
- synchronize
- labeled
schedule:
# Nightly tests @ 1AM after each work day
- cron: "0 1 * * MON-FRI"
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: write
outputs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@48d8f15b2aaa3d255ca5af3eba4870f807ce6b3c
with:
since_last_remote_commit: true
files_yaml: |
gpu:
- tfhe/Cargo.toml
- tfhe/build.rs
- backends/tfhe-cuda-backend/**
- tfhe/src/core_crypto/gpu/**
- tfhe/src/integer/gpu/**
- tfhe/src/shortint/parameters/**
- tfhe/src/high_level_api/**
- tfhe/src/c_api/**
- 'tfhe/docs/**.md'
- '.github/workflows/gpu_unsigned_integer_tests.yml'
- Makefile
- scripts/**
- ci/**
setup-instance:
name: Setup instance (cuda-unsigned-integer-tests)
needs: should-run
if: (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
github.event_name == 'workflow_dispatch' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: gpu-test
cuda-unsigned-integer-tests:
name: CUDA unsigned integer tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
CMAKE_VERSION: 3.29.6
steps:
# Mandatory on hyperstack since a bootable volume is not re-usable yet.
- name: Install dependencies
run: |
sudo apt update
sudo apt install -y checkinstall zlib1g-dev libssl-dev
wget https://github.com/Kitware/CMake/releases/download/v${{ env.CMAKE_VERSION }}/cmake-${{ env.CMAKE_VERSION }}.tar.gz
tar -zxvf cmake-${{ env.CMAKE_VERSION }}.tar.gz
cd cmake-${{ env.CMAKE_VERSION }}
./bootstrap
make -j"$(nproc)"
sudo make install
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc" >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Should run nightly tests
if: github.event_name == 'schedule'
run: |
{
echo "FAST_TESTS=FALSE";
echo "NIGHTLY_TESTS=TRUE";
} >> "${GITHUB_ENV}"
- name: Check device is detected
if: ${{ !cancelled() }}
run: nvidia-smi
- name: Run unsigned integer multi-bit tests
run: |
make test_unsigned_integer_multi_bit_gpu_ci
slack-notify:
name: Slack Notification
needs: [ setup-instance, cuda-unsigned-integer-tests ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-unsigned-integer-tests.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ needs.cuda-unsigned-integer-tests.result }}
SLACK_MESSAGE: "Unsigned integer GPU tests finished with status: ${{ needs.cuda-unsigned-integer-tests.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-tests)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, cuda-unsigned-integer-tests ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-unsigned-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

.github/workflows/integer_benchmark.yml vendored Normal file

@@ -0,0 +1,104 @@
# Run integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Integer benchmarks
on:
workflow_dispatch:
inputs:
instance_id:
description: "Instance ID"
type: string
instance_image_id:
description: "Instance AMI ID"
type: string
instance_type:
description: "Instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: "Slab request ID"
type: string
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
jobs:
run-integer-benchmarks:
name: Execute integer benchmarks in EC2
runs-on: ${{ github.event.inputs.runner_name }}
if: ${{ !cancelled() }}
steps:
- name: Instance configuration used
run: |
echo "IDs: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
- name: Get benchmark date
run: |
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: nightly
override: true
- name: Run benchmarks with AVX512
run: |
make AVX512_SUPPORT=ON bench_integer
- name: Parse results
run: |
COMMIT_DATE="$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})"
COMMIT_HASH="$(git describe --tags --dirty)"
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware ${{ inputs.instance_type }} \
--project-version "${COMMIT_HASH}" \
--branch ${{ github.ref_name }} \
--commit-date "${COMMIT_DATE}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_integer
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
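      # The results payload is HMAC-signed with the job secret so Slab can authenticate the upload.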
- name: Send data to Slab
shell: bash
run: |
echo "Computing HMac on results file"
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
echo "Sending results to Slab..."
curl -v -k \
-H "Content-Type: application/json" \
-H "X-Slab-Repository: ${{ github.repository }}" \
-H "X-Slab-Command: store_data_v2" \
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}


@@ -3,24 +3,12 @@ name: Tests on M1 CPU
on:
workflow_dispatch:
pull_request:
types: [ labeled ]
# Have a nightly build for M1 tests
schedule:
# * is a special character in YAML so you have to quote this string
# At 22:00 every day
# Timezone is UTC, so Paris time is +2 during the summer and +1 during winter
- cron: "0 22 * * *"
types: [labeled]
env:
CARGO_TERM_COLOR: always
RUSTFLAGS: "-C target-cpu=native"
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
FAST_TESTS: "TRUE"
# We clear the cache to reduce memory pressure because of the numerous processes of cargo
# nextest
TFHE_RS_CLEAR_IN_MEMORY_KEY_CACHE: "1"
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
@@ -28,29 +16,22 @@ concurrency:
jobs:
cargo-builds:
if: ${{ (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') || github.event_name == 'workflow_dispatch' || contains(github.event.label.name, 'm1_test') }}
if: "github.event_name != 'pull_request' || contains(github.event.label.name, 'm1_test')"
runs-on: ["self-hosted", "m1mac"]
# 12 hours, default is 6 hours, hopefully this is more than enough
timeout-minutes: 720
steps:
- uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
persist-credentials: 'false'
- uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: stable
default: true
- name: Run pcc checks
run: |
make pcc
- name: Build concrete-csprng
run: |
make build_concrete_csprng
- name: Build Release core
run: |
make build_core
@@ -75,14 +56,6 @@ jobs:
run: |
make build_c_api
- name: Run concrete-csprng tests
run: |
make test_concrete_csprng
- name: Run tfhe-zk-pok tests
run: |
make test_zk_pok
- name: Run core tests
run: |
make test_core_crypto
@@ -91,13 +64,6 @@ jobs:
run: |
make test_boolean
# Because we do "illegal" things with the build system which Cargo does not seem to like much
# we need to clear the cache to make sure the C API is built properly and does not use a stale
# cached version
- name: Clear build cache
run: |
cargo clean
- name: Run C API tests
run: |
make test_c_api
@@ -121,18 +87,6 @@ jobs:
run: |
make test_integer_ci
- name: Gen Keys if required
run: |
make GEN_KEY_CACHE_MULTI_BIT_ONLY=TRUE gen_key_cache
- name: Run shortint multi bit tests
run: |
make test_shortint_multi_bit_ci
- name: Run integer multi bit tests
run: |
make test_integer_multi_bit_ci
remove_label:
name: Remove m1_test label
runs-on: ubuntu-latest
@@ -141,7 +95,6 @@ jobs:
if: ${{ always() }}
steps:
- uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
if: ${{ github.event_name == 'pull_request' }}
with:
labels: m1_test
github_token: ${{ secrets.GITHUB_TOKEN }}
@@ -149,7 +102,7 @@ jobs:
- name: Slack Notification
if: ${{ needs.cargo-builds.result != 'skipped' }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_COLOR: ${{ needs.cargo-builds.result }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}


@@ -8,125 +8,36 @@ on:
description: "Dry-run"
type: boolean
default: true
push_to_crates:
description: "Push to crate"
type: boolean
default: true
push_web_package:
description: "Push web js package"
type: boolean
default: true
push_node_package:
description: "Push node js package"
type: boolean
default: true
npm_latest_tag:
description: "Set NPM tag as latest"
type: boolean
default: false
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
NPM_TAG: ""
jobs:
package:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Prepare package
run: |
cargo package -p tfhe
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
with:
name: crate
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
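  # Generate SLSA provenance for the packaged crate from its SHA-256 hash.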
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
# Needed to create the provenance via GitHub OIDC
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
publish_release:
name: Publish Release
needs: [package] # for comparing hashes
runs-on: ubuntu-latest
permissions:
contents: read
id-token: write
steps:
- name: Checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Create NPM version tag
if: ${{ inputs.npm_latest_tag }}
run: |
echo "NPM_TAG=latest" >> "${GITHUB_ENV}"
- name: Download artifact
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: crate
path: target/package
- name: Publish crates.io package
if: ${{ inputs.push_to_crates }}
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
cargo publish -p tfhe --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: failure
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "SLSA tfhe crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
- name: Build web package
if: ${{ inputs.push_web_package }}
run: |
make build_web_js_api_parallel
make build_web_js_api
- name: Publish web package
if: ${{ inputs.push_web_package }}
uses: JS-DevTools/npm-publish@19c28f1ef146469e409470805ea4279d47c3d35c
uses: JS-DevTools/npm-publish@541aa6b21b4a1e9990c95a92c21adc16b35e9551
with:
token: ${{ secrets.NPM_TOKEN }}
package: tfhe/pkg/package.json
dry-run: ${{ inputs.dry_run }}
tag: ${{ env.NPM_TAG }}
provenance: true
- name: Build Node package
if: ${{ inputs.push_node_package }}
run: |
rm -rf tfhe/pkg
@@ -134,23 +45,8 @@ jobs:
sed -i 's/"tfhe"/"node-tfhe"/g' tfhe/pkg/package.json
- name: Publish Node package
if: ${{ inputs.push_node_package }}
uses: JS-DevTools/npm-publish@19c28f1ef146469e409470805ea4279d47c3d35c
uses: JS-DevTools/npm-publish@541aa6b21b4a1e9990c95a92c21adc16b35e9551
with:
token: ${{ secrets.NPM_TOKEN }}
package: tfhe/pkg/package.json
dry-run: ${{ inputs.dry_run }}
tag: ${{ env.NPM_TAG }}
provenance: true
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "tfhe release failed: (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}


@@ -1,41 +0,0 @@
name: Publish concrete-csprng release
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
jobs:
publish_release:
name: Publish concrete-csprng Release
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Publish crates.io package
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
cargo publish -p concrete-csprng --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "concrete-csprng release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}


@@ -1,36 +0,0 @@
name: Publish tfhe-versionable release
on:
workflow_dispatch:
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
jobs:
publish_release:
name: Publish tfhe-versionable Release
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Publish crates.io package
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: |
cargo publish -p tfhe-versionable-derive --token ${{ env.CRATES_TOKEN }}
cargo publish -p tfhe-versionable --token ${{ env.CRATES_TOKEN }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "tfhe-versionable release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}


@@ -1,129 +0,0 @@
# Publish new release of tfhe-cuda-backend on crates.io.
name: Publish CUDA release
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
push_to_crates:
description: "Push to crate"
type: boolean
default: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
setup-instance:
name: Setup instance (publish-cuda-release)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: gpu-build
publish-cuda-release:
name: Publish CUDA Release
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
fail-fast: false
# explicit include-based build matrix, of known valid options
matrix:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 9
env:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
steps:
- name: Checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: dtolnay/rust-toolchain@7b1c307e0dcbda6122208f10795a713336a9b35a
with:
toolchain: stable
- name: Export CUDA variables
if: ${{ !cancelled() }}
run: |
echo "$CUDA_PATH/bin" >> "${GITHUB_PATH}"
{
echo "CUDA_PATH=$CUDA_PATH";
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib:$LD_LIBRARY_PATH";
echo "CUDACXX=/usr/local/cuda-${{ matrix.cuda }}/bin/nvcc";
} >> "${GITHUB_ENV}"
# Specify the correct host compilers
- name: Export gcc and g++ variables
if: ${{ !cancelled() }}
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "HOME=/home/ubuntu";
} >> "${GITHUB_ENV}"
- name: Publish crates.io package
if: ${{ inputs.push_to_crates }}
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
cargo publish -p tfhe-cuda-backend --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-cuda-backend release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (publish-release)
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, publish-cuda-release ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@c0e7168795bd78f61f61146951ed9d0c73c9b701
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (publish-cuda-release) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,42 +0,0 @@
# Publish new release of tfhe-zk-pok on crates.io.
name: Publish tfhe-zk-pok release
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
jobs:
publish_release:
name: Publish tfhe-zk-pok Release
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
fetch-depth: 0
- name: Publish crates.io package
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
cargo publish -p tfhe-zk-pok --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "tfhe-zk-pok release failed: (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}


@@ -1,52 +0,0 @@
# Perform a security check on all the cryptographic parameter sets
name: Parameters curves security check
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
on:
push:
branches:
- "main"
workflow_dispatch:
jobs:
params-curves-security-check:
runs-on: large_ubuntu_16
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
- name: Checkout lattice-estimator
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
with:
repository: malb/lattice-estimator
path: lattice_estimator
ref: 'e80ec6bbbba212428b0e92d0467c18629cf9ed67'
- name: Install Sage
run: |
sudo apt update
sudo apt install -y sagemath
- name: Collect parameters
run: |
CARGO_PROFILE=devo make write_params_to_file
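      # Run the lattice-estimator on the exported parameters to check the security level of every parameter set.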
- name: Perform security check
run: |
PYTHONPATH=lattice_estimator sage ci/lattice_estimator.sage
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@4e5fb42d249be6a45a298f3c9543b111b02f7907
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Security check for parameters finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

.github/workflows/pbs_benchmark.yml vendored Normal file

@@ -0,0 +1,104 @@
# Run PBS benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: PBS benchmarks
on:
workflow_dispatch:
inputs:
instance_id:
description: "Instance ID"
type: string
instance_image_id:
description: "Instance AMI ID"
type: string
instance_type:
description: "Instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: "Slab request ID"
type: string
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
jobs:
run-pbs-benchmarks:
name: Execute PBS benchmarks in EC2
runs-on: ${{ github.event.inputs.runner_name }}
if: ${{ !cancelled() }}
steps:
- name: Instance configuration used
run: |
echo "IDs: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
- name: Get benchmark date
run: |
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: nightly
override: true
- name: Run benchmarks with AVX512
run: |
make AVX512_SUPPORT=ON bench_pbs
- name: Parse results
run: |
COMMIT_DATE="$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})"
COMMIT_HASH="$(git describe --tags --dirty)"
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware ${{ inputs.instance_type }} \
--project-version "${COMMIT_HASH}" \
--branch ${{ github.ref_name }} \
--commit-date "${COMMIT_DATE}" \
--bench-date "${{ env.BENCH_DATE }}" \
--name-suffix avx512 \
--walk-subdirs \
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_pbs
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
echo "Computing HMac on downloaded artifact"
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
echo "Sending results to Slab..."
curl -v -k \
-H "Content-Type: application/json" \
-H "X-Slab-Repository: ${{ github.repository }}" \
-H "X-Slab-Command: store_data_v2" \
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}


@@ -1,14 +0,0 @@
# Placeholder workflow file that can be run without having to merge to main first
name: Placeholder Workflow
on:
workflow_dispatch:
jobs:
placeholder:
name: Placeholder
runs-on: ubuntu-latest
steps:
- run: |
echo "Hello this is a Placeholder Workflow"

.github/workflows/shortint_benchmark.yml vendored Normal file

@@ -0,0 +1,114 @@
# Run shortint benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Shortint benchmarks
on:
workflow_dispatch:
inputs:
instance_id:
description: "Instance ID"
type: string
instance_image_id:
description: "Instance AMI ID"
type: string
instance_type:
description: "Instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: "Slab request ID"
type: string
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
jobs:
run-shortint-benchmarks:
name: Execute shortint benchmarks in EC2
runs-on: ${{ github.event.inputs.runner_name }}
if: ${{ !cancelled() }}
steps:
- name: Instance configuration used
run: |
echo "IDs: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
- name: Get benchmark date
run: |
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Set up home
# "Install rust" step require root user to have a HOME directory which is not set.
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install rust
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: nightly
override: true
- name: Run benchmarks with AVX512
run: |
make AVX512_SUPPORT=ON bench_shortint
- name: Parse results
run: |
COMMIT_DATE="$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})"
COMMIT_HASH="$(git describe --tags --dirty)"
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
--database tfhe_rs \
--hardware ${{ inputs.instance_type }} \
--project-version "${COMMIT_HASH}" \
--branch ${{ github.ref_name }} \
--commit-date "${COMMIT_DATE}" \
--bench-date "${{ env.BENCH_DATE }}" \
--walk-subdirs \
--name-suffix avx512 \
--throughput
- name: Measure key sizes
run: |
make measure_shortint_key_sizes
- name: Parse key sizes results
run: |
python3 ./ci/benchmark_parser.py tfhe/shortint_key_sizes.csv ${{ env.RESULTS_FILENAME }} \
--key-sizes \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_shortint
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
echo "Computing HMac on results file"
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
echo "Sending results to Slab..."
curl -v -k \
-H "Content-Type: application/json" \
-H "X-Slab-Repository: ${{ github.repository }}" \
-H "X-Slab-Command: store_data_v2" \
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
-d @${{ env.RESULTS_FILENAME }} \
${{ secrets.SLAB_URL }}

37
.github/workflows/start_benchmarks.yml vendored Normal file
View File

@@ -0,0 +1,37 @@
# Start all benchmark jobs on Slab CI bot.
name: Start all benchmarks
on:
push:
branches:
- "main"
workflow_dispatch:
jobs:
start-benchmarks:
if: ${{ (github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') || github.event_name == 'workflow_dispatch' }}
strategy:
matrix:
command: [boolean_bench, shortint_bench, integer_bench, pbs_bench]
runs-on: ubuntu-latest
steps:
- name: Checkout Slab repo
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Start AWS job in Slab
shell: bash
run: |
echo -n '{"command": "${{ matrix.command }}", "git_ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' > command.json
SIGNATURE="$(slab/scripts/hmac_calculator.sh command.json '${{ secrets.JOB_SECRET }}')"
curl -v -k \
--fail-with-body \
-H "Content-Type: application/json" \
-H "X-Slab-Repository: ${{ github.repository }}" \
-H "X-Slab-Command: start_aws" \
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
-d @command.json \
${{ secrets.SLAB_URL }}

View File

@@ -13,20 +13,25 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938
uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab
with:
fetch-depth: 0
- name: Save repo
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: repo-archive
path: '.'
- name: git-sync
uses: wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83
with:
source_repo: "zama-ai/tfhe-rs"
source_branch: "main"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.FHE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.CONCRETE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_branch: "main"
- name: git-sync tags
uses: wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83
with:
source_repo: "zama-ai/tfhe-rs"
source_branch: "refs/tags/*"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.FHE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.CONCRETE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_branch: "refs/tags/*"

View File

@@ -0,0 +1,18 @@
# Trigger an AWS build each time commits are pushed to a pull request.
name: PR AWS build trigger
on:
pull_request:
jobs:
test:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: mshick/add-pr-comment@a65df5f64fc741e91c59b8359a4bc56e57aaf5b1
with:
allow-repeats: true
message: |
@slab-ci cpu_test
@slab-ci cpu_integer_test

23
.gitignore vendored
View File

@@ -3,31 +3,12 @@ target/
.vscode/
# Path we use for internal-keycache during tests
/keys/
./keys/
# In case of symlinked keys
/keys
./keys
**/*.rmeta
**/Cargo.lock
**/*.bin
# Some of our bench outputs
/tfhe/benchmarks_parameters
**/*.csv
# dieharder run log
dieharder_run.log
# Coverage reports
/coverage/
# Cuda local build
backends/tfhe-cuda-backend/cuda/cmake-build-debug/
# WASM tests
tfhe/web_wasm_parallel_tests/server.PID
venv/
web-test-runner/
# Dir used for backward compatibility test data
tfhe/tfhe-backward-compat-data/

View File

@@ -1,14 +0,0 @@
ignore:
- .git
- target
- tfhe/benchmarks_parameters
- tfhe/web_wasm_parallel_tests/node_modules
- tfhe/web_wasm_parallel_tests/dist
- keys
- coverage
rules:
# checks if file ends in a newline character
end-of-file:
enable: true
single-new-line: true

View File

@@ -1,131 +0,0 @@
# Contributor Covenant Code of Conduct
## Our pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our standards
Examples of behavior that contributes to a positive environment for our
community include:
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
- Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
- The use of sexualized language or imagery, and sexual attention or advances of
any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email address,
without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting us anonymously through [this form](https://forms.gle/569j3cZqGRFgrR3u9).
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][mozilla coc].
For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][faq]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].
[faq]: https://www.contributor-covenant.org/faq
[homepage]: https://www.contributor-covenant.org
[mozilla coc]: https://github.com/mozilla/diversity
[translations]: https://www.contributor-covenant.org/translations
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html

View File

@@ -1,21 +1,6 @@
[workspace]
resolver = "2"
members = [
"tfhe",
"tfhe-zk-pok",
"tasks",
"apps/trivium",
"concrete-csprng",
"backends/tfhe-cuda-backend",
"utils/tfhe-versionable",
"utils/tfhe-versionable-derive",
]
exclude = [
"tfhe/backward_compatibility_tests",
"utils/cargo-tfhe-lints-inner",
"utils/cargo-tfhe-lints"
]
members = ["tfhe", "tasks"]
[profile.bench]
lto = "fat"
@@ -23,13 +8,8 @@ lto = "fat"
[profile.release]
lto = "fat"
[profile.release_lto_off]
inherits = "release"
lto = "off"
# Compiles much faster for tests and allows reasonable performance for iterating
[profile.devo]
inherits = "dev"
opt-level = 3
lto = "off"
debug-assertions = false

View File

@@ -1,6 +1,6 @@
BSD 3-Clause Clear License
Copyright © 2024 ZAMA.
Copyright © 2023 ZAMA.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,

1021
Makefile

File diff suppressed because it is too large

342
README.md
View File

@@ -1,73 +1,37 @@
<p align="center">
<!-- product name logo -->
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/zama-ai/tfhe-rs/assets/157474013/5283e0ba-da1e-43af-9f2a-c5221367a12b">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/zama-ai/tfhe-rs/assets/157474013/b94a8c96-7595-400b-9311-70765c706955">
<img width=600 alt="Zama TFHE-rs">
</picture>
<img width=600 src="https://user-images.githubusercontent.com/5758427/231206749-8f146b97-3c5a-4201-8388-3ffa88580415.png">
</p>
<hr/>
<p align="center">
<a href="https://docs.zama.ai/tfhe-rs"> 📒 Read documentation</a> | <a href="https://zama.ai/community"> 💛 Community support</a>
</p>
<p align="center">
<!-- Version badge using shields.io -->
<a href="https://github.com/zama-ai/tfhe-rs/releases">
<img src="https://img.shields.io/github/v/release/zama-ai/tfhe-rs?style=flat-square">
</a>
<!-- Zama Bounty Program -->
<a href="https://github.com/zama-ai/bounty-program">
<img src="https://img.shields.io/badge/Contribute-Zama%20Bounty%20Program-yellow?style=flat-square">
</a>
</p>
<hr/>
<p align="center">
<a href="https://docs.zama.ai/tfhe-rs"> 📒 Documentation</a> | <a href="https://zama.ai/community"> 💛 Community support</a> | <a href="https://github.com/zama-ai/awesome-zama"> 📚 FHE resources by Zama</a>
</p>
**TFHE-rs** is a pure Rust implementation of TFHE for boolean and integer
arithmetics over encrypted data. It includes:
- a **Rust** API
- a **C** API
- and a **client-side WASM** API
<p align="center">
<a href="https://github.com/zama-ai/tfhe-rs/releases"><img src="https://img.shields.io/github/v/release/zama-ai/tfhe-rs?style=flat-square"></a>
<a href="LICENSE"><img src="https://img.shields.io/badge/License-BSD--3--Clause--Clear-%23ffb243?style=flat-square"></a>
<a href="https://github.com/zama-ai/bounty-program"><img src="https://img.shields.io/badge/Contribute-Zama%20Bounty%20Program-%23ffd208?style=flat-square"></a>
</p>
## About
### What is TFHE-rs
**TFHE-rs** is a pure Rust implementation of TFHE for boolean and integer arithmetics over encrypted data.
It includes:
- a **Rust** API
- a **C** API
- and a **client-side WASM** API
TFHE-rs is designed for developers and researchers who want full control over
what they can do with TFHE, while not having to worry about the low-level
**TFHE-rs** is meant for developers and researchers who want full control over
what they can do with TFHE, while not having to worry about the low level
implementation. The goal is to have a stable, simple, high-performance, and
production-ready library for all the advanced features of TFHE.
<br></br>
### Main features
## Getting Started
- **Low-level cryptographic library** that implements Zama's variant of TFHE, including programmable bootstrapping
- **Implementation of the original TFHE boolean API** that can be used as a drop-in replacement for other TFHE libraries
- **Short integer API** that enables exact, unbounded FHE integer arithmetics with up to 8 bits of message space
- **Size-efficient public key encryption**
- **Ciphertext and server key compression** for efficient data transfer
- **Full Rust API, C bindings to the Rust High-Level API, and client-side Javascript API using WASM**.
*Learn more about TFHE-rs features in the [documentation](https://docs.zama.ai/tfhe-rs/readme).*
<br></br>
## Table of Contents
- **[Getting started](#getting-started)**
- [Cargo.toml configuration](#cargotoml-configuration)
- [A simple example](#a-simple-example)
- **[Resources](#resources)**
- [TFHE deep dive](#tfhe-deep-dive)
- [Tutorials](#tutorials)
- [Documentation](#documentation)
- **[Working with TFHE-rs](#working-with-tfhe-rs)**
- [Disclaimers](#disclaimers)
- [Citations](#citations)
- [Contributing](#contributing)
- [License](#license)
- **[Support](#support)**
<br></br>
## Getting started
### Cargo.toml configuration
To use the latest version of `TFHE-rs` in your project, you first need to add it as a dependency in your `Cargo.toml`:
+ For x86_64-based machines running Unix-like OSes:
@@ -81,124 +45,136 @@ tfhe = { version = "*", features = ["boolean", "shortint", "integer", "x86_64-un
```toml
tfhe = { version = "*", features = ["boolean", "shortint", "integer", "aarch64-unix"] }
```
Note: users with ARM devices must compile `TFHE-rs` using the `nightly` toolchain.
+ For x86_64-based machines with the [`rdseed instruction`](https://en.wikipedia.org/wiki/RDRAND) running Windows:
+ For x86_64-based machines with the [`rdseed instruction`](https://en.wikipedia.org/wiki/RDRAND)
running Windows:
```toml
tfhe = { version = "*", features = ["boolean", "shortint", "integer", "x86_64"] }
```
> [!Note]
> Note: You need to use a Rust version >= 1.73 to compile TFHE-rs.
Note: aarch64-based machines are not yet supported for Windows as it's currently missing an entropy source to be able to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in TFHE-rs
> [!Note]
> Note: aarch64-based machines are not yet supported for Windows as it's currently missing an entropy source to be able to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in TFHE-rs.
Note that when running code that uses `tfhe-rs`, it is highly recommended
to run in release mode with cargo's `--release` flag to have the best performance possible,
e.g. `cargo run --release`.
<p align="right">
<a href="#about" > ↑ Back to top </a>
</p>
Here is a full example evaluating a Boolean circuit:
### A simple example
```rust
use tfhe::boolean::prelude::*;
Here is a full example:
fn main() {
// We generate a set of client/server keys, using the default parameters:
let (mut client_key, mut server_key) = gen_keys();
``` rust
use tfhe::prelude::*;
use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheUint32, FheUint8};
// We use the client secret key to encrypt two messages:
let ct_1 = client_key.encrypt(true);
let ct_2 = client_key.encrypt(false);
fn main() -> Result<(), Box<dyn std::error::Error>> {
// Basic configuration to use homomorphic integers
let config = ConfigBuilder::default().build();
// We use the server public key to execute a boolean circuit:
// if ((NOT ct_2) NAND (ct_1 AND ct_2)) then (NOT ct_2) else (ct_1 AND ct_2)
let ct_3 = server_key.not(&ct_2);
let ct_4 = server_key.and(&ct_1, &ct_2);
let ct_5 = server_key.nand(&ct_3, &ct_4);
let ct_6 = server_key.mux(&ct_5, &ct_3, &ct_4);
// Key generation
let (client_key, server_keys) = generate_keys(config);
let clear_a = 1344u32;
let clear_b = 5u32;
let clear_c = 7u8;
// Encrypting the input data using the (private) client_key
// FheUint32: Encrypted equivalent to u32
let mut encrypted_a = FheUint32::try_encrypt(clear_a, &client_key)?;
let encrypted_b = FheUint32::try_encrypt(clear_b, &client_key)?;
// FheUint8: Encrypted equivalent to u8
let encrypted_c = FheUint8::try_encrypt(clear_c, &client_key)?;
// On the server side:
set_server_key(server_keys);
// Clear equivalent computations: 1344 * 5 = 6720
let encrypted_res_mul = &encrypted_a * &encrypted_b;
// Clear equivalent computations: 6720 >> 5 = 210
encrypted_a = &encrypted_res_mul >> &encrypted_b;
// Clear equivalent computations: let casted_a = a as u8;
let casted_a: FheUint8 = encrypted_a.cast_into();
// Clear equivalent computations: min(210, 7) = 7
let encrypted_res_min = &casted_a.min(&encrypted_c);
// Operation between clear and encrypted data:
// Clear equivalent computations: 7 & 1 = 1
let encrypted_res = encrypted_res_min & 1_u8;
// Decrypting on the client side:
let clear_res: u8 = encrypted_res.decrypt(&client_key);
assert_eq!(clear_res, 1_u8);
Ok(())
// We use the client key to decrypt the output of the circuit:
let output = client_key.decrypt(&ct_6);
assert_eq!(output, true);
}
```
To run this code, use the following command:
<p align="center"> <code> cargo run --release </code> </p>
Another example of how the library can be used with shortints:
> [!Note]
> Note that when running code that uses `TFHE-rs`, it is highly recommended
to run in release mode with cargo's `--release` flag to have the best performance possible.
```rust
use tfhe::shortint::prelude::*;
*Find an example with more explanations in [this part of the documentation](https://docs.zama.ai/tfhe-rs/get-started/quick_start)*
fn main() {
// Generate a set of client/server keys
// with 2 bits of message and 2 bits of carry
let (client_key, server_key) = gen_keys(PARAM_MESSAGE_2_CARRY_2);
<p align="right">
<a href="#about" > ↑ Back to top </a>
</p>
let msg1 = 3;
let msg2 = 2;
// Encrypt two messages using the (private) client key:
let ct_1 = client_key.encrypt(msg1);
let ct_2 = client_key.encrypt(msg2);
// Homomorphically compute an addition
let ct_add = server_key.unchecked_add(&ct_1, &ct_2);
## Resources
// Define the Hamming weight function
// f: x -> sum of the bits of x
let f = |x:u64| x.count_ones() as u64;
### TFHE deep dive
- [TFHE Deep Dive - Part I - Ciphertext types](https://www.zama.ai/post/tfhe-deep-dive-part-1)
- [TFHE Deep Dive - Part II - Encodings and linear leveled operations](https://www.zama.ai/post/tfhe-deep-dive-part-2)
- [TFHE Deep Dive - Part III - Key switching and leveled multiplications](https://www.zama.ai/post/tfhe-deep-dive-part-3)
- [TFHE Deep Dive - Part IV - Programmable Bootstrapping](https://www.zama.ai/post/tfhe-deep-dive-part-4)
<br></br>
// Generate the accumulator for the function
let acc = server_key.generate_accumulator(f);
### Tutorials
- [[Video tutorial] Implement signed integers using TFHE-rs ](https://www.zama.ai/post/video-tutorial-implement-signed-integers-ssing-tfhe-rs)
- [Homomorphic parity bit](https://docs.zama.ai/tfhe-rs/tutorials/parity_bit)
- [Homomorphic case changing on Ascii string](https://docs.zama.ai/tfhe-rs/tutorials/ascii_fhe_string)
- [Boolean SHA256 with TFHE-rs](https://www.zama.ai/post/boolean-sha256-tfhe-rs)
- [Dark market with TFHE-rs](https://www.zama.ai/post/dark-market-tfhe-rs)
- [Regular expression engine with TFHE-rs](https://www.zama.ai/post/regex-engine-tfhe-rs)
// Compute the function over the ciphertext using the PBS
let ct_res = server_key.apply_lookup_table(&ct_add, &acc);
*Explore more useful resources in [TFHE-rs tutorials](https://docs.zama.ai/tfhe-rs/tutorials) and [Awesome Zama repo](https://github.com/zama-ai/awesome-zama)*
<br></br>
### Documentation
// Decrypt the ciphertext using the (private) client key
let output = client_key.decrypt(&ct_res);
assert_eq!(output, f(msg1 + msg2));
}
```
Full, comprehensive documentation is available here: [https://docs.zama.ai/tfhe-rs](https://docs.zama.ai/tfhe-rs).
<p align="right">
<a href="#about" > ↑ Back to top </a>
</p>
An example using integer:
```rust
use tfhe::integer::gen_keys_radix;
use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2;
## Working with TFHE-rs
fn main() {
// We create keys to create 16 bits integers
// using 8 blocks of 2 bits
let (cks, sks) = gen_keys_radix(&PARAM_MESSAGE_2_CARRY_2, 8);
### Disclaimers
let clear_a = 2382u16;
let clear_b = 29374u16;
#### Security estimation
let mut a = cks.encrypt(clear_a as u64);
let mut b = cks.encrypt(clear_b as u64);
let encrypted_max = sks.smart_max_parallelized(&mut a, &mut b);
let decrypted_max: u64 = cks.decrypt(&encrypted_max);
assert_eq!(decrypted_max as u16, clear_a.max(clear_b))
}
```
## Contributing
There are two ways to contribute to TFHE-rs:
- you can open issues to report bugs or typos, or to suggest new ideas
- you can ask to become an official contributor by emailing [hello@zama.ai](mailto:hello@zama.ai).
(becoming an approved contributor involves signing our Contributor License Agreement (CLA))
Only approved contributors can send pull requests, so please make sure to get in touch before you do!
## Credits
This library uses several dependencies and we would like to thank the contributors of those
libraries.
## Need support?
<a target="_blank" href="https://community.zama.ai">
<img src="https://user-images.githubusercontent.com/5758427/231115030-21195b55-2629-4c01-9809-be5059243999.png">
</a>
## License
This software is distributed under the BSD-3-Clause-Clear license. If you have any questions,
please contact us at `hello@zama.ai`.
## Disclaimers
### Security Estimation
Security estimations are done using the
[Lattice Estimator](https://github.com/malb/lattice-estimator)
@@ -206,75 +182,7 @@ with `red_cost_model = reduction.RC.BDGL16`.
When a new update is published in the Lattice Estimator, we update parameters accordingly.
### Security model
### Side-Channel Attacks
The default parameters for the TFHE-rs library are chosen considering the IND-CPA security model, and are selected with a bootstrapping failure probability fixed at p_error = $2^{-64}$. In particular, it is assumed that the results of decrypted computations are not shared by the secret key owner with any third parties, as such an action can lead to leakage of the secret encryption key. If you are designing an application where decryptions must be shared, you will need to craft custom encryption parameters which are chosen in consideration of the IND-CPA^D security model [1].
[1] Li, Baiyu, et al. "Securing approximate homomorphic encryption using differential privacy." Annual International Cryptology Conference. Cham: Springer Nature Switzerland, 2022. https://eprint.iacr.org/2022/816.pdf
#### Side-channel attacks
Mitigation for side-channel attacks has not yet been implemented in TFHE-rs,
Mitigation for side channel attacks have not yet been implemented in TFHE-rs,
and will be released in upcoming versions.
<br></br>
### Citations
To cite TFHE-rs in academic papers, please use the following entry:
```text
@Misc{TFHE-rs,
title={{TFHE-rs: A Pure Rust Implementation of the TFHE Scheme for Boolean and Integer Arithmetics Over Encrypted Data}},
author={Zama},
year={2022},
note={\url{https://github.com/zama-ai/tfhe-rs}},
}
```
### Contributing
There are two ways to contribute to TFHE-rs:
- [Open issues](https://github.com/zama-ai/tfhe-rs/issues/new/choose) to report bugs and typos, or to suggest new ideas
- Request to become an official contributor by emailing [hello@zama.ai](mailto:hello@zama.ai).
Becoming an approved contributor involves signing our Contributor License Agreement (CLA). Only approved contributors can send pull requests, so please make sure to get in touch before you do!
<br></br>
### License
This software is distributed under the **BSD-3-Clause-Clear** license. Read [this](LICENSE) for more details.
#### FAQ
**Is Zama's technology free to use?**
>Zama's libraries are free to use under the BSD 3-Clause Clear license only for development, research, prototyping, and experimentation purposes. However, for any commercial use of Zama's open source code, companies must purchase Zama's commercial patent license.
>
>Everything we do is open source and we are very transparent on what it means for our users; you can read more about how we monetize our open source products at Zama in [this blogpost](https://www.zama.ai/post/open-source).
**What do I need to do if I want to use Zama's technology for commercial purposes?**
>To commercially use Zama's technology you need to be granted Zama's patent license. Please contact us at hello@zama.ai for more information.
**Do you file IP on your technology?**
>Yes, all Zama's technologies are patented.
**Can you customize a solution for my specific use case?**
>We are open to collaborating and advancing the FHE space with our partners. If you have specific needs, please email us at hello@zama.ai.
<p align="right">
<a href="#about" > ↑ Back to top </a>
</p>
## Support
<a target="_blank" href="https://community.zama.ai">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://github.com/zama-ai/tfhe-rs/assets/157474013/08656d0a-3f44-4126-b8b6-8c601dff5380">
<source media="(prefers-color-scheme: light)" srcset="https://github.com/zama-ai/tfhe-rs/assets/157474013/1c9c9308-50ac-4aab-a4b9-469bb8c536a4">
<img alt="Support">
</picture>
</a>
🌟 If you find this project helpful or interesting, please consider giving it a star on GitHub! Your support helps to grow the community and motivates further development.
<p align="right">
<a href="#about" > ↑ Back to top </a>
</p>

View File

@@ -1,15 +0,0 @@
[default]
extend-ignore-identifiers-re = [
# Related to serialized object
"ser",
"unser",
# Used when dumping tfhe-rs parameters set into Sage format
"ND.*",
# Related to FHE strings example handling "banana"
"ba",
"enc_ba",
# Example with string replacing "hello" with "herlo"
"herlo",
# Example in trivium
"C9217BA0D762ACA1"
]

View File

@@ -1,24 +0,0 @@
[package]
name = "tfhe-trivium"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
rayon = { version = "1.7.0"}
[target.'cfg(target_arch = "x86_64")'.dependencies.tfhe]
path = "../../tfhe"
features = [ "boolean", "shortint", "integer", "x86_64" ]
[target.'cfg(target_arch = "aarch64")'.dependencies.tfhe]
path = "../../tfhe"
features = [ "boolean", "shortint", "integer", "aarch64-unix" ]
[dev-dependencies]
criterion = { version = "0.5.1", features = [ "html_reports" ]}
[[bench]]
name = "trivium"
harness = false

View File

@@ -1,201 +0,0 @@
# FHE boolean Trivium implementation using TFHE-rs
The cleartext boolean Trivium is available to be built using the function `TriviumStream::<bool>::new`.
This takes as input 2 arrays of 80 bool: the Trivium key and the IV. After initialization, it returns a TriviumStream on
which the user can call `next`, getting the next bit of the cipher stream, or `next_64`, which will compute 64 values at once,
using multithreading to accelerate the computation.
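As a quick illustration, here is a minimal cleartext sketch. The constructor shape (key and IV passed directly as arrays of 80 bools) and the shape of the `next_64` output are assumed from the description above, and the all-zero key and IV are placeholders only; real uses derive them from hex strings as in the full example below.
```rust
use tfhe_trivium::TriviumStream;

fn main() {
    // Placeholder all-zero key and IV (80 bits each).
    let key = [false; 80];
    let iv = [false; 80];

    // Cleartext Trivium stream; assumed constructor: new(key, iv).
    let mut trivium = TriviumStream::<bool>::new(key, iv);

    // `next_64` computes the next 64 keystream bits in one call.
    let bits = trivium.next_64();
    assert_eq!(bits.len(), 64);
}
```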
Similarly, the function `TriviumStream::<FheBool>::new` returns an analogous object running in FHE space. Its arguments are
2 arrays of 80 FheBool representing the encrypted Trivium key and the encrypted IV. It also requires a reference to the server key of the
current scheme. This means that any user of this feature must also have the `tfhe-rs` crate as a dependency.
Example of a Rust main below:
```rust
use tfhe::{ConfigBuilder, generate_keys, FheBool};
use tfhe::prelude::*;
use tfhe_trivium::TriviumStream;
fn get_hexadecimal_string_from_lsb_first_stream(a: Vec<bool>) -> String {
assert!(a.len() % 8 == 0);
let mut hexadecimal: String = "".to_string();
for test in a.chunks(8) {
// Encoding is bytes in LSB order
match test[4..8] {
[false, false, false, false] => hexadecimal.push('0'),
[true, false, false, false] => hexadecimal.push('1'),
[false, true, false, false] => hexadecimal.push('2'),
[true, true, false, false] => hexadecimal.push('3'),
[false, false, true, false] => hexadecimal.push('4'),
[true, false, true, false] => hexadecimal.push('5'),
[false, true, true, false] => hexadecimal.push('6'),
[true, true, true, false] => hexadecimal.push('7'),
[false, false, false, true] => hexadecimal.push('8'),
[true, false, false, true] => hexadecimal.push('9'),
[false, true, false, true] => hexadecimal.push('A'),
[true, true, false, true] => hexadecimal.push('B'),
[false, false, true, true] => hexadecimal.push('C'),
[true, false, true, true] => hexadecimal.push('D'),
[false, true, true, true] => hexadecimal.push('E'),
[true, true, true, true] => hexadecimal.push('F'),
_ => ()
};
match test[0..4] {
[false, false, false, false] => hexadecimal.push('0'),
[true, false, false, false] => hexadecimal.push('1'),
[false, true, false, false] => hexadecimal.push('2'),
[true, true, false, false] => hexadecimal.push('3'),
[false, false, true, false] => hexadecimal.push('4'),
[true, false, true, false] => hexadecimal.push('5'),
[false, true, true, false] => hexadecimal.push('6'),
[true, true, true, false] => hexadecimal.push('7'),
[false, false, false, true] => hexadecimal.push('8'),
[true, false, false, true] => hexadecimal.push('9'),
[false, true, false, true] => hexadecimal.push('A'),
[true, true, false, true] => hexadecimal.push('B'),
[false, false, true, true] => hexadecimal.push('C'),
[true, false, true, true] => hexadecimal.push('D'),
[false, true, true, true] => hexadecimal.push('E'),
[true, true, true, true] => hexadecimal.push('F'),
_ => ()
};
}
return hexadecimal;
}
fn main() {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [false; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i+2], 16).unwrap();
for j in 0..8 {
key[8*(i>>1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [false; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i+2], 16).unwrap();
for j in 0..8 {
iv[8*(i>>1) + j] = val % 2 == 1;
val >>= 1;
}
}
let output_0_63 = "F4CD954A717F26A7D6930830C4E7CF0819F80E03F25F342C64ADC66ABA7F8A8E6EAA49F23632AE3CD41A7BD290A0132F81C6D4043B6E397D7388F3A03B5FE358".to_string();
let cipher_key = key.map(|x| FheBool::encrypt(x, &client_key));
let cipher_iv = iv.map(|x| FheBool::encrypt(x, &client_key));
let mut trivium = TriviumStream::<FheBool>::new(cipher_key, cipher_iv, &server_key);
let mut vec = Vec::<bool>::with_capacity(64*8);
while vec.len() < 64*8 {
let cipher_outputs = trivium.next_64();
for c in cipher_outputs {
vec.push(c.decrypt(&client_key))
}
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output_0_63, hexadecimal[0..64*2]);
}
```
# FHE byte Trivium implementation
The same objects have also been implemented to stream bytes instead of booleans. They can be constructed and used in the same way via the functions `TriviumStreamByte::<u8>::new` and
`TriviumStreamByte::<FheUint8>::new`, with the same arguments as before. The `FheUint8` version is significantly slower than the `FheBool` version, because it does not run
with the same cryptographic parameters. Its interest lies in its trans-ciphering capabilities: `TriviumStreamByte<FheUint8>` implements the `TransCiphering` trait,
meaning it provides the function `trans_encrypt_64`. This function takes a `FheUint64` as input and outputs a `FheUint64`, the output being
encrypted via both tfhe and Trivium. For convenience we also provide `trans_decrypt_64`, which is of course the exact same function.
Sizes other than 64 bits are expected to be available in the future.
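As an illustration, here is a minimal trans-ciphering sketch modelled on the byte benchmark code in this repository; the all-zero key, IV, and message are placeholders only.
```rust
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64, FheUint8};
use tfhe_trivium::{TransCiphering, TriviumStreamByte};

fn main() {
    let config = ConfigBuilder::default().build();
    let (client_key, server_key) = generate_keys(config);

    // Placeholder all-zero 80-bit key and IV, as bytes.
    let key = [0u8; 10];
    let iv = [0u8; 10];
    let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));

    // A 64-bit message already encrypted under the high-level API.
    let message = FheUint64::try_encrypt(0u64, &client_key).unwrap();

    // Build the byte-oriented FHE Trivium stream and trans-encrypt the message.
    let mut trivium = TriviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
    let trans_ciphered = trivium.trans_encrypt_64(message);

    // Decrypting yields the message XORed with the Trivium keystream.
    let _masked: u64 = trans_ciphered.decrypt(&client_key);
}
```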
# FHE shortint Trivium implementation
The same implementation is also available for generic Ciphertexts representing bits (meant to be used with the `PARAM_MESSAGE_1_CARRY_1_KS_PBS` parameters). It uses a lower-level API
of tfhe-rs, so the syntax is a little bit different. It also implements the `TransCiphering` trait. For optimization purposes, it does not internally run on the same
cryptographic parameters as the high-level API of tfhe-rs. As such, it requires the use of a casting key to switch from one parameter space to the other, which makes
its setup a little more intricate.
Example code:
```rust
use tfhe::shortint::prelude::*;
use tfhe::shortint::CastingKey;
use tfhe::{ConfigBuilder, generate_keys, FheUint64};
use tfhe::prelude::*;
use tfhe_trivium::TriviumStreamShortint;
fn test_shortint() {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = CastingKey::new((&client_key, &server_key), (&hl_client_key, &hl_server_key));
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i+2], 16).unwrap();
for j in 0..8 {
key[8*(i>>1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i+2], 16).unwrap();
for j in 0..8 {
iv[8*(i>>1) + j] = val % 2;
val >>= 1;
}
}
let output_0_63 = "F4CD954A717F26A7D6930830C4E7CF0819F80E03F25F342C64ADC66ABA7F8A8E6EAA49F23632AE3CD41A7BD290A0132F81C6D4043B6E397D7388F3A03B5FE358".to_string();
let cipher_key = key.map(|x| client_key.encrypt(x));
let cipher_iv = iv.map(|x| client_key.encrypt(x));
let mut ciphered_message = vec![FheUint64::try_encrypt(0u64, &hl_client_key).unwrap(); 9];
let mut trivium = TriviumStreamShortint::new(cipher_key, cipher_iv, &server_key, &ksk);
let mut vec = Vec::<u64>::with_capacity(8);
while vec.len() < 8 {
let trans_ciphered_message = trivium.trans_encrypt_64(ciphered_message.pop().unwrap(), &hl_server_key);
vec.push(trans_ciphered_message.decrypt(&hl_client_key));
}
let hexadecimal = get_hexagonal_string_from_u64(vec);
assert_eq!(output_0_63, hexadecimal[0..64*2]);
}
```
# FHE Kreyvium implementation using tfhe-rs crate
This works in exactly the same way as the Trivium implementation, except that the key and IV now need to be 128 bits long. It is available for the same internal types as Trivium, with similar syntax.
`KreyviumStreamByte<FheUint8>` and `KreyviumStreamShortint` also implement the `TransCiphering` trait.
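A minimal FHE usage sketch, mirroring the Kreyvium benchmark code in this repository (the all-zero key and IV are placeholders, and the IV is passed in the clear as in that benchmark):
```rust
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool};
use tfhe_trivium::KreyviumStream;

fn main() {
    let config = ConfigBuilder::default().build();
    let (client_key, server_key) = generate_keys(config);

    // Placeholder all-zero 128-bit key and IV.
    let key = [false; 128];
    let iv = [false; 128];

    // The key is encrypted bit by bit; the IV stays in the clear.
    let cipher_key = key.map(|x| FheBool::encrypt(x, &client_key));

    // Same API shape as Trivium, but with 128-bit key and IV.
    let mut kreyvium = KreyviumStream::<FheBool>::new(cipher_key, iv, &server_key);

    // Generate the next 64 keystream bits and decrypt them.
    let keystream: Vec<bool> = kreyvium
        .next_64()
        .into_iter()
        .map(|bit| bit.decrypt(&client_key))
        .collect();
    assert_eq!(keystream.len(), 64);
}
```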
# Testing
If you wish to run the tests of this app, please run `cargo test -r trivium -- --test-threads=1`, as multithreading causes interference between several
Trivium instances running at the same time.

View File

@@ -1,73 +0,0 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool};
use tfhe_trivium::KreyviumStream;
pub fn kreyvium_bool_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [false; 128];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [false; 128];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let cipher_key = key.map(|x| FheBool::encrypt(x, &client_key));
let mut kreyvium = KreyviumStream::<FheBool>::new(cipher_key, iv, &server_key);
c.bench_function("kreyvium bool generate 64 bits", |b| {
b.iter(|| kreyvium.next_64())
});
}
pub fn kreyvium_bool_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [false; 128];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [false; 128];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
c.bench_function("kreyvium bool warmup", |b| {
b.iter(|| {
let cipher_key = key.map(|x| FheBool::encrypt(x, &client_key));
let _kreyvium = KreyviumStream::<FheBool>::new(cipher_key, iv, &server_key);
})
});
}

View File

@@ -1,88 +0,0 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64, FheUint8};
use tfhe_trivium::{KreyviumStreamByte, TransCiphering};
pub fn kreyvium_byte_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [0u8; 16];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [0u8; 16];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let mut kreyvium = KreyviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
c.bench_function("kreyvium byte generate 64 bits", |b| {
b.iter(|| kreyvium.next_64())
});
}
pub fn kreyvium_byte_trans(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [0u8; 16];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [0u8; 16];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let ciphered_message = FheUint64::try_encrypt(0u64, &client_key).unwrap();
let mut kreyvium = KreyviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
c.bench_function("kreyvium byte transencrypt 64 bits", |b| {
b.iter(|| kreyvium.trans_encrypt_64(ciphered_message.clone()))
});
}
pub fn kreyvium_byte_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [0u8; 16];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [0u8; 16];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
c.bench_function("kreyvium byte warmup", |b| {
b.iter(|| {
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let _kreyvium = KreyviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
})
});
}

View File

@@ -1,146 +0,0 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
use tfhe_trivium::{KreyviumStreamShortint, TransCiphering};
pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [0; 128];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [0; 128];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
c.bench_function("kreyvium 1_1 warmup", |b| {
b.iter(|| {
let cipher_key = key.map(|x| client_key.encrypt(x));
let _kreyvium = KreyviumStreamShortint::new(
cipher_key,
iv,
server_key.clone(),
ksk.clone(),
hl_server_key.clone(),
);
})
});
}
pub fn kreyvium_shortint_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [0; 128];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [0; 128];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let cipher_key = key.map(|x| client_key.encrypt(x));
let mut kreyvium = KreyviumStreamShortint::new(cipher_key, iv, server_key, ksk, hl_server_key);
c.bench_function("kreyvium 1_1 generate 64 bits", |b| {
b.iter(|| kreyvium.next_64())
});
}
pub fn kreyvium_shortint_trans(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [0; 128];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [0; 128];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let cipher_key = key.map(|x| client_key.encrypt(x));
let ciphered_message = FheUint64::try_encrypt(0u64, &hl_client_key).unwrap();
let mut kreyvium = KreyviumStreamShortint::new(cipher_key, iv, server_key, ksk, hl_server_key);
c.bench_function("kreyvium 1_1 transencrypt 64 bits", |b| {
b.iter(|| kreyvium.trans_encrypt_64(ciphered_message.clone()))
});
}

View File

@@ -1,53 +0,0 @@
use criterion::{criterion_group, criterion_main};
mod trivium_bool;
criterion_group!(
trivium_bool,
trivium_bool::trivium_bool_gen,
trivium_bool::trivium_bool_warmup
);
mod kreyvium_bool;
criterion_group!(
kreyvium_bool,
kreyvium_bool::kreyvium_bool_gen,
kreyvium_bool::kreyvium_bool_warmup
);
mod trivium_shortint;
criterion_group!(
trivium_shortint,
trivium_shortint::trivium_shortint_gen,
trivium_shortint::trivium_shortint_warmup,
trivium_shortint::trivium_shortint_trans
);
mod kreyvium_shortint;
criterion_group!(
kreyvium_shortint,
kreyvium_shortint::kreyvium_shortint_gen,
kreyvium_shortint::kreyvium_shortint_warmup,
kreyvium_shortint::kreyvium_shortint_trans
);
mod trivium_byte;
criterion_group!(
trivium_byte,
trivium_byte::trivium_byte_gen,
trivium_byte::trivium_byte_trans,
trivium_byte::trivium_byte_warmup
);
mod kreyvium_byte;
criterion_group!(
kreyvium_byte,
kreyvium_byte::kreyvium_byte_gen,
kreyvium_byte::kreyvium_byte_trans,
kreyvium_byte::kreyvium_byte_warmup
);
criterion_main!(
trivium_bool,
trivium_shortint,
trivium_byte,
kreyvium_bool,
kreyvium_shortint,
kreyvium_byte,
);

View File

@@ -1,73 +0,0 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool};
use tfhe_trivium::TriviumStream;
pub fn trivium_bool_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [false; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [false; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let cipher_key = key.map(|x| FheBool::encrypt(x, &client_key));
let mut trivium = TriviumStream::<FheBool>::new(cipher_key, iv, &server_key);
c.bench_function("trivium bool generate 64 bits", |b| {
b.iter(|| trivium.next_64())
});
}
pub fn trivium_bool_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [false; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [false; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
c.bench_function("trivium bool warmup", |b| {
b.iter(|| {
let cipher_key = key.map(|x| FheBool::encrypt(x, &client_key));
let _trivium = TriviumStream::<FheBool>::new(cipher_key, iv, &server_key);
})
});
}

View File

@@ -1,85 +0,0 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64, FheUint8};
use tfhe_trivium::{TransCiphering, TriviumStreamByte};
pub fn trivium_byte_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0u8; 10];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0u8; 10];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let mut trivium = TriviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
c.bench_function("trivium byte generate 64 bits", |b| {
b.iter(|| trivium.next_64())
});
}
pub fn trivium_byte_trans(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0u8; 10];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0u8; 10];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let ciphered_message = FheUint64::try_encrypt(0u64, &client_key).unwrap();
let mut trivium = TriviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
c.bench_function("trivium byte transencrypt 64 bits", |b| {
b.iter(|| trivium.trans_encrypt_64(ciphered_message.clone()))
});
}
pub fn trivium_byte_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0u8; 10];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0u8; 10];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
c.bench_function("trivium byte warmup", |b| {
b.iter(|| {
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let _trivium = TriviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
})
});
}

View File

@@ -1,146 +0,0 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
use tfhe_trivium::{TransCiphering, TriviumStreamShortint};
pub fn trivium_shortint_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS,
);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
c.bench_function("trivium 1_1 warmup", |b| {
b.iter(|| {
let cipher_key = key.map(|x| client_key.encrypt(x));
let _trivium = TriviumStreamShortint::new(
cipher_key,
iv,
server_key.clone(),
ksk.clone(),
hl_server_key.clone(),
);
})
});
}
pub fn trivium_shortint_gen(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS,
);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let cipher_key = key.map(|x| client_key.encrypt(x));
let mut trivium = TriviumStreamShortint::new(cipher_key, iv, server_key, ksk, hl_server_key);
c.bench_function("trivium 1_1 generate 64 bits", |b| {
b.iter(|| trivium.next_64())
});
}
pub fn trivium_shortint_trans(c: &mut Criterion) {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS,
);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let cipher_key = key.map(|x| client_key.encrypt(x));
let ciphered_message = FheUint64::try_encrypt(0u64, &hl_client_key).unwrap();
let mut trivium = TriviumStreamShortint::new(cipher_key, iv, server_key, ksk, hl_server_key);
c.bench_function("trivium 1_1 transencrypt 64 bits", |b| {
b.iter(|| trivium.trans_encrypt_64(ciphered_message.clone()))
});
}

View File

@@ -1,248 +0,0 @@
//! This module implements the Kreyvium stream cipher, using booleans or FheBool
//! for the representation of the inner bits.
use crate::static_deque::StaticDeque;
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheBool, ServerKey};
/// Internal trait specifying which operations are necessary for KreyviumStream generic type
pub trait KreyviumBoolInput<OpOutput>:
Sized
+ Clone
+ std::ops::BitXor<Output = OpOutput>
+ std::ops::BitAnd<Output = OpOutput>
+ std::ops::Not<Output = OpOutput>
{
}
impl KreyviumBoolInput<bool> for bool {}
impl KreyviumBoolInput<bool> for &bool {}
impl KreyviumBoolInput<FheBool> for FheBool {}
impl KreyviumBoolInput<FheBool> for &FheBool {}
/// KreyviumStream: a struct implementing the Kreyvium stream cipher, using T for the internal
/// representation of bits (bool or FheBool). To be able to compute FHE operations, it also owns
/// an Option for a ServerKey.
pub struct KreyviumStream<T> {
a: StaticDeque<93, T>,
b: StaticDeque<84, T>,
c: StaticDeque<111, T>,
k: StaticDeque<128, T>,
iv: StaticDeque<128, T>,
fhe_key: Option<ServerKey>,
}
impl KreyviumStream<bool> {
/// Constructor for `KreyviumStream<bool>`: arguments are the secret key and the input vector.
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(mut key: [bool; 128], mut iv: [bool; 128]) -> KreyviumStream<bool> {
// Initialization of Kreyvium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_register = [false; 93];
let mut b_register = [false; 84];
let mut c_register = [false; 111];
for i in 0..93 {
a_register[i] = key[128 - 93 + i];
}
for i in 0..84 {
b_register[i] = iv[128 - 84 + i];
}
for i in 0..44 {
c_register[111 - 44 + i] = iv[i];
}
for i in 0..66 {
c_register[i + 1] = true;
}
key.reverse();
iv.reverse();
KreyviumStream::<bool>::new_from_registers(
a_register, b_register, c_register, key, iv, None,
)
}
}
impl KreyviumStream<FheBool> {
/// Constructor for `KreyviumStream<FheBool>`: arguments are the encrypted secret key and input
/// vector, and the FHE server key.
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(
mut key: [FheBool; 128],
mut iv: [bool; 128],
sk: &ServerKey,
) -> KreyviumStream<FheBool> {
set_server_key(sk.clone());
// Initialization of Kreyvium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_register = [false; 93].map(FheBool::encrypt_trivial);
let mut b_register = [false; 84].map(FheBool::encrypt_trivial);
let mut c_register = [false; 111].map(FheBool::encrypt_trivial);
for i in 0..93 {
a_register[i] = key[128 - 93 + i].clone();
}
for i in 0..84 {
b_register[i] = FheBool::encrypt_trivial(iv[128 - 84 + i]);
}
for i in 0..44 {
c_register[111 - 44 + i] = FheBool::encrypt_trivial(iv[i]);
}
for i in 0..66 {
c_register[i + 1] = FheBool::encrypt_trivial(true);
}
key.reverse();
iv.reverse();
let iv = iv.map(FheBool::encrypt_trivial);
unset_server_key();
KreyviumStream::<FheBool>::new_from_registers(
a_register,
b_register,
c_register,
key,
iv,
Some(sk.clone()),
)
}
}
impl<T> KreyviumStream<T>
where
T: KreyviumBoolInput<T> + std::marker::Send + std::marker::Sync,
for<'a> &'a T: KreyviumBoolInput<T>,
{
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
/// server key
fn new_from_registers(
a_register: [T; 93],
b_register: [T; 84],
c_register: [T; 111],
k_register: [T; 128],
iv_register: [T; 128],
key: Option<ServerKey>,
) -> Self {
let mut ret = Self {
a: StaticDeque::<93, T>::new(a_register),
b: StaticDeque::<84, T>::new(b_register),
c: StaticDeque::<111, T>::new(c_register),
k: StaticDeque::<128, T>::new(k_register),
iv: StaticDeque::<128, T>::new(iv_register),
fhe_key: key,
};
ret.init();
ret
}
/// The specification of Kreyvium includes running 1152 (= 18*64) unused steps to mix up the
/// registers, before starting the proper stream
fn init(&mut self) {
for _ in 0..18 {
self.next_64();
}
}
/// Computes one turn of the stream, updating registers and outputting the new bit.
pub fn next_bool(&mut self) -> T {
if let Some(sk) = &self.fhe_key {
set_server_key(sk.clone());
}
let [o, a, b, c] = self.get_output_and_values(0);
self.a.push(a);
self.b.push(b);
self.c.push(c);
self.k.shift();
self.iv.shift();
o
}
/// Computes a potential future step of Kreyvium, n terms in the future. This does not update
/// the registers, but instead returns, along with the output, the three values that will be
/// used to update the registers when the time is right. This function is meant to be used in
/// parallel.
fn get_output_and_values(&self, n: usize) -> [T; 4] {
assert!(n < 65);
let (((temp_a, temp_b), (temp_c, a_and)), (b_and, c_and)) = rayon::join(
|| {
rayon::join(
|| {
rayon::join(
|| &self.a[65 - n] ^ &self.a[92 - n],
|| &self.b[68 - n] ^ &self.b[83 - n],
)
},
|| {
rayon::join(
|| &(&self.c[65 - n] ^ &self.c[110 - n]) ^ &self.k[127 - n],
|| &(&self.a[91 - n] & &self.a[90 - n]) ^ &self.iv[127 - n],
)
},
)
},
|| {
rayon::join(
|| &self.b[82 - n] & &self.b[81 - n],
|| &self.c[109 - n] & &self.c[108 - n],
)
},
);
let ((o, a), (b, c)) = rayon::join(
|| {
rayon::join(
|| &(&temp_a ^ &temp_b) ^ &temp_c,
|| &temp_c ^ &(&c_and ^ &self.a[68 - n]),
)
},
|| {
rayon::join(
|| &temp_a ^ &(&a_and ^ &self.b[77 - n]),
|| &temp_b ^ &(&b_and ^ &self.c[86 - n]),
)
},
);
[o, a, b, c]
}
/// This calls `get_output_and_values` in parallel 64 times, and stores all results in a Vec.
fn get_64_output_and_values(&self) -> Vec<[T; 4]> {
(0..64)
.into_par_iter()
.map(|x| self.get_output_and_values(x))
.rev()
.collect()
}
/// Computes 64 turns of the stream, outputting the 64 bits all at once in a
/// Vec (first value is oldest, last is newest)
pub fn next_64(&mut self) -> Vec<T> {
if let Some(sk) = &self.fhe_key {
rayon::broadcast(|_| set_server_key(sk.clone()));
}
let mut values = self.get_64_output_and_values();
if self.fhe_key.is_some() {
rayon::broadcast(|_| unset_server_key());
}
let mut ret = Vec::<T>::with_capacity(64);
while let Some([o, a, b, c]) = values.pop() {
ret.push(o);
self.a.push(a);
self.b.push(b);
self.c.push(c);
}
self.k.n_shifts(64);
self.iv.n_shifts(64);
ret
}
}

View File

@@ -1,285 +0,0 @@
//! This module implements the Kreyvium stream cipher, using u8 or FheUint8
//! for the representation of the inner bits.
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheUint8, ServerKey};
/// Internal trait specifying which operations are necessary for KreyviumStreamByte generic type
pub trait KreyviumByteInput<OpOutput>:
Sized
+ Send
+ Sync
+ Clone
+ StaticByteDequeInput<OpOutput>
+ std::ops::BitXor<Output = OpOutput>
+ std::ops::BitAnd<Output = OpOutput>
+ std::ops::Shr<u8, Output = OpOutput>
+ std::ops::Shl<u8, Output = OpOutput>
+ std::ops::Add<Output = OpOutput>
{
}
impl KreyviumByteInput<u8> for u8 {}
impl KreyviumByteInput<u8> for &u8 {}
impl KreyviumByteInput<FheUint8> for FheUint8 {}
impl KreyviumByteInput<FheUint8> for &FheUint8 {}
/// KreyviumStreamByte: a struct implementing the Kreyvium stream cipher, using T for the internal
/// representation of bits (u8 or FheUint8). To be able to compute FHE operations, it also owns
/// an Option for a ServerKey.
/// Since the original Kreyvium registers' sizes are not a multiple of 8, these registers (which
/// store byte-like objects) have a size that is the eighth of the closest multiple of 8 above the
/// originals' sizes.
pub struct KreyviumStreamByte<T> {
a_byte: StaticByteDeque<12, T>,
b_byte: StaticByteDeque<11, T>,
c_byte: StaticByteDeque<14, T>,
k_byte: StaticByteDeque<16, T>,
iv_byte: StaticByteDeque<16, T>,
fhe_key: Option<ServerKey>,
}
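// The byte-register sizes follow from rounding the bit-level Kreyvium register sizes up to
// whole bytes: 93 -> 12, 84 -> 11, 111 -> 14, and 128 -> 16 bytes for the key and IV.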
impl KreyviumStreamByte<u8> {
/// Constructor for `KreyviumStreamByte<u8>`: arguments are the secret key and the input vector.
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(key_bytes: [u8; 16], iv_bytes: [u8; 16]) -> KreyviumStreamByte<u8> {
// Initialization of Kreyvium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_byte_reg = [0u8; 12];
let mut b_byte_reg = [0u8; 11];
let mut c_byte_reg = [0u8; 14];
// Copy key bits into a register
a_byte_reg.copy_from_slice(&key_bytes[4..]);
// Copy iv bits into a register
b_byte_reg.copy_from_slice(&iv_bytes[5..]);
// Copy a lot of ones in the c register
c_byte_reg[0] = 252;
c_byte_reg[1..8].fill(255);
// Copy iv bits in the c register
c_byte_reg[8] = (iv_bytes[0] << 4) | 31;
for b in 9..14 {
c_byte_reg[b] = (iv_bytes[b - 9] >> 4) | (iv_bytes[b - 8] << 4);
}
// Key and iv are stored in reverse in their shift registers
let mut key = key_bytes.map(|b| b.reverse_bits());
let mut iv = iv_bytes.map(|b| b.reverse_bits());
key.reverse();
iv.reverse();
let mut ret = KreyviumStreamByte::<u8>::new_from_registers(
a_byte_reg, b_byte_reg, c_byte_reg, key, iv, None,
);
ret.init();
ret
}
}
impl KreyviumStreamByte<FheUint8> {
/// Constructor for `KreyviumStreamByte<FheUint8>`: arguments are the encrypted secret key and
/// input vector, and the FHE server key.
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(
key_bytes: [FheUint8; 16],
iv_bytes: [u8; 16],
server_key: &ServerKey,
) -> KreyviumStreamByte<FheUint8> {
set_server_key(server_key.clone());
// Initialization of Kreyvium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_byte_reg = [0u8; 12].map(FheUint8::encrypt_trivial);
let mut b_byte_reg = [0u8; 11].map(FheUint8::encrypt_trivial);
let mut c_byte_reg = [0u8; 14].map(FheUint8::encrypt_trivial);
// Copy key bits into a register
a_byte_reg.clone_from_slice(&key_bytes[4..]);
// Copy iv bits into a register
for b in 0..11 {
b_byte_reg[b] = FheUint8::encrypt_trivial(iv_bytes[b + 5]);
}
// Copy a lot of ones in the c register
c_byte_reg[0] = FheUint8::encrypt_trivial(252u8);
c_byte_reg[1..8].fill_with(|| FheUint8::encrypt_trivial(255u8));
// Copy iv bits in the c register
c_byte_reg[8] = FheUint8::encrypt_trivial((&iv_bytes[0] << 4u8) | 31u8);
for b in 9..14 {
c_byte_reg[b] =
FheUint8::encrypt_trivial((&iv_bytes[b - 9] >> 4u8) | (&iv_bytes[b - 8] << 4u8));
}
// Key and iv are stored in reverse in their shift registers
let mut key = key_bytes.map(|b| b.reverse_bits());
let mut iv = iv_bytes.map(|x| FheUint8::encrypt_trivial(x.reverse_bits()));
key.reverse();
iv.reverse();
unset_server_key();
let mut ret = KreyviumStreamByte::<FheUint8>::new_from_registers(
a_byte_reg,
b_byte_reg,
c_byte_reg,
key,
iv,
Some(server_key.clone()),
);
ret.init();
ret
}
}
impl<T> KreyviumStreamByte<T>
where
T: KreyviumByteInput<T> + Send,
for<'a> &'a T: KreyviumByteInput<T>,
{
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
/// server key
fn new_from_registers(
a_register: [T; 12],
b_register: [T; 11],
c_register: [T; 14],
k_register: [T; 16],
iv_register: [T; 16],
sk: Option<ServerKey>,
) -> Self {
Self {
a_byte: StaticByteDeque::<12, T>::new(a_register),
b_byte: StaticByteDeque::<11, T>::new(b_register),
c_byte: StaticByteDeque::<14, T>::new(c_register),
k_byte: StaticByteDeque::<16, T>::new(k_register),
iv_byte: StaticByteDeque::<16, T>::new(iv_register),
fhe_key: sk,
}
}
/// The specification of Kreyvium includes running 1152 (= 18*64) unused steps to mix up the
/// registers, before starting the proper stream
fn init(&mut self) {
for _ in 0..18 {
self.next_64();
}
}
/// Computes 8 potential future steps of Kreyvium at once, b*8 terms in the future. This does
/// not update the registers, but instead returns, along with the output byte, the three byte
/// values that will be used to update the registers when the time is right. This function is
/// meant to be used in parallel.
fn get_output_and_values(&self, b: usize) -> [T; 4] {
let n = b * 8 + 7;
assert!(n < 65);
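// b selects one of the 8 bytes produced per 64-bit step; n = b*8 + 7 is the matching bit
// offset, so the byte() lookups below can reuse the tap positions of the bit-level
// implementation.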
let (((k, iv), (a1, a2, a3, a4, a5)), ((b1, b2, b3, b4, b5), (c1, c2, c3, c4, c5))) =
rayon::join(
|| {
rayon::join(
|| (self.k_byte.byte(127 - n), self.iv_byte.byte(127 - n)),
|| Self::get_bytes(&self.a_byte, [91 - n, 90 - n, 68 - n, 65 - n, 92 - n]),
)
},
|| {
rayon::join(
|| Self::get_bytes(&self.b_byte, [82 - n, 81 - n, 77 - n, 68 - n, 83 - n]),
|| {
Self::get_bytes(
&self.c_byte,
[109 - n, 108 - n, 86 - n, 65 - n, 110 - n],
)
},
)
},
);
let (((temp_a, temp_b), (temp_c, a_and)), (b_and, c_and)) = rayon::join(
|| {
rayon::join(
|| rayon::join(|| a4 ^ a5, || b4 ^ b5),
|| rayon::join(|| c4 ^ c5 ^ k, || a1 & a2 ^ iv),
)
},
|| rayon::join(|| b1 & b2, || c1 & c2),
);
let (temp_a_2, temp_b_2, temp_c_2) = (temp_a.clone(), temp_b.clone(), temp_c.clone());
let ((o, a), (b, c)) = rayon::join(
|| {
rayon::join(
|| (temp_a_2 ^ temp_b_2) ^ temp_c_2,
|| temp_c ^ ((c_and) ^ a3),
)
},
|| rayon::join(|| temp_a ^ (a_and ^ b3), || temp_b ^ (b_and ^ c3)),
);
[o, a, b, c]
}
/// This calls `get_output_and_values` in parallel 8 times, and stores all results in a Vec.
fn get_64_output_and_values(&self) -> Vec<[T; 4]> {
(0..8)
.into_par_iter()
.map(|i| self.get_output_and_values(i))
.collect()
}
/// Computes 64 turns of the stream, outputting the 64 bits (in 8 bytes) all at once in a
/// Vec (first value is oldest, last is newest)
pub fn next_64(&mut self) -> Vec<T> {
if let Some(sk) = &self.fhe_key {
rayon::broadcast(|_| set_server_key(sk.clone()));
}
let values = self.get_64_output_and_values();
if self.fhe_key.is_some() {
rayon::broadcast(|_| unset_server_key());
}
let mut bytes = Vec::<T>::with_capacity(8);
for [o, a, b, c] in values {
self.a_byte.push(a);
self.b_byte.push(b);
self.c_byte.push(c);
bytes.push(o);
}
self.k_byte.n_shifts(8);
self.iv_byte.n_shifts(8);
bytes
}
/// Reconstructs a group of 5 bytes in parallel.
fn get_bytes<const N: usize>(
reg: &StaticByteDeque<N, T>,
offsets: [usize; 5],
) -> (T, T, T, T, T) {
let mut ret = offsets
.par_iter()
.rev()
.map(|&i| reg.byte(i))
.collect::<Vec<_>>();
(
ret.pop().unwrap(),
ret.pop().unwrap(),
ret.pop().unwrap(),
ret.pop().unwrap(),
ret.pop().unwrap(),
)
}
}
impl KreyviumStreamByte<FheUint8> {
pub fn get_server_key(&self) -> &ServerKey {
self.fhe_key.as_ref().unwrap()
}
}

View File

@@ -1,203 +0,0 @@
use crate::static_deque::StaticDeque;
use rayon::prelude::*;
use tfhe::shortint::prelude::*;
/// KreyviumStreamShortint: a struct implementing the Kreyvium stream cipher, using a shortint
/// Ciphertext for the internal representation of bits (each intended to hold a single bit). To
/// be able to compute FHE operations, it also owns a ServerKey.
pub struct KreyviumStreamShortint {
a: StaticDeque<93, Ciphertext>,
b: StaticDeque<84, Ciphertext>,
c: StaticDeque<111, Ciphertext>,
k: StaticDeque<128, Ciphertext>,
iv: StaticDeque<128, Ciphertext>,
internal_server_key: ServerKey,
transciphering_casting_key: KeySwitchingKey,
hl_server_key: tfhe::ServerKey,
}
impl KreyviumStreamShortint {
/// Constructor for KreyviumStreamShortint: arguments are the encrypted secret key, the input
/// vector, a shortint ServerKey, a KeySwitchingKey and a high-level ServerKey. Outputs a
/// KreyviumStreamShortint object already initialized (1152 steps have been run before returning)
pub fn new(
mut key: [Ciphertext; 128],
mut iv: [u64; 128],
sk: ServerKey,
ksk: KeySwitchingKey,
hl_sk: tfhe::ServerKey,
) -> Self {
// Initialization of Kreyvium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_register: [Ciphertext; 93] = [0; 93].map(|x| sk.create_trivial(x));
let mut b_register: [Ciphertext; 84] = [0; 84].map(|x| sk.create_trivial(x));
let mut c_register: [Ciphertext; 111] = [0; 111].map(|x| sk.create_trivial(x));
for i in 0..93 {
a_register[i].clone_from(&key[128 - 93 + i]);
}
for i in 0..84 {
b_register[i] = sk.create_trivial(iv[128 - 84 + i]);
}
for i in 0..44 {
c_register[111 - 44 + i] = sk.create_trivial(iv[i]);
}
for i in 0..66 {
c_register[i + 1] = sk.create_trivial(1);
}
key.reverse();
iv.reverse();
let iv = iv.map(|x| sk.create_trivial(x));
let mut ret = Self {
a: StaticDeque::<93, Ciphertext>::new(a_register),
b: StaticDeque::<84, Ciphertext>::new(b_register),
c: StaticDeque::<111, Ciphertext>::new(c_register),
k: StaticDeque::<128, Ciphertext>::new(key),
iv: StaticDeque::<128, Ciphertext>::new(iv),
internal_server_key: sk,
transciphering_casting_key: ksk,
hl_server_key: hl_sk,
};
ret.init();
ret
}
/// The specification of Kreyvium includes running 1152 (= 18*64) unused steps to mix up the
/// registers, before starting the proper stream
fn init(&mut self) {
for _ in 0..18 {
self.next_64();
}
}
/// Computes one turn of the stream, updating registers and outputting the new bit.
pub fn next_ct(&mut self) -> Ciphertext {
let [o, a, b, c] = self.get_output_and_values(0);
self.a.push(a);
self.b.push(b);
self.c.push(c);
o
}
/// Computes a potential future step of Kreyvium, n terms in the future. This does not update
/// the registers, but instead returns, along with the output, the three values that will be
/// used to update the registers when the time is right. This function is meant to be used in
/// parallel.
fn get_output_and_values(&self, n: usize) -> [Ciphertext; 4] {
let (k, iv) = (&self.k[127 - n], &self.iv[127 - n]);
let (a1, a2, a3, a4, a5) = (
&self.a[65 - n],
&self.a[92 - n],
&self.a[91 - n],
&self.a[90 - n],
&self.a[68 - n],
);
let (b1, b2, b3, b4, b5) = (
&self.b[68 - n],
&self.b[83 - n],
&self.b[82 - n],
&self.b[81 - n],
&self.b[77 - n],
);
let (c1, c2, c3, c4, c5) = (
&self.c[65 - n],
&self.c[110 - n],
&self.c[109 - n],
&self.c[108 - n],
&self.c[86 - n],
);
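// With 1-bit messages, adding ciphertexts and keeping only the message part (value mod 2)
// amounts to an XOR, so the register updates below chain unchecked additions and end each
// chain with a carry-aware operation (add_assign or message_extract_assign); the ANDs and the
// final output use the server key's unchecked_bitand/bitxor operations.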
let temp_a = self.internal_server_key.unchecked_add(a1, a2);
let temp_b = self.internal_server_key.unchecked_add(b1, b2);
let mut temp_c = self.internal_server_key.unchecked_add(c1, c2);
self.internal_server_key
.unchecked_add_assign(&mut temp_c, k);
let ((new_a, new_b), (new_c, o)) = rayon::join(
|| {
rayon::join(
|| {
let mut new_a = self.internal_server_key.unchecked_bitand(c3, c4);
self.internal_server_key
.unchecked_add_assign(&mut new_a, a5);
self.internal_server_key.add_assign(&mut new_a, &temp_c);
new_a
},
|| {
let mut new_b = self.internal_server_key.unchecked_bitand(a3, a4);
self.internal_server_key
.unchecked_add_assign(&mut new_b, b5);
self.internal_server_key
.unchecked_add_assign(&mut new_b, &temp_a);
self.internal_server_key.add_assign(&mut new_b, iv);
new_b
},
)
},
|| {
rayon::join(
|| {
let mut new_c = self.internal_server_key.unchecked_bitand(b3, b4);
self.internal_server_key
.unchecked_add_assign(&mut new_c, c5);
self.internal_server_key
.unchecked_add_assign(&mut new_c, &temp_b);
self.internal_server_key.message_extract_assign(&mut new_c);
new_c
},
|| {
self.internal_server_key.bitxor(
&self.internal_server_key.unchecked_add(&temp_a, &temp_b),
&temp_c,
)
},
)
},
);
[o, new_a, new_b, new_c]
}
/// This calls `get_output_and_values` in parallel 64 times, and stores all results in a Vec.
fn get_64_output_and_values(&self) -> Vec<[Ciphertext; 4]> {
(0..64)
.into_par_iter()
.map(|x| self.get_output_and_values(x))
.rev()
.collect()
}
/// Computes 64 turns of the stream, outputting the 64 bits all at once in a
/// Vec (first value is oldest, last is newest)
pub fn next_64(&mut self) -> Vec<Ciphertext> {
let mut values = self.get_64_output_and_values();
let mut ret = Vec::<Ciphertext>::with_capacity(64);
while let Some([o, a, b, c]) = values.pop() {
ret.push(o);
self.a.push(a);
self.b.push(b);
self.c.push(c);
}
self.k.n_shifts(64);
self.iv.n_shifts(64);
ret
}
pub fn get_internal_server_key(&self) -> &ServerKey {
&self.internal_server_key
}
pub fn get_casting_key(&self) -> &KeySwitchingKey {
&self.transciphering_casting_key
}
pub fn get_hl_server_key(&self) -> &tfhe::ServerKey {
&self.hl_server_key
}
}

View File

@@ -1,12 +0,0 @@
#[allow(clippy::module_inception)]
mod kreyvium;
pub use kreyvium::KreyviumStream;
mod kreyvium_byte;
pub use kreyvium_byte::KreyviumStreamByte;
mod kreyvium_shortint;
pub use kreyvium_shortint::KreyviumStreamShortint;
#[cfg(test)]
mod test;

View File

@@ -1,371 +0,0 @@
use crate::{KreyviumStream, KreyviumStreamByte, KreyviumStreamShortint, TransCiphering};
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
// Values for these tests come from the github repo renaud1239/Kreyvium,
// commit fd6828f68711276c25f55e605935028f5e843f43
fn get_hexadecimal_string_from_lsb_first_stream(a: Vec<bool>) -> String {
assert!(a.len() % 8 == 0);
let mut hexadecimal: String = "".to_string();
for test in a.chunks(8) {
// Encoding is bytes in LSB order
match test[4..8] {
[false, false, false, false] => hexadecimal.push('0'),
[true, false, false, false] => hexadecimal.push('1'),
[false, true, false, false] => hexadecimal.push('2'),
[true, true, false, false] => hexadecimal.push('3'),
[false, false, true, false] => hexadecimal.push('4'),
[true, false, true, false] => hexadecimal.push('5'),
[false, true, true, false] => hexadecimal.push('6'),
[true, true, true, false] => hexadecimal.push('7'),
[false, false, false, true] => hexadecimal.push('8'),
[true, false, false, true] => hexadecimal.push('9'),
[false, true, false, true] => hexadecimal.push('A'),
[true, true, false, true] => hexadecimal.push('B'),
[false, false, true, true] => hexadecimal.push('C'),
[true, false, true, true] => hexadecimal.push('D'),
[false, true, true, true] => hexadecimal.push('E'),
[true, true, true, true] => hexadecimal.push('F'),
_ => (),
};
match test[0..4] {
[false, false, false, false] => hexadecimal.push('0'),
[true, false, false, false] => hexadecimal.push('1'),
[false, true, false, false] => hexadecimal.push('2'),
[true, true, false, false] => hexadecimal.push('3'),
[false, false, true, false] => hexadecimal.push('4'),
[true, false, true, false] => hexadecimal.push('5'),
[false, true, true, false] => hexadecimal.push('6'),
[true, true, true, false] => hexadecimal.push('7'),
[false, false, false, true] => hexadecimal.push('8'),
[true, false, false, true] => hexadecimal.push('9'),
[false, true, false, true] => hexadecimal.push('A'),
[true, true, false, true] => hexadecimal.push('B'),
[false, false, true, true] => hexadecimal.push('C'),
[true, false, true, true] => hexadecimal.push('D'),
[false, true, true, true] => hexadecimal.push('E'),
[true, true, true, true] => hexadecimal.push('F'),
_ => (),
};
}
hexadecimal
}
fn get_hexagonal_string_from_bytes(a: Vec<u8>) -> String {
assert!(a.len() % 8 == 0);
let mut hexadecimal: String = "".to_string();
for test in a {
hexadecimal.push_str(&format!("{:02X?}", test));
}
hexadecimal
}
fn get_hexagonal_string_from_u64(a: Vec<u64>) -> String {
let mut hexadecimal: String = "".to_string();
for test in a {
hexadecimal.push_str(&format!("{:016X?}", test));
}
hexadecimal
}
#[test]
fn kreyvium_test_1() {
let key = [false; 128];
let iv = [false; 128];
let output = "26DCF1F4BC0F1922";
let mut kreyvium = KreyviumStream::<bool>::new(key, iv);
let mut vec = Vec::<bool>::with_capacity(64);
while vec.len() < 64 {
vec.push(kreyvium.next_bool());
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output, hexadecimal);
}
#[test]
fn kreyvium_test_2() {
let mut key = [false; 128];
let iv = [false; 128];
key[0] = true;
let output = "4FD421D4DA3D2C8A";
let mut kreyvium = KreyviumStream::<bool>::new(key, iv);
let mut vec = Vec::<bool>::with_capacity(64);
while vec.len() < 64 {
vec.push(kreyvium.next_bool());
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output, hexadecimal);
}
#[test]
fn kreyvium_test_3() {
let key = [false; 128];
let mut iv = [false; 128];
iv[0] = true;
let output = "C9217BA0D762ACA1";
let mut kreyvium = KreyviumStream::<bool>::new(key, iv);
let mut vec = Vec::<bool>::with_capacity(64);
while vec.len() < 64 {
vec.push(kreyvium.next_bool());
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output, hexadecimal);
}
#[test]
fn kreyvium_test_4() {
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [false; 128];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [false; 128];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let output = "D1F0303482061111";
let mut kreyvium = KreyviumStream::<bool>::new(key, iv);
let mut vec = Vec::<bool>::with_capacity(64);
while vec.len() < 64 {
vec.push(kreyvium.next_bool());
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(hexadecimal, output);
}
#[test]
fn kreyvium_test_fhe_long() {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [false; 128];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [false; 128];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let output = "D1F0303482061111";
let cipher_key = key.map(|x| FheBool::encrypt(x, &client_key));
let mut kreyvium = KreyviumStream::<FheBool>::new(cipher_key, iv, &server_key);
let mut vec = Vec::<bool>::with_capacity(64);
while vec.len() < 64 {
let cipher_outputs = kreyvium.next_64();
for c in cipher_outputs {
vec.push(c.decrypt(&client_key))
}
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output, hexadecimal);
}
use tfhe::shortint::prelude::*;
#[test]
fn kreyvium_test_shortint_long() {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [0; 128];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [0; 128];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let output = "D1F0303482061111".to_string();
let cipher_key = key.map(|x| client_key.encrypt(x));
let ciphered_message = FheUint64::try_encrypt(0u64, &hl_client_key).unwrap();
let mut kreyvium = KreyviumStreamShortint::new(cipher_key, iv, server_key, ksk, hl_server_key);
let trans_ciphered_message = kreyvium.trans_encrypt_64(ciphered_message);
let ciphered_message = trans_ciphered_message.decrypt(&hl_client_key);
let hexadecimal = get_hexagonal_string_from_u64(vec![ciphered_message]);
assert_eq!(output, hexadecimal);
}
#[test]
fn kreyvium_test_clear_byte() {
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key_bytes = [0u8; 16];
for i in (0..key_string.len()).step_by(2) {
key_bytes[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv_bytes = [0u8; 16];
for i in (0..iv_string.len()).step_by(2) {
iv_bytes[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let output = "D1F0303482061111".to_string();
let mut kreyvium = KreyviumStreamByte::<u8>::new(key_bytes, iv_bytes);
let mut vec = Vec::<u8>::with_capacity(8);
while vec.len() < 8 {
let outputs = kreyvium.next_64();
for c in outputs {
vec.push(c)
}
}
let hexadecimal = get_hexagonal_string_from_bytes(vec);
assert_eq!(output, hexadecimal);
}
#[test]
fn kreyvium_test_byte_long() {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key_bytes = [0u8; 16];
for i in (0..key_string.len()).step_by(2) {
key_bytes[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv_bytes = [0u8; 16];
for i in (0..iv_string.len()).step_by(2) {
iv_bytes[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let cipher_key = key_bytes.map(|x| FheUint8::encrypt(x, &client_key));
let output = "D1F0303482061111".to_string();
let mut kreyvium = KreyviumStreamByte::<FheUint8>::new(cipher_key, iv_bytes, &server_key);
let mut vec = Vec::<u8>::with_capacity(8);
while vec.len() < 8 {
let cipher_outputs = kreyvium.next_64();
for c in cipher_outputs {
vec.push(c.decrypt(&client_key))
}
}
let hexadecimal = get_hexagonal_string_from_bytes(vec);
assert_eq!(output, hexadecimal);
}
#[test]
fn kreyvium_test_fhe_byte_transciphering_long() {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
let mut key = [0u8; 16];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC000000000000".to_string();
let mut iv = [0u8; 16];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let output = "D1F0303482061111".to_string();
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let ciphered_message = FheUint64::try_encrypt(0u64, &client_key).unwrap();
let mut kreyvium = KreyviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
let trans_ciphered_message = kreyvium.trans_encrypt_64(ciphered_message);
let ciphered_message = trans_ciphered_message.decrypt(&client_key);
let hexadecimal = get_hexagonal_string_from_u64(vec![ciphered_message]);
assert_eq!(output, hexadecimal);
}

View File

@@ -1,12 +0,0 @@
#![allow(clippy::too_long_first_doc_paragraph)]
mod static_deque;
mod kreyvium;
pub use kreyvium::{KreyviumStream, KreyviumStreamByte, KreyviumStreamShortint};
mod trivium;
pub use trivium::{TriviumStream, TriviumStreamByte, TriviumStreamShortint};
mod trans_ciphering;
pub use trans_ciphering::TransCiphering;

View File

@@ -1,6 +0,0 @@
#[allow(clippy::module_inception)]
mod static_deque;
pub use static_deque::StaticDeque;
mod static_byte_deque;
pub use static_byte_deque::{StaticByteDeque, StaticByteDequeInput};

View File

@@ -1,140 +0,0 @@
//! This module implements the StaticByteDeque struct: a deque of bytes. The idea
//! is that this is a wrapper around StaticDeque, but StaticByteDeque has an additional
//! functionality: it can construct the "intermediate" bytes, made of parts of other bytes.
//! This is pretending to store bits, and allows accessing bits in chunks of 8 consecutive.
use crate::static_deque::StaticDeque;
use tfhe::FheUint8;
/// Internal trait specifying which operations are needed by StaticByteDeque
pub trait StaticByteDequeInput<OpOutput>:
Clone
+ std::ops::Shr<u8, Output = OpOutput>
+ std::ops::Shl<u8, Output = OpOutput>
+ std::ops::BitOr<Output = OpOutput>
{
}
impl StaticByteDequeInput<u8> for u8 {}
impl StaticByteDequeInput<u8> for &u8 {}
impl StaticByteDequeInput<FheUint8> for FheUint8 {}
impl StaticByteDequeInput<FheUint8> for &FheUint8 {}
/// Here T must represent a type covering a byte, like u8 or FheUint8.
#[derive(Clone)]
pub struct StaticByteDeque<const N: usize, T> {
deque: StaticDeque<N, T>,
}
impl<const N: usize, T> StaticByteDeque<N, T>
where
T: StaticByteDequeInput<T>,
for<'a> &'a T: StaticByteDequeInput<T>,
{
/// Constructor always uses a fully initialized array, the first element of
/// which is oldest, the last is newest
pub fn new(_arr: [T; N]) -> Self {
Self {
deque: StaticDeque::<N, T>::new(_arr),
}
}
/// Elements are pushed via a byte element (covering 8 underlying bits)
pub fn push(&mut self, val: T) {
self.deque.push(val)
}
/// Computes n shifts in a row.
pub fn n_shifts(&mut self, n: usize) {
self.deque.n_shifts(n);
}
/// Getter for the internal memory
#[allow(dead_code)]
fn get_arr(&self) -> &[T; N] {
self.deque.get_arr()
}
/// Returns a byte that is all zeros except possibly a one at the specified bit
/// location, if that bit is set in the deque
#[allow(dead_code)]
fn bit(&self, i: usize) -> T
where
for<'a> &'a T: std::ops::BitAnd<u8, Output = T>,
{
let byte: &T = &self.deque[i / 8];
let bit_selector: u8 = 1u8 << (i % 8);
byte & bit_selector
}
/// This function reconstructs an intermediate byte if necessary
pub fn byte(&self, i: usize) -> T {
let byte: &T = &self.deque[i / 8];
let bit_idx: u8 = (i % 8) as u8;
if bit_idx == 0 {
return byte.clone();
}
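// Example (matching byte_deque_test below): with the youngest byte 0b0000_0100 and the next
// older byte 0b1000_0000, byte(1) = (0b0000_0100 << 1) | (0b1000_0000 >> 7) = 0b0000_1001 = 9.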
let byte_next: &T = &self.deque[i / 8 + 1];
(byte << bit_idx) | (byte_next >> (8 - bit_idx))
}
}
#[cfg(test)]
mod tests {
use crate::static_deque::StaticByteDeque;
#[test]
fn byte_deque_test() {
let mut deque = StaticByteDeque::<3, u8>::new([2, 64, 128]);
deque.push(4);
// Youngest: 4
assert!(deque.bit(0) == 0);
assert!(deque.bit(1) == 0);
assert!(deque.bit(2) > 0);
assert!(deque.bit(3) == 0);
assert!(deque.bit(4) == 0);
assert!(deque.bit(5) == 0);
assert!(deque.bit(6) == 0);
assert!(deque.bit(7) == 0);
// second youngest: 128
assert!(deque.bit(8) == 0);
assert!(deque.bit(8 + 1) == 0);
assert!(deque.bit(8 + 2) == 0);
assert!(deque.bit(8 + 3) == 0);
assert!(deque.bit(8 + 4) == 0);
assert!(deque.bit(8 + 5) == 0);
assert!(deque.bit(8 + 6) == 0);
assert!(deque.bit(8 + 7) > 0);
// oldest: 64
assert!(deque.bit(16) == 0);
assert!(deque.bit(16 + 1) == 0);
assert!(deque.bit(16 + 2) == 0);
assert!(deque.bit(16 + 3) == 0);
assert!(deque.bit(16 + 4) == 0);
assert!(deque.bit(16 + 5) == 0);
assert!(deque.bit(16 + 6) > 0);
assert!(deque.bit(16 + 7) == 0);
assert_eq!(deque.byte(0), 4u8);
assert_eq!(deque.byte(1), 9u8);
assert_eq!(deque.byte(2), 18u8);
assert_eq!(deque.byte(3), 36u8);
assert_eq!(deque.byte(4), 72u8);
assert_eq!(deque.byte(5), 144u8);
assert_eq!(deque.byte(6), 32u8);
assert_eq!(deque.byte(7), 64u8);
assert_eq!(deque.byte(8), 128u8);
assert_eq!(deque.byte(9), 0u8);
assert_eq!(deque.byte(10), 1u8);
assert_eq!(deque.byte(11), 2u8);
assert_eq!(deque.byte(12), 4u8);
assert_eq!(deque.byte(13), 8u8);
assert_eq!(deque.byte(14), 16u8);
assert_eq!(deque.byte(15), 32u8);
assert_eq!(deque.byte(16), 64u8);
}
}

View File

@@ -1,135 +0,0 @@
//! This module implements the StaticDeque struct: a deque utility whose size
//! is known at compile time. Construction, push, and indexing are publicly
//! available.
use core::ops::{Index, IndexMut};
/// StaticDeque: a struct implementing a deque whose size is known at compile time.
/// It has 2 members: the static array containing the data (never empty), and a cursor
/// equal to the index of the oldest element (and the next one to be overwritten).
#[derive(Clone)]
pub struct StaticDeque<const N: usize, T> {
arr: [T; N],
cursor: usize,
}
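// Example of the cursor bookkeeping (taken from the tests below): pushing 7, 8, 9 and 10 onto
// a deque built from [1, 2, 3, 4, 5, 6] overwrites the four oldest slots, leaving the internal
// array as [7, 8, 9, 10, 5, 6] with the cursor at index 4, so deque[0] is 10 (youngest) and
// deque[5] is 5 (oldest).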
impl<const N: usize, T> StaticDeque<N, T> {
/// Constructor always uses a fully initialized array, the first element of
/// which is oldest, the last is newest
pub fn new(_arr: [T; N]) -> Self {
Self {
arr: _arr,
cursor: 0,
}
}
/// Push a new element to the deque, overwriting the oldest at the same time.
pub fn push(&mut self, val: T) {
self.arr[self.cursor] = val;
self.shift();
}
/// Shift: equivalent to pushing the oldest element
pub fn shift(&mut self) {
self.n_shifts(1);
}
/// Computes n shifts in a row.
pub fn n_shifts(&mut self, n: usize) {
self.cursor += n;
self.cursor %= N;
}
/// Getter for the internal memory
#[allow(dead_code)]
pub fn get_arr(&self) -> &[T; N] {
&self.arr
}
}
/// Index trait for the StaticDeque: 0 is the youngest element, N-1 is the oldest,
/// and indices of N or above will panic.
impl<const N: usize, T> Index<usize> for StaticDeque<N, T> {
type Output = T;
/// 0 is youngest
fn index(&self, i: usize) -> &T {
if i >= N {
panic!("Index {:?} too high for size {:?}", i, N);
}
&self.arr[(N + self.cursor - i - 1) % N]
}
}
/// IndexMut trait for the StaticDeque: 0 is the youngest element, N-1 is the oldest,
/// and indices of N or above will panic.
impl<const N: usize, T> IndexMut<usize> for StaticDeque<N, T> {
/// 0 is youngest
fn index_mut(&mut self, i: usize) -> &mut T {
if i >= N {
panic!("Index {:?} too high for size {:?}", i, N);
}
&mut self.arr[(N + self.cursor - i - 1) % N]
}
}
#[cfg(test)]
mod tests {
use crate::static_deque::StaticDeque;
#[test]
fn test_static_deque() {
let a = [1, 2, 3, 4, 5, 6];
let mut static_deque = StaticDeque::new(a);
for i in 7..11 {
static_deque.push(i);
}
assert_eq!(*static_deque.get_arr(), [7, 8, 9, 10, 5, 6]);
for i in 11..15 {
static_deque.push(i);
}
assert_eq!(*static_deque.get_arr(), [13, 14, 9, 10, 11, 12]);
assert_eq!(static_deque[0], 14);
assert_eq!(static_deque[1], 13);
assert_eq!(static_deque[2], 12);
assert_eq!(static_deque[3], 11);
assert_eq!(static_deque[4], 10);
assert_eq!(static_deque[5], 9);
}
#[test]
fn test_static_deque_indexmut() {
let a = [1, 2, 3, 4, 5, 6];
let mut static_deque = StaticDeque::new(a);
for i in 7..11 {
static_deque.push(i);
}
assert_eq!(*static_deque.get_arr(), [7, 8, 9, 10, 5, 6]);
for i in 11..15 {
static_deque.push(i);
}
assert_eq!(*static_deque.get_arr(), [13, 14, 9, 10, 11, 12]);
static_deque[1] = 100;
assert_eq!(static_deque[0], 14);
assert_eq!(static_deque[1], 100);
assert_eq!(static_deque[2], 12);
assert_eq!(static_deque[3], 11);
assert_eq!(static_deque[4], 10);
assert_eq!(static_deque[5], 9);
}
#[test]
#[should_panic]
fn test_static_deque_index_fail() {
let a = [1, 2, 3, 4, 5, 6];
let static_deque = StaticDeque::new(a);
let _ = static_deque[6];
}
}

View File

@@ -1,117 +0,0 @@
//! This module contains extensions of some TriviumStream or KreyviumStream objects
//! for which trans ciphering is available.
use crate::{KreyviumStreamByte, KreyviumStreamShortint, TriviumStreamByte, TriviumStreamShortint};
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::shortint::Ciphertext;
use tfhe::{set_server_key, unset_server_key, FheUint64, FheUint8, ServerKey};
/// Trait specifying the interface for trans ciphering a FheUint64 object. Since it is meant
/// to be used with stream ciphers, encryption and decryption are by default the same.
pub trait TransCiphering {
fn trans_encrypt_64(&mut self, cipher: FheUint64) -> FheUint64;
fn trans_decrypt_64(&mut self, cipher: FheUint64) -> FheUint64 {
self.trans_encrypt_64(cipher)
}
}
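// A minimal usage sketch, mirroring the tests in this crate (the names `trivium` and
// `hl_client_key` are those of the test code, not part of this trait):
//
//     let ct = FheUint64::try_encrypt(message, &hl_client_key).unwrap();
//     let transciphered = trivium.trans_encrypt_64(ct);
//     // decrypting `transciphered` yields `message` XORed with the next 64 keystream bits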
fn transcipher_from_fheu8_stream(
stream: Vec<FheUint8>,
cipher: FheUint64,
fhe_server_key: &ServerKey,
) -> FheUint64 {
assert_eq!(stream.len(), 8);
set_server_key(fhe_server_key.clone());
rayon::broadcast(|_| set_server_key(fhe_server_key.clone()));
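// Each keystream byte is widened to 64 bits and shifted into byte position 7 - i, so the
// first byte of the stream ends up in the most significant byte of the word.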
let ret: FheUint64 = stream
.into_par_iter()
.enumerate()
.map(|(i, x)| &cipher ^ &(FheUint64::cast_from(x) << (8 * (7 - i) as u8)))
.reduce_with(|a, b| a | b)
.unwrap();
unset_server_key();
rayon::broadcast(|_| unset_server_key());
ret
}
fn transcipher_from_1_1_stream(
stream: Vec<Ciphertext>,
cipher: FheUint64,
hl_server_key: &ServerKey,
internal_server_key: &tfhe::shortint::ServerKey,
casting_key: &tfhe::shortint::KeySwitchingKey,
) -> FheUint64 {
assert_eq!(stream.len(), 64);
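// The 64 single-bit ciphertexts are packed two by two (b0 + 2*b1) in the 1_1 parameter space,
// cast into the high-level parameter space with the key switching key, and the 32 resulting
// 2-bit blocks are assembled into a FheUint64; byte_idx = 7 - i / 4 places the first keystream
// byte in the most significant byte of the word.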
let pairs = (0..32)
.into_par_iter()
.map(|i| {
let byte_idx = 7 - i / 4;
let pair_idx = i % 4;
let b0 = &stream[8 * byte_idx + 2 * pair_idx];
let b1 = &stream[8 * byte_idx + 2 * pair_idx + 1];
casting_key.cast(
&internal_server_key
.unchecked_add(b0, &internal_server_key.unchecked_scalar_mul(b1, 2)),
)
})
.collect::<Vec<_>>();
set_server_key(hl_server_key.clone());
let ret = &cipher ^ &FheUint64::try_from(pairs).unwrap();
unset_server_key();
ret
}
impl TransCiphering for TriviumStreamByte<FheUint8> {
/// `TriviumStreamByte<FheUint8>`: since a full step outputs 8 bytes, these bytes
/// are each shifted into one of the 8 byte positions of the 64-bit word, and XORed with the
/// input cipher
fn trans_encrypt_64(&mut self, cipher: FheUint64) -> FheUint64 {
transcipher_from_fheu8_stream(self.next_64(), cipher, self.get_server_key())
}
}
impl TransCiphering for KreyviumStreamByte<FheUint8> {
/// `KreyviumStreamByte<FheUint8>`: since a full step outputs 8 bytes, these bytes
/// are each shifted into one of the 8 byte positions of the 64-bit word, and XORed with the
/// input cipher
fn trans_encrypt_64(&mut self, cipher: FheUint64) -> FheUint64 {
transcipher_from_fheu8_stream(self.next_64(), cipher, self.get_server_key())
}
}
impl TransCiphering for TriviumStreamShortint {
/// TriviumStreamShortint: since a full step outputs 64 shortints, these bits
/// are paired 2 by 2 in the HL parameter space and packed in a full word,
/// and XORed with the input cipher
fn trans_encrypt_64(&mut self, cipher: FheUint64) -> FheUint64 {
transcipher_from_1_1_stream(
self.next_64(),
cipher,
self.get_hl_server_key(),
self.get_internal_server_key(),
self.get_casting_key(),
)
}
}
impl TransCiphering for KreyviumStreamShortint {
/// KreyviumStreamShortint: since a full step outputs 64 shortints, these bits
/// are paired 2 by 2 in the HL parameter space and packed in a full word,
/// and XORed with the input cipher
fn trans_encrypt_64(&mut self, cipher: FheUint64) -> FheUint64 {
transcipher_from_1_1_stream(
self.next_64(),
cipher,
self.get_hl_server_key(),
self.get_internal_server_key(),
self.get_casting_key(),
)
}
}

View File

@@ -1,11 +0,0 @@
mod trivium_bool;
pub use trivium_bool::TriviumStream;
mod trivium_byte;
pub use trivium_byte::TriviumStreamByte;
mod trivium_shortint;
pub use trivium_shortint::TriviumStreamShortint;
#[cfg(test)]
mod test;

View File

@@ -1,405 +0,0 @@
use crate::{TransCiphering, TriviumStream, TriviumStreamByte, TriviumStreamShortint};
use tfhe::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
// Values for these tests come from the github repo cantora/avr-crypto-lib, commit 2a5b018,
// file testvectors/trivium-80.80.test-vectors
fn get_hexadecimal_string_from_lsb_first_stream(a: Vec<bool>) -> String {
assert!(a.len() % 8 == 0);
let mut hexadecimal: String = "".to_string();
for test in a.chunks(8) {
// Encoding is bytes in LSB order
match test[4..8] {
[false, false, false, false] => hexadecimal.push('0'),
[true, false, false, false] => hexadecimal.push('1'),
[false, true, false, false] => hexadecimal.push('2'),
[true, true, false, false] => hexadecimal.push('3'),
[false, false, true, false] => hexadecimal.push('4'),
[true, false, true, false] => hexadecimal.push('5'),
[false, true, true, false] => hexadecimal.push('6'),
[true, true, true, false] => hexadecimal.push('7'),
[false, false, false, true] => hexadecimal.push('8'),
[true, false, false, true] => hexadecimal.push('9'),
[false, true, false, true] => hexadecimal.push('A'),
[true, true, false, true] => hexadecimal.push('B'),
[false, false, true, true] => hexadecimal.push('C'),
[true, false, true, true] => hexadecimal.push('D'),
[false, true, true, true] => hexadecimal.push('E'),
[true, true, true, true] => hexadecimal.push('F'),
_ => (),
};
match test[0..4] {
[false, false, false, false] => hexadecimal.push('0'),
[true, false, false, false] => hexadecimal.push('1'),
[false, true, false, false] => hexadecimal.push('2'),
[true, true, false, false] => hexadecimal.push('3'),
[false, false, true, false] => hexadecimal.push('4'),
[true, false, true, false] => hexadecimal.push('5'),
[false, true, true, false] => hexadecimal.push('6'),
[true, true, true, false] => hexadecimal.push('7'),
[false, false, false, true] => hexadecimal.push('8'),
[true, false, false, true] => hexadecimal.push('9'),
[false, true, false, true] => hexadecimal.push('A'),
[true, true, false, true] => hexadecimal.push('B'),
[false, false, true, true] => hexadecimal.push('C'),
[true, false, true, true] => hexadecimal.push('D'),
[false, true, true, true] => hexadecimal.push('E'),
[true, true, true, true] => hexadecimal.push('F'),
_ => (),
};
}
hexadecimal
}
fn get_hexagonal_string_from_bytes(a: Vec<u8>) -> String {
assert!(a.len() % 8 == 0);
let mut hexadecimal: String = "".to_string();
for test in a {
hexadecimal.push_str(&format!("{:02X?}", test));
}
hexadecimal
}
fn get_hexagonal_string_from_u64(a: Vec<u64>) -> String {
let mut hexadecimal: String = "".to_string();
for test in a {
hexadecimal.push_str(&format!("{:016X?}", test));
}
hexadecimal
}
#[test]
fn trivium_test_1() {
let key = [false; 80];
let iv = [false; 80];
let output_0_63 = "FBE0BF265859051B517A2E4E239FC97F563203161907CF2DE7A8790FA1B2E9CDF75292030268B7382B4C1A759AA2599A285549986E74805903801A4CB5A5D4F2".to_string();
let output_192_255 = "0F1BE95091B8EA857B062AD52BADF47784AC6D9B2E3F85A9D79995043302F0FDF8B76E5BC8B7B4F0AA46CD20DDA04FDD197BC5E1635496828F2DBFB23F6BD5D0".to_string();
let output_256_319 = "80F9075437BAC73F696D0ABE3972F5FCE2192E5FCC13C0CB77D0ABA09126838D31A2D38A2087C46304C8A63B54109F679B0B1BC71E72A58D6DD3E0A3FF890D4A".to_string();
let output_448_511 = "68450EB0910A98EF1853E0FC1BED8AB6BB08DF5F167D34008C2A85284D4B886DD56883EE92BF18E69121670B4C81A5689C9B0538373D22EB923A28A2DB44C0EB".to_string();
let mut trivium = TriviumStream::<bool>::new(key, iv);
let mut vec = Vec::<bool>::with_capacity(512 * 8);
while vec.len() < 512 * 8 {
vec.push(trivium.next_bool());
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
assert_eq!(output_192_255, hexadecimal[192 * 2..256 * 2]);
assert_eq!(output_256_319, hexadecimal[256 * 2..320 * 2]);
assert_eq!(output_448_511, hexadecimal[448 * 2..512 * 2]);
}
#[test]
fn trivium_test_2() {
let mut key = [false; 80];
let iv = [false; 80];
key[7] = true;
let output_0_63 = "38EB86FF730D7A9CAF8DF13A4420540DBB7B651464C87501552041C249F29A64D2FBF515610921EBE06C8F92CECF7F8098FF20CCCC6A62B97BE8EF7454FC80F9".to_string();
let output_192_255 = "EAF2625D411F61E41F6BAEEDDD5FE202600BD472F6C9CD1E9134A745D900EF6C023E4486538F09930CFD37157C0EB57C3EF6C954C42E707D52B743AD83CFF297".to_string();
let output_256_319 = "9A203CF7B2F3F09C43D188AA13A5A2021EE998C42F777E9B67C3FA221A0AA1B041AA9E86BC2F5C52AFF11F7D9EE480CB1187B20EB46D582743A52D7CD080A24A".to_string();
let output_448_511 = "EBF14772061C210843C18CEA2D2A275AE02FCB18E5D7942455FF77524E8A4CA51E369A847D1AEEFB9002FCD02342983CEAFA9D487CC2032B10192CD416310FA4".to_string();
let mut trivium = TriviumStream::<bool>::new(key, iv);
let mut vec = Vec::<bool>::with_capacity(512 * 8);
while vec.len() < 512 * 8 {
vec.push(trivium.next_bool());
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
assert_eq!(output_192_255, hexadecimal[192 * 2..256 * 2]);
assert_eq!(output_256_319, hexadecimal[256 * 2..320 * 2]);
assert_eq!(output_448_511, hexadecimal[448 * 2..512 * 2]);
}
#[test]
fn trivium_test_3() {
let key = [false; 80];
let mut iv = [false; 80];
iv[7] = true;
let output_0_63 = "F8901736640549E3BA7D42EA2D07B9F49233C18D773008BD755585B1A8CBAB86C1E9A9B91F1AD33483FD6EE3696D659C9374260456A36AAE11F033A519CBD5D7".to_string();
let output_192_255 = "87423582AF64475C3A9C092E32A53C5FE07D35B4C9CA288A89A43DEF3913EA9237CA43342F3F8E83AD3A5C38D463516F94E3724455656A36279E3E924D442F06".to_string();
let output_256_319 = "D94389A90E6F3BF2BB4C8B057339AAD8AA2FEA238C29FCAC0D1FF1CB2535A07058BA995DD44CFC54CCEC54A5405B944C532D74E50EA370CDF1BA1CBAE93FC0B5".to_string();
let output_448_511 = "4844151714E56A3A2BBFBA426A1D60F9A4F265210A91EC29259AE2035234091C49FFB1893FA102D425C57C39EB4916F6D148DC83EBF7DE51EEB9ABFE045FB282".to_string();
let mut trivium = TriviumStream::<bool>::new(key, iv);
let mut vec = Vec::<bool>::with_capacity(512 * 8);
while vec.len() < 512 * 8 {
vec.push(trivium.next_bool());
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
assert_eq!(output_192_255, hexadecimal[192 * 2..256 * 2]);
assert_eq!(output_256_319, hexadecimal[256 * 2..320 * 2]);
assert_eq!(output_448_511, hexadecimal[448 * 2..512 * 2]);
}
#[test]
fn trivium_test_4() {
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [false; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [false; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let output_0_63 = "F4CD954A717F26A7D6930830C4E7CF0819F80E03F25F342C64ADC66ABA7F8A8E6EAA49F23632AE3CD41A7BD290A0132F81C6D4043B6E397D7388F3A03B5FE358".to_string();
let output_65472_65535 = "C04C24A6938C8AF8A491D5E481271E0E601338F01067A86A795CA493AA4FF265619B8D448B706B7C88EE8395FC79E5B51AB40245BBF7773AE67DF86FCFB71F30".to_string();
let output_65536_65599 = "011A0D7EC32FA102C66C164CFCB189AED9F6982E8C7370A6A37414781192CEB155C534C1C8C9E53FDEADF2D3D0577DAD3A8EB2F6E5265F1E831C86844670BC69".to_string();
let output_131008_131071 = "48107374A9CE3AAF78221AE77789247CF6896A249ED75DCE0CF2D30EB9D889A0C61C9F480E5C07381DED9FAB2AD54333E82C89BA92E6E47FD828F1A66A8656E0".to_string();
let mut trivium = TriviumStream::<bool>::new(key, iv);
let mut vec = Vec::<bool>::with_capacity(131072 * 8);
while vec.len() < 131072 * 8 {
vec.push(trivium.next_bool());
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
assert_eq!(output_65472_65535, hexadecimal[65472 * 2..65536 * 2]);
assert_eq!(output_65536_65599, hexadecimal[65536 * 2..65600 * 2]);
assert_eq!(output_131008_131071, hexadecimal[131008 * 2..131072 * 2]);
}
#[test]
fn trivium_test_clear_byte() {
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0u8; 10];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0u8; 10];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let output_0_63 = "F4CD954A717F26A7D6930830C4E7CF0819F80E03F25F342C64ADC66ABA7F8A8E6EAA49F23632AE3CD41A7BD290A0132F81C6D4043B6E397D7388F3A03B5FE358".to_string();
let output_65472_65535 = "C04C24A6938C8AF8A491D5E481271E0E601338F01067A86A795CA493AA4FF265619B8D448B706B7C88EE8395FC79E5B51AB40245BBF7773AE67DF86FCFB71F30".to_string();
let output_65536_65599 = "011A0D7EC32FA102C66C164CFCB189AED9F6982E8C7370A6A37414781192CEB155C534C1C8C9E53FDEADF2D3D0577DAD3A8EB2F6E5265F1E831C86844670BC69".to_string();
let output_131008_131071 = "48107374A9CE3AAF78221AE77789247CF6896A249ED75DCE0CF2D30EB9D889A0C61C9F480E5C07381DED9FAB2AD54333E82C89BA92E6E47FD828F1A66A8656E0".to_string();
let mut trivium = TriviumStreamByte::<u8>::new(key, iv);
let mut vec = Vec::<u8>::with_capacity(131072);
while vec.len() < 131072 {
let outputs = trivium.next_64();
for c in outputs {
vec.push(c)
}
}
let hexadecimal = get_hexagonal_string_from_bytes(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
assert_eq!(output_65472_65535, hexadecimal[65472 * 2..65536 * 2]);
assert_eq!(output_65536_65599, hexadecimal[65536 * 2..65600 * 2]);
assert_eq!(output_131008_131071, hexadecimal[131008 * 2..131072 * 2]);
}
#[test]
fn trivium_test_fhe_long() {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [false; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [false; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val: u8 = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2 == 1;
val >>= 1;
}
}
let output_0_63 = "F4CD954A717F26A7D6930830C4E7CF0819F80E03F25F342C64ADC66ABA7F8A8E6EAA49F23632AE3CD41A7BD290A0132F81C6D4043B6E397D7388F3A03B5FE358".to_string();
let cipher_key = key.map(|x| FheBool::encrypt(x, &client_key));
let mut trivium = TriviumStream::<FheBool>::new(cipher_key, iv, &server_key);
let mut vec = Vec::<bool>::with_capacity(64 * 8);
while vec.len() < 64 * 8 {
let cipher_outputs = trivium.next_64();
for c in cipher_outputs {
vec.push(c.decrypt(&client_key))
}
}
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
}
#[test]
fn trivium_test_fhe_byte_long() {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0u8; 10];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0u8; 10];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let output_0_63 = "F4CD954A717F26A7D6930830C4E7CF0819F80E03F25F342C64ADC66ABA7F8A8E6EAA49F23632AE3CD41A7BD290A0132F81C6D4043B6E397D7388F3A03B5FE358".to_string();
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let mut trivium = TriviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
let mut vec = Vec::<u8>::with_capacity(64);
while vec.len() < 64 {
let cipher_outputs = trivium.next_64();
for c in cipher_outputs {
vec.push(c.decrypt(&client_key))
}
}
let hexadecimal = get_hexagonal_string_from_bytes(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
}
#[test]
fn trivium_test_fhe_byte_transciphering_long() {
let config = ConfigBuilder::default().build();
let (client_key, server_key) = generate_keys(config);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0u8; 10];
for i in (0..key_string.len()).step_by(2) {
key[i >> 1] = u8::from_str_radix(&key_string[i..i + 2], 16).unwrap();
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0u8; 10];
for i in (0..iv_string.len()).step_by(2) {
iv[i >> 1] = u8::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
}
let output_0_63 = "F4CD954A717F26A7D6930830C4E7CF0819F80E03F25F342C64ADC66ABA7F8A8E6EAA49F23632AE3CD41A7BD290A0132F81C6D4043B6E397D7388F3A03B5FE358".to_string();
let cipher_key = key.map(|x| FheUint8::encrypt(x, &client_key));
let mut ciphered_message = vec![FheUint64::try_encrypt(0u64, &client_key).unwrap(); 9];
let mut trivium = TriviumStreamByte::<FheUint8>::new(cipher_key, iv, &server_key);
let mut vec = Vec::<u64>::with_capacity(8);
while vec.len() < 8 {
let trans_ciphered_message = trivium.trans_encrypt_64(ciphered_message.pop().unwrap());
vec.push(trans_ciphered_message.decrypt(&client_key));
}
let hexadecimal = get_hexagonal_string_from_u64(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
}
use tfhe::shortint::prelude::*;
#[test]
fn trivium_test_shortint_long() {
let config = ConfigBuilder::default().build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(PARAM_MESSAGE_1_CARRY_1_KS_PBS);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS,
);
let key_string = "0053A6F94C9FF24598EB".to_string();
let mut key = [0; 80];
for i in (0..key_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&key_string[i..i + 2], 16).unwrap();
for j in 0..8 {
key[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let iv_string = "0D74DB42A91077DE45AC".to_string();
let mut iv = [0; 80];
for i in (0..iv_string.len()).step_by(2) {
let mut val = u64::from_str_radix(&iv_string[i..i + 2], 16).unwrap();
for j in 0..8 {
iv[8 * (i >> 1) + j] = val % 2;
val >>= 1;
}
}
let output_0_63 = "F4CD954A717F26A7D6930830C4E7CF0819F80E03F25F342C64ADC66ABA7F8A8E6EAA49F23632AE3CD41A7BD290A0132F81C6D4043B6E397D7388F3A03B5FE358".to_string();
let cipher_key = key.map(|x| client_key.encrypt(x));
let mut ciphered_message = vec![FheUint64::try_encrypt(0u64, &hl_client_key).unwrap(); 9];
let mut trivium = TriviumStreamShortint::new(cipher_key, iv, server_key, ksk, hl_server_key);
let mut vec = Vec::<u64>::with_capacity(8);
while vec.len() < 8 {
let trans_ciphered_message = trivium.trans_encrypt_64(ciphered_message.pop().unwrap());
vec.push(trans_ciphered_message.decrypt(&hl_client_key));
}
let hexadecimal = get_hexagonal_string_from_u64(vec);
assert_eq!(output_0_63, hexadecimal[0..64 * 2]);
}

View File

@@ -1,216 +0,0 @@
//! This module implements the Trivium stream cipher, using booleans or FheBool
//! for the representation of the inner bits.
use crate::static_deque::StaticDeque;
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheBool, ServerKey};
/// Internal trait specifying which operations are necessary for TriviumStream generic type
pub trait TriviumBoolInput<OpOutput>:
Sized
+ Clone
+ std::ops::BitXor<Output = OpOutput>
+ std::ops::BitAnd<Output = OpOutput>
+ std::ops::Not<Output = OpOutput>
{
}
impl TriviumBoolInput<bool> for bool {}
impl TriviumBoolInput<bool> for &bool {}
impl TriviumBoolInput<FheBool> for FheBool {}
impl TriviumBoolInput<FheBool> for &FheBool {}
/// TriviumStream: a struct implementing the Trivium stream cipher, using T for the internal
/// representation of bits (bool or FheBool). To be able to compute FHE operations, it also owns
/// an Option for a ServerKey.
pub struct TriviumStream<T> {
a: StaticDeque<93, T>,
b: StaticDeque<84, T>,
c: StaticDeque<111, T>,
fhe_key: Option<ServerKey>,
}
impl TriviumStream<bool> {
/// Constructor for `TriviumStream<bool>`: arguments are the secret key and the input vector.
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
/// returning)
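/// A minimal usage sketch (illustrative, not taken from the original sources; it only uses the
/// constructor below and `next_bool`):
/// ```ignore
/// let key = [false; 80];
/// let iv = [false; 80];
/// let mut trivium = TriviumStream::<bool>::new(key, iv);
/// let first_bit: bool = trivium.next_bool();
/// ```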
pub fn new(key: [bool; 80], iv: [bool; 80]) -> TriviumStream<bool> {
// Initialization of Trivium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_register = [false; 93];
let mut b_register = [false; 84];
let mut c_register = [false; 111];
for i in 0..80 {
a_register[93 - 80 + i] = key[i];
b_register[84 - 80 + i] = iv[i];
}
c_register[0] = true;
c_register[1] = true;
c_register[2] = true;
TriviumStream::<bool>::new_from_registers(a_register, b_register, c_register, None)
}
}
impl TriviumStream<FheBool> {
/// Constructor for `TriviumStream<FheBool>`: arguments are the encrypted secret key and input
/// vector, and the FHE server key.
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
/// returning)
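/// A minimal usage sketch (illustrative; the key generation and `FheBool::encrypt` calls follow
/// the high-level TFHE-rs API and are assumptions, not part of this file):
/// ```ignore
/// use tfhe::prelude::*;
/// use tfhe::{generate_keys, ConfigBuilder, FheBool};
/// let (client_key, server_key) = generate_keys(ConfigBuilder::default().build());
/// let key = [false; 80].map(|b| FheBool::encrypt(b, &client_key));
/// let iv = [false; 80];
/// let mut trivium = TriviumStream::<FheBool>::new(key, iv, &server_key);
/// let encrypted_keystream = trivium.next_64();
/// ```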
pub fn new(key: [FheBool; 80], iv: [bool; 80], sk: &ServerKey) -> TriviumStream<FheBool> {
set_server_key(sk.clone());
// Initialization of Trivium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_register = [false; 93].map(FheBool::encrypt_trivial);
let mut b_register = [false; 84].map(FheBool::encrypt_trivial);
let mut c_register = [false; 111].map(FheBool::encrypt_trivial);
for i in 0..80 {
a_register[93 - 80 + i] = key[i].clone();
b_register[84 - 80 + i] = FheBool::encrypt_trivial(iv[i]);
}
c_register[0] = FheBool::try_encrypt_trivial(true).unwrap();
c_register[1] = FheBool::try_encrypt_trivial(true).unwrap();
c_register[2] = FheBool::try_encrypt_trivial(true).unwrap();
unset_server_key();
TriviumStream::<FheBool>::new_from_registers(
a_register,
b_register,
c_register,
Some(sk.clone()),
)
}
}
impl<T> TriviumStream<T>
where
T: TriviumBoolInput<T> + std::marker::Send + std::marker::Sync,
for<'a> &'a T: TriviumBoolInput<T>,
{
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
/// server key
fn new_from_registers(
a_register: [T; 93],
b_register: [T; 84],
c_register: [T; 111],
key: Option<ServerKey>,
) -> Self {
let mut ret = Self {
a: StaticDeque::<93, T>::new(a_register),
b: StaticDeque::<84, T>::new(b_register),
c: StaticDeque::<111, T>::new(c_register),
fhe_key: key,
};
ret.init();
ret
}
/// The specification of Trivium includes running 1152 (= 18*64) unused steps to mix up the
/// registers, before starting the proper stream
fn init(&mut self) {
for _ in 0..18 {
self.next_64();
}
}
/// Computes one turn of the stream, updating registers and outputting the new bit.
pub fn next_bool(&mut self) -> T {
if let Some(sk) = &self.fhe_key {
set_server_key(sk.clone());
}
let [o, a, b, c] = self.get_output_and_values(0);
self.a.push(a);
self.b.push(b);
self.c.push(c);
o
}
/// Computes a potential future step of Trivium, n terms in the future. This does not update the
/// registers; instead it returns the output together with the three values that will later be
/// used to update the registers. This function is meant to be called in parallel.
fn get_output_and_values(&self, n: usize) -> [T; 4] {
assert!(n < 65);
let (((temp_a, temp_b), (temp_c, a_and)), (b_and, c_and)) = rayon::join(
|| {
rayon::join(
|| {
rayon::join(
|| &self.a[65 - n] ^ &self.a[92 - n],
|| &self.b[68 - n] ^ &self.b[83 - n],
)
},
|| {
rayon::join(
|| &self.c[65 - n] ^ &self.c[110 - n],
|| &self.a[91 - n] & &self.a[90 - n],
)
},
)
},
|| {
rayon::join(
|| &self.b[82 - n] & &self.b[81 - n],
|| &self.c[109 - n] & &self.c[108 - n],
)
},
);
let ((o, a), (b, c)) = rayon::join(
|| {
rayon::join(
|| &(&temp_a ^ &temp_b) ^ &temp_c,
|| &temp_c ^ &(&c_and ^ &self.a[68 - n]),
)
},
|| {
rayon::join(
|| &temp_a ^ &(&a_and ^ &self.b[77 - n]),
|| &temp_b ^ &(&b_and ^ &self.c[86 - n]),
)
},
);
[o, a, b, c]
}
/// This calls `get_output_and_values` in parallel 64 times, and stores all results in a Vec.
fn get_64_output_and_values(&self) -> Vec<[T; 4]> {
(0..64)
.into_par_iter()
.map(|x| self.get_output_and_values(x))
.rev()
.collect()
}
/// Computes 64 turns of the stream, outputting the 64 bits all at once in a
/// Vec (first value is oldest, last is newest)
pub fn next_64(&mut self) -> Vec<T> {
if let Some(sk) = &self.fhe_key {
rayon::broadcast(|_| set_server_key(sk.clone()));
}
let mut values = self.get_64_output_and_values();
if self.fhe_key.is_some() {
rayon::broadcast(|_| unset_server_key());
}
let mut ret = Vec::<T>::with_capacity(64);
while let Some([o, a, b, c]) = values.pop() {
ret.push(o);
self.a.push(a);
self.b.push(b);
self.c.push(c);
}
ret
}
}

View File

@@ -1,233 +0,0 @@
//! This module implements the Trivium stream cipher, using u8 or FheUint8
//! for the representation of the inner bits.
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
use rayon::prelude::*;
use tfhe::prelude::*;
use tfhe::{set_server_key, unset_server_key, FheUint8, ServerKey};
/// Internal trait specifying which operations are necessary for the TriviumStreamByte generic type
pub trait TriviumByteInput<OpOutput>:
Sized
+ Clone
+ Send
+ Sync
+ StaticByteDequeInput<OpOutput>
+ std::ops::BitXor<Output = OpOutput>
+ std::ops::BitAnd<Output = OpOutput>
+ std::ops::Shr<u8, Output = OpOutput>
+ std::ops::Shl<u8, Output = OpOutput>
+ std::ops::Add<Output = OpOutput>
{
}
impl TriviumByteInput<u8> for u8 {}
impl TriviumByteInput<u8> for &u8 {}
impl TriviumByteInput<FheUint8> for FheUint8 {}
impl TriviumByteInput<FheUint8> for &FheUint8 {}
/// TriviumStreamByte: a struct implementing the Trivium stream cipher, using T for the internal
/// representation of bits (u8 or FheUint8). To be able to compute FHE operations, it also owns
/// an Option for a ServerKey.
/// Since the original Trivium registers' sizes are not a multiple of 8, these registers (which
/// store byte-like objects) have a size equal to one eighth of the closest multiple of 8 above
/// the original sizes.
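/// For example: register a holds 93 bits, stored in 96/8 = 12 bytes; b holds 84 bits in
/// 88/8 = 11 bytes; c holds 111 bits in 112/8 = 14 bytes.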
pub struct TriviumStreamByte<T> {
a_byte: StaticByteDeque<12, T>,
b_byte: StaticByteDeque<11, T>,
c_byte: StaticByteDeque<14, T>,
fhe_key: Option<ServerKey>,
}
impl TriviumStreamByte<u8> {
/// Constructor for `TriviumStreamByte<u8>`: arguments are the secret key and the input vector.
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(key: [u8; 10], iv: [u8; 10]) -> TriviumStreamByte<u8> {
// Initialization of Trivium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_byte_reg = [0u8; 12];
let mut b_byte_reg = [0u8; 11];
let mut c_byte_reg = [0u8; 14];
for i in 0..10 {
a_byte_reg[12 - 10 + i] = key[i];
b_byte_reg[11 - 10 + i] = iv[i];
}
// Magic number 14, aka 00001110: this represents the 3 ones at the beginning of the c
// registers, with additional zeros to make the register's size a multiple of 8.
c_byte_reg[0] = 14;
let mut ret =
TriviumStreamByte::<u8>::new_from_registers(a_byte_reg, b_byte_reg, c_byte_reg, None);
ret.init();
ret
}
}
impl TriviumStreamByte<FheUint8> {
/// Constructor for `TriviumStream<FheUint8>`: arguments are the encrypted secret key and input
/// vector, and the FHE server key.
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(
key: [FheUint8; 10],
iv: [u8; 10],
server_key: &ServerKey,
) -> TriviumStreamByte<FheUint8> {
set_server_key(server_key.clone());
// Initialization of Trivium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_byte_reg = [0u8; 12].map(FheUint8::encrypt_trivial);
let mut b_byte_reg = [0u8; 11].map(FheUint8::encrypt_trivial);
let mut c_byte_reg = [0u8; 14].map(FheUint8::encrypt_trivial);
for i in 0..10 {
a_byte_reg[12 - 10 + i] = key[i].clone();
b_byte_reg[11 - 10 + i] = FheUint8::encrypt_trivial(iv[i]);
}
// Magic number 14, aka 00001110: this represents the 3 ones at the beginning of the c
// registers, with additional zeros to make the register's size a multiple of 8.
c_byte_reg[0] = FheUint8::encrypt_trivial(14u8);
unset_server_key();
let mut ret = TriviumStreamByte::<FheUint8>::new_from_registers(
a_byte_reg,
b_byte_reg,
c_byte_reg,
Some(server_key.clone()),
);
ret.init();
ret
}
}
impl<T> TriviumStreamByte<T>
where
T: TriviumByteInput<T> + Send,
for<'a> &'a T: TriviumByteInput<T>,
{
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
/// server key
fn new_from_registers(
a_register: [T; 12],
b_register: [T; 11],
c_register: [T; 14],
sk: Option<ServerKey>,
) -> Self {
Self {
a_byte: StaticByteDeque::<12, T>::new(a_register),
b_byte: StaticByteDeque::<11, T>::new(b_register),
c_byte: StaticByteDeque::<14, T>::new(c_register),
fhe_key: sk,
}
}
/// The specification of Trivium includes running 1152 (= 18*64) unused steps to mix up the
/// registers, before starting the proper stream
fn init(&mut self) {
for _ in 0..18 {
self.next_64();
}
}
/// Computes 8 potential future steps of Trivium, from b*8 to b*8 + 7 terms in the future. This
/// does not update the registers; instead it returns the output together with the three values
/// that will later be used to update the registers. This function is meant to be called in
/// parallel.
fn get_output_and_values(&self, b: usize) -> [T; 4] {
let n = b * 8 + 7;
assert!(n < 65);
let ((a1, a2, a3, a4, a5), ((b1, b2, b3, b4, b5), (c1, c2, c3, c4, c5))) = rayon::join(
|| Self::get_bytes(&self.a_byte, [91 - n, 90 - n, 68 - n, 65 - n, 92 - n]),
|| {
rayon::join(
|| Self::get_bytes(&self.b_byte, [82 - n, 81 - n, 77 - n, 68 - n, 83 - n]),
|| Self::get_bytes(&self.c_byte, [109 - n, 108 - n, 86 - n, 65 - n, 110 - n]),
)
},
);
let (((temp_a, temp_b), (temp_c, a_and)), (b_and, c_and)) = rayon::join(
|| {
rayon::join(
|| rayon::join(|| a4 ^ a5, || b4 ^ b5),
|| rayon::join(|| c4 ^ c5, || a1 & a2),
)
},
|| rayon::join(|| b1 & b2, || c1 & c2),
);
let (temp_a_2, temp_b_2, temp_c_2) = (temp_a.clone(), temp_b.clone(), temp_c.clone());
let ((o, a), (b, c)) = rayon::join(
|| {
rayon::join(
|| (temp_a_2 ^ temp_b_2) ^ temp_c_2,
|| temp_c ^ ((c_and) ^ a3),
)
},
|| rayon::join(|| temp_a ^ (a_and ^ b3), || temp_b ^ (b_and ^ c3)),
);
[o, a, b, c]
}
/// This calls `get_output_and_values` in parallel 8 times, and stores all results in a Vec.
fn get_64_output_and_values(&self) -> Vec<[T; 4]> {
(0..8)
.into_par_iter()
.map(|i| self.get_output_and_values(i))
.collect()
}
/// Computes 64 turns of the stream, outputting the 64 bits (in 8 bytes) all at once in a
/// Vec (first value is oldest, last is newest)
pub fn next_64(&mut self) -> Vec<T> {
if let Some(sk) = &self.fhe_key {
rayon::broadcast(|_| set_server_key(sk.clone()));
}
let values = self.get_64_output_and_values();
if self.fhe_key.is_some() {
rayon::broadcast(|_| unset_server_key());
}
let mut bytes = Vec::<T>::with_capacity(8);
for [o, a, b, c] in values {
self.a_byte.push(a);
self.b_byte.push(b);
self.c_byte.push(c);
bytes.push(o);
}
bytes
}
/// Reconstructs 5 bytes at the given offsets, in parallel.
fn get_bytes<const N: usize>(
reg: &StaticByteDeque<N, T>,
offsets: [usize; 5],
) -> (T, T, T, T, T) {
let mut ret = offsets
.par_iter()
.rev()
.map(|&i| reg.byte(i))
.collect::<Vec<_>>();
(
ret.pop().unwrap(),
ret.pop().unwrap(),
ret.pop().unwrap(),
ret.pop().unwrap(),
ret.pop().unwrap(),
)
}
}
impl TriviumStreamByte<FheUint8> {
pub fn get_server_key(&self) -> &ServerKey {
self.fhe_key.as_ref().unwrap()
}
}

View File

@@ -1,187 +0,0 @@
use crate::static_deque::StaticDeque;
use rayon::prelude::*;
use tfhe::shortint::prelude::*;
/// TriviumStreamShortint: a struct implementing the Trivium stream cipher, using the shortint
/// Ciphertext type for the internal representation of bits (each ciphertext is intended to hold a
/// single bit). To be able to compute FHE operations, it also owns a shortint ServerKey, a
/// KeySwitchingKey and a high-level ServerKey used for transciphering.
pub struct TriviumStreamShortint {
a: StaticDeque<93, Ciphertext>,
b: StaticDeque<84, Ciphertext>,
c: StaticDeque<111, Ciphertext>,
internal_server_key: ServerKey,
transciphering_casting_key: KeySwitchingKey,
hl_server_key: tfhe::ServerKey,
}
impl TriviumStreamShortint {
/// Constructor for TriviumStreamShortint: arguments are the encrypted secret key, the input
/// vector, the shortint ServerKey, the transciphering casting key and the high-level ServerKey.
/// Outputs a TriviumStreamShortint object already initialized (1152 steps have been run before
/// returning)
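/// A minimal construction sketch (illustrative; `key_bits`, `iv_bits`, `ksk` and the key
/// generation are placeholders mirroring the shortint test, not part of this file):
/// ```ignore
/// let cipher_key = key_bits.map(|b| client_key.encrypt(b));
/// let mut trivium =
///     TriviumStreamShortint::new(cipher_key, iv_bits, server_key, ksk, hl_server_key);
/// let keystream: Vec<Ciphertext> = trivium.next_64();
/// ```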
pub fn new(
key: [Ciphertext; 80],
iv: [u64; 80],
sk: ServerKey,
ksk: KeySwitchingKey,
hl_sk: tfhe::ServerKey,
) -> Self {
// Initialization of Trivium registers: a has the secret key, b the input vector,
// and c a few ones.
let mut a_register: [Ciphertext; 93] = [0; 93].map(|x| sk.create_trivial(x));
let mut b_register: [Ciphertext; 84] = [0; 84].map(|x| sk.create_trivial(x));
let mut c_register: [Ciphertext; 111] = [0; 111].map(|x| sk.create_trivial(x));
for i in 0..80 {
a_register[93 - 80 + i].clone_from(&key[i]);
b_register[84 - 80 + i] = sk.create_trivial(iv[i]);
}
c_register[0] = sk.create_trivial(1);
c_register[1] = sk.create_trivial(1);
c_register[2] = sk.create_trivial(1);
let mut ret = Self {
a: StaticDeque::<93, Ciphertext>::new(a_register),
b: StaticDeque::<84, Ciphertext>::new(b_register),
c: StaticDeque::<111, Ciphertext>::new(c_register),
internal_server_key: sk,
transciphering_casting_key: ksk,
hl_server_key: hl_sk,
};
ret.init();
ret
}
/// The specification of Trivium includes running 1152 (= 18*64) unused steps to mix up the
/// registers, before starting the proper stream
fn init(&mut self) {
for _ in 0..18 {
self.next_64();
}
}
/// Computes one turn of the stream, updating registers and outputting the new bit.
pub fn next_ct(&mut self) -> Ciphertext {
let [o, a, b, c] = self.get_output_and_values(0);
self.a.push(a);
self.b.push(b);
self.c.push(c);
o
}
/// Computes a potential future step of Trivium, n terms in the future. This does not update the
/// registers; instead it returns the output together with the three values that will later be
/// used to update the registers. This function is meant to be called in parallel.
fn get_output_and_values(&self, n: usize) -> [Ciphertext; 4] {
let (a1, a2, a3, a4, a5) = (
&self.a[65 - n],
&self.a[92 - n],
&self.a[91 - n],
&self.a[90 - n],
&self.a[68 - n],
);
let (b1, b2, b3, b4, b5) = (
&self.b[68 - n],
&self.b[83 - n],
&self.b[82 - n],
&self.b[81 - n],
&self.b[77 - n],
);
let (c1, c2, c3, c4, c5) = (
&self.c[65 - n],
&self.c[110 - n],
&self.c[109 - n],
&self.c[108 - n],
&self.c[86 - n],
);
let temp_a = self.internal_server_key.unchecked_add(a1, a2);
let temp_b = self.internal_server_key.unchecked_add(b1, b2);
let temp_c = self.internal_server_key.unchecked_add(c1, c2);
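// With 1-bit messages (as in the shortint tests), a sum followed by message_extract_assign is
// reduced modulo 2, i.e. a XOR, while unchecked_bitand provides the AND. Several unchecked
// additions are accumulated in the carry space before a single message extraction.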
let ((new_a, new_b), (new_c, o)) = rayon::join(
|| {
rayon::join(
|| {
let mut new_a = self.internal_server_key.unchecked_bitand(c3, c4);
self.internal_server_key
.unchecked_add_assign(&mut new_a, a5);
self.internal_server_key
.unchecked_add_assign(&mut new_a, &temp_c);
self.internal_server_key.message_extract_assign(&mut new_a);
new_a
},
|| {
let mut new_b = self.internal_server_key.unchecked_bitand(a3, a4);
self.internal_server_key
.unchecked_add_assign(&mut new_b, b5);
self.internal_server_key
.unchecked_add_assign(&mut new_b, &temp_a);
self.internal_server_key.message_extract_assign(&mut new_b);
new_b
},
)
},
|| {
rayon::join(
|| {
let mut new_c = self.internal_server_key.unchecked_bitand(b3, b4);
self.internal_server_key
.unchecked_add_assign(&mut new_c, c5);
self.internal_server_key
.unchecked_add_assign(&mut new_c, &temp_b);
self.internal_server_key.message_extract_assign(&mut new_c);
new_c
},
|| {
self.internal_server_key.bitxor(
&self.internal_server_key.unchecked_add(&temp_a, &temp_b),
&temp_c,
)
},
)
},
);
[o, new_a, new_b, new_c]
}
/// This calls `get_output_and_values` in parallel 64 times, and stores all results in a Vec.
fn get_64_output_and_values(&self) -> Vec<[Ciphertext; 4]> {
(0..64)
.into_par_iter()
.map(|x| self.get_output_and_values(x))
.rev()
.collect()
}
/// Computes 64 turns of the stream, outputting the 64 bits all at once in a
/// Vec (first value is oldest, last is newest)
pub fn next_64(&mut self) -> Vec<Ciphertext> {
let mut values = self.get_64_output_and_values();
let mut ret = Vec::<Ciphertext>::with_capacity(64);
while let Some([o, a, b, c]) = values.pop() {
ret.push(o);
self.a.push(a);
self.b.push(b);
self.c.push(c);
}
ret
}
pub fn get_internal_server_key(&self) -> &ServerKey {
&self.internal_server_key
}
pub fn get_casting_key(&self) -> &KeySwitchingKey {
&self.transciphering_casting_key
}
pub fn get_hl_server_key(&self) -> &tfhe::ServerKey {
&self.hl_server_key
}
}

View File

@@ -1,16 +0,0 @@
[package]
name = "tfhe-cuda-backend"
version = "0.4.0"
edition = "2021"
authors = ["Zama team"]
license = "BSD-3-Clause-Clear"
description = "Cuda implementation of TFHE-rs primitives."
homepage = "https://www.zama.ai/"
documentation = "https://docs.zama.ai/tfhe-rs"
repository = "https://github.com/zama-ai/tfhe-rs"
readme = "README.md"
keywords = ["fully", "homomorphic", "encryption", "fhe", "cryptography"]
[build-dependencies]
cmake = { version = "0.1" }
pkg-config = { version = "0.3" }

View File

@@ -1,28 +0,0 @@
BSD 3-Clause Clear License
Copyright © 2024 ZAMA.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
3. Neither the name of ZAMA nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
THIS SOFTWARE IS PROVIDED BY THE ZAMA AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
ZAMA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,52 +0,0 @@
# TFHE Cuda backend
## Introduction
The `tfhe-cuda-backend` holds the code for GPU acceleration of Zama's variant of TFHE.
It implements CUDA/C++ functions to perform homomorphic operations on LWE ciphertexts.
It provides functions to allocate memory on the GPU, to copy data back
and forth between the CPU and the GPU, to create and destroy Cuda streams, etc.:
- `cuda_create_stream`, `cuda_destroy_stream`
- `cuda_malloc`, `cuda_check_valid_malloc`
- `cuda_memcpy_async_to_cpu`, `cuda_memcpy_async_to_gpu`
- `cuda_get_number_of_gpus`
- `cuda_synchronize_device`
The cryptographic operations it provides are:
- an amortized implementation of the TFHE programmable bootstrap: `cuda_bootstrap_amortized_lwe_ciphertext_vector_32` and `cuda_bootstrap_amortized_lwe_ciphertext_vector_64`
- a low latency implementation of the TFHE programmable bootstrap: `cuda_bootstrap_low_latency_lwe_ciphertext_vector_32` and `cuda_bootstrap_low_latency_lwe_ciphertext_vector_64`
- the keyswitch: `cuda_keyswitch_lwe_ciphertext_vector_32` and `cuda_keyswitch_lwe_ciphertext_vector_64`
- the larger precision programmable bootstrap (wop PBS, which supports up to 16 bits of message while the classical PBS only supports up to 8 bits of message) and its sub-components: `cuda_wop_pbs_64`, `cuda_extract_bits_64`, `cuda_circuit_bootstrap_64`, `cuda_cmux_tree_64`, `cuda_blind_rotation_sample_extraction_64`
- acceleration for leveled operations: `cuda_negate_lwe_ciphertext_vector_64`, `cuda_add_lwe_ciphertext_vector_64`, `cuda_add_lwe_ciphertext_vector_plaintext_vector_64`, `cuda_mult_lwe_ciphertext_vector_cleartext_vector`.
## Dependencies
**Disclaimer**: Compilation on Windows/Mac is not supported yet. Only Nvidia GPUs are supported.
- nvidia driver - for example, if you're running Ubuntu 20.04 check this [page](https://linuxconfig.org/how-to-install-the-nvidia-drivers-on-ubuntu-20-04-focal-fossa-linux) for installation
- [nvcc](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html) >= 10.0
- [gcc](https://gcc.gnu.org/) >= 8.0 - check this [page](https://gist.github.com/ax3l/9489132) for more details about nvcc/gcc compatible versions
- [cmake](https://cmake.org/) >= 3.24
## Build
The Cuda project held in `tfhe-cuda-backend` can be compiled independently from TFHE-rs in the following way:
```
git clone git@github.com:zama-ai/tfhe-rs
cd tfhe-rs/backends/tfhe-cuda-backend/cuda
mkdir build
cd build
cmake ..
make
```
The compute capability is detected automatically (with the first GPU information) and set accordingly.
If your machine does not have an available Nvidia GPU, the compilation will work if you have the nvcc compiler installed. The generated executable will target a 7.0 compute capability (sm_70).
## Links
- [TFHE](https://eprint.iacr.org/2018/421.pdf)
## License
This software is distributed under the BSD-3-Clause-Clear license. If you have any questions,
please contact us at `hello@zama.ai`.

View File

@@ -1,59 +0,0 @@
use std::env;
use std::process::Command;
fn main() {
if let Ok(val) = env::var("DOCS_RS") {
if val.parse::<u32>() == Ok(1) {
return;
}
}
// This is a workaround for a build issue with the current nightly toolchain (2024-06-27, an
// issue which started with toolchain 2024-05-05).
// Essentially, if cbindgen is running, a wrong argument ends up forwarded to the cuda backend
// "make" command during macro expansions for the TFHE-rs C API, crashing make for make < 4.4 and
// thus crashing the build.
// On the other hand, this also speeds up the C API build greatly: since there are no macro
// expansions in the CUDA backend, it skips the second compilation of TFHE-rs that cbindgen
// performs for macro inspection.
if std::env::var("_CBINDGEN_IS_RUNNING").is_ok() {
return;
}
println!("Build tfhe-cuda-backend");
println!("cargo::rerun-if-changed=cuda/include");
println!("cargo::rerun-if-changed=cuda/src");
println!("cargo::rerun-if-changed=cuda/tests_and_benchmarks");
println!("cargo::rerun-if-changed=cuda/CMakeLists.txt");
println!("cargo::rerun-if-changed=src");
if env::consts::OS == "linux" {
let output = Command::new("./get_os_name.sh").output().unwrap();
let distribution = String::from_utf8(output.stdout).unwrap();
if distribution != "Ubuntu\n" {
println!(
"cargo:warning=This Linux distribution is not officially supported. \
Only Ubuntu is supported by tfhe-cuda-backend at this time. Build may fail\n"
);
}
let dest = cmake::build("cuda");
println!("cargo:rustc-link-search=native={}", dest.display());
println!("cargo:rustc-link-lib=static=tfhe_cuda_backend");
// Try to find the cuda libs with pkg-config, default to the path used by the nvidia runfile
if pkg_config::Config::new()
.atleast_version("10")
.probe("cuda")
.is_err()
{
println!("cargo:rustc-link-search=native=/usr/local/cuda/lib64");
}
println!("cargo:rustc-link-lib=gomp");
println!("cargo:rustc-link-lib=cudart");
println!("cargo:rustc-link-search=native=/usr/lib/x86_64-linux-gnu/");
println!("cargo:rustc-link-lib=stdc++");
} else {
panic!(
"Error: platform not supported, tfhe-cuda-backend not built (only Linux is supported)"
);
}
}

View File

@@ -1,10 +0,0 @@
# -----------------------------
# Options affecting formatting.
# -----------------------------
with section("format"):
# How wide to allow formatted cmake files
line_width = 120
# How many spaces to tab for indent
tab_size = 2

View File

@@ -1,2 +0,0 @@
/build/
include/cuda_config.h

View File

@@ -1,108 +0,0 @@
cmake_minimum_required(VERSION 3.24 FATAL_ERROR)
project(tfhe_cuda_backend LANGUAGES CXX)
# See if the minimum CUDA version is available. If not, fail the configuration.
set(MINIMUM_SUPPORTED_CUDA_VERSION 10.0)
include(CheckLanguage)
# See if CUDA is available
check_language(CUDA)
# If so, enable CUDA to check the version.
if(CMAKE_CUDA_COMPILER)
enable_language(CUDA)
endif()
# If CUDA is not available, or the minimum version is too low, do not build
if(NOT CMAKE_CUDA_COMPILER)
message(FATAL_ERROR "Cuda compiler not found.")
endif()
if(CMAKE_CUDA_COMPILER_VERSION VERSION_LESS ${MINIMUM_SUPPORTED_CUDA_VERSION})
message(FATAL_ERROR "CUDA ${MINIMUM_SUPPORTED_CUDA_VERSION} or greater is required for compilation.")
endif()
# Get CUDA compute capability
set(OUTPUTFILE ${CMAKE_CURRENT_SOURCE_DIR}/cuda_script) # No suffix required
set(CUDAFILE ${CMAKE_CURRENT_SOURCE_DIR}/check_cuda.cu)
execute_process(COMMAND nvcc -lcuda ${CUDAFILE} -o ${OUTPUTFILE})
execute_process(
COMMAND ${OUTPUTFILE}
RESULT_VARIABLE CUDA_RETURN_CODE
OUTPUT_VARIABLE ARCH)
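# On success, ARCH holds the "-arch=sm_XY" string printed by check_cuda.cu; it is parsed below
# into the CUDA_ARCH compile definition (and CMAKE_CUDA_ARCHITECTURES is set to native).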
file(REMOVE ${OUTPUTFILE})
if(${CUDA_RETURN_CODE} EQUAL 0)
set(CUDA_SUCCESS "TRUE")
else()
set(CUDA_SUCCESS "FALSE")
endif()
if(${CUDA_SUCCESS})
message(STATUS "CUDA Architecture: ${ARCH}")
message(STATUS "CUDA Version: ${CUDA_VERSION_STRING}")
message(STATUS "CUDA Path: ${CUDA_TOOLKIT_ROOT_DIR}")
message(STATUS "CUDA Libraries: ${CUDA_LIBRARIES}")
message(STATUS "CUDA Performance Primitives: ${CUDA_npp_LIBRARY}")
else()
message(WARNING ${ARCH})
endif()
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
# Add OpenMP support
find_package(OpenMP REQUIRED)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler ${OpenMP_CXX_FLAGS}")
if(${CUDA_SUCCESS})
set(CMAKE_CUDA_ARCHITECTURES native)
string(REPLACE "-arch=sm_" "" CUDA_ARCH "${ARCH}")
set(CUDA_ARCH "${CUDA_ARCH}0")
else()
set(CMAKE_CUDA_ARCHITECTURES 70)
set(CUDA_ARCH "700")
endif()
add_compile_definitions(CUDA_ARCH=${CUDA_ARCH})
# Check if the DEBUG flag is defined
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
# Debug mode
message("Compiling in Debug mode")
add_definitions(-DDEBUG)
set(OPTIMIZATION_FLAGS "${OPTIMIZATION_FLAGS} -O0 -G -g")
else()
# Release mode
message("Compiling in Release mode")
set(OPTIMIZATION_FLAGS "${OPTIMIZATION_FLAGS} -O3")
endif()
# in production, should use -arch=sm_70 --ptxas-options=-v to see register spills -lineinfo for better debugging
set(CMAKE_CUDA_FLAGS
"${CMAKE_CUDA_FLAGS} -ccbin ${CMAKE_CXX_COMPILER} ${OPTIMIZATION_FLAGS}\
-std=c++17 --no-exceptions --expt-relaxed-constexpr -rdc=true \
--use_fast_math -Xcompiler -fPIC")
set(INCLUDE_DIR include)
add_subdirectory(src)
enable_testing()
add_subdirectory(tests_and_benchmarks)
target_include_directories(tfhe_cuda_backend PRIVATE ${INCLUDE_DIR})
# This is required for rust cargo build
install(TARGETS tfhe_cuda_backend DESTINATION .)
install(TARGETS tfhe_cuda_backend DESTINATION lib)
# Define a function to add a lint target.
find_file(CPPLINT NAMES cpplint cpplint.exe)
if(CPPLINT)
# Add a custom target to lint all child projects. Dependencies are specified in child projects.
add_custom_target(all_lint)
# Don't trigger this target on ALL_BUILD or Visual Studio 'Rebuild Solution'
set_target_properties(all_lint PROPERTIES EXCLUDE_FROM_ALL TRUE)
# set_target_properties(all_lint PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD TRUE)
endif()

View File

@@ -1,3 +0,0 @@
set noparent
linelength=240
filter=-legal/copyright,-readability/todo,-runtime/references,-build/c++17

View File

@@ -1,22 +0,0 @@
#include <stdio.h>
int main(int argc, char **argv) {
cudaDeviceProp dP;
float min_cc = 3.0;
int rc = cudaGetDeviceProperties(&dP, 0);
if (rc != cudaSuccess) {
cudaError_t error = cudaGetLastError();
printf("CUDA error: %s", cudaGetErrorString(error));
return rc; /* Failure */
}
/* Use floating-point division so the minor revision is not truncated to 0. */
if ((dP.major + (dP.minor / 10.0f)) < min_cc) {
printf("Min Compute Capability of %2.1f required: %d.%d found\n Not "
"Building CUDA Code",
min_cc, dP.major, dP.minor);
return 1; /* Failure */
} else {
printf("-arch=sm_%d%d", dP.major, dP.minor);
return 0; /* Success */
}
}

Some files were not shown because too many files have changed in this diff.