Mirror of https://github.com/zama-ai/tfhe-rs.git
Synced 2026-01-11 15:48:20 -05:00
Compare commits
249 Commits
| SHA1 |
|---|
| cfb1d1340e |
| 95593b1ea9 |
| 231d0c5e50 |
| 1d0a5c96a4 |
| b0b49ae533 |
| 70773e442c |
| 7b797b8af9 |
| b6efb109aa |
| fd6323b311 |
| b02a3b16ff |
| a95ee140f5 |
| 62780ac500 |
| c10f1def70 |
| 31a0136655 |
| 859d5e4e1f |
| 92dcd38e30 |
| 777bbe437a |
| 3842032f08 |
| 23246f63f7 |
| 11c79b5237 |
| a694e08ddc |
| e12638dabe |
| 79f1d22573 |
| f9c89212ea |
| b918f77859 |
| 054c5028a1 |
| 7b621e57b0 |
| b4b6275ca5 |
| 42644349ef |
| 20b7b06ffb |
| 39fbc20360 |
| c3ae852aa2 |
| 4a89792579 |
| 205b767fc1 |
| 39862c2861 |
| eed5a6c5ba |
| 0dd0ead4e2 |
| 5d5e9d47e9 |
| 45b7491726 |
| f84a4275ef |
| 34ffbadc72 |
| 4322214d8f |
| cf20d73a5f |
| c30835fc30 |
| 70b0c0ff19 |
| 206553e9ee |
| f78bea23be |
| 106b46be7c |
| 2cdc804670 |
| 23d7e0d844 |
| 0e1082f465 |
| c22e63895e |
| 375a4f80ae |
| 21b6863c5d |
| 20a91337c1 |
| a8520a2e22 |
| c8db338376 |
| e849394ea7 |
| 126e779533 |
| 353237c0d6 |
| 7bad509f9a |
| c99bc6d97f |
| a84cf4ed21 |
| ab40df4b7f |
| 3b9eb360c1 |
| 498b0e6e5c |
| a9d0b9a3fb |
| cf3f25efdd |
| c3ed1a7558 |
| 6347f25668 |
| 58ae2f5359 |
| 9ea5c04be6 |
| 79fdb33632 |
| 91b263d480 |
| 41a41278e6 |
| 30938eec74 |
| 516789bd5d |
| 027792d659 |
| 1ed9d6a85e |
| 126138a59d |
| 241685fccc |
| e739f43ec5 |
| 3073d60f11 |
| a05d228899 |
| 63055d5ca8 |
| 46a3008739 |
| f2674da031 |
| 12c2a2a8b7 |
| b61dd21ef7 |
| ca4159f123 |
| ab25919187 |
| 1b38f8ccfc |
| 6a676551d8 |
| afb79a0b1c |
| 0277403c45 |
| 18159d6458 |
| 728409aef8 |
| 034f3b3c25 |
| c30e9c39f6 |
| 1513c3bc8c |
| e07f07c4c8 |
| 81cc0c31b4 |
| c95e38e26f |
| f0f3dd76eb |
| 0604d237eb |
| e523fd2cb6 |
| 33dee7673c |
| 9b5596ca66 |
| aefec1fe64 |
| f9e876730a |
| f3cddb5635 |
| a395cfe9bf |
| 602c6faf8a |
| 563502a6a6 |
| 5f30569452 |
| 39b81a8ded |
| da223b36b6 |
| db16276715 |
| a59742f518 |
| 2bf595d0e2 |
| fb2b1a13e7 |
| 9fdaa983e3 |
| 73de886c07 |
| 45a849ad36 |
| ef5b984448 |
| 6abed1f228 |
| 71b45c14da |
| 4f5d711c4e |
| 2602c9e1b3 |
| 06dffc60bd |
| 2a82076121 |
| 7549474aac |
| 4fcff55745 |
| 3d345a648b |
| 3975e0115b |
| 6494a82fb3 |
| 8aa60f8882 |
| 15cab8b413 |
| 23d46ba2bc |
| daf0e79e4a |
| c5ad73865c |
| 9aab79e23a |
| 6ca48132e1 |
| f53c75636d |
| ce63cabc05 |
| 4fec2e17ae |
| e87c36beb4 |
| 24c6ffc24a |
| 3680f796af |
| 3ded3fe7c9 |
| 451cfe3aba |
| 1e28cf7f3b |
| 91b62c737f |
| da12bb29d8 |
| d60028c47c |
| d5b5369a9a |
| 9457ca786c |
| 8b5d7321fb |
| 736185bb31 |
| e4b230aaf1 |
| 7ed827808c |
| 6e7aaac90f |
| d1c190fac6 |
| 7e1c8f7db5 |
| d30c2060bf |
| 4ccd5ea262 |
| 1ab3022df8 |
| a257849c66 |
| 0f4f8dd755 |
| aaaa929c2e |
| d397ea3a39 |
| 3e25536021 |
| 1c19851491 |
| 4b0623da4a |
| d415d47894 |
| e22f9c09e3 |
| 4d02d3abb4 |
| ae6f96e0ec |
| 70e1828c58 |
| 1b1e6a7068 |
| fc447fd2d0 |
| d5e5902f61 |
| 9f54777ee1 |
| 4a73b7bb4b |
| 022cb3b18a |
| c4feabbfa3 |
| 3c6ed37a18 |
| fe6e81ff78 |
| 87c0d646a4 |
| e5b39a6d4d |
| 27e2fbd972 |
| f54fbf52ce |
| 2a0dfa5b17 |
| a4841036b7 |
| 1dcc3c8c89 |
| 1a2643d1da |
| bc257904e3 |
| 8982844a5b |
| e80d2548af |
| c0ab0a5752 |
| f7bfe2f10c |
| 29c390d92c |
| becd08db71 |
| ffd7470ef1 |
| a3750504c4 |
| 378c5ccb73 |
| 4ba1787e12 |
| 366d359441 |
| 0ece9e684a |
| f8684d1f67 |
| b4066df77f |
| 6b94872a00 |
| d88caff6dd |
| 75a265f93b |
| bfbf638fed |
| 01651d6fb2 |
| b2624d1a76 |
| 9fb7b56629 |
| 24feeb8609 |
| 757c2fc828 |
| 4ff0d6cac2 |
| 1530f52c79 |
| 9918dacd6a |
| 2b503acf18 |
| 57cc326a64 |
| 84eb8aeb63 |
| f09acfa581 |
| 8335a6b6b5 |
| f80fd157ae |
| 0ed97cfba8 |
| daee3f1850 |
| e8dc403ebd |
| 63e5504c80 |
| d664e4ada6 |
| c78cc2d2e9 |
| b566d78621 |
| 7da6786d59 |
| 6edf6b9e26 |
| 6fde90ad9c |
| 5d70ae4232 |
| 89b36ebca0 |
| bfc97385f4 |
| 7ab763abba |
| a05db18ba3 |
| a3168eb1b5 |
| 7fccb851d7 |
| a78d5cc57b |
| 9c0d078e1a |
| 6016755f9d |
.cargo/audit.toml (new file, 12 changes)
@@ -0,0 +1,12 @@
+[advisories]
+ignore = [
+    # Ignoring unmaintained 'paste' advisory as it is a widely used, low-risk build dependency.
+    "RUSTSEC-2024-0436",
+]
+
+[output]
+# Deny advisories that are warnings by default.
+# At the moment this works if we allow paste, we might want to disable this in the future if it
+# becomes too tedious
+deny = ["warnings"]
+quiet = false
.github/actionlint.yaml (vendored, 2 changes)
@@ -7,6 +7,8 @@ self-hosted-runner:
   - large_ubuntu_16
   - large_ubuntu_16-22.04
   - v80-desktop
+  - v80-marais
+  - v80-couperin
 # Configuration variables in array of strings defined in your repository or
 # organization. `null` means disabling configuration variables check.
 # Empty array means no configuration variable is allowed.
.github/workflows/approve_label.yml (vendored, 3 changes)
@@ -1,5 +1,5 @@
 # Add labels in pull request
-name: PR label manager
+name: approve_label

 on:
   pull_request:
@@ -11,6 +11,7 @@ permissions: {}

 jobs:
   trigger-tests:
+    name: approve_label/trigger-tests
     runs-on: ubuntu-latest
     permissions:
       pull-requests: write
.github/workflows/aws_tfhe_backward_compat_tests.yml (vendored)
@@ -1,5 +1,5 @@
 # Run backward compatibility tests
-name: Backward compatibility Tests on CPU
+name: aws_tfhe_backward_compat_tests

 env:
   CARGO_TERM_COLOR: always
@@ -22,13 +22,16 @@ on:
   # Allows you to run this workflow manually from the Actions tab as an alternative.
   workflow_dispatch:
   pull_request:
+  push:
+    branches:
+      - main

 permissions:
   contents: read

 jobs:
   setup-instance:
-    name: Setup instance (backward-compat-tests)
+    name: aws_tfhe_backward_compat_tests/setup-instance
     runs-on: ubuntu-latest
     outputs:
       runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
@@ -53,24 +56,19 @@ jobs:
           echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

   backward-compat-tests:
-    name: Backward compatibility tests
+    name: aws_tfhe_backward_compat_tests/backward-compat-tests (bpr)
     needs: [ setup-instance ]
     concurrency:
-      group: ${{ github.workflow_ref }}
-      cancel-in-progress: true
+      group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
+      cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     steps:
       - name: Checkout tfhe-rs
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
         with:
-          persist-credentials: 'false'
+          persist-credentials: 'true' # Needed to pull lfs data
           token: ${{ env.CHECKOUT_TOKEN }}

-      - name: Install latest stable
-        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
-        with:
-          toolchain: stable
-
       # Cache key is an aggregated hash of lfs files hashes
       - name: Get LFS data sha
         id: hash-lfs-data
@@ -80,7 +78,7 @@ jobs:

       - name: Retrieve data from cache
         id: retrieve-data-cache
-        uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+        uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
         with:
           path: |
             utils/tfhe-backward-compat-data/**/*.cbor
@@ -92,6 +90,16 @@ jobs:
         run: |
           make pull_backward_compat_data

+      # Pull token was stored by action/checkout to be used by lfs, we don't need it anymore
+      - name: Remove git credentials
+        run: |
+          git config --local --unset-all http.https://github.com/.extraheader
+
+      - name: Install latest stable
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
+        with:
+          toolchain: stable
+
       - name: Run backward compatibility tests
         run: |
           make test_backward_compatibility_ci
@@ -99,7 +107,7 @@ jobs:
       - name: Store data in cache
         if: steps.retrieve-data-cache.outputs.cache-hit != 'true'
         continue-on-error: true
-        uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+        uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
         with:
           path: |
             utils/tfhe-backward-compat-data/**/*.cbor
@@ -123,7 +131,7 @@ jobs:
           SLACK_MESSAGE: "Backward compatibility tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

   teardown-instance:
-    name: Teardown instance (backward-compat-tests)
+    name: aws_tfhe_backward_compat_tests/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, backward-compat-tests ]
     runs-on: ubuntu-latest
.github/workflows/aws_tfhe_fast_tests.yml (vendored, 13 changes)
@@ -1,5 +1,5 @@
 # Run a small subset of tests to ensure quick feedback.
-name: Fast AWS Tests on CPU
+name: aws_tfhe_fast_tests

 env:
   CARGO_TERM_COLOR: always
@@ -29,6 +29,7 @@ permissions:

 jobs:
   should-run:
+    name: aws_tfhe_fast_tests/should-run
     runs-on: ubuntu-latest
     permissions:
       pull-requests: read
@@ -68,7 +69,7 @@ jobs:

       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files_yaml: |
             dependencies:
@@ -132,7 +133,7 @@ jobs:
           echo "any_changed=true" >> "$GITHUB_OUTPUT"

   setup-instance:
-    name: Setup instance (fast-tests)
+    name: aws_tfhe_fast_tests/setup-instance
     if: github.event_name == 'workflow_dispatch' ||
       (github.event_name != 'workflow_dispatch' && needs.should-run.outputs.any_file_changed == 'true')
     needs: should-run
@@ -216,7 +217,7 @@ jobs:

       - name: Node cache restoration
         id: node-cache
-        uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+        uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
         with:
           path: |
             ~/.nvm
@@ -229,7 +230,7 @@ jobs:
           make install_node

       - name: Node cache save
-        uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+        uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
         if: steps.node-cache.outputs.cache-hit != 'true'
         with:
           path: |
@@ -288,7 +289,7 @@ jobs:
           SLACK_MESSAGE: "Fast AWS tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

   teardown-instance:
-    name: Teardown instance (fast-tests)
+    name: aws_tfhe_fast_tests/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, fast-tests ]
     runs-on: ubuntu-latest
.github/workflows/aws_tfhe_integer_tests.yml (vendored, 12 changes)
@@ -1,4 +1,4 @@
-name: AWS Unsigned Integer Tests on CPU
+name: aws_tfhe_integer_tests

 env:
   CARGO_TERM_COLOR: always
@@ -35,6 +35,7 @@ permissions:

 jobs:
   should-run:
+    name: aws_tfhe_integer_tests/should-run
     if:
       (github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') ||
       (github.event_name == 'pull_request' && contains(github.event.label.name, 'approved')) ||
@@ -55,7 +56,7 @@ jobs:

       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files_yaml: |
             integer:
@@ -69,7 +70,7 @@ jobs:
             - .github/workflows/aws_tfhe_integer_tests.yml

   setup-instance:
-    name: Setup instance (unsigned-integer-tests)
+    name: aws_tfhe_integer_tests/setup-instance
     needs: should-run
     if:
       (github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.integer_test == 'true') ||
@@ -100,12 +101,13 @@ jobs:
           echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

   unsigned-integer-tests:
-    name: Unsigned integer tests
+    name: aws_tfhe_integer_tests/unsigned-integer-tests
     needs: setup-instance
     concurrency:
       group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
       cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
+    timeout-minutes: 480 # 8 hours
     steps:
       - name: Checkout tfhe-rs
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
@@ -156,7 +158,7 @@ jobs:
           SLACK_MESSAGE: "Unsigned Integer tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

   teardown-instance:
-    name: Teardown instance (unsigned-integer-tests)
+    name: aws_tfhe_integer_tests/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [setup-instance, unsigned-integer-tests]
     runs-on: ubuntu-latest
.github/workflows/aws_tfhe_noise_checks.yml (vendored, 8 changes)
@@ -1,4 +1,4 @@
-name: Run noise checks on CPU
+name: aws_tfhe_noise_checks

 env:
   CARGO_TERM_COLOR: always
@@ -25,7 +25,7 @@ permissions:

 jobs:
   setup-instance:
-    name: Setup instance (noise-checks)
+    name: aws_tfhe_noise_checks/setup-instance
     runs-on: ubuntu-latest
     outputs:
       runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
@@ -52,7 +52,7 @@ jobs:
           exit 1

   noise-checks:
-    name: CPU noise checks
+    name: aws_tfhe_noise_checks/noise-checks
     needs: setup-instance
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     timeout-minutes: 1440
@@ -90,7 +90,7 @@ jobs:
           SLACK_MESSAGE: "Noise checks tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

   teardown-instance:
-    name: Teardown instance (noise-checks)
+    name: aws_tfhe_noise_checks/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, noise-checks ]
     runs-on: ubuntu-latest
.github/workflows/aws_tfhe_signed_integer_tests.yml (vendored)
@@ -1,4 +1,4 @@
-name: AWS Signed Integer Tests on CPU
+name: aws_tfhe_signed_integer_tests

 env:
   CARGO_TERM_COLOR: always
@@ -35,6 +35,7 @@ permissions:

 jobs:
   should-run:
+    name: aws_tfhe_signed_integer_tests/should-run
     if:
       (github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') ||
       (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
@@ -56,7 +57,7 @@ jobs:

       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files_yaml: |
             integer:
@@ -70,7 +71,7 @@ jobs:
             - .github/workflows/aws_tfhe_signed_integer_tests.yml

   setup-instance:
-    name: Setup instance (unsigned-integer-tests)
+    name: aws_tfhe_signed_integer_tests/setup-instance
     needs: should-run
     if:
       (github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.integer_test == 'true') ||
@@ -101,7 +102,7 @@ jobs:
           echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

   signed-integer-tests:
-    name: Signed integer tests
+    name: aws_tfhe_signed_integer_tests/signed-integer-tests
     needs: setup-instance
     concurrency:
       group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
@@ -161,7 +162,7 @@ jobs:
           SLACK_MESSAGE: "Signed Integer tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

   teardown-instance:
-    name: Teardown instance (signed-integer-tests)
+    name: aws_tfhe_signed_integer_tests/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [setup-instance, signed-integer-tests]
     runs-on: ubuntu-latest
.github/workflows/aws_tfhe_tests.yml (vendored, 11 changes)
@@ -1,4 +1,4 @@
-name: AWS Tests on CPU
+name: aws_tfhe_tests

 env:
   CARGO_TERM_COLOR: always
@@ -32,6 +32,7 @@ permissions:

 jobs:
   should-run:
+    name: aws_tfhe_tests/should-run
     runs-on: ubuntu-latest
     if: github.event_name != 'schedule' ||
       (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -77,7 +78,7 @@ jobs:

       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
         with:
           files_yaml: |
             dependencies:
@@ -141,7 +142,7 @@ jobs:
           echo "any_changed=true" >> "$GITHUB_OUTPUT"

   setup-instance:
-    name: Setup instance (cpu-tests)
+    name: aws_tfhe_tests/setup-instance
     if: github.event_name != 'pull_request' ||
       (github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.any_file_changed == 'true')
     needs: should-run
@@ -169,7 +170,7 @@ jobs:
           echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

   cpu-tests:
-    name: CPU tests
+    name: aws_tfhe_tests/cpu-tests
     if: github.event_name != 'pull_request' ||
       (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
     needs: [ should-run, setup-instance ]
@@ -268,7 +269,7 @@ jobs:
           SLACK_MESSAGE: "CPU tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

   teardown-instance:
-    name: Teardown instance (cpu-tests)
+    name: aws_tfhe_tests/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, cpu-tests ]
     runs-on: ubuntu-latest
.github/workflows/aws_tfhe_wasm_tests.yml (vendored, 12 changes)
@@ -1,4 +1,4 @@
-name: AWS WASM Tests on CPU
+name: aws_tfhe_wasm_tests

 env:
   CARGO_TERM_COLOR: always
@@ -28,7 +28,7 @@ permissions:

 jobs:
   setup-instance:
-    name: Setup instance (wasm-tests)
+    name: aws_tfhe_wasm_tests/setup-instance
     if: ${{ github.event_name == 'workflow_dispatch' || contains(github.event.label.name, 'approved') }}
     runs-on: ubuntu-latest
     outputs:
@@ -54,7 +54,7 @@ jobs:
           echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

   wasm-tests:
-    name: WASM tests
+    name: aws_tfhe_wasm_tests/wasm-tests
     needs: setup-instance
     concurrency:
       group: ${{ github.workflow_ref }}
@@ -78,7 +78,7 @@ jobs:

       - name: Node cache restoration
         id: node-cache
-        uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+        uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
         with:
           path: |
             ~/.nvm
@@ -91,7 +91,7 @@ jobs:
           make install_node

       - name: Node cache save
-        uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+        uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
         if: steps.node-cache.outputs.cache-hit != 'true'
         with:
           path: |
@@ -137,7 +137,7 @@ jobs:
           SLACK_MESSAGE: "WASM tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

   teardown-instance:
-    name: Teardown instance (wasm-tests)
+    name: aws_tfhe_wasm_tests/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, wasm-tests ]
     runs-on: ubuntu-latest
.github/workflows/benchmark_boolean.yml (vendored, 10 changes)
@@ -1,5 +1,5 @@
 # Run boolean benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: Boolean benchmarks
+name: benchmark_boolean

 on:
   workflow_dispatch:
@@ -23,7 +23,7 @@ permissions: {}

 jobs:
   setup-instance:
-    name: Setup instance (boolean-benchmarks)
+    name: benchmark_boolean/setup-instance
     runs-on: ubuntu-latest
     if: github.event_name != 'schedule' ||
       (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -42,7 +42,7 @@ jobs:
       profile: bench

   boolean-benchmarks:
-    name: Execute boolean benchmarks in EC2
+    name: benchmark_boolean/boolean-benchmarks
     needs: setup-instance
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     concurrency:
@@ -101,7 +101,7 @@ jobs:
           --append-results

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
           name: ${{ github.sha }}_boolean
           path: ${{ env.RESULTS_FILENAME }}
@@ -132,7 +132,7 @@ jobs:
           SLACK_MESSAGE: "Boolean benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

   teardown-instance:
-    name: Teardown instance (boolean-benchmarks)
+    name: benchmark_boolean/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, boolean-benchmarks ]
     runs-on: ubuntu-latest
.github/workflows/benchmark_core_crypto.yml (vendored, 99 changes)
@@ -1,8 +1,29 @@
 # Run core crypto benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: Core crypto benchmarks
+name: benchmark_core_crypto

 on:
   workflow_dispatch:
+    inputs:
+      param_type:
+        description: "Parameters type"
+        type: choice
+        default: classical
+        options:
+          - classical
+          - multi_bit
+          - classical + multi_bit
+          - classical_documentation
+          - multi_bit_documentation
+          - classical_documentation + multi_bit_documentation
+      bench_type:
+        description: "Benchmarks type"
+        type: choice
+        default: latency
+        options:
+          - latency
+          - throughput
+          - both
+
   schedule:
     # Weekly benchmarks will be triggered each Saturday at 5a.m.
     - cron: '0 5 * * 6'
@@ -22,8 +43,63 @@ env:
 permissions: {}

 jobs:
+  prepare-matrix:
+    name: benchmark_core_crypto/prepare-matrix
+    runs-on: ubuntu-latest
+    if: github.event_name != 'schedule' ||
+      (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
+    outputs:
+      param_type: ${{ steps.set_param_type.outputs.param_type }}
+      bench_type: ${{ steps.set_bench_type.outputs.bench_type }}
+    steps:
+      - name: Set parameters types
+        if: github.event_name == 'workflow_dispatch'
+        run: |
+          if [[ "${INPUTS_PARAM_TYPE}" == "classical + multi_bit" ]]; then
+            echo "PARAM_TYPE=[\"classical\", \"multi_bit\"]" >> "${GITHUB_ENV}"
+          elif [[ "${INPUTS_PARAM_TYPE}" == "classical_documentation + multi_bit_documentation" ]]; then
+            echo "PARAM_TYPE=[\"classical_documentation\", \"multi_bit_documentation\"]" >> "${GITHUB_ENV}"
+          else
+            echo "PARAM_TYPE=[\"${INPUTS_PARAM_TYPE}\"]" >> "${GITHUB_ENV}"
+          fi
+        env:
+          INPUTS_PARAM_TYPE: ${{ inputs.param_type }}
+
+      - name: Default parameters type
+        if: github.event_name != 'workflow_dispatch'
+        run: |
+          echo "PARAM_TYPE=[\"classical\"]" >> "${GITHUB_ENV}"
+
+      - name: Set benchmark types
+        if: github.event_name == 'workflow_dispatch'
+        run: |
+          echo "OP_FLAVOR=[\"default\"]" >> "${GITHUB_ENV}"
+          if [[ "${INPUTS_BENCH_TYPE}" == "both" ]]; then
+            echo "BENCH_TYPE=[\"latency\", \"throughput\"]" >> "${GITHUB_ENV}"
+          else
+            echo "BENCH_TYPE=[\"${INPUTS_BENCH_TYPE}\"]" >> "${GITHUB_ENV}"
+          fi
+        env:
+          INPUTS_BENCH_TYPE: ${{ inputs.bench_type }}
+
+      - name: Default benchmark type
+        if: github.event_name != 'workflow_dispatch'
+        run: |
+          echo "BENCH_TYPE=[\"latency\"]" >> "${GITHUB_ENV}"
+
+      - name: Set parameters types output
+        id: set_param_type
+        run: | # zizmor: ignore[template-injection] this env variable is safe
+          echo "param_type=${{ toJSON(env.PARAM_TYPE) }}" >> "${GITHUB_OUTPUT}"
+
+      - name: Set benchmark types output
+        id: set_bench_type
+        run: | # zizmor: ignore[template-injection] this env variable is safe
+          echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
+
   setup-instance:
-    name: Setup instance (core-crypto-benchmarks)
+    name: benchmark_core_crypto/setup-instance
+    needs: prepare-matrix
     runs-on: ubuntu-latest
     if: github.event_name != 'schedule' ||
       (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -42,12 +118,18 @@ jobs:
       profile: bench

   core-crypto-benchmarks:
-    name: Execute core crypto benchmarks in EC2
-    needs: setup-instance
+    name: benchmark_core_crypto/core-crypto-benchmarks
+    needs: [ prepare-matrix, setup-instance ]
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     concurrency:
       group: ${{ github.workflow_ref }}
       cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
     timeout-minutes: 1440 # 24 hours
+    strategy:
+      max-parallel: 1
+      matrix:
+        param_type: ${{ fromJSON(needs.prepare-matrix.outputs.param_type) }}
+        bench_type: ${{ fromJSON(needs.prepare-matrix.outputs.bench_type) }}
     steps:
       - name: Checkout tfhe-rs repo with tags
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
@@ -78,6 +160,9 @@ jobs:
           make bench_pbs
           make bench_pbs128
           make bench_ks
+        env:
+          BENCH_PARAM_TYPE: ${{ matrix.param_type }}
+          BENCH_TYPE: ${{ matrix.bench_type }}

       - name: Parse results
         run: |
@@ -94,9 +179,9 @@ jobs:
           REF_NAME: ${{ github.ref_name }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
-          name: ${{ github.sha }}_core_crypto
+          name: ${{ github.sha }}_core_crypto_${{ matrix.param_type }}_pbs
           path: ${{ env.RESULTS_FILENAME }}

       - name: Checkout Slab repo
@@ -125,7 +210,7 @@ jobs:
           SLACK_MESSAGE: "PBS benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

   teardown-instance:
-    name: Teardown instance (core-crypto-benchmarks)
+    name: benchmark_core_crypto/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, core-crypto-benchmarks ]
     runs-on: ubuntu-latest
.github/workflows/benchmark_ct_key_sizes.yml (new file, vendored, 152 changes)
@@ -0,0 +1,152 @@
+# Run sizes benchmarks on an instance and return parsed results to Slab CI bot.
+name: Ciphertext and Keys sizes benchmarks
+
+on:
+  workflow_dispatch:
+  schedule:
+    # Monthly benchmarks will be triggered each 24th of the month at 1a.m.
+    - cron: '0 1 24 * 6'
+
+env:
+  CARGO_TERM_COLOR: always
+  RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
+  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+  RUST_BACKTRACE: "full"
+  RUST_MIN_STACK: "8388608"
+  SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+  SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
+  SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
+  SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+
+permissions: {}
+
+jobs:
+  setup-instance:
+    name: Setup instance (sizes-benchmarks)
+    if: github.event_name == 'workflow_dispatch' ||
+      (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
+    runs-on: ubuntu-latest
+    outputs:
+      runner-name: ${{ steps.start-instance.outputs.label }}
+    steps:
+      - name: Start instance
+        id: start-instance
+        uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
+        with:
+          mode: start
+          github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
+          slab-url: ${{ secrets.SLAB_BASE_URL }}
+          job-secret: ${{ secrets.JOB_SECRET }}
+          backend: aws
+          profile: cpu-big
+
+  sizes-benchmarks:
+    name: Execute sizes client benchmarks
+    needs: setup-instance
+    if: needs.setup-instance.result != 'skipped'
+    runs-on: ${{ needs.setup-instance.outputs.runner-name }}
+    steps:
+      - name: Checkout tfhe-rs repo with tags
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+        with:
+          fetch-depth: 0
+          persist-credentials: 'false'
+          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+
+      - name: Get benchmark details
+        run: |
+          COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
+          {
+            echo "BENCH_DATE=$(date --iso-8601=seconds)";
+            echo "COMMIT_DATE=${COMMIT_DATE}";
+            echo "COMMIT_HASH=$(git describe --tags --dirty)";
+          } >> "${GITHUB_ENV}"
+        env:
+          SHA: ${{ github.sha }}
+
+      - name: Install rust
+        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # zizmor: ignore[stale-action-refs] this action doesn't create releases
+        with:
+          toolchain: nightly
+
+      - name: Measure public key and ciphertext sizes in HL Api
+        run: |
+          make measure_hlapi_compact_pk_ct_sizes
+
+      - name: Parse key and ciphertext sizes results
+        run: |
+          python3 ./ci/benchmark_parser.py tfhe-benchmark/hlapi_ct_key_sizes.csv "${RESULTS_FILENAME}" \
+          --database tfhe_rs \
+          --hardware "m6i.32xlarge" \
+          --project-version "${COMMIT_HASH}" \
+          --branch "${REF_NAME}" \
+          --commit-date "${COMMIT_DATE}" \
+          --bench-date "${BENCH_DATE}" \
+          --object-sizes
+        env:
+          REF_NAME: ${{ github.ref_name }}
+
+      - name: Measure key sizes in shortint
+        run: |
+          make measure_shortint_key_sizes
+
+      - name: Parse key sizes results
+        run: |
+          python3 ./ci/benchmark_parser.py tfhe-benchmark/shortint_key_sizes.csv "${RESULTS_FILENAME}" \
+          --object-sizes \
+          --append-results
+
+      - name: Upload parsed results artifact
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        with:
+          name: ${{ github.sha }}_ct_key_sizes
+          path: ${{ env.RESULTS_FILENAME }}
+
+      - name: Checkout Slab repo
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+        with:
+          repository: zama-ai/slab
+          path: slab
+          persist-credentials: 'false'
+          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+
+      - name: Send data to Slab
+        shell: bash
+        run: |
+          python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
+          --slab-url "${SLAB_URL}"
+        env:
+          JOB_SECRET: ${{ secrets.JOB_SECRET }}
+          SLAB_URL: ${{ secrets.SLAB_URL }}
+
+      - name: Slack Notification
+        if: ${{ failure() }}
+        continue-on-error: true
+        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
+        env:
+          SLACK_COLOR: ${{ job.status }}
+          SLACK_MESSAGE: "Sizes benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
+
+  teardown-instance:
+    name: Teardown instance (sizes-benchmarks)
+    if: ${{ always() && needs.setup-instance.result == 'success' }}
+    needs: [ setup-instance, sizes-benchmarks ]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Stop instance
+        id: stop-instance
+        uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
+        with:
+          mode: stop
+          github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
+          slab-url: ${{ secrets.SLAB_BASE_URL }}
+          job-secret: ${{ secrets.JOB_SECRET }}
+          label: ${{ needs.setup-instance.outputs.runner-name }}
+
+      - name: Slack Notification
+        if: ${{ failure() }}
+        continue-on-error: true
+        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
+        env:
+          SLACK_COLOR: ${{ job.status }}
+          SLACK_MESSAGE: "Instance teardown (sizes-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
.github/workflows/benchmark_dex.yml (vendored, 10 changes)
@@ -1,5 +1,5 @@
 # Run all DEX benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: DEX benchmarks
+name: benchmark_dex

 on:
   workflow_dispatch:
@@ -22,7 +22,7 @@ permissions: {}

 jobs:
   setup-instance:
-    name: Setup instance (dex-benchmarks)
+    name: benchmark_dex/setup-instance
     runs-on: ubuntu-latest
     if: github.event_name == 'workflow_dispatch' ||
       (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -41,7 +41,7 @@ jobs:
       profile: bench

   dex-benchmarks:
-    name: Execute DEX benchmarks
+    name: benchmark_dex/dex-benchmarks
     needs: setup-instance
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     concurrency:
@@ -123,7 +123,7 @@ jobs:
           --append-results

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
           name: ${{ github.sha }}_dex
           path: ${{ env.RESULTS_FILENAME }}
@@ -146,7 +146,7 @@ jobs:
           SLACK_MESSAGE: "DEX benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

   teardown-instance:
-    name: Teardown instance (dex-benchmarks)
+    name: benchmark_dex/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, dex-benchmarks ]
     runs-on: ubuntu-latest
.github/workflows/benchmark_erc20.yml (vendored, 10 changes)
@@ -1,5 +1,5 @@
 # Run all ERC20 benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: ERC20 benchmarks
+name: benchmark_erc20

 on:
   workflow_dispatch:
@@ -23,7 +23,7 @@ permissions: {}

 jobs:
   setup-instance:
-    name: Setup instance (erc20-benchmarks)
+    name: benchmark_erc20/setup-instance
     runs-on: ubuntu-latest
     if: github.event_name == 'workflow_dispatch' ||
       (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -42,7 +42,7 @@ jobs:
       profile: bench

   erc20-benchmarks:
-    name: Execute ERC20 benchmarks
+    name: benchmark_erc20/erc20-benchmarks
     needs: setup-instance
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     concurrency:
@@ -106,7 +106,7 @@ jobs:
           --append-results

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
           name: ${{ github.sha }}_erc20
           path: ${{ env.RESULTS_FILENAME }}
@@ -129,7 +129,7 @@ jobs:
           SLACK_MESSAGE: "ERC20 benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

   teardown-instance:
-    name: Teardown instance (erc20-benchmarks)
+    name: benchmark_erc20/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
     needs: [ setup-instance, erc20-benchmarks ]
     runs-on: ubuntu-latest
.github/workflows/benchmark_gpu.yml (vendored, 10 changes)
@@ -1,5 +1,5 @@
 # Run CUDA benchmarks on a Hyperstack VM and return parsed results to Slab CI bot.
-name: Cuda benchmarks
+name: benchmark_gpu

 on:
   workflow_dispatch:
@@ -59,13 +59,17 @@ on:
         options:
           - classical
           - multi_bit
-          - both
+          - classical + multi_bit
+          - classical_documentation
+          - multi_bit_documentation
+          - classical_documentation + multi_bit_documentation


 permissions: {}

 jobs:
   parse-inputs:
+    name: benchmark_gpu/parse-inputs
     runs-on: ubuntu-latest
     outputs:
       profile: ${{ steps.parse_profile.outputs.profile }}
@@ -90,7 +94,7 @@ jobs:
           echo "name=${NAME}" >> "${GITHUB_OUTPUT}"

   run-benchmarks:
-    name: Run benchmarks
+    name: benchmark_gpu/run-benchmarks
     needs: parse-inputs
     uses: ./.github/workflows/benchmark_gpu_common.yml
     with:
.github/workflows/benchmark_gpu_4090.yml (vendored, 12 changes)
@@ -1,5 +1,5 @@
 # Run benchmarks on an RTX 4090 machine and return parsed results to Slab CI bot.
-name: TFHE Cuda Backend - 4090 benchmarks
+name: benchmark_gpu_4090

 env:
   CARGO_TERM_COLOR: always
@@ -27,7 +27,7 @@ permissions:

 jobs:
   cuda-integer-benchmarks:
-    name: Cuda integer benchmarks (RTX 4090)
+    name: benchmark_gpu_4090/cuda-integer-benchmarks
     if: ${{ github.event_name == 'workflow_dispatch' ||
       github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs' ||
       contains(github.event.label.name, '4090_bench') }}
@@ -88,7 +88,7 @@ jobs:
           REF_NAME: ${{ github.ref_name }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
           name: ${{ github.sha }}_integer_multi_bit_gpu_default
           path: ${{ env.RESULTS_FILENAME }}
@@ -111,7 +111,7 @@ jobs:
           SLACK_MESSAGE: "Integer RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

   cuda-core-crypto-benchmarks:
-    name: Cuda core crypto benchmarks (RTX 4090)
+    name: benchmark_gpu_4090/cuda-core-crypto-benchmarks
     if: ${{ github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' || contains(github.event.label.name, '4090_bench') }}
     needs: cuda-integer-benchmarks
     concurrency:
@@ -172,7 +172,7 @@ jobs:
           REF_NAME: ${{ github.ref_name }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
           name: ${{ github.sha }}_core_crypto
           path: ${{ env.RESULTS_FILENAME }}
@@ -195,7 +195,7 @@ jobs:
           SLACK_MESSAGE: "Core crypto RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

   remove_github_label:
-    name: Remove 4090 bench label
+    name: benchmark_gpu_4090/remove_github_label
     if: ${{ always() && github.event_name == 'pull_request' }}
     needs: [cuda-integer-benchmarks, cuda-core-crypto-benchmarks]
     runs-on: ubuntu-latest
.github/workflows/benchmark_gpu_common.yml (vendored, 16 changes)
@@ -1,5 +1,5 @@
 # Run benchmarks on CUDA instance and return parsed results to Slab CI bot.
-name: Cuda benchmarks - common
+name: benchmark_gpu_common

 on:
   workflow_call:
@@ -63,7 +63,7 @@ permissions: {}

 jobs:
   prepare-matrix:
-    name: Prepare operations matrix
+    name: benchmark_gpu_common/prepare-matrix
     runs-on: ubuntu-latest
     outputs:
       command: ${{ steps.set_command.outputs.command }}
@@ -141,7 +141,7 @@ jobs:
           echo "params_type=${{ toJSON(env.PARAMS_TYPE) }}" >> "${GITHUB_OUTPUT}"

   setup-instance:
-    name: Setup instance (cuda-${{ inputs.profile }}-benchmarks)
+    name: benchmark_gpu_common/setup-instance
     needs: prepare-matrix
     runs-on: ubuntu-latest
     outputs:
@@ -185,7 +185,7 @@ jobs:

   # Install dependencies only once since cuda-benchmarks uses a matrix strategy, thus running multiple times.
   install-dependencies:
-    name: Install dependencies
+    name: benchmark_gpu_common/install-dependencies
     needs: [ setup-instance ]
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     strategy:
@@ -210,7 +210,7 @@ jobs:
           gcc-version: ${{ matrix.gcc }}

   cuda-benchmarks:
-    name: Cuda benchmarks (${{ inputs.profile }})
+    name: benchmark_gpu_common/cuda-benchmarks
     needs: [ prepare-matrix, setup-instance, install-dependencies ]
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     timeout-minutes: 1440 # 24 hours
@@ -306,7 +306,7 @@ jobs:
           BENCH_TYPE: ${{ matrix.bench_type }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
           name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ inputs.profile }}_${{ matrix.bench_type }}_${{ matrix.params_type }}
           path: ${{ env.RESULTS_FILENAME }}
@@ -329,7 +329,7 @@ jobs:
           SLAB_URL: ${{ secrets.SLAB_URL }}

   slack-notify:
-    name: Slack Notification
+    name: benchmark_gpu_common/slack-notify
     needs: [ setup-instance, cuda-benchmarks ]
     runs-on: ubuntu-latest
     if: ${{ always() && needs.cuda-benchmarks.result != 'skipped' && failure() }}
@@ -342,7 +342,7 @@ jobs:
           SLACK_MESSAGE: "Cuda benchmarks (${{ inputs.profile }}) finished with status: ${{ needs.cuda-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"

   teardown-instance:
-    name: Teardown instance (cuda-${{ inputs.profile }}-benchmarks)
+    name: benchmark_gpu_common/teardown-instance
     if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
     needs: [ setup-instance, cuda-benchmarks, slack-notify ]
     runs-on: ubuntu-latest
.github/workflows/benchmark_gpu_coprocessor.yml (vendored)
@@ -1,8 +1,24 @@
 # Run all fhevm coprocessor benchmarks on a GPU instance on Hyperstack and return parsed results to Slab CI bot.
-name: Cuda Coprocessor benchmarks
+name: benchmark_gpu_coprocessor

 on:
   workflow_dispatch:
+    inputs:
+      profile:
+        description: "Instance type"
+        required: true
+        type: choice
+        options:
+          - "l40 (n3-L40x1)"
+          - "4-l40 (n3-L40x4)"
+          - "single-h100 (n3-H100x1)"
+          - "2-h100 (n3-H100x2)"
+          - "4-h100 (n3-H100x4)"
+          - "multi-h100 (n3-H100x8)"
+          - "multi-h100-nvlink (n3-H100x8-NVLink)"
+          - "multi-h100-sxm5 (n3-H100x8-SXM5)"
+          - "multi-h100-sxm5_fallback (n3-H100x8-SXM5)"
+
   schedule:
     # Weekly tests @ 1AM
     - cron: "0 1 * * 6"
@@ -17,7 +33,9 @@ env:
   RUST_BACKTRACE: "full"
   RUST_MIN_STACK: "8388608"
   CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
-  PROFILE: "multi-h100-sxm5 (n3-H100x8-SXM5)"
+  PROFILE_SCHEDULED_RUN: "multi-h100-sxm5 (n3-H100x8-SXM5)"
+  PROFILE_MANUAL_RUN: ${{ inputs.profile }}
+  IS_MANUAL_RUN: ${{ github.event_name == 'workflow_dispatch' }}
   BENCHMARK_TYPE: "ALL"
   OPTIMIZATION_TARGET: "throughput"
   BATCH_SIZE: "5000"
@@ -29,7 +47,7 @@ env:

 jobs:
   parse-inputs:
-    name: coprocessor-benchmark-gpu/parse-inputs
+    name: benchmark_gpu_coprocessor/parse-inputs
     runs-on: ubuntu-latest
     permissions:
       contents: 'read'
@@ -40,19 +58,29 @@ jobs:
       - name: Parse profile
         id: parse_profile
         run: |
+          if [[ ${IS_MANUAL_RUN} == true ]]; then
+            PROFILE_RAW="${PROFILE_MANUAL_RUN}"
+          else
+            PROFILE_RAW="${PROFILE_SCHEDULED_RUN}"
+          fi
           # shellcheck disable=SC2001
-          PROFILE_VAL=$(echo "${PROFILE}" | sed 's|\(.*\)[[:space:]](.*)|\1|')
+          PROFILE_VAL=$(echo "${PROFILE_RAW}" | sed 's|\(.*\)[[:space:]](.*)|\1|')
           echo "profile=$PROFILE_VAL" >> "${GITHUB_OUTPUT}"

       - name: Parse hardware name
         id: parse_hardware_name
         run: |
+          if [[ ${IS_MANUAL_RUN} == true ]]; then
+            PROFILE_RAW="${PROFILE_MANUAL_RUN}"
+          else
+            PROFILE_RAW="${PROFILE}"
+          fi
           # shellcheck disable=SC2001
-          PROFILE_VAL=$(echo "${PROFILE}" | sed 's|.*[[:space:]](\(.*\))|\1|')
+          PROFILE_VAL=$(echo "${PROFILE_RAW}" | sed 's|.*[[:space:]](\(.*\))|\1|')
           echo "name=$PROFILE_VAL" >> "${GITHUB_OUTPUT}"

   setup-instance:
-    name: coprocessor-benchmark-gpu/setup-instance
+    name: benchmark_gpu_coprocessor/setup-instance
     needs: parse-inputs
     runs-on: ubuntu-latest
     permissions:
@@ -71,8 +99,8 @@ jobs:
       backend: hyperstack
       profile: ${{ needs.parse-inputs.outputs.profile }}

-  benchmark:
-    name: coprocessor-benchmark-gpu/benchmark-gpu (bpr)
+  benchmark-gpu:
+    name: benchmark_gpu_coprocessor/benchmark-gpu (bpr)
     needs: [ parse-inputs, setup-instance ]
     runs-on: ${{ needs.setup-instance.outputs.runner-name }}
     continue-on-error: true
@@ -130,6 +158,13 @@ jobs:
           } >> "${GITHUB_ENV}"
         working-directory: tfhe-rs/

+      - name: Setup Hyperstack dependencies
+        uses: ./tfhe-rs/.github/actions/gpu_setup
+        with:
+          cuda-version: ${{ matrix.cuda }}
+          gcc-version: ${{ matrix.gcc }}
+          github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
+
       - name: Check fhEVM and TFHE-rs repos
         run: |
           pwd
@@ -140,13 +175,6 @@ jobs:
         run: git lfs checkout
         working-directory: fhevm/

-      - name: Setup Hyperstack dependencies
-        uses: ./fhevm/.github/actions/gpu_setup
-        with:
-          cuda-version: ${{ matrix.cuda }}
-          gcc-version: ${{ matrix.gcc }}
-          github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
-
       - name: Install rust
         uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
         with:
@@ -154,7 +182,7 @@ jobs:

       - name: Install cargo dependencies
         run: |
-          sudo apt-get install -y protobuf-compiler cmake pkg-config libssl-dev \
+          sudo apt-get install -y protobuf-compiler pkg-config libssl-dev \
            libclang-dev docker-compose-v2 docker.io acl
           sudo usermod -aG docker "$USER"
           newgrp docker
@@ -162,10 +190,10 @@ jobs:
           cargo install sqlx-cli

       - name: Install foundry
-        uses: foundry-rs/foundry-toolchain@82dee4ba654bd2146511f85f0d013af94670c4de
+        uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e

       - name: Cache cargo
-        uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
+        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
         with:
           path: |
             ~/.cargo/registry
@@ -175,18 +203,25 @@ jobs:
           restore-keys: ${{ runner.os }}-cargo-

       - name: Login to GitHub Container Registry
-        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}

+      - name: Login to Chainguard Registry
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+        with:
+          registry: cgr.dev
+          username: ${{ secrets.CGR_USERNAME }}
+          password: ${{ secrets.CGR_PASSWORD }}
+
       - name: Init database
         run: make init_db
-        working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
+        working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker

       - name: Use Node.js
-        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
+        uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
         with:
           node-version: 20.x
@@ -197,14 +232,19 @@ jobs:
           ls
           pwd
           cp ./host-contracts/.env.example ./host-contracts/.env
-          npm --prefix ./host-contracts ci --include=optional
-          cd host-contracts && npm install && npm run deploy:emptyProxies && npx hardhat compile
+          cd ./host-contracts
+          npm ci --include=optional
+          npm install && npm run deploy:emptyProxies && npx hardhat compile
         working-directory: fhevm/

       - name: Profile erc20 no-cmux benchmark on GPU
         run: |
-          BENCHMARK_BATCH_SIZE="${BATCH_SIZE}" FHEVM_DF_SCHEDULE="${SCHEDULING_POLICY}" BENCHMARK_TYPE="LATENCY" OPTIMIZATION_TARGET="${OPTIMIZATION_TARGET}" make -e "profile_erc20_gpu"
-        working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
+          BENCHMARK_BATCH_SIZE="${BATCH_SIZE}" \
+          FHEVM_DF_SCHEDULE="${SCHEDULING_POLICY}" \
+          BENCHMARK_TYPE="THROUGHPUT_200" \
+          OPTIMIZATION_TARGET="${OPTIMIZATION_TARGET}" \
+          make -e "profile_erc20_gpu"
+        working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker

       - name: Get nsys profile name
         id: nsys_profile_name
@@ -215,25 +255,25 @@ jobs:
           REPORT_NAME: ${{ steps.nsys_profile_name.outputs.profile }}
         run: |
           mv report1.nsys-rep ${{ env.REPORT_NAME }}
-        working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
+        working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker

       - name: Upload profile artifact
         env:
           REPORT_NAME: ${{ steps.nsys_profile_name.outputs.profile }}
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
           name: ${{ env.REPORT_NAME }}
-          path: fhevm/coprocessor/fhevm-engine/coprocessor/${{ env.REPORT_NAME }}
+          path: fhevm/coprocessor/fhevm-engine/tfhe-worker/${{ env.REPORT_NAME }}

       - name: Run latency benchmark on GPU
         run: |
           BENCHMARK_BATCH_SIZE="${BATCH_SIZE}" FHEVM_DF_SCHEDULE="${SCHEDULING_POLICY}" BENCHMARK_TYPE="LATENCY" OPTIMIZATION_TARGET="${OPTIMIZATION_TARGET}" make -e "benchmark_${BENCHMARKS}_gpu"
-        working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
+        working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker

       - name: Run throughput benchmarks on GPU
         run: |
           BENCHMARK_BATCH_SIZE="${BATCH_SIZE}" FHEVM_DF_SCHEDULE="${SCHEDULING_POLICY}" BENCHMARK_TYPE="THROUGHPUT_200" OPTIMIZATION_TARGET="${OPTIMIZATION_TARGET}" make -e "benchmark_${BENCHMARKS}_gpu"
-        working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
+        working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker

       - name: Parse results
         run: |
@@ -246,12 +286,12 @@ jobs:
           --commit-date "${COMMIT_DATE}" \
           --bench-date "${BENCH_DATE}" \
           --walk-subdirs \
-          --crate "coprocessor/fhevm-engine/coprocessor" \
+          --crate "coprocessor/fhevm-engine/tfhe-worker" \
           --name-suffix "operation_batch_size_${BATCH_SIZE}-schedule_${SCHEDULING_POLICY}-optimization_target_${OPTIMIZATION_TARGET}"
         working-directory: fhevm/

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
         with:
           name: ${COMMIT_SHA}_${BENCHMARKS}_${{ needs.parse-inputs.outputs.profile }}
           path: fhevm/$${{ env.RESULTS_FILENAME }}
@@ -273,9 +313,9 @@ jobs:
           --slab-url "${SLAB_URL}"

   teardown-instance:
-    name: coprocessor-benchmark-gpu/teardown
+    name: benchmark_gpu_coprocessor/teardown-instance
     if: ${{ always() && needs.setup-instance.result == 'success' }}
-    needs: [ setup-instance, benchmark ]
+    needs: [ setup-instance, benchmark-gpu ]
     runs-on: ubuntu-latest
     permissions:
       contents: 'read'
.github/workflows/benchmark_gpu_dex.yml (vendored, 5 changes)
@@ -1,5 +1,5 @@
 # Run CUDA DEX benchmarks on a Hyperstack VM and return parsed results to Slab CI bot.
-name: Cuda DEX benchmarks
+name: benchmark_gpu_dex/

 on:
   workflow_dispatch:
@@ -23,6 +23,7 @@ permissions: {}

 jobs:
   parse-inputs:
+    name: benchmark_gpu_dex/parse-inputs
     runs-on: ubuntu-latest
     outputs:
       profile: ${{ steps.parse_profile.outputs.profile }}
@@ -47,7 +48,7 @@ jobs:
           echo "name=${NAME}" >> "${GITHUB_OUTPUT}"

   run-benchmarks:
-    name: Run benchmarks
+    name: benchmark_gpu_dex/run-benchmarks
     needs: parse-inputs
     uses: ./.github/workflows/benchmark_gpu_dex_common.yml
     with:
12 .github/workflows/benchmark_gpu_dex_common.yml vendored
@@ -1,5 +1,5 @@
# Run DEX benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
-name: Cuda DEX benchmarks - common
+name: benchmark_gpu_dex_common

on:
workflow_call:
@@ -47,7 +47,7 @@ permissions: {}

jobs:
setup-instance:
-name: Setup instance (cuda-dex-benchmarks)
+name: benchmark_gpu_dex_common/setup-instance
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -91,7 +91,7 @@ jobs:
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"

cuda-dex-benchmarks:
-name: Cuda DEX benchmarks (${{ inputs.profile }})
+name: benchmark_gpu_dex_common/cuda-dex-benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -154,7 +154,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_dex_${{ inputs.profile }}
path: ${{ env.RESULTS_FILENAME }}
@@ -177,7 +177,7 @@ jobs:
SLAB_URL: ${{ secrets.SLAB_URL }}

slack-notify:
-name: Slack Notification
+name: benchmark_gpu_dex_common/slack-notify
needs: [ setup-instance, cuda-dex-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-dex-benchmarks.result != 'skipped' && failure() }}
@@ -190,7 +190,7 @@ jobs:
SLACK_MESSAGE: "Cuda DEX benchmarks (${{ inputs.profile }}) finished with status: ${{ needs.cuda-dex-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
-name: Teardown instance (cuda-dex-${{ inputs.profile }}-benchmarks)
+name: benchmark_gpu_dex_common/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-dex-benchmarks, slack-notify ]
runs-on: ubuntu-latest
@@ -1,5 +1,5 @@
# Run CUDA DEX benchmarks on multiple Hyperstack VMs and return parsed results to Slab CI bot.
-name: Cuda DEX weekly benchmarks
+name: benchmark_gpu_dex_weekly

on:
schedule:
@@ -10,7 +10,7 @@ permissions: {}

jobs:
run-benchmarks-1-h100:
-name: Run benchmarks (1xH100)
+name: benchmark_gpu_dex_weekly/run-benchmarks-1-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_dex_common.yml
with:
@@ -27,7 +27,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}

run-benchmarks-2-h100:
-name: Run benchmarks (2xH100)
+name: benchmark_gpu_dex_weekly/run-benchmarks-2-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_dex_common.yml
with:
@@ -44,7 +44,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}

run-benchmarks-8-h100:
-name: Run benchmarks (8xH100)
+name: benchmark_gpu_dex_weekly/run-benchmarks-8-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_dex_common.yml
with:
5 .github/workflows/benchmark_gpu_erc20.yml vendored
@@ -1,5 +1,5 @@
# Run CUDA ERC20 benchmarks on a Hyperstack VM and return parsed results to Slab CI bot.
-name: Cuda ERC20 benchmarks
+name: benchmark_gpu_erc20

on:
workflow_dispatch:
@@ -24,6 +24,7 @@ permissions: {}

jobs:
parse-inputs:
+name: benchmark_gpu_erc20/parse-inputs
runs-on: ubuntu-latest
outputs:
profile: ${{ steps.parse_profile.outputs.profile }}
@@ -48,7 +49,7 @@ jobs:
echo "name=${NAME}" >> "${GITHUB_OUTPUT}"

run-benchmarks:
-name: Run benchmarks
+name: benchmark_gpu_erc20/run-benchmarks
needs: parse-inputs
uses: ./.github/workflows/benchmark_gpu_erc20_common.yml
with:
12 .github/workflows/benchmark_gpu_erc20_common.yml vendored
@@ -1,5 +1,5 @@
# Run ERC20 benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
-name: Cuda ERC20 benchmarks - common
+name: benchmark_gpu_erc20_common

on:
workflow_call:
@@ -48,7 +48,7 @@ permissions: {}

jobs:
setup-instance:
-name: Setup instance (cuda-erc20-benchmarks)
+name: benchmark_gpu_erc20_common/setup-instance
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -92,7 +92,7 @@ jobs:
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"

cuda-erc20-benchmarks:
-name: Cuda ERC20 benchmarks (${{ inputs.profile }})
+name: benchmark_gpu_erc20_common/cuda-erc20-benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -155,7 +155,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_erc20_${{ inputs.profile }}
path: ${{ env.RESULTS_FILENAME }}
@@ -178,7 +178,7 @@ jobs:
SLAB_URL: ${{ secrets.SLAB_URL }}

slack-notify:
-name: Slack Notification
+name: benchmark_gpu_erc20_common/slack-notify
needs: [ setup-instance, cuda-erc20-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-erc20-benchmarks.result != 'skipped' && failure() }}
@@ -191,7 +191,7 @@ jobs:
SLACK_MESSAGE: "Cuda ERC20 benchmarks (${{ inputs.profile }}) finished with status: ${{ needs.cuda-erc20-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
-name: Teardown instance (cuda-erc20-${{ inputs.profile }}-benchmarks)
+name: benchmark_gpu_erc20_common/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-erc20-benchmarks, slack-notify ]
runs-on: ubuntu-latest
@@ -1,5 +1,5 @@
# Run CUDA ERC20 benchmarks on multiple Hyperstack VMs and return parsed results to Slab CI bot.
-name: Cuda ERC20 weekly benchmarks
+name: benchmark_gpu_erc20_weekly

on:
schedule:
@@ -11,7 +11,7 @@ permissions: {}

jobs:
run-benchmarks-1-h100:
-name: Run benchmarks (1xH100)
+name: benchmark_gpu_erc20_weekly/run-benchmarks-1-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_erc20_common.yml
with:
@@ -28,7 +28,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}

run-benchmarks-2-h100:
-name: Run benchmarks (2xH100)
+name: benchmark_gpu_erc20_weekly/run-benchmarks-2-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_erc20_common.yml
with:
@@ -45,7 +45,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}

run-benchmarks-8-h100:
-name: Run benchmarks (8xH100)
+name: benchmark_gpu_erc20_weekly/run-benchmarks-8-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_erc20_common.yml
with:
12 .github/workflows/benchmark_gpu_weekly.yml vendored
@@ -1,5 +1,5 @@
# Run CUDA benchmarks on multiple Hyperstack VMs and return parsed results to Slab CI bot.
-name: Cuda weekly benchmarks
+name: benchmark_gpu_weekly

on:
schedule:
@@ -11,7 +11,7 @@ permissions: {}

jobs:
run-benchmarks-8-h100-sxm5-integer:
-name: Run integer benchmarks (8xH100-SXM5)
+name: benchmark_gpu_weekly/run-benchmarks-8-h100-sxm5-integer
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
@@ -32,7 +32,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}

run-benchmarks-8-h100-sxm5-integer-compression:
-name: Run integer compression benchmarks (8xH100-SXM5)
+name: benchmark_gpu_weekly/run-benchmarks-8-h100-sxm5-integer-compression
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
@@ -53,7 +53,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}

run-benchmarks-8-h100-sxm5-integer-zk:
-name: Run integer zk benchmarks (8xH100-SXM5)
+name: benchmark_gpu_weekly/run-benchmarks-8-h100-sxm5-integer-zk
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
@@ -74,7 +74,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}

run-benchmarks-8-h100-sxm5-noise-squash:
-name: Run integer zk benchmarks (8xH100-SXM5)
+name: benchmark_gpu_weekly/run-benchmarks-8-h100-sxm5-noise-squash
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
@@ -95,7 +95,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}

run-benchmarks-1-h100-core-crypto:
-name: Run core-crypto benchmarks (1xH100)
+name: benchmark_gpu_weekly/run-benchmarks-1-h100-core-crypto (1xH100)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
11 .github/workflows/benchmark_hpu_hlapi.yml vendored
@@ -16,7 +16,7 @@ permissions: {}
jobs:
hlapi-benchmarks-hpu:
name: Execute HLAPI benchmarks for HPU backend
-runs-on: v80-desktop
+runs-on: v80-marais
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
@@ -60,11 +60,14 @@ jobs:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

+- name: Select HPU board
+run: |
+echo "V80_PCIE_DEV=24" >> "${GITHUB_ENV}"
+echo "V80_SERIAL_NUMBER=XFL12NWY3ZKG" >> "${GITHUB_ENV}"

- name: Run benchmarks
run: |
make pull_hpu_files
-export V80_SERIAL_NUMBER=XFL12E4XJXWK
source /opt/xilinx/Vivado/2024.2/settings64.sh
-make bench_hlapi_erc20_hpu
+make bench_hlapi_hpu

@@ -83,7 +86,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_hlapi_benchmarks
path: ${{ env.RESULTS_FILENAME }}
31 .github/workflows/benchmark_hpu_integer.yml vendored
@@ -1,9 +1,12 @@
# Run all integer benchmarks on a permanent HPU instance and return parsed results to Slab CI bot.
-name: Hpu Integer Benchmarks
+name: benchmark_hpu_integer

on:
workflow_dispatch:
inputs:
all_precisions:
description: "Run all precisions"
type: boolean
+bench_type:
+description: "Benchmarks type"
+type: choice
@@ -19,13 +22,14 @@ env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
+FAST_BENCH: TRUE

permissions: {}

jobs:
prepare-matrix:
name: Prepare operations matrix
-runs-on: v80-desktop
+runs-on: v80-marais
outputs:
bench_type: ${{ steps.set_bench_type.outputs.bench_type }}
steps:
@@ -44,17 +48,17 @@ jobs:
if: github.event_name != 'workflow_dispatch'
run: |
echo "BENCH_TYPE=[\"latency\"]" >> "${GITHUB_ENV}"

- name: Set benchmark types output
id: set_bench_type
run: | # zizmor: ignore[template-injection] this env variable is safe
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"

integer-benchmarks-hpu:
-name: Execute integer & erc20 benchmarks for HPU backend
+name: benchmark_hpu_integer/integer-benchmarks-hpu
needs: prepare-matrix
-runs-on: v80-desktop
+runs-on: v80-marais
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
@@ -102,11 +106,20 @@ jobs:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

+- name: Should run benchmarks with all precisions
+if: inputs.all_precisions
+run: |
+echo "FAST_BENCH=FALSE" >> "${GITHUB_ENV}"

+- name: Select HPU board
+run: |
+echo "V80_PCIE_DEV=24" >> "${GITHUB_ENV}"
+echo "V80_SERIAL_NUMBER=XFL12NWY3ZKG" >> "${GITHUB_ENV}"

- name: Run benchmarks
run: |
+echo "${V80_PCIE_DEV} ${V80_SERIAL_NUMBER}"
make pull_hpu_files
-export V80_SERIAL_NUMBER=XFL12E4XJXWK
source /opt/xilinx/Vivado/2024.2/settings64.sh
make BENCH_TYPE="${BENCH_TYPE}" bench_integer_hpu
env:
BENCH_TYPE: ${{ matrix.bench_type }}
@@ -128,7 +141,7 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.bench_type }}_integer_benchmarks
path: ${{ env.RESULTS_FILENAME }}
12 .github/workflows/benchmark_integer.yml vendored
@@ -1,5 +1,5 @@
# Run all integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: Integer benchmarks
+name: benchmark_integer

on:
workflow_dispatch:
@@ -41,7 +41,7 @@ permissions: {}

jobs:
prepare-matrix:
-name: Prepare operations matrix
+name: benchmark_integer/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -87,7 +87,7 @@ jobs:
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"

setup-instance:
-name: Setup instance (integer-benchmarks)
+name: benchmark_integer/setup-instance
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
@@ -105,7 +105,7 @@ jobs:
profile: bench

integer-benchmarks:
-name: Execute integer benchmarks for all operations flavor
+name: benchmark_integer/integer-benchmarks
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -188,7 +188,7 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ matrix.bench_type }}
path: ${{ env.RESULTS_FILENAME }}
@@ -211,7 +211,7 @@ jobs:
SLACK_MESSAGE: "Integer full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
-name: Teardown instance (integer-benchmarks)
+name: benchmark_integer/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, integer-benchmarks ]
runs-on: ubuntu-latest
403 .github/workflows/benchmark_perf_regression.yml vendored Normal file
@@ -0,0 +1,403 @@
# Run performance regression benchmarks and return parsed results to associated pull-request.
name: benchmark_perf_regression

on:
issue_comment:
types: [ created ]
pull_request:
types: [ labeled ]

env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

permissions: { }

jobs:
verify-triggering-actor:
name: benchmark_perf_regression/verify-actor
if: (github.event_name == 'pull_request' &&
(contains(github.event.label.name, 'bench-perfs-cpu') ||
contains(github.event.label.name, 'bench-perfs-gpu'))) ||
(github.event.issue.pull_request && startsWith(github.event.comment.body, '/bench'))
uses: ./.github/workflows/verify_triggering_actor.yml
secrets:
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

prepare-benchmarks:
name: benchmark_perf_regression/prepare-benchmarks
needs: verify-triggering-actor
runs-on: ubuntu-latest
outputs:
commands: ${{ steps.set_commands.outputs.commands }}
slab-backend: ${{ steps.set_slab_details.outputs.backend }}
slab-profile: ${{ steps.set_slab_details.outputs.profile }}
hardware-name: ${{ steps.get_hardware_name.outputs.name }}
tfhe-backend: ${{ steps.set_regression_details.outputs.tfhe-backend }}
selected-regression-profile: ${{ steps.set_regression_details.outputs.selected-profile }}
custom-env: ${{ steps.get_custom_env.outputs.custom_env }}
permissions:
# Needed to write a comment in a pull-request
pull-requests: write
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

- name: Acknowledge issue comment
if: github.event_name == 'issue_comment'
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
with:
comment-id: ${{ github.event.comment.id }}
reactions: '+1'

- name: Display workflow run URL
if: github.event_name == 'issue_comment'
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
with:
issue-number: ${{ github.event.issue.number }}
body: |
User triggered performance regression benchmark.
Workflow run URL: ${{ env.ACTION_RUN_URL }}

- name: Generate CPU benchmarks command from label
if: (github.event_name == 'pull_request' && contains(github.event.label.name, 'bench-perfs-cpu'))
run: |
echo "DEFAULT_BENCH_OPTIONS=--backend cpu" >> "${GITHUB_ENV}"

- name: Generate GPU benchmarks command from label
if: (github.event_name == 'pull_request' && contains(github.event.label.name, 'bench-perfs-gpu'))
run: |
echo "DEFAULT_BENCH_OPTIONS=--backend gpu" >> "${GITHUB_ENV}"

# TODO add support for HPU backend

- name: Install Python requirements
run: |
python3 -m pip install -r ci/perf_regression/requirements.txt

- name: Generate cargo commands and env from label
if: github.event_name == 'pull_request'
run: |
python3 ci/perf_regression/perf_regression.py parse_profile --issue-comment "/bench ${DEFAULT_BENCH_OPTIONS}"
echo "COMMANDS=$(cat ci/perf_regression/perf_regression_generated_commands.json)" >> "${GITHUB_ENV}"

- name: Dump issue comment into file # To avoid possible code-injection
if: github.event_name == 'issue_comment'
run: |
echo "${COMMENT_BODY}" >> dumped_comment.txt
env:
COMMENT_BODY: ${{ github.event.comment.body }}

- name: Generate cargo commands and env
if: github.event_name == 'issue_comment'
run: |
python3 ci/perf_regression/perf_regression.py parse_profile --issue-comment "$(cat dumped_comment.txt)"
echo "COMMANDS=$(cat ci/perf_regression/perf_regression_generated_commands.json)" >> "${GITHUB_ENV}"

- name: Set commands output
id: set_commands
run: | # zizmor: ignore[template-injection] this env variable is safe
echo "commands=${{ toJSON(env.COMMANDS) }}" >> "${GITHUB_OUTPUT}"

- name: Set Slab details outputs
id: set_slab_details
run: |
echo "backend=$(cat ci/perf_regression/perf_regression_slab_backend_config.txt)" >> "${GITHUB_OUTPUT}"
echo "profile=$(cat ci/perf_regression/perf_regression_slab_profile_config.txt)" >> "${GITHUB_OUTPUT}"

- name: Get hardware name
id: get_hardware_name
run: | # zizmor: ignore[template-injection] these interpolations are safe
HARDWARE_NAME=$(python3 ci/hardware_finder.py "${{ steps.set_slab_details.outputs.backend }}" "${{ steps.set_slab_details.outputs.profile }}");
echo "name=${HARDWARE_NAME}" >> "${GITHUB_OUTPUT}"

- name: Set regression details outputs
id: set_regression_details
run: |
echo "tfhe-backend=$(cat ci/perf_regression/perf_regression_tfhe_rs_backend_config.txt)" >> "${GITHUB_OUTPUT}"
echo "selected-profile=$(cat ci/perf_regression/perf_regression_selected_profile_config.txt)" >> "${GITHUB_OUTPUT}"

- name: Get custom env vars
id: get_custom_env
run: |
echo "custom_env=$(cat ci/perf_regression/perf_regression_custom_env.sh)" >> "${GITHUB_OUTPUT}"

setup-instance:
name: benchmark_perf_regression/setup-instance
needs: prepare-benchmarks
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: ${{ needs.prepare-benchmarks.outputs.slab-backend }}
profile: ${{ needs.prepare-benchmarks.outputs.slab-profile }}

install-cuda-dependencies-if-required:
name: benchmark_perf_regression/install-cuda-dependencies-if-required
needs: [ prepare-benchmarks, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
matrix:
# explicit include-based build matrix, of known valid options
include:
- cuda: "12.8"
gcc: 11
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

- name: Setup Hyperstack dependencies
if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
gcc-version: ${{ matrix.gcc }}

regression-benchmarks:
name: benchmark_perf_regression/regression-benchmarks
needs: [ prepare-benchmarks, setup-instance, install-cuda-dependencies-if-required ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow_ref }}_${{ needs.prepare-benchmarks.outputs.slab-backend }}_${{ needs.prepare-benchmarks.outputs.slab-profile }}
cancel-in-progress: true
timeout-minutes: 720 # 12 hours
strategy:
fail-fast: false
max-parallel: 1
matrix:
command: ${{ fromJson(needs.prepare-benchmarks.outputs.commands) }}
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
fetch-depth: 0 # Needed to get commit hash
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

- name: Get benchmark details
run: |
COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=${COMMIT_DATE}";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
env:
SHA: ${{ github.sha }}

- name: Export custom env variables
run: | # zizmor: ignore[template-injection] this env variable is safe
{
${{ needs.prepare-benchmarks.outputs.custom-env }}
} >> "$GITHUB_ENV"

# Re-export environment variables as dependencies setup perform this task in the previous job.
# Local env variables are cleaned at the end of each job.
- name: Export CUDA variables
if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
shell: bash
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "PATH=$PATH:$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib64:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDA_MODULE_LOADER=EAGER" >> "${GITHUB_ENV}"
env:
CUDA_PATH: /usr/local/cuda-12.8

- name: Export gcc and g++ variables
if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
shell: bash
run: |
{
echo "CC=/usr/bin/gcc-${GCC_VERSION}";
echo "CXX=/usr/bin/g++-${GCC_VERSION}";
echo "CUDAHOSTCXX=/usr/bin/g++-${GCC_VERSION}";
} >> "${GITHUB_ENV}"
env:
GCC_VERSION: 11

- name: Install rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: nightly

- name: Checkout Slab repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
repository: zama-ai/slab
path: slab
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

- name: Run regression benchmarks
run: |
make BENCH_CUSTOM_COMMAND="${BENCH_COMMAND}" bench_custom
env:
BENCH_COMMAND: ${{ matrix.command }}

- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion "${RESULTS_FILENAME}" \
--database tfhe_rs \
--hardware "${HARDWARE_NAME}" \
--backend "${TFHE_BACKEND}" \
--project-version "${COMMIT_HASH}" \
--branch "${REF_NAME}" \
--commit-date "${COMMIT_DATE}" \
--bench-date "${BENCH_DATE}" \
--walk-subdirs \
--name-suffix regression \
--bench-type "${BENCH_TYPE}"

echo "RESULTS_FILE_SHA=$(sha256sum "${RESULTS_FILENAME}" | cut -d " " -f1)" >> "${GITHUB_ENV}"
env:
HARDWARE_NAME: ${{ needs.prepare-benchmarks.outputs.hardware-name }}
TFHE_BACKEND: ${{ needs.prepare-benchmarks.outputs.tfhe-backend }}
REF_NAME: ${{ github.head_ref || github.ref_name }}
BENCH_TYPE: ${{ env.__TFHE_RS_BENCH_TYPE }}

- name: Upload parsed results artifact
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_regression_${{ env.RESULTS_FILE_SHA }} # RESULT_FILE_SHA is needed to avoid collision between matrix.command runs
path: ${{ env.RESULTS_FILENAME }}

- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
--slab-url "${SLAB_URL}"
env:
JOB_SECRET: ${{ secrets.JOB_SECRET }}
SLAB_URL: ${{ secrets.SLAB_URL }}

check-regressions:
name: benchmark_perf_regression/check-regressions
needs: [ prepare-benchmarks, regression-benchmarks ]
runs-on: ubuntu-latest
permissions:
# Needed to write a comment in a pull-request
pull-requests: write
# Needed to set up Python dependencies
contents: read
env:
REF_NAME: ${{ github.head_ref || github.ref_name }}
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

- name: Install recent Python
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: '3.12'

- name: Fetch data
run: |
python3 -m pip install -r ci/data_extractor/requirements.txt
python3 ci/data_extractor/src/data_extractor.py regression_data \
--generate-regression-json \
--regression-profiles ci/regression.toml \
--regression-selected-profile "${REGRESSION_PROFILE}" \
--backend "${TFHE_BACKEND}" \
--hardware "${HARDWARE_NAME}" \
--branch "${REF_NAME}" \
--time-span-days 60
env:
REGRESSION_PROFILE: ${{ needs.prepare-benchmarks.outputs.selected-regression-profile }}
TFHE_BACKEND: ${{ needs.prepare-benchmarks.outputs.tfhe-backend }}
HARDWARE_NAME: ${{ needs.prepare-benchmarks.outputs.hardware-name }}
DATA_EXTRACTOR_DATABASE_HOST: ${{ secrets.DATABASE_HOST }}
DATA_EXTRACTOR_DATABASE_USER: ${{ secrets.DATABASE_USER }}
DATA_EXTRACTOR_DATABASE_PASSWORD: ${{ secrets.DATABASE_PASSWORD }}

- name: Generate regression report
run: |
python3 -m pip install -r ci/perf_regression/requirements.txt
python3 ci/perf_regression/perf_regression.py check_regression \
--results-file regression_data.json \
--generate-report

- name: Write report in pull-request
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
with:
issue-number: ${{ github.event.pull_request.number || github.event.issue.number }}
body-path: ci/perf_regression/regression_report.md

comment-on-failure:
name: benchmark_perf_regression/comment-on-failure
needs: [ prepare-benchmarks, setup-instance, regression-benchmarks, check-regressions ]
runs-on: ubuntu-latest
if: ${{ failure() && github.event_name == 'issue_comment' }}
continue-on-error: true
permissions:
# Needed to write a comment in a pull-request
pull-requests: write
steps:
- name: Write failure message
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
with:
issue-number: ${{ github.event.issue.number }}
body: |
:x: Performance regression benchmark failed ([workflow run](${{ env.ACTION_RUN_URL }}))

slack-notify:
name: benchmark_perf_regression/slack-notify
needs: [ prepare-benchmarks, setup-instance, regression-benchmarks, check-regressions ]
runs-on: ubuntu-latest
if: ${{ failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "Performance regression benchmarks failed. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
name: benchmark_perf_regression/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, regression-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}

- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (regression-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
25 .github/workflows/benchmark_shortint.yml vendored
@@ -1,5 +1,5 @@
# Run all shortint benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: Shortint full benchmarks
+name: benchmark_shortint

on:
workflow_dispatch:
@@ -27,7 +27,7 @@ permissions: {}

jobs:
prepare-matrix:
-name: Prepare operations matrix
+name: benchmark_shortint/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -51,7 +51,7 @@ jobs:
echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"

setup-instance:
-name: Setup instance (shortint-benchmarks)
+name: benchmark_shortint/setup-instance
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
@@ -69,7 +69,7 @@ jobs:
profile: bench

shortint-benchmarks:
-name: Execute shortint benchmarks for all operations flavor
+name: benchmark_shortint/shortint-benchmarks
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -131,21 +131,8 @@ jobs:
env:
REF_NAME: ${{ github.ref_name }}

-# This small benchmark needs to be executed only once.
-- name: Measure key sizes
-if: matrix.op_flavor == 'default'
-run: |
-make measure_shortint_key_sizes

-- name: Parse key sizes results
-if: matrix.op_flavor == 'default'
-run: |
-python3 ./ci/benchmark_parser.py tfhe-benchmark/shortint_key_sizes.csv "${RESULTS_FILENAME}" \
---object-sizes \
---append-results

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_shortint_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
@@ -168,7 +155,7 @@ jobs:
SLACK_MESSAGE: "Shortint full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
-name: Teardown instance (shortint-benchmarks)
+name: benchmark_shortint/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, shortint-benchmarks ]
runs-on: ubuntu-latest
12 .github/workflows/benchmark_signed_integer.yml vendored
@@ -1,5 +1,5 @@
# Run all signed integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: Signed Integer full benchmarks
+name: benchmark_signed_integer

on:
workflow_dispatch:
@@ -41,7 +41,7 @@ permissions: {}

jobs:
prepare-matrix:
-name: Prepare operations matrix
+name: benchmark_signed_integer/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -87,7 +87,7 @@ jobs:
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"

setup-instance:
-name: Setup instance (signed-integer-benchmarks)
+name: benchmark_signed_integer/setup-instance
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
@@ -105,7 +105,7 @@ jobs:
profile: bench

signed-integer-benchmarks:
-name: Execute signed integer benchmarks for all operations flavor
+name: benchmark_signed_integer/signed-integer-benchmarks
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -180,7 +180,7 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ matrix.bench_type }}
path: ${{ env.RESULTS_FILENAME }}
@@ -203,7 +203,7 @@ jobs:
SLACK_MESSAGE: "Signed integer full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
-name: Teardown instance (integer-benchmarks)
+name: benchmark_signed_integer/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, signed-integer-benchmarks ]
runs-on: ubuntu-latest
26 .github/workflows/benchmark_tfhe_fft.yml vendored
@@ -1,5 +1,5 @@
# Run FFT benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: FFT benchmarks
+name: benchmark_tfhe_fft

env:
CARGO_TERM_COLOR: always
@@ -27,8 +27,8 @@ on:
permissions: {}

jobs:
-setup-ec2:
-name: Setup EC2 instance (fft-benchmarks)
+setup-instance:
+name: benchmark_tfhe_fft/setup-instance
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
@@ -45,12 +45,12 @@ jobs:
profile: bench

fft-benchmarks:
-name: Execute FFT benchmarks in EC2
-needs: setup-ec2
+name: benchmark_tfhe_fft/fft-benchmarks
+needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: true
-runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
+runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
@@ -94,7 +94,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_fft
path: ${{ env.RESULTS_FILENAME }}
@@ -124,10 +124,10 @@ jobs:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-fft benchmarks failed. (${{ env.ACTION_RUN_URL }})"

-teardown-ec2:
-name: Teardown EC2 instance (fft-benchmarks)
-if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
-needs: [ setup-ec2, fft-benchmarks ]
+teardown-instance:
+name: benchmark_tfhe_fft/teardown-instance
+if: ${{ always() && needs.setup-instance.result != 'skipped' }}
+needs: [ setup-instance, fft-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
@@ -138,7 +138,7 @@ jobs:
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
-label: ${{ needs.setup-ec2.outputs.runner-name }}
+label: ${{ needs.setup-instance.outputs.runner-name }}

- name: Slack Notification
if: ${{ failure() }}
@@ -146,4 +146,4 @@ jobs:
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
-SLACK_MESSAGE: "EC2 teardown (fft-benchmarks) failed. (${{ env.ACTION_RUN_URL }})"
+SLACK_MESSAGE: "Instance teardown (fft-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
26 .github/workflows/benchmark_tfhe_ntt.yml vendored
@@ -1,5 +1,5 @@
# Run NTT benchmarks on an AWS instance and return parsed results to Slab CI bot.
-name: NTT benchmarks
+name: benchmark_tfhe_ntt

env:
CARGO_TERM_COLOR: always
@@ -27,8 +27,8 @@ on:
permissions: {}

jobs:
-setup-ec2:
-name: Setup EC2 instance (ntt-benchmarks)
+setup-instance:
+name: benchmark_tfhe_ntt/setup-instance
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
@@ -45,12 +45,12 @@ jobs:
profile: bench

ntt-benchmarks:
-name: Execute NTT benchmarks in EC2
-needs: setup-ec2
+name: benchmark_tfhe_ntt/ntt-benchmarks
+needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: true
-runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
+runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
@@ -94,7 +94,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_ntt
path: ${{ env.RESULTS_FILENAME }}
@@ -124,10 +124,10 @@ jobs:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-ntt benchmarks failed. (${{ env.ACTION_RUN_URL }})"

-teardown-ec2:
-name: Teardown EC2 instance (ntt-benchmarks)
-if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
-needs: [setup-ec2, ntt-benchmarks]
+teardown-instance:
+name: benchmark_tfhe_ntt/teardown-instance
+if: ${{ always() && needs.setup-instance.result != 'skipped' }}
+needs: [setup-instance, ntt-benchmarks]
runs-on: ubuntu-latest
steps:
- name: Stop instance
@@ -138,7 +138,7 @@ jobs:
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
-label: ${{ needs.setup-ec2.outputs.runner-name }}
+label: ${{ needs.setup-instance.outputs.runner-name }}

- name: Slack Notification
if: ${{ failure() }}
@@ -146,4 +146,4 @@ jobs:
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
-SLACK_MESSAGE: "EC2 teardown (ntt-benchmarks) failed. (${{ env.ACTION_RUN_URL }})"
+SLACK_MESSAGE: "EC2 teardown (ntt-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
13 .github/workflows/benchmark_tfhe_zk_pok.yml vendored
@@ -1,5 +1,5 @@
# Run benchmarks of the tfhe-zk-pok crate on an instance and return parsed results to Slab CI bot.
-name: tfhe-zk-pok benchmarks
+name: benchmark_tfhe_zk_pok

on:
workflow_dispatch:
@@ -35,6 +35,7 @@ permissions: {}

jobs:
should-run:
+name: benchmark_tfhe_zk_pok/should-run
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
((github.event_name == 'push' || github.event_name == 'schedule') && github.repository == 'zama-ai/tfhe-rs')
@@ -50,7 +51,7 @@ jobs:

- name: Check for file changes
id: changed-files
-uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
zk_pok:
@@ -58,7 +59,7 @@ jobs:
- .github/workflows/benchmark_tfhe_zk_pok.yml

setup-instance:
-name: Setup instance (tfhe-zk-pok-benchmarks)
+name: benchmark_tfhe_zk_pok/setup-instance
runs-on: ubuntu-latest
needs: should-run
if: github.event_name == 'workflow_dispatch' ||
@@ -81,7 +82,7 @@ jobs:
profile: bench

tfhe-zk-pok-benchmarks:
-name: Execute tfhe-zk-pok benchmarks
+name: benchmark_tfhe_zk_pok/tfhe-zk-pok-benchmarks
if: needs.setup-instance.result != 'skipped'
needs: setup-instance
concurrency:
@@ -142,7 +143,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_tfhe_zk_pok_${{ env.BENCH_TYPE }}
path: ${{ env.RESULTS_FILENAME }}
@@ -173,7 +174,7 @@ jobs:
SLACK_MESSAGE: "tfhe-zk-pok benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
-name: Teardown instance (tfhe-zk-pok-benchmarks)
+name: benchmark_tfhe_zk_pok/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, tfhe-zk-pok-benchmarks ]
runs-on: ubuntu-latest
30 .github/workflows/benchmark_wasm_client.yml vendored
@@ -1,5 +1,5 @@
# Run WASM client benchmarks on an instance and return parsed results to Slab CI bot.
-name: WASM client benchmarks
+name: benchmark_wasm_client

on:
workflow_dispatch:
@@ -26,6 +26,7 @@ permissions: {}

jobs:
should-run:
+name: benchmark_wasm_client/should-run
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
@@ -44,7 +45,7 @@ jobs:

- name: Check for file changes
id: changed-files
-uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
wasm_bench:
@@ -57,7 +58,7 @@ jobs:
- .github/workflows/wasm_client_benchmark.yml

setup-instance:
-name: Setup instance (wasm-client-benchmarks)
+name: benchmark_wasm_client/setup-instance
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.wasm_bench)
@@ -78,7 +79,7 @@ jobs:
profile: cpu-small

wasm-client-benchmarks:
-name: Execute WASM client benchmarks
+name: benchmark_wasm_client/wasm-client-benchmarks
needs: setup-instance
if: needs.setup-instance.result != 'skipped'
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
@@ -116,7 +117,7 @@ jobs:

- name: Node cache restoration
id: node-cache
-uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
~/.nvm
@@ -129,7 +130,7 @@ jobs:
make install_node

- name: Node cache save
-uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |
@@ -165,21 +166,8 @@ jobs:
env:
REF_NAME: ${{ github.ref_name }}

-# Run these benchmarks only once
-- name: Measure public key and ciphertext sizes in HL Api
-if: matrix.browser == 'chrome'
-run: |
-make measure_hlapi_compact_pk_ct_sizes

-- name: Parse key and ciphertext sizes results
-if: matrix.browser == 'chrome'
-run: |
-python3 ./ci/benchmark_parser.py tfhe-benchmark/hlapi_cpk_and_cctl_sizes.csv "${RESULTS_FILENAME}" \
---key-gen \
---append-results

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_wasm_${{ matrix.browser }}
path: ${{ env.RESULTS_FILENAME }}
@@ -210,7 +198,7 @@ jobs:
SLACK_MESSAGE: "WASM benchmarks (${{ matrix.browser }}) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
-name: Teardown instance (wasm-client-benchmarks)
+name: benchmark_wasm_client/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, wasm-client-benchmarks ]
runs-on: ubuntu-latest
15 .github/workflows/benchmark_zk_pke.yml vendored
@@ -1,5 +1,5 @@
# Run PKE Zero-Knowledge benchmarks on an instance and return parsed results to Slab CI bot.
-name: PKE ZK benchmarks
+name: benchmark_zk_pke

on:
workflow_dispatch:
@@ -36,6 +36,7 @@ permissions: {}

jobs:
should-run:
+name: benchmark_zk_pke/should-run
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
((github.event_name == 'push' || github.event_name == 'schedule') && github.repository == 'zama-ai/tfhe-rs')
@@ -51,7 +52,7 @@ jobs:

- name: Check for file changes
id: changed-files
-uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
zk_pok:
@@ -67,7 +68,7 @@ jobs:
- .github/workflows/zk_pke_benchmark.yml

prepare-matrix:
-name: Prepare operations matrix
+name: benchmark_zk_pke/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -96,7 +97,7 @@ jobs:
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"

setup-instance:
-name: Setup instance (pke-zk-benchmarks)
+name: benchmark_zk_pke/setup-instance
runs-on: ubuntu-latest
needs: [ should-run, prepare-matrix ]
if: github.event_name == 'workflow_dispatch' ||
@@ -119,7 +120,7 @@ jobs:
profile: bench

pke-zk-benchmarks:
-name: Execute PKE ZK benchmarks
+name: benchmark_zk_pke/pke-zk-benchmarks
if: needs.setup-instance.result != 'skipped'
needs: [ prepare-matrix, setup-instance ]
concurrency:
@@ -192,7 +193,7 @@ jobs:
--append-results

- name: Upload parsed results artifact
-uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
+uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_integer_zk_${{ matrix.bench_type }}
path: ${{ env.RESULTS_FILENAME }}
@@ -223,7 +224,7 @@ jobs:
SLACK_MESSAGE: "PKE ZK benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

teardown-instance:
-name: Teardown instance (pke-zk-benchmarks)
+name: benchmark_zk_pke/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, pke-zk-benchmarks ]
runs-on: ubuntu-latest
42 .github/workflows/cargo_audit.yml vendored Normal file
@@ -0,0 +1,42 @@
# Run cargo audit
name: cargo_audit

on:
workflow_dispatch:
schedule:
# runs every day at 4am UTC
- cron: '0 4 * * *'

env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true

permissions: {}

jobs:
audit:
name: cargo_audit/audit
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

- name: Audit dependencies
run: |
make audit_dependencies

- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "cargo-audit finished with status: ${{ job.status }}. ([action run](${{ env.ACTION_RUN_URL }}))"
171
.github/workflows/cargo_build.yml
vendored
171
.github/workflows/cargo_build.yml
vendored
@@ -1,4 +1,4 @@
|
||||
name: Cargo Build TFHE-rs
|
||||
name: cargo_build
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
@@ -18,16 +18,95 @@ permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
cargo-builds:
|
||||
runs-on: ${{ matrix.os }}
|
||||
prepare-parallel-pcc-matrix:
|
||||
name: cargo_build/prepare-parallel-pcc-matrix
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
matrix_command: ${{ steps.set-pcc-commands-matrix.outputs.commands }}
|
||||
steps:
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
|
||||
with:
|
||||
persist-credentials: "false"
|
||||
token: ${{ env.CHECKOUT_TOKEN }}
|
||||
|
||||
# Fetch all the Make recipes that start with `pcc_batch_`
|
||||
- name: Set pcc commands matrix
|
||||
id: set-pcc-commands-matrix
|
||||
run: |
|
||||
COMMANDS=$(grep -oE '^pcc_batch_[^:]*:' Makefile | sed 's/:/\"/; s/^/\"/' | paste -sd,)
|
||||
echo "commands=[${COMMANDS}]" >> "$GITHUB_OUTPUT"
+  parallel-pcc-cpu:
+    name: cargo_build/parallel-pcc-cpu
+    needs: prepare-parallel-pcc-matrix
+    runs-on: large_ubuntu_16
+    strategy:
+      matrix:
+        command: ${{fromJson(needs.prepare-parallel-pcc-matrix.outputs.matrix_command)}}
+      fail-fast: false
+    steps:
+      - name: Checkout tfhe-rs repo
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+        with:
+          persist-credentials: 'false'
+          token: ${{ env.CHECKOUT_TOKEN }}
+
+      - name: Install latest stable
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
+        with:
+          toolchain: stable
+
+      - name: Run pcc checks batch
+        run: |
+          make "${COMMAND}"
+        env:
+          COMMAND: ${{ matrix.command }}
+
+  pcc-hpu:
+    name: cargo_build/pcc-hpu
+    runs-on: large_ubuntu_16
+    steps:
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
+        with:
+          persist-credentials: 'false'
+          token: ${{ env.CHECKOUT_TOKEN }}
+
+      - name: Install latest stable
+        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
+        with:
+          toolchain: stable
+
+      - name: Run Hpu pcc checks
+        run: |
+          make pcc_hpu
+
+  build-tfhe-full:
+    name: cargo_build/build-tfhe-full
+    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        # GitHub macos-latest are now M1 macs, so use ours, we limit what runs so it will be fast
        # even with a few PRs
-        os: [large_ubuntu_16, macos-latest, windows-latest]
+        os: [large_ubuntu_16, macos-latest-xlarge, large_windows_16_latest]
      fail-fast: false
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
        with:
          persist-credentials: 'false'
          token: ${{ env.CHECKOUT_TOKEN }}

      - name: Install latest stable
        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
        with:
          toolchain: stable

      - name: Build Release tfhe full
        run: |
          make build_tfhe_full

+  build:
+    name: cargo_build/build
+    runs-on: large_ubuntu_16
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
        with:
@@ -40,7 +119,6 @@ jobs:
          toolchain: stable

      - name: Install and run newline linter checks
-        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          wget https://github.com/fernandrone/linelint/releases/download/0.0.6/linelint-linux-amd64
          echo "16b70fb7b471d6f95cbdc0b4e5dc2b0ac9e84ba9ecdc488f7bdf13df823aca4b linelint-linux-amd64" > checksum
@@ -49,60 +127,93 @@ jobs:
          mv linelint-linux-amd64 /usr/local/bin/linelint
          make check_newline

      - name: Run pcc checks
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make pcc

      - name: Build tfhe-csprng
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make build_tfhe_csprng

      - name: Build with MSRV
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make build_tfhe_msrv

      - name: Build coverage tests
        run: |
          make build_tfhe_coverage

+  build-layers:
+    name: cargo_build/build-layers
+    runs-on: large_ubuntu_16
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
        with:
          persist-credentials: 'false'
          token: ${{ env.CHECKOUT_TOKEN }}

      - name: Install latest stable
        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
        with:
          toolchain: stable

      - name: Build Release core
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make build_core AVX512_SUPPORT=ON
          make build_core_experimental AVX512_SUPPORT=ON

      - name: Build Release boolean
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make build_boolean

      - name: Build Release shortint
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make build_shortint

      - name: Build Release integer
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make build_integer

      - name: Build Release tfhe full
        run: |
          make build_tfhe_full
+  build-c-api:
+    name: cargo_build/build-c-api
+    runs-on: large_ubuntu_16
    steps:
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
        with:
          persist-credentials: 'false'
          token: ${{ env.CHECKOUT_TOKEN }}

      - name: Install latest stable
        uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
        with:
          toolchain: stable

      - name: Build Release c_api
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make build_c_api

      - name: Build coverage tests
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make build_tfhe_coverage

      - name: Run Hpu pcc checks
        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: |
          make pcc_hpu

  # The wasm build check is a bit annoying to set-up here and is done during the tests in
  # aws_tfhe_tests.yml

+  cargo-builds:
+    name: cargo_build/cargo-builds (bpr)
+    needs: [ parallel-pcc-cpu, pcc-hpu, build-tfhe-full, build, build-layers, build-c-api ]
+    if: ${{ always() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check all builds success
+        if: needs.parallel-pcc-cpu.result == 'success' &&
+            needs.pcc-hpu.result == 'success' &&
+            needs.build-tfhe-full.result == 'success' &&
+            needs.build.result == 'success' &&
+            needs.build-layers.result == 'success' &&
+            needs.build-c-api.result == 'success'
+        run: |
+          echo "All tfhe-rs build checks passed"
+
+      - name: Check builds failure
+        if: needs.parallel-pcc-cpu.result != 'success' ||
+            needs.pcc-hpu.result != 'success' ||
+            needs.build-tfhe-full.result != 'success' ||
+            needs.build.result != 'success' ||
+            needs.build-layers.result != 'success' ||
+            needs.build-c-api.result != 'success'
+        run: |
+          echo "Some tfhe-rs build checks failed"
+          exit 1
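This cargo-builds job exists so branch protection can require a single status check: it fans in every build job, keeps running on failure thanks to always(), and converts "anything not success" into a failing step. A generic sketch of the same gate using the aggregate needs.*.result filter instead of enumerating each dependency (job names here are placeholders, not from this repository):

    gate:
      if: ${{ always() }}
      needs: [ job-a, job-b ]
      runs-on: ubuntu-latest
      steps:
        - name: Fail unless every dependency succeeded
          if: contains(needs.*.result, 'failure') ||
              contains(needs.*.result, 'cancelled') ||
              contains(needs.*.result, 'skipped')
          run: exit 1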
.github/workflows/cargo_build_tfhe_fft.yml (3 lines changed)
@@ -1,5 +1,5 @@
 # Build tfhe-fft
-name: Cargo Build tfhe-fft
+name: cargo_build_tfhe_fft

on:
  pull_request:
@@ -17,6 +17,7 @@ permissions:

jobs:
  cargo-builds-fft:
+    name: cargo_build_tfhe_fft/cargo-builds-fft (bpr)
    runs-on: ${{ matrix.runner_type }}

    strategy:

.github/workflows/cargo_build_tfhe_ntt.yml (3 lines changed)
@@ -1,5 +1,5 @@
 # Build tfhe-ntt
-name: Cargo Build tfhe-ntt
+name: cargo_build_tfhe_ntt

on:
  pull_request:
@@ -17,6 +17,7 @@ permissions:

jobs:
  cargo-builds-ntt:
+    name: cargo_build_tfhe_ntt/cargo-builds-ntt (bpr)
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
.github/workflows/cargo_test_fft.yml (9 lines changed)
@@ -1,5 +1,5 @@
 # Test tfhe-fft
-name: Cargo Test tfhe-fft
+name: cargo_test_fft

on:
  pull_request:
@@ -21,6 +21,7 @@ permissions:

jobs:
  should-run:
+    name: cargo_test_fft/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -36,7 +37,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            fft:
@@ -46,6 +47,7 @@ jobs:
            - '.github/workflows/cargo_test_fft.yml'

  cargo-tests-fft:
+    name: cargo_test_fft/cargo-tests-fft
    needs: should-run
    if: needs.should-run.outputs.fft_test == 'true'
    runs-on: ${{ matrix.runner_type }}
@@ -77,6 +79,7 @@ jobs:
          make test_fft_no_std

  cargo-tests-fft-nightly:
+    name: cargo_test_fft/cargo-tests-fft-nightly
    needs: should-run
    if: needs.should-run.outputs.fft_test == 'true'
    runs-on: ${{ matrix.runner_type }}
@@ -104,6 +107,7 @@ jobs:
          make test_fft_no_std_nightly

  cargo-tests-fft-node-js:
+    name: cargo_test_fft/cargo-tests-fft-node-js
    needs: should-run
    if: needs.should-run.outputs.fft_test == 'true'
    runs-on: ubuntu-latest
@@ -119,6 +123,7 @@ jobs:
          make test_fft_node_js_ci

  cargo-tests-fft-successful:
+    name: cargo_test_fft/cargo-tests-fft-successful (bpr)
    needs: [ should-run, cargo-tests-fft, cargo-tests-fft-nightly, cargo-tests-fft-node-js ]
    if: ${{ always() }}
    runs-on: ubuntu-latest
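The should-run/changed-files pair above is the gating pattern used across the workflows in this diff: files_yaml declares named path groups, the action emits a <group>_any_changed output per group, and downstream jobs key off a job-level output. A condensed sketch of the wiring; the outputs mapping is inferred from the *_any_changed outputs visible elsewhere in this diff, so treat it as an assumption rather than the repository's exact code:

    jobs:
      should-run:
        runs-on: ubuntu-latest
        outputs:
          # assumption: the job republishes the action's generated output
          fft_test: ${{ steps.changed-files.outputs.fft_any_changed }}
        steps:
          - name: Check for file changes
            id: changed-files
            uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
            with:
              files_yaml: |
                fft:
                  - tfhe-fft/**

      cargo-tests-fft:
        needs: should-run
        if: needs.should-run.outputs.fft_test == 'true'  # runs only when fft paths changed
        runs-on: ubuntu-latest
        steps:
          - run: echo "fft sources changed"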
.github/workflows/cargo_test_ntt.yml (11 lines changed)
@@ -1,5 +1,5 @@
 # Test tfhe-ntt
-name: Cargo Test tfhe-ntt
+name: cargo_test_ntt

on:
  pull_request:
@@ -22,6 +22,7 @@ permissions:

jobs:
  should-run:
+    name: cargo_test_ntt/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -37,7 +38,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            ntt:
@@ -47,6 +48,7 @@ jobs:
            - '.github/workflows/cargo_test_ntt.yml'

  setup-instance:
+    name: cargo_test_ntt/setup-instance
    needs: should-run
    if: needs.should-run.outputs.ntt_test == 'true'
    runs-on: ubuntu-latest
@@ -75,6 +77,7 @@ jobs:
          echo "matrix_os=[\"${INSTANCE_TO_USE}\", \"macos-latest\", \"windows-latest\"]" >> "$GITHUB_OUTPUT"

  cargo-tests-ntt:
+    name: cargo_test_ntt/cargo-tests-ntt
    needs: [should-run, setup-instance]
    if: needs.should-run.outputs.ntt_test == 'true'
    runs-on: ${{ matrix.os }}
@@ -101,6 +104,7 @@ jobs:
      run: make test_ntt_no_std

  cargo-tests-ntt-nightly:
+    name: cargo_test_ntt/cargo-tests-ntt-nightly
    needs: [should-run, setup-instance]
    if: needs.should-run.outputs.ntt_test == 'true'
    runs-on: ${{ matrix.os }}
@@ -126,6 +130,7 @@ jobs:
      run: make test_ntt_no_std_nightly

  cargo-tests-ntt-successful:
+    name: cargo_test_ntt/cargo-tests-ntt-successful (bpr)
    needs: [should-run, cargo-tests-ntt, cargo-tests-ntt-nightly]
    if: ${{ always() }}
    runs-on: ubuntu-latest
@@ -151,7 +156,7 @@ jobs:
      exit 1

  teardown-instance:
-    name: Teardown instance (cargo-tests-ntt-successful)
+    name: cargo_test_ntt/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [setup-instance, cargo-tests-ntt-successful]
    runs-on: ubuntu-latest
.github/workflows/check_commit.yml (4 lines changed)
@@ -1,5 +1,5 @@
 # Check commit and PR compliance
-name: Check commit and PR compliance
+name: check_commit
on:
  pull_request:

@@ -7,7 +7,7 @@ permissions: {}

jobs:
  check-commit-pr:
-    name: Check commit and PR
+    name: check_commit/check-commit-pr (bpr)
    runs-on: ubuntu-latest
    permissions:
      contents: read
.github/workflows/ci_lint.yml (6 lines changed)
@@ -1,5 +1,5 @@
 # Lint and check CI
-name: CI Lint and Checks
+name: ci_lint

on:
  pull_request:
@@ -14,7 +14,7 @@ permissions:

jobs:
  lint-check:
-    name: Lint and checks
+    name: ci_lint/lint-check (bpr)
    runs-on: ubuntu-latest
    steps:
      - name: Checkout tfhe-rs
@@ -42,7 +42,7 @@ jobs:
          GH_TOKEN: ${{ env.CHECKOUT_TOKEN }}

      - name: Ensure SHA pinned actions
-        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@fc87bb5b5a97953d987372e74478de634726b3e5 # v3.0.25
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@9e9574ef04ea69da568d6249bd69539ccc704e74 # v4.0.0
        with:
          allowlist: |
            slsa-framework/slsa-github-generator
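The bumped check enforces the pinning convention visible throughout this diff: third-party actions are referenced by full commit SHA, with the human-readable tag kept in a trailing comment, and only allowlisted actions (here the SLSA generator, which has to be called by tag) may use a mutable ref. An illustrative before/after:

    # Rejected by the check: a tag is mutable and can be re-pointed after review.
    - uses: actions/checkout@v5

    # Accepted: immutable commit SHA, with the tag recorded for humans.
    - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0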
.github/workflows/code_coverage.yml (20 lines changed)
@@ -1,4 +1,4 @@
-name: Code Coverage
+name: code_coverage

env:
  CARGO_TERM_COLOR: always
@@ -22,7 +22,7 @@ permissions:

jobs:
  setup-instance:
-    name: Setup instance (code-coverage)
+    name: code_coverage/setup-instance
    runs-on: ubuntu-latest
    outputs:
      runner-name: ${{ steps.start-instance.outputs.label }}
@@ -38,8 +38,8 @@ jobs:
      backend: aws
      profile: cpu-small

-  code-coverage:
-    name: Code coverage tests
+  code-coverage-tests:
+    name: code_coverage/code-coverage-tests
    needs: setup-instance
    concurrency:
      group: ${{ github.workflow_ref }}_${{ github.event_name }}
@@ -60,7 +60,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            tfhe:
@@ -90,7 +90,7 @@ jobs:
          make test_shortint_cov

      - name: Upload tfhe coverage to Codecov
-        uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00
+        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
        if: steps.changed-files.outputs.tfhe_any_changed == 'true'
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
@@ -104,7 +104,7 @@ jobs:
          make test_integer_cov

      - name: Upload tfhe coverage to Codecov
-        uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00
+        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
        if: steps.changed-files.outputs.tfhe_any_changed == 'true'
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
@@ -121,9 +121,9 @@ jobs:
          SLACK_MESSAGE: "Code coverage finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

  teardown-instance:
-    name: Teardown instance (code-coverage)
+    name: code_coverage/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
-    needs: [ setup-instance, code-coverage ]
+    needs: [ setup-instance, code-coverage-tests ]
    runs-on: ubuntu-latest
    steps:
      - name: Stop instance
@@ -142,4 +142,4 @@ jobs:
        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
        env:
          SLACK_COLOR: ${{ job.status }}
-          SLACK_MESSAGE: "Instance teardown (code-coverage) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
+          SLACK_MESSAGE: "Instance teardown (code-coverage-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
.github/workflows/csprng_randomness_tests.yml
@@ -1,4 +1,4 @@
-name: CSPRNG randomness testing Workflow
+name: csprng_randomness_tests

env:
  CARGO_TERM_COLOR: always
@@ -26,7 +26,7 @@ permissions:

jobs:
  setup-instance:
-    name: Setup instance (csprng-randomness-tests)
+    name: csprng_randomness_tests/setup-instance
    if: ${{ github.event_name == 'workflow_dispatch' || contains(github.event.label.name, 'approved') }}
    runs-on: ubuntu-latest
    outputs:
@@ -52,7 +52,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  csprng-randomness-tests:
-    name: CSPRNG randomness tests
+    name: csprng_randomness_tests/csprng-randomness-tests
    needs: setup-instance
    concurrency:
      group: ${{ github.workflow_ref }}
@@ -83,7 +83,7 @@ jobs:
          SLACK_MESSAGE: "tfhe-csprng randomness check finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

  teardown-instance:
-    name: Teardown instance (csprng-randomness-tests)
+    name: csprng_randomness_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, csprng-randomness-tests ]
    runs-on: ubuntu-latest
.github/workflows/gpu_4090_tests.yml (4 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend on an RTX 4090 machine
-name: Cuda - 4090 full tests
+name: gpu_4090_tests

env:
  CARGO_TERM_COLOR: always
@@ -27,7 +27,7 @@ permissions:

jobs:
  cuda-tests-linux:
-    name: CUDA tests (RTX 4090)
+    name: gpu_4090_tests/cuda-tests-linux
    if: github.event_name == 'workflow_dispatch' ||
        contains(github.event.label.name, '4090_test') ||
        (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
.github/workflows/gpu_code_validation_tests.yml (10 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend on an AWS instance
-name: Cuda - CPU Memory Checks
+name: gpu_code_validation_tests

env:
  CARGO_TERM_COLOR: always
@@ -31,7 +31,7 @@ permissions:

jobs:
  setup-instance:
-    name: Setup instance (cuda-tests)
+    name: gpu_code_validation_tests/setup-instance
    runs-on: ubuntu-latest
    if: github.event_name != 'pull_request' ||
        (github.event.action == 'labeled' && github.event.label.name == 'approved')
@@ -58,7 +58,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA Memory Checks tests
+    name: gpu_code_validation_tests/cuda-tests-linux
    needs: [ setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -105,7 +105,7 @@ jobs:
      make test_high_level_api_gpu_valgrind

  slack-notify:
-    name: Slack Notification
+    name: gpu_code_validation_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -127,7 +127,7 @@ jobs:
          SLACK_MESSAGE: "GPU Memory Checks tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-tests)
+    name: gpu_code_validation_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
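Every GPU workflow in this diff shares this instance lifecycle: setup-instance provisions a runner (a remote instance via slab, or a GitHub runner group for pull requests from forks), the test job targets the returned label, and an always()-guarded teardown releases it. A condensed sketch assembled from the fragments above; the provisioning and teardown step internals are not shown in this excerpt, so their bodies are placeholders:

    jobs:
      setup-instance:
        runs-on: ubuntu-latest
        outputs:
          runner-name: ${{ steps.start-instance.outputs.label }}
        steps:
          - name: Start instance
            id: start-instance
            run: echo "label=..." >> "$GITHUB_OUTPUT"  # placeholder for the real provisioning step

      cuda-tests-linux:
        needs: [ setup-instance ]
        runs-on: ${{ needs.setup-instance.outputs.runner-name }}
        steps:
          - run: make test_high_level_api_gpu_valgrind

      teardown-instance:
        # always() releases the instance even when tests fail; the result
        # guard keeps teardown from running if provisioning never happened.
        if: ${{ always() && needs.setup-instance.result == 'success' }}
        needs: [ setup-instance, cuda-tests-linux ]
        runs-on: ubuntu-latest
        steps:
          - name: Stop instance
            run: echo "stopping"  # placeholder for the real teardown step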
.github/workflows/gpu_fast_h100_tests.yml (13 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend on an H100 VM on hyperstack
-name: Cuda - Fast tests on H100
+name: gpu_fast_h100_tests

env:
  CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_fast_h100_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -45,7 +46,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -66,7 +67,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-h100-tests)
+    name: gpu_fast_h100_tests/setup-instance
    needs: should-run
    if: github.event_name != 'pull_request' ||
        (github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -108,7 +109,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA H100 tests
+    name: gpu_fast_h100_tests/cuda-tests-linux
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -165,7 +166,7 @@ jobs:
      BIG_TESTS_INSTANCE=TRUE make test_high_level_api_gpu

  slack-notify:
-    name: Slack Notification
+    name: gpu_fast_h100_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -187,7 +188,7 @@ jobs:
          SLACK_MESSAGE: "Fast H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-h100-tests)
+    name: gpu_fast_h100_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_fast_tests.yml (13 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend on an AWS instance
-name: Cuda - Fast tests
+name: gpu_fast_tests

env:
  CARGO_TERM_COLOR: always
@@ -29,6 +29,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_fast_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -44,7 +45,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -65,7 +66,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-tests)
+    name: gpu_fast_tests/setup-instance
    needs: should-run
    if: github.event_name == 'workflow_dispatch' ||
        needs.should-run.outputs.gpu_test == 'true'
@@ -93,7 +94,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA tests
+    name: gpu_fast_tests/cuda-tests-linux
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -151,7 +152,7 @@ jobs:
      make test_high_level_api_gpu

  slack-notify:
-    name: Slack Notification
+    name: gpu_fast_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -173,7 +174,7 @@ jobs:
          SLACK_MESSAGE: "Base GPU tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-tests)
+    name: gpu_fast_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_full_h100_tests.yml (10 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend on an H100 VM on hyperstack
-name: Cuda - Full tests on H100
+name: gpu_full_h100_tests

env:
  CARGO_TERM_COLOR: always
@@ -20,7 +20,7 @@ permissions: {}

jobs:
  setup-instance:
-    name: Setup instance (cuda-h100-tests)
+    name: gpu_full_h100_tests/setup-instance
    runs-on: ubuntu-latest
    outputs:
      # Use permanent remote instance label first as on-demand remote instance label output is set before the end of start-remote-instance step.
@@ -50,7 +50,7 @@ jobs:
      echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA H100 tests
+    name: gpu_full_h100_tests/cuda-tests-linux
    needs: [ setup-instance ]
    concurrency:
      group: ${{ github.workflow_ref }}
@@ -102,7 +102,7 @@ jobs:
      make test_high_level_api_gpu

  slack-notify:
-    name: Slack Notification
+    name: gpu_full_h100_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ failure() }}
@@ -115,7 +115,7 @@ jobs:
          SLACK_MESSAGE: "Full H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"

  teardown-instance:
-    name: Teardown instance (cuda-h100-tests)
+    name: gpu_full_h100_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_full_multi_gpu_tests.yml (15 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend on an AWS instance
-name: Cuda - Full tests multi-GPU
+name: gpu_full_multi_gpu_tests

env:
  CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_full_multi_gpu_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -45,7 +46,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -66,7 +67,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-tests-multi-gpu)
+    name: gpu_full_multi_gpu_tests/setup-instance
    needs: should-run
    if: github.event_name != 'pull_request' ||
        (github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -85,7 +86,7 @@ jobs:
          slab-url: ${{ secrets.SLAB_BASE_URL }}
          job-secret: ${{ secrets.JOB_SECRET }}
          backend: hyperstack
-          profile: multi-gpu-test
+          profile: 4-l40

      # This instance will be spawned especially for pull-request from forked repository
      - name: Start GitHub instance
@@ -95,7 +96,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA multi-GPU tests
+    name: gpu_full_multi_gpu_tests/cuda-tests-linux
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -154,7 +155,7 @@ jobs:
      make test_high_level_api_gpu

  slack-notify:
-    name: Slack Notification
+    name: gpu_full_multi_gpu_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -176,7 +177,7 @@ jobs:
          SLACK_MESSAGE: "Multi-GPU tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-tests-multi-gpu)
+    name: gpu_full_multi_gpu_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_integer_long_run_tests.yml (12 lines changed)
@@ -1,4 +1,4 @@
-name: Cuda - Long Run Tests on GPU
+name: gpu_integer_long_run_tests

env:
  CARGO_TERM_COLOR: always
@@ -27,7 +27,7 @@ permissions:

jobs:
  setup-instance:
-    name: Setup instance (gpu-tests)
+    name: gpu_integer_long_run_tests/setup-instance
    if: github.event_name != 'schedule' ||
        (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
    runs-on: ubuntu-latest
@@ -43,10 +43,10 @@ jobs:
          slab-url: ${{ secrets.SLAB_BASE_URL }}
          job-secret: ${{ secrets.JOB_SECRET }}
          backend: hyperstack
-          profile: multi-gpu-test
+          profile: 4-l40

  cuda-tests:
-    name: Long run GPU tests
+    name: gpu_integer_long_run_tests/cuda-tests
    needs: [ setup-instance ]
    concurrency:
      group: ${{ github.workflow_ref }}_${{github.event_name}}
@@ -90,7 +90,7 @@ jobs:
      fi

  slack-notify:
-    name: Slack Notification
+    name: gpu_integer_long_run_tests/slack-notify
    needs: [ setup-instance, cuda-tests ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests.result != 'skipped' && failure() }}
@@ -103,7 +103,7 @@ jobs:
          SLACK_MESSAGE: "Integer GPU long run tests finished with status: ${{ needs.cuda-tests.result }}. (${{ env.ACTION_RUN_URL }})"

  teardown-instance:
-    name: Teardown instance (gpu-tests)
+    name: gpu_integer_long_run_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-tests ]
    runs-on: ubuntu-latest
.github/workflows/gpu_memory_sanitizer.yml (10 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend on an AWS instance
-name: Cuda - GPU Memory Checks
+name: gpu_memory_sanitizer

env:
  CARGO_TERM_COLOR: always
@@ -30,7 +30,7 @@ permissions:

jobs:
  setup-instance:
-    name: Setup instance (cuda-tests)
+    name: gpu_memory_sanitizer/setup-instance
    runs-on: ubuntu-latest
    if: github.event_name != 'pull_request' ||
        (github.event.action == 'labeled' && github.event.label.name == 'approved')
@@ -57,7 +57,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA Memory Checks tests
+    name: gpu_memory_sanitizer/cuda-tests-linux
    needs: [ setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -102,7 +102,7 @@ jobs:
      make test_high_level_api_gpu_sanitizer

  slack-notify:
-    name: Slack Notification
+    name: gpu_memory_sanitizer/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -124,7 +124,7 @@ jobs:
          SLACK_MESSAGE: "GPU Memory Checks tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-tests)
+    name: gpu_memory_sanitizer/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_pcc.yml (8 lines changed)
@@ -1,5 +1,5 @@
 # Perform tfhe-cuda-backend post-commit checks on an AWS instance
-name: Cuda - Post-commit Checks
+name: gpu_pcc

env:
  CARGO_TERM_COLOR: always
@@ -28,7 +28,7 @@ permissions:

jobs:
  setup-instance:
-    name: Setup instance (cuda-pcc)
+    name: gpu_pcc/setup-instance
    runs-on: ubuntu-latest
    outputs:
      runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
@@ -53,7 +53,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-pcc:
-    name: CUDA post-commit checks
+    name: gpu_pcc/cuda-pcc (bpr)
    needs: setup-instance
    concurrency:
      group: ${{ github.workflow_ref }}
@@ -149,7 +149,7 @@ jobs:
          SLACK_MESSAGE: "CUDA AWS post-commit checks finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-pcc)
+    name: cuda_pcc/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-pcc ]
    runs-on: ubuntu-latest
.github/workflows/gpu_signed_integer_classic_tests.yml
@@ -1,5 +1,5 @@
 # Signed integer GPU tests on an RTXA6000 VM on hyperstack with classical PBS
-name: Cuda - Signed integer tests with classical PBS
+name: gpu_signed_integer_classic_tests

env:
  CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_signed_integer_classic_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -45,7 +46,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -66,7 +67,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-signed-classic-tests)
+    name: gpu_signed_integer_classic_tests/setup-instance
    needs: should-run
    if: github.event_name != 'pull_request' ||
        (github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -95,7 +96,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA signed integer tests with classical PBS
+    name: gpu_signed_integer_classic_tests/cuda-tests-linux
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -137,7 +138,7 @@ jobs:
      BIG_TESTS_INSTANCE=TRUE make test_signed_integer_gpu_ci

  slack-notify:
-    name: Slack Notification
+    name: gpu_signed_integer_classic_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -159,7 +160,7 @@ jobs:
          SLACK_MESSAGE: "Integer GPU signed integer tests with classical PBS finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-signed-classic-tests)
+    name: gpu_signed_integer_classic_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_signed_integer_h100_tests.yml
@@ -1,5 +1,5 @@
 # Signed integer GPU tests on an H100 VM on hyperstack
-name: Cuda - Signed integer tests on H100
+name: gpu_signed_integer_h100_tests

env:
  CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_signed_integer_h100_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -45,7 +46,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -66,7 +67,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-h100-tests)
+    name: gpu_signed_integer_h100_tests/setup-instance
    needs: should-run
    if: github.event_name != 'pull_request' ||
        (github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -108,7 +109,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA H100 signed integer tests
+    name: gpu_signed_integer_h100_tests/cuda-tests-linux
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -151,7 +152,7 @@ jobs:
      BIG_TESTS_INSTANCE=TRUE make test_signed_integer_multi_bit_gpu_ci

  slack-notify:
-    name: Slack Notification
+    name: gpu_signed_integer_h100_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -173,7 +174,7 @@ jobs:
          SLACK_MESSAGE: "Integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-h100-tests)
+    name: gpu_signed_integer_h100_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_signed_integer_tests.yml (13 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend signed integer on an AWS instance
-name: Cuda - Signed integer tests
+name: gpu_signed_integer_tests

env:
  CARGO_TERM_COLOR: always
@@ -31,6 +31,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_signed_integer_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -46,7 +47,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -67,7 +68,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-signed-integer-tests)
+    name: gpu_signed_integer_tests/setup-instance
    runs-on: ubuntu-latest
    needs: should-run
    if: (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
@@ -96,7 +97,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-signed-integer-tests:
-    name: CUDA signed integer tests
+    name: gpu_signed_integer_tests/cuda-signed-integer-tests
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -146,7 +147,7 @@ jobs:
      make test_signed_integer_multi_bit_gpu_ci

  slack-notify:
-    name: Slack Notification
+    name: gpu_signed_integer_tests/slack-notify
    needs: [ setup-instance, cuda-signed-integer-tests ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-signed-integer-tests.result != 'skipped' && failure() }}
@@ -168,7 +169,7 @@ jobs:
          SLACK_MESSAGE: "Signed GPU tests finished with status: ${{ needs.cuda-signed-integer-tests.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-tests)
+    name: gpu_signed_integer_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-signed-integer-tests ]
    runs-on: ubuntu-latest
.github/workflows/gpu_unsigned_integer_classic_tests.yml
@@ -1,5 +1,5 @@
 # Test unsigned integers on an RTXA6000 VM on hyperstack with the classical PBS
-name: Cuda - Unsigned integer tests with classical PBS
+name: gpu_unsigned_integer_classic_tests

env:
  CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_unsigned_integer_classic_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -45,7 +46,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -66,7 +67,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-unsigned-classic-tests)
+    name: gpu_unsigned_integer_classic_tests/setup-instance
    needs: should-run
    if: github.event_name == 'workflow_dispatch' ||
        (github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -95,7 +96,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA unsigned integer tests with classical PBS
+    name: gpu_unsigned_integer_classic_tests/cuda-tests-linux
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -137,7 +138,7 @@ jobs:
      BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_gpu_ci

  slack-notify:
-    name: Slack Notification
+    name: gpu_unsigned_integer_classic_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -159,7 +160,7 @@ jobs:
          SLACK_MESSAGE: "Unsigned integer GPU classic tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-unsigned-classic-tests)
+    name: gpu_unsigned_integer_classic_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_unsigned_integer_h100_tests.yml
@@ -1,5 +1,5 @@
 # Test unsigned integers on an H100 VM on hyperstack
-name: Cuda - Unsigned integer tests on H100
+name: gpu_unsigned_integer_h100_tests/

env:
  CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_unsigned_integer_h100_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -45,7 +46,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -66,7 +67,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-h100-tests)
+    name: gpu_unsigned_integer_h100_tests/setup-instance
    needs: should-run
    if: github.event_name == 'workflow_dispatch' ||
        (github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -108,7 +109,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-tests-linux:
-    name: CUDA H100 unsigned integer tests
+    name: gpu_unsigned_integer_h100_tests/cuda-tests-linux
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -151,7 +152,7 @@ jobs:
      BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_multi_bit_gpu_ci

  slack-notify:
-    name: Slack Notification
+    name: gpu_unsigned_integer_h100_tests/slack-notify
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -173,7 +174,7 @@ jobs:
          SLACK_MESSAGE: "Unsigned integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-h100-tests)
+    name: gpu_unsigned_integer_h100_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
    needs: [ setup-instance, cuda-tests-linux ]
    runs-on: ubuntu-latest
.github/workflows/gpu_unsigned_integer_tests.yml (13 lines changed)
@@ -1,5 +1,5 @@
 # Compile and test tfhe-cuda-backend unsigned integer on an AWS instance
-name: Cuda - Unsigned integer tests
+name: gpu_unsigned_integer_tests

env:
  CARGO_TERM_COLOR: always
@@ -31,6 +31,7 @@ permissions:

jobs:
  should-run:
+    name: gpu_unsigned_integer_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -46,7 +47,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            gpu:
@@ -67,7 +68,7 @@ jobs:
            - ci/slab.toml

  setup-instance:
-    name: Setup instance (cuda-unsigned-integer-tests)
+    name: gpu_unsigned_integer_tests/setup-instance
    runs-on: ubuntu-latest
    needs: should-run
    if: (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
@@ -96,7 +97,7 @@ jobs:
      echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"

  cuda-unsigned-integer-tests:
-    name: CUDA unsigned integer tests
+    name: gpu_unsigned_integer_tests/cuda-unsigned-integer-tests
    needs: [ should-run, setup-instance ]
    if: github.event_name != 'pull_request' ||
        (github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -146,7 +147,7 @@ jobs:
      make test_unsigned_integer_multi_bit_gpu_ci

  slack-notify:
-    name: Slack Notification
+    name: gpu_unsigned_integer_tests/slack-notify
    needs: [ setup-instance, cuda-unsigned-integer-tests ]
    runs-on: ubuntu-latest
    if: ${{ always() && needs.cuda-unsigned-integer-tests.result != 'skipped' && failure() }}
@@ -168,7 +169,7 @@ jobs:
          SLACK_MESSAGE: "Unsigned integer GPU tests finished with status: ${{ needs.cuda-unsigned-integer-tests.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"

  teardown-instance:
-    name: Teardown instance (cuda-tests)
+    name: gpu_unsigned_integer_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cuda-unsigned-integer-tests ]
    runs-on: ubuntu-latest
.github/workflows/hpu_hlapi_tests.yml (8 lines changed)
@@ -1,5 +1,5 @@
-# Test tfhe-fft
-name: Cargo Test HLAPI HPU
+# Test HPU backend HLAPI layer
+name: hpu_hlapi_tests

on:
  pull_request:
@@ -21,6 +21,7 @@ permissions: { }

jobs:
  should-run:
+    name: hpu_hlapi_tests/should-run
    runs-on: ubuntu-latest
    permissions:
      pull-requests: read
@@ -36,7 +37,7 @@ jobs:

      - name: Check for file changes
        id: changed-files
-        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
        with:
          files_yaml: |
            hpu:
@@ -46,6 +47,7 @@ jobs:
            - mockups/tfhe-hpu-mockup/**

  cargo-tests-hpu:
+    name: hpu_hlapi_tests/cargo-tests-hpu (bpr)
    needs: should-run
    if: needs.should-run.outputs.hpu_test == 'true'
    runs-on: large_ubuntu_16
.github/workflows/integer_long_run_tests.yml (8 lines changed)
@@ -1,4 +1,4 @@
-name: AWS Long Run Tests on CPU
+name: integer_long_run_tests

env:
  CARGO_TERM_COLOR: always
@@ -23,7 +23,7 @@ permissions: {}

jobs:
  setup-instance:
-    name: Setup instance (cpu-tests)
+    name: integer_long_run_tests/setup-instance
    if: github.event_name != 'schedule' ||
        (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
    runs-on: ubuntu-latest
@@ -42,7 +42,7 @@ jobs:
      profile: cpu-big

  cpu-tests:
-    name: Long run CPU tests
+    name: integer_long_run_tests/cpu-tests
    needs: [ setup-instance ]
    concurrency:
      group: ${{ github.workflow_ref }}_${{github.event_name}}
@@ -74,7 +74,7 @@ jobs:
          SLACK_MESSAGE: "CPU long run tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

  teardown-instance:
-    name: Teardown instance (cpu-tests)
+    name: integer_long_run_tests/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, cpu-tests ]
    runs-on: ubuntu-latest
.github/workflows/m1_tests.yml (5 lines changed)
@@ -1,4 +1,4 @@
-name: Tests on M1 CPU
+name: m1_tests

on:
  workflow_dispatch:
@@ -32,6 +32,7 @@ permissions:

jobs:
  cargo-builds-m1:
+    name: m1_tests/cargo-builds-m1
    if: ${{ (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
        github.event_name == 'workflow_dispatch' ||
        contains(github.event.label.name, 'm1_test') }}
@@ -178,7 +179,7 @@ jobs:
      make test_integer_multi_bit_ci

  remove_label:
-    name: Remove m1_test label
+    name: m1_tests/remove_label
    runs-on: ubuntu-latest
    needs:
      - cargo-builds-m1
.github/workflows/make_release.yml (deleted, 172 lines)
@@ -1,172 +0,0 @@
-# Publish new release of tfhe-rs on various platform.
-name: Publish release
-
-on:
-  workflow_dispatch:
-    inputs:
-      dry_run:
-        description: "Dry-run"
-        type: boolean
-        default: true
-      push_to_crates:
-        description: "Push to crate"
-        type: boolean
-        default: true
-      push_web_package:
-        description: "Push web js package"
-        type: boolean
-        default: true
-      push_node_package:
-        description: "Push node js package"
-        type: boolean
-        default: true
-      npm_latest_tag:
-        description: "Set NPM tag as latest"
-        type: boolean
-        default: false
-
-env:
-  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
-  NPM_TAG: ""
-  SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
-  SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
-  SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
-  SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
-
-permissions: {}
-
-jobs:
-  verify_tag:
-    uses: ./.github/workflows/verify_tagged_commit.yml
-    secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
-      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
-
-  package:
-    runs-on: ubuntu-latest
-    needs: verify_tag
-    outputs:
-      hash: ${{ steps.hash.outputs.hash }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Prepare package
-        run: |
-          cargo package -p tfhe
-      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: crate
-          path: target/package/*.crate
-      - name: generate hash
-        id: hash
-        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-  provenance:
-    if: ${{ !inputs.dry_run }}
-    needs: [package]
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
-    permissions:
-      # Needed to detect the GitHub Actions environment
-      actions: read
-      # Needed to create the provenance via GitHub OIDC
-      id-token: write
-      # Needed to upload assets/artifacts
-      contents: write
-    with:
-      # SHA-256 hashes of the Crate package.
-      base64-subjects: ${{ needs.package.outputs.hash }}
-
-  publish_release:
-    name: Publish Release
-    needs: [package] # for comparing hashes
-    runs-on: ubuntu-latest
-    # For provenance of npmjs publish
-    permissions:
-      contents: read
-      id-token: write # also needed for OIDC token exchange on crates.io
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Create NPM version tag
-        if: ${{ inputs.npm_latest_tag }}
-        run: |
-          echo "NPM_TAG=latest" >> "${GITHUB_ENV}"
-      - name: Download artifact
-        uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
-        with:
-          name: crate
-          path: target/package
-      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
-        id: auth
-      - name: Publish crate.io package
-        if: ${{ inputs.push_to_crates }}
-        env:
-          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
-          DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
-        run: |
-          # DRY_RUN expansion cannot be double quoted when variable contains empty string otherwise cargo publish
-          # would fail. This is safe since DRY_RUN is handled in the env section above.
-          # shellcheck disable=SC2086
-          cargo publish -p tfhe ${DRY_RUN}
|
||||
|
||||
- name: Generate hash
|
||||
id: published_hash
|
||||
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Slack notification (hashes comparison)
|
||||
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
|
||||
env:
|
||||
SLACK_COLOR: failure
|
||||
SLACK_MESSAGE: "SLSA tfhe crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
|
||||
|
||||
- name: Build web package
|
||||
if: ${{ inputs.push_web_package }}
|
||||
run: |
|
||||
make build_web_js_api_parallel
|
||||
|
||||
- name: Publish web package
|
||||
if: ${{ inputs.push_web_package }}
|
||||
uses: JS-DevTools/npm-publish@19c28f1ef146469e409470805ea4279d47c3d35c
|
||||
with:
|
||||
token: ${{ secrets.NPM_TOKEN }}
|
||||
package: tfhe/pkg/package.json
|
||||
dry-run: ${{ inputs.dry_run }}
|
||||
tag: ${{ env.NPM_TAG }}
|
||||
provenance: true
|
||||
|
||||
- name: Build Node package
|
||||
if: ${{ inputs.push_node_package }}
|
||||
run: |
|
||||
rm -rf tfhe/pkg
|
||||
|
||||
make build_node_js_api
|
||||
sed -i 's/"tfhe"/"node-tfhe"/g' tfhe/pkg/package.json
|
||||
|
||||
- name: Publish Node package
|
||||
if: ${{ inputs.push_node_package }}
|
||||
uses: JS-DevTools/npm-publish@19c28f1ef146469e409470805ea4279d47c3d35c
|
||||
with:
|
||||
token: ${{ secrets.NPM_TOKEN }}
|
||||
package: tfhe/pkg/package.json
|
||||
dry-run: ${{ inputs.dry_run }}
|
||||
tag: ${{ env.NPM_TAG }}
|
||||
provenance: true
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_MESSAGE: "tfhe release failed: (${{ env.ACTION_RUN_URL }})"

143 .github/workflows/make_release_common.yml vendored Normal file
@@ -0,0 +1,143 @@
# Common workflow to make crate release
name: make_release_common

on:
  workflow_call:
    inputs:
      package-name:
        type: string
        required: true
      dry-run:
        type: boolean
        default: true
    secrets:
      REPO_CHECKOUT_TOKEN:
        required: true
      SLACK_CHANNEL:
        required: true
      BOT_USERNAME:
        required: true
      SLACK_WEBHOOK:
        required: true
      ALLOWED_TEAM:
        required: true
      READ_ORG_TOKEN:
        required: true

env:
  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
  SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
  SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
  SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
  SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

permissions: {}

jobs:
  verify-triggering-actor:
    name: make_release_common/verify-triggering-actor
    if: startsWith(github.ref, 'refs/tags/')
    uses: ./.github/workflows/verify_triggering_actor.yml
    secrets:
      ALLOWED_TEAM: ${{ secrets.ALLOWED_TEAM }}
      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

  package:
    name: make_release_common/package
    runs-on: ubuntu-latest
    needs: verify-triggering-actor
    outputs:
      hash: ${{ steps.hash.outputs.hash }}
    steps:
      - name: Checkout
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          fetch-depth: 0
          persist-credentials: 'false'
          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
      - name: Prepare package
        env:
          PACKAGE: ${{ inputs.package-name }}
        run: |
          cargo package -p "${PACKAGE}"
      - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
        with:
          name: crate-${{ inputs.package-name }}
          path: target/package/*.crate
      - name: generate hash
        id: hash
        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"


  provenance:
    name: make_release_common/provenance
    if: ${{ !inputs.dry-run }}
    needs: package
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
    permissions:
      # Needed to detect the GitHub Actions environment
      actions: read
      # Needed to create the provenance via GitHub OIDC
      id-token: write
      # Needed to upload assets/artifacts
      contents: write
    with:
      # SHA-256 hashes of the Crate package.
      base64-subjects: ${{ needs.package.outputs.hash }}


  publish_release:
    name: make_release_common/publish-release
    needs: package
    runs-on: ubuntu-latest
    permissions:
      # Needed for OIDC token exchange on crates.io
      id-token: write
    steps:
      - name: Checkout
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          fetch-depth: 0
          persist-credentials: 'false'
          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

      - name: Download artifact
        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
        with:
          name: crate-${{ inputs.package-name }}
          path: target/package

      - name: Authenticate on registry
        uses: rust-lang/crates-io-auth-action@041cce5b4b821e6b0ebc9c9c38b58cac4e34dcc2 # v1.0.2
        id: auth

      - name: Publish crate.io package
        env:
          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
          PACKAGE: ${{ inputs.package-name }}
          DRY_RUN: ${{ inputs.dry-run && '--dry-run' || '' }}
        run: |
          # DRY_RUN expansion cannot be double quoted when variable contains empty string otherwise cargo publish
          # would fail. This is safe since DRY_RUN is handled in the env section above.
          # shellcheck disable=SC2086
          cargo publish -p "${PACKAGE}" ${DRY_RUN}

      - name: Generate hash
        id: published_hash
        run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"

      - name: Slack notification (hashes comparison)
        if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
        continue-on-error: true
        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
        env:
          SLACK_COLOR: failure
          SLACK_MESSAGE: "SLSA ${{ inputs.package-name }} - hash comparison failure: (${{ env.ACTION_RUN_URL }})"

      - name: Slack Notification
        if: ${{ failure() }}
        continue-on-error: true
        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
        env:
          SLACK_COLOR: ${{ job.status }}
          SLACK_MESSAGE: "${{ inputs.package-name }} release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

35 .github/workflows/make_release_cuda.yml vendored
@@ -1,4 +1,4 @@
-name: Publish CUDA release
+name: make_release_cuda

on:
  workflow_dispatch:

@@ -18,15 +18,17 @@ env:
permissions: {}

jobs:
-  verify_tag:
-    uses: ./.github/workflows/verify_tagged_commit.yml
+  verify-triggering-actor:
+    name: make_release_cuda/verify-triggering-actor
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_triggering_actor.yml
    secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

  setup-instance:
-    name: Setup instance (publish-cuda-release)
-    needs: verify_tag
+    name: make_release_cuda/setup-instance
+    needs: verify-triggering-actor
    runs-on: ubuntu-latest
    outputs:
      runner-name: ${{ steps.start-instance.outputs.label }}

@@ -43,7 +45,7 @@ jobs:
          profile: gpu-build

  package:
-    name: Package CUDA Release for provenance
+    name: make_release_cuda/package
    needs: setup-instance
    runs-on: ${{ needs.setup-instance.outputs.runner-name }}
    outputs:

@@ -99,11 +101,18 @@ jobs:
      - name: Prepare package
        run: |
          cargo package -p tfhe-cuda-backend
+
+      - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+        with:
+          name: crate-tfhe-cuda-backend
+          path: target/package/*.crate
+
      - name: generate hash
        id: hash
        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"

  provenance:
+    name: make_release_cuda/provenance
    if: ${{ !inputs.dry_run }}
    needs: [package]
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0

@@ -119,7 +128,7 @@ jobs:
      base64-subjects: ${{ needs.package.outputs.hash }}

  publish-cuda-release:
-    name: Publish CUDA Release
+    name: make_release_cuda/publish-cuda-release
    needs: [setup-instance, package] # for comparing hashes
    runs-on: ${{ needs.setup-instance.outputs.runner-name }}
    permissions:

@@ -166,8 +175,14 @@ jobs:
        env:
          GCC_VERSION: ${{ matrix.gcc }}

+      - name: Download artifact
+        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+        with:
+          name: crate-tfhe-cuda-backend
+          path: target/package
+
      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
+        uses: rust-lang/crates-io-auth-action@041cce5b4b821e6b0ebc9c9c38b58cac4e34dcc2 # v1.0.2
        id: auth

      - name: Publish crate.io package

@@ -201,7 +216,7 @@ jobs:
          SLACK_MESSAGE: "tfhe-cuda-backend release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

  teardown-instance:
-    name: Teardown instance (publish-release)
+    name: make_release_cuda/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [setup-instance, publish-cuda-release]
    runs-on: ubuntu-latest

100 .github/workflows/make_release_hpu.yml vendored
@@ -1,4 +1,4 @@
-name: Publish HPU release
+name: make_release_hpu

on:
  workflow_dispatch:

@@ -18,39 +18,12 @@ env:
permissions: {}

jobs:
-  verify_tag:
-    uses: ./.github/workflows/verify_tagged_commit.yml
-    secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
-      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
-
-  package:
-    runs-on: ubuntu-latest
-    needs: verify_tag
-    outputs:
-      hash: ${{ steps.hash.outputs.hash }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Prepare package
-        run: |
-          cargo package -p tfhe-hpu-backend
-      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: crate
-          path: target/package/*.crate
-      - name: generate hash
-        id: hash
-        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-  provenance:
-    if: ${{ !inputs.dry_run }}
-    needs: [package]
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
+  make-release:
+    name: make_release_hpu/make-release
+    uses: ./.github/workflows/make_release_common.yml
+    with:
+      package-name: "tfhe-hpu-backend"
+      dry-run: ${{ inputs.dry_run }}
    permissions:
      # Needed to detect the GitHub Actions environment
      actions: read

@@ -58,55 +31,10 @@ jobs:
      id-token: write
      # Needed to upload assets/artifacts
      contents: write
-    with:
-      # SHA-256 hashes of the Crate package.
-      base64-subjects: ${{ needs.package.outputs.hash }}
-
-  publish_release:
-    name: Publish tfhe-hpu-backend Release
-    runs-on: ubuntu-latest
-    needs: [verify_tag, package] # for comparing hashes
-    permissions:
-      # Needed for OIDC token exchange on crates.io
-      id-token: write
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-
-      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
-        id: auth
-
-      - name: Publish crate.io package
-        env:
-          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
-          DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
-        run: |
-          # DRY_RUN expansion cannot be double quoted when variable contains empty string otherwise cargo publish
-          # would fail. This is safe since DRY_RUN is handled in the env section above.
-          # shellcheck disable=SC2086
-          cargo publish -p tfhe-hpu-backend ${DRY_RUN}
-
-      - name: Generate hash
-        id: published_hash
-        run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-      - name: Slack notification (hashes comparison)
-        if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: failure
-          SLACK_MESSAGE: "SLSA tfhe-hpu-backend crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
-
-      - name: Slack Notification
-        if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: ${{ job.status }}
-          SLACK_MESSAGE: "tfhe-hpu-backend release failed: (${{ env.ACTION_RUN_URL }})"
+    secrets:
+      BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
+      SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+      REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
+      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

124 .github/workflows/make_release_tfhe.yml vendored Normal file
@@ -0,0 +1,124 @@
# Publish new release of tfhe-rs on various platform.
name: make_release_tfhe

on:
  workflow_dispatch:
    inputs:
      dry_run:
        description: "Dry-run"
        type: boolean
        default: true
      push_to_crates:
        description: "Push to crate"
        type: boolean
        default: true
      push_web_package:
        description: "Push web js package"
        type: boolean
        default: true
      push_node_package:
        description: "Push node js package"
        type: boolean
        default: true
      npm_latest_tag:
        description: "Set NPM tag as latest"
        type: boolean
        default: false

env:
  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
  NPM_TAG: ""
  SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
  SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
  SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
  SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

permissions: {}

jobs:
  make-release:
    name: make_release_tfhe/make-release
    uses: ./.github/workflows/make_release_common.yml
    with:
      package-name: "tfhe"
      dry-run: ${{ inputs.dry_run }}
    permissions:
      # Needed to detect the GitHub Actions environment
      actions: read
      # Needed to create the provenance via GitHub OIDC
      id-token: write
      # Needed to upload assets/artifacts
      contents: write
    secrets:
      BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
      SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
      REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

  make-release-js:
    name: make_release_tfhe/make-release-js
    needs: make-release
    runs-on: ubuntu-latest
    # For provenance of npmjs publish
    permissions:
      contents: read
      id-token: write # also needed for OIDC token exchange on crates.io and npmjs.com
    steps:
      - name: Checkout
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          fetch-depth: 0
          persist-credentials: 'false'
          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

      - name: Create NPM version tag
        if: ${{ inputs.npm_latest_tag }}
        run: |
          echo "NPM_TAG=latest" >> "${GITHUB_ENV}"

      - name: Build web package
        if: ${{ inputs.push_web_package }}
        run: |
          make build_web_js_api_parallel

      - name: Authenticate on NPM
        uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
        with:
          node-version: '22'
          registry-url: 'https://registry.npmjs.org'

      - name: Publish web package
        if: ${{ inputs.push_web_package }}
        uses: JS-DevTools/npm-publish@7f8fe47b3bea1be0c3aec2b717c5ec1f3e03410b
        with:
          package: tfhe/pkg/package.json
          dry-run: ${{ inputs.dry_run }}
          tag: ${{ env.NPM_TAG }}
          provenance: true

      - name: Build Node package
        if: ${{ inputs.push_node_package }}
        run: |
          rm -rf tfhe/pkg

          make build_node_js_api
          sed -i 's/"tfhe"/"node-tfhe"/g' tfhe/pkg/package.json

      - name: Publish Node package
        if: ${{ inputs.push_node_package }}
        uses: JS-DevTools/npm-publish@7f8fe47b3bea1be0c3aec2b717c5ec1f3e03410b
        with:
          package: tfhe/pkg/package.json
          dry-run: ${{ inputs.dry_run }}
          tag: ${{ env.NPM_TAG }}
          provenance: true

      - name: Slack Notification
        if: ${{ failure() }}
        continue-on-error: true
        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
        env:
          SLACK_COLOR: ${{ job.status }}
          SLACK_MESSAGE: "tfhe release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

108 .github/workflows/make_release_tfhe_csprng.yml vendored
@@ -1,4 +1,4 @@
-name: Publish tfhe-csprng release
+name: make_release_tfhe_csprng

on:
  workflow_dispatch:

@@ -8,49 +8,15 @@ on:
        type: boolean
        default: true

env:
  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
  SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
  SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
  SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
  SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

permissions: {}

jobs:
-  verify_tag:
-    uses: ./.github/workflows/verify_tagged_commit.yml
-    secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
-      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
-
-  package:
-    runs-on: ubuntu-latest
-    outputs:
-      hash: ${{ steps.hash.outputs.hash }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Prepare package
-        run: |
-          cargo package -p tfhe-csprng
-      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: crate-tfhe-csprng
-          path: target/package/*.crate
-      - name: generate hash
-        id: hash
-        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-
-  provenance:
-    if: ${{ !inputs.dry_run }}
-    needs: [package]
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
+  make-release:
+    name: make_release_tfhe_csprng/make-release
+    uses: ./.github/workflows/make_release_common.yml
+    with:
+      package-name: "tfhe-csprng"
+      dry-run: ${{ inputs.dry_run }}
    permissions:
      # Needed to detect the GitHub Actions environment
      actions: read

@@ -58,56 +24,10 @@ jobs:
      id-token: write
      # Needed to upload assets/artifacts
      contents: write
-    with:
-      # SHA-256 hashes of the Crate package.
-      base64-subjects: ${{ needs.package.outputs.hash }}
-
-
-  publish_release:
-    name: Publish tfhe-csprng Release
-    needs: [verify_tag, package]
-    runs-on: ubuntu-latest
-    permissions:
-      # Needed for OIDC token exchange on crates.io
-      id-token: write
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Download artifact
-        uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
-        with:
-          name: crate-tfhe-csprng
-          path: target/package
-      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
-        id: auth
-      - name: Publish crate.io package
-        env:
-          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
-          DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
-        run: |
-          # DRY_RUN expansion cannot be double quoted when variable contains empty string otherwise cargo publish
-          # would fail. This is safe since DRY_RUN is handled in the env section above.
-          # shellcheck disable=SC2086
-          cargo publish -p tfhe-csprng ${DRY_RUN}
-      - name: Generate hash
-        id: published_hash
-        run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-      - name: Slack notification (hashes comparison)
-        if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: failure
-          SLACK_MESSAGE: "SLSA tfhe-csprng - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
-      - name: Slack Notification
-        if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: ${{ job.status }}
-          SLACK_MESSAGE: "tfhe-csprng release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
+    secrets:
+      BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
+      SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+      REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
+      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

100 .github/workflows/make_release_tfhe_fft.yml vendored
@@ -1,5 +1,5 @@
# Publish new release of tfhe-fft
-name: Publish tfhe-fft release
+name: make_release_tfhe_fft

on:
  workflow_dispatch:

@@ -19,39 +19,12 @@ env:
permissions: {}

jobs:
-  verify_tag:
-    uses: ./.github/workflows/verify_tagged_commit.yml
-    secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
-      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
-
-  package:
-    runs-on: ubuntu-latest
-    needs: verify_tag
-    outputs:
-      hash: ${{ steps.hash.outputs.hash }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Prepare package
-        run: |
-          cargo package -p tfhe-fft
-      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: crate
-          path: target/package/*.crate
-      - name: generate hash
-        id: hash
-        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-  provenance:
-    if: ${{ !inputs.dry_run }}
-    needs: [package]
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
+  make-release:
+    name: make_release_tfhe_fft/make-release
+    uses: ./.github/workflows/make_release_common.yml
+    with:
+      package-name: "tfhe-fft"
+      dry-run: ${{ inputs.dry_run }}
    permissions:
      # Needed to detect the GitHub Actions environment
      actions: read

@@ -59,55 +32,10 @@ jobs:
      id-token: write
      # Needed to upload assets/artifacts
      contents: write
-    with:
-      # SHA-256 hashes of the Crate package.
-      base64-subjects: ${{ needs.package.outputs.hash }}
-
-  publish_release:
-    name: Publish tfhe-fft Release
-    runs-on: ubuntu-latest
-    needs: [verify_tag, package] # for comparing hashes
-    permissions:
-      # Needed for OIDC token exchange on crates.io
-      id-token: write
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-
-      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
-        id: auth
-
-      - name: Publish crate.io package
-        env:
-          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
-          DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
-        run: |
-          # DRY_RUN expansion cannot be double quoted when variable contains empty string otherwise cargo publish
-          # would fail. This is safe since DRY_RUN is handled in the env section above.
-          # shellcheck disable=SC2086
-          cargo publish -p tfhe-fft ${DRY_RUN}
-
-      - name: Generate hash
-        id: published_hash
-        run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-      - name: Slack notification (hashes comparison)
-        if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: failure
-          SLACK_MESSAGE: "SLSA tfhe-fft crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
-
-      - name: Slack Notification
-        if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: ${{ job.status }}
-          SLACK_MESSAGE: "tfhe-fft release failed: (${{ env.ACTION_RUN_URL }})"
+    secrets:
+      BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
+      SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+      REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
+      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

100 .github/workflows/make_release_tfhe_ntt.yml vendored
@@ -1,5 +1,5 @@
# Publish new release of tfhe-ntt
-name: Publish tfhe-ntt release
+name: make_release_tfhe_ntt

on:
  workflow_dispatch:

@@ -19,39 +19,12 @@ env:
permissions: {}

jobs:
-  verify_tag:
-    uses: ./.github/workflows/verify_tagged_commit.yml
-    secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
-      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
-
-  package:
-    runs-on: ubuntu-latest
-    needs: verify_tag
-    outputs:
-      hash: ${{ steps.hash.outputs.hash }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Prepare package
-        run: |
-          cargo package -p tfhe-ntt
-      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: crate
-          path: target/package/*.crate
-      - name: generate hash
-        id: hash
-        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-  provenance:
-    if: ${{ !inputs.dry_run }}
-    needs: [package]
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
+  make-release:
+    name: make_release_tfhe_ntt/make-release
+    uses: ./.github/workflows/make_release_common.yml
+    with:
+      package-name: "tfhe-ntt"
+      dry-run: ${{ inputs.dry_run }}
    permissions:
      # Needed to detect the GitHub Actions environment
      actions: read

@@ -59,55 +32,10 @@ jobs:
      id-token: write
      # Needed to upload assets/artifacts
      contents: write
-    with:
-      # SHA-256 hashes of the Crate package.
-      base64-subjects: ${{ needs.package.outputs.hash }}
-
-  publish_release:
-    name: Publish tfhe-ntt Release
-    runs-on: ubuntu-latest
-    needs: [verify_tag, package] # for comparing hashes
-    permissions:
-      # Needed for OIDC token exchange on crates.io
-      id-token: write
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-
-      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
-        id: auth
-
-      - name: Publish crate.io package
-        env:
-          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
-          DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
-        run: |
-          # DRY_RUN expansion cannot be double quoted when variable contains empty string otherwise cargo publish
-          # would fail. This is safe since DRY_RUN is handled in the env section above.
-          # shellcheck disable=SC2086
-          cargo publish -p tfhe-ntt ${DRY_RUN}
-
-      - name: Generate hash
-        id: published_hash
-        run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-      - name: Slack notification (hashes comparison)
-        if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: failure
-          SLACK_MESSAGE: "SLSA tfhe-ntt crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
-
-      - name: Slack Notification
-        if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: ${{ job.status }}
-          SLACK_MESSAGE: "tfhe-ntt release failed: (${{ env.ACTION_RUN_URL }})"
+    secrets:
+      BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
+      SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+      REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
+      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

196 .github/workflows/make_release_tfhe_versionable.yml vendored
@@ -1,7 +1,12 @@
-name: Publish tfhe-versionable release
+name: make_release_tfhe_versionable

on:
  workflow_dispatch:
+    inputs:
+      dry_run:
+        description: "Dry-run"
+        type: boolean
+        default: true

env:
  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}

@@ -13,38 +18,34 @@ env:
permissions: {}

jobs:
-  verify_tag:
-    uses: ./.github/workflows/verify_tagged_commit.yml
-    secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
-      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
+  make-release-derive:
+    name: make_release_tfhe_versionable/make-release-derive
+    uses: ./.github/workflows/make_release_common.yml
+    with:
+      package-name: "tfhe-versionable-derive"
+      dry-run: ${{ inputs.dry_run }}
+    permissions:
+      # Needed to detect the GitHub Actions environment
+      actions: read
+      # Needed to create the provenance via GitHub OIDC
+      id-token: write
+      # Needed to upload assets/artifacts
+      contents: write
+    secrets:
+      BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
+      SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+      REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
+      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

-  package-derive:
-    name: Package tfhe-versionable-derive Release
-    runs-on: ubuntu-latest
-    outputs:
-      hash: ${{ steps.hash.outputs.hash }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Prepare package
-        run: |
-          cargo package -p tfhe-versionable-derive
-      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: crate-tfhe-versionable-derive
-          path: target/package/*.crate
-      - name: generate hash
-        id: hash
-        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-  provenance-derive:
-    needs: [package-derive]
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
+  make-release:
+    name: make_release_tfhe_versionable/make-release
+    needs: make-release-derive
+    uses: ./.github/workflows/make_release_common.yml
+    with:
+      package-name: "tfhe-versionable"
+      dry-run: ${{ inputs.dry_run }}
    permissions:
      # Needed to detect the GitHub Actions environment
      actions: read

@@ -52,131 +53,10 @@ jobs:
      id-token: write
      # Needed to upload assets/artifacts
      contents: write
-    with:
-      # SHA-256 hashes of the Crate package.
-      base64-subjects: ${{ needs.package-derive.outputs.hash }}
-
-  publish_release-derive:
-    name: Publish tfhe-versionable-derive Release
-    needs: [ verify_tag, package-derive ] # for comparing hashes
-    runs-on: ubuntu-latest
-    permissions:
-      # Needed for OIDC token exchange on crates.io
-      id-token: write
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Download artifact
-        uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
-        with:
-          name: crate-tfhe-versionable-derive
-          path: target/package
-      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
-        id: auth
-      - name: Publish crate.io package
-        env:
-          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
-        run: |
-          cargo publish -p tfhe-versionable-derive
-      - name: Generate hash
-        id: published_hash
-        run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-      - name: Slack notification (hashes comparison)
-        if: ${{ needs.package-derive.outputs.hash != steps.published_hash.outputs.pub_hash }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: failure
-          SLACK_MESSAGE: "SLSA tfhe-versionable-derive - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
-      - name: Slack Notification
-        if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: ${{ job.status }}
-          SLACK_MESSAGE: "tfhe-versionable-derive release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
-
-  package:
-    name: Package tfhe-versionable Release
-    needs: publish_release-derive
-    runs-on: ubuntu-latest
-    outputs:
-      hash: ${{ steps.hash.outputs.hash }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Prepare package
-        run: |
-          cargo package -p tfhe-versionable
-      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: crate-tfhe-versionable
-          path: target/package/*.crate
-      - name: generate hash
-        id: hash
-        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-
-  provenance:
-    needs: package
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
-    permissions:
-      # Needed to detect the GitHub Actions environment
-      actions: read
-      # Needed to create the provenance via GitHub OIDC
-      id-token: write
-      # Needed to upload assets/artifacts
-      contents: write
-    with:
-      # SHA-256 hashes of the Crate package.
-      base64-subjects: ${{ needs.package.outputs.hash }}
-
-  publish_release:
-    name: Publish tfhe-versionable Release
-    needs: package # for comparing hashes
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Download artifact
-        uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
-        with:
-          name: crate-tfhe-versionable
-          path: target/package
-      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
-        id: auth
-      - name: Publish crate.io package
-        env:
-          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
-        run: |
-          cargo publish -p tfhe-versionable
-      - name: Generate hash
-        id: published_hash
-        run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-      - name: Slack notification (hashes comparison)
-        if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: failure
-          SLACK_MESSAGE: "SLSA tfhe-versionable - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
-      - name: Slack Notification
-        if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: ${{ job.status }}
-          SLACK_MESSAGE: "tfhe-versionable release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
+    secrets:
+      BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
+      SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+      REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
+      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

96 .github/workflows/make_release_zk_pok.yml vendored
@@ -1,4 +1,4 @@
-name: Publish tfhe-zk-pok release
+name: make_release_zk_pok

on:
  workflow_dispatch:

@@ -15,34 +15,15 @@ env:
  SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
  SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

-permissions: {}
+permissions: { }

jobs:
-  package:
-    runs-on: ubuntu-latest
-    outputs:
-      hash: ${{ steps.hash.outputs.hash }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Prepare package
-        run: |
-          cargo package -p tfhe-zk-pok
-      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
-        with:
-          name: crate-zk-pok
-          path: target/package/*.crate
-      - name: generate hash
-        id: hash
-        run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-  provenance:
-    if: ${{ !inputs.dry_run }}
-    needs: [package]
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
+  make-release:
+    name: make_release_zk_pok/make-release
+    uses: ./.github/workflows/make_release_common.yml
+    with:
+      package-name: "tfhe-zk-pok"
+      dry-run: ${{ inputs.dry_run }}
    permissions:
      # Needed to detect the GitHub Actions environment
      actions: read

@@ -50,61 +31,10 @@ jobs:
      id-token: write
      # Needed to upload assets/artifacts
      contents: write
-    with:
-      # SHA-256 hashes of the Crate package.
-      base64-subjects: ${{ needs.package.outputs.hash }}
-
-  verify_tag:
-    uses: ./.github/workflows/verify_tagged_commit.yml
-    secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
-      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
+    secrets:
+      BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
+      SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+      REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
+      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

-  publish_release:
-    name: Publish tfhe-zk-pok Release
-    needs: [verify_tag, package] # for comparing hashes
-    runs-on: ubuntu-latest
-    permissions:
-      # Needed for OIDC token exchange on crates.io
-      id-token: write
-    steps:
-      - name: Checkout
-        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          fetch-depth: 0
-          persist-credentials: 'false'
-          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
-      - name: Download artifact
-        uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
-        with:
-          name: crate-zk-pok
-          path: target/package
-      - name: Authenticate on registry
-        uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
-        id: auth
-      - name: Publish crate.io package
-        env:
-          CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
-          DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
-        run: |
-          # DRY_RUN expansion cannot be double quoted when variable contains empty string otherwise cargo publish
-          # would fail. This is safe since DRY_RUN is handled in the env section above.
-          # shellcheck disable=SC2086
-          cargo publish -p tfhe-zk-pok ${DRY_RUN}
-      - name: Verify hash
-        id: published_hash
-        run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
-      - name: Slack notification (hashes comparison)
-        if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: failure
-          SLACK_MESSAGE: "SLSA tfhe-zk-pok crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
-      - name: Slack Notification
-        if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
-        continue-on-error: true
-        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
-        env:
-          SLACK_COLOR: ${{ job.status }}
-          SLACK_MESSAGE: "tfhe-zk-pok release failed: (${{ env.ACTION_RUN_URL }})"

5 .github/workflows/parameters_check.yml vendored
@@ -1,5 +1,5 @@
# Perform a security check on all the cryptographic parameters set
-name: Parameters curves security check
+name: parameters_check

env:
  CARGO_TERM_COLOR: always

@@ -16,6 +16,7 @@ permissions: {}

jobs:
  params-curves-security-check:
+    name: parameters_check/params-curves-security-check
    runs-on: large_ubuntu_16-22.04
    steps:
      - name: Checkout tfhe-rs

@@ -29,7 +30,7 @@ jobs:
        with:
          repository: malb/lattice-estimator
          path: lattice_estimator
-          ref: '52f4b7a99ae7b5dfd088c5c295070bd38ff0d1e0'
+          ref: 'e35f45b7976a90a79c3c6625a45bbc344c1abc67'
          persist-credentials: 'false'

      - name: Install Sage

4 .github/workflows/placeholder_workflow.yml vendored
@@ -1,5 +1,5 @@
# Placeholder workflow file allowing running it without having to merge to main first
-name: Placeholder Workflow
+name: placeholder_workflow

on:
  workflow_dispatch:

@@ -8,7 +8,7 @@ permissions: {}

jobs:
  placeholder:
-    name: Placeholder
+    name: placeholder_workflow/placeholder
    runs-on: ubuntu-latest

    steps:

74 .github/workflows/sync_on_push.yml vendored
@@ -1,5 +1,5 @@
# Sync repos
-name: Sync repos
+name: sync_on_push

on:
  push:

@@ -7,30 +7,62 @@ on:
      - 'main'
  workflow_dispatch:

-permissions: {}
+permissions: { }

jobs:
  sync-repo:
+    name: sync_on_push/sync-repo
    if: ${{ github.repository == 'zama-ai/tfhe-rs' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
        with:
          fetch-depth: 0
          persist-credentials: 'false'
          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
      - name: git-sync
-        uses: valtech-sd/git-sync@e734cfe9485a92e720eac5af8a4555dde5fecf88
-        with:
-          source_repo: "zama-ai/tfhe-rs"
-          source_branch: "main"
-          destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.FHE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
-          destination_branch: "main"
-      - name: git-sync tags
-        uses: wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83
-        with:
-          source_repo: "zama-ai/tfhe-rs"
-          source_branch: "refs/tags/*"
-          destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.FHE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
-          destination_branch: "refs/tags/*"
+        env:
+          SOURCE_REPO: "zama-ai/tfhe-rs"
+          SOURCE_BRANCH: "main"
+          DESTINATION_BRANCH: "main"
+          USERNAME: ${{ secrets.BOT_USERNAME }}
+          TOKEN: ${{ secrets.SYNC_REPO_TOKEN }}
+          DEST_REPO: ${{ secrets.SYNC_DEST_REPO }}
+        run: |
+          echo ">>> Cloning source repo..."
+          git lfs install
+          git clone "https://${USERNAME}:${TOKEN}@github.com/${SOURCE_REPO}.git" ./tfhe-rs --origin source && cd ./tfhe-rs
+          git remote add destination "https://${USERNAME}:${TOKEN}@github.com/${DEST_REPO}.git"
+
+          echo ">>> Fetching all branches references down locally so subsequent commands can see them..."
+          git fetch source '+refs/heads/*:refs/heads/*' --update-head-ok
+
+          echo ">>> Print out all branches"
+          git --no-pager branch -a -vv
+
+          echo ">>> Fetching all LFS items from source..."
+          git lfs fetch --all source "${SOURCE_BRANCH}"
+
+          echo ">>> Pushing git changes..."
+          git push destination "${SOURCE_BRANCH}:${DESTINATION_BRANCH}" -f
+
+          echo ">>> Pushing all LFS items..."
+          git lfs push --all destination "${DESTINATION_BRANCH}"
+
+      - name: git-sync-tags
+        env:
+          SOURCE_REPO: "zama-ai/tfhe-rs"
+          SOURCE_BRANCH: "refs/tags/*"
+          DESTINATION_BRANCH: "refs/tags/*"
+          USERNAME: ${{ secrets.BOT_USERNAME }}
+          TOKEN: ${{ secrets.SYNC_REPO_TOKEN }}
+          DEST_REPO: ${{ secrets.SYNC_DEST_REPO }}
+        run: |
+          echo ">>> Cloning source repo..."
+          git lfs install
+          git clone "https://${USERNAME}:${TOKEN}@github.com/${SOURCE_REPO}.git" ./tfhe-rs-tag --origin source && cd ./tfhe-rs-tag
+          git remote add destination "https://${USERNAME}:${TOKEN}@github.com/${DEST_REPO}.git"
+
+          echo ">>> Fetching all branches references down locally so subsequent commands can see them..."
+          git fetch source '+refs/heads/*:refs/heads/*' --update-head-ok
+
+          echo ">>> Print out all branches"
+          git --no-pager branch -a -vv
+
+          echo ">>> Pushing git changes..."
+          git push destination "${SOURCE_BRANCH}:${DESTINATION_BRANCH}" -f
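A quick way to check that a sync like the one scripted above left both repositories aligned is to compare their advertised refs; a sketch with a hypothetical destination URL:

    git ls-remote https://github.com/zama-ai/tfhe-rs.git refs/heads/main 'refs/tags/*' | sort > source.refs
    git ls-remote https://github.com/example-org/tfhe-rs-mirror.git refs/heads/main 'refs/tags/*' | sort > dest.refs
    diff source.refs dest.refs   # empty output means the branch tip and all tags match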

6 .github/workflows/unverified_prs.yml vendored
@@ -1,4 +1,5 @@
-name: 'Close unverified PRs'
+# Close unverified PRs'
+name: unverified_prs
on:
  schedule:
    - cron: '30 1 * * *'

@@ -7,12 +8,13 @@ permissions: {}

jobs:
  stale:
+    name: unverified_prs/stale
    runs-on: ubuntu-latest
    permissions:
      issues: read
      pull-requests: write
    steps:
-      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
+      - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
        with:
          stale-pr-message: 'This PR is unverified and has been open for 2 days, it will now be closed. If you want to contribute please sign the CLA as indicated by the bot.'
          days-before-stale: 2

.github/workflows/verify_tagged_commit.yml → .github/workflows/verify_triggering_actor.yml vendored
@@ -1,10 +1,10 @@
-# Verify a tagged commit
-name: Verify tagged commit
+# Verify a triggering actor
+name: verify_triggering_actor

on:
  workflow_call:
    secrets:
-      RELEASE_TEAM:
+      ALLOWED_TEAM:
        required: true
      READ_ORG_TOKEN:
        required: true

@@ -12,9 +12,9 @@ on:
permissions: {}

jobs:
-  checks:
+  check-actor:
+    name: verify_triggering_actor/check-actor
    runs-on: ubuntu-latest
-    if: startsWith(github.ref, 'refs/tags/')
    steps:
      # Check triggering actor membership
      - name: Actor verification

@@ -23,7 +23,7 @@ jobs:
        with:
          username: ${{ github.triggering_actor }}
          org: ${{ github.repository_owner }}
-          team: ${{ secrets.RELEASE_TEAM }}
+          team: ${{ secrets.ALLOWED_TEAM }}
          github_token: ${{ secrets.READ_ORG_TOKEN }}

      - name: Actor authorized

13 Cargo.toml
@@ -1,5 +1,5 @@
[workspace]
-resolver = "2"
+resolver = "3"
members = [
    "tfhe",
    "tfhe-benchmark",

@@ -24,7 +24,7 @@ exclude = [
]
[workspace.dependencies]
aligned-vec = { version = "0.6", default-features = false }
-bytemuck = "1.14.3"
+bytemuck = "<1.24"
dyn-stack = { version = "0.11", default-features = false }
itertools = "0.14"
num-complex = "0.4"

@@ -32,7 +32,7 @@ pulp = { version = "0.21", default-features = false }
rand = "0.8"
rayon = "1.11"
serde = { version = "1.0", default-features = false }
-wasm-bindgen = "0.2.100"
+wasm-bindgen = "0.2.101"
getrandom = "0.2.8"

[profile.bench]

@@ -54,3 +54,10 @@ debug-assertions = false

[workspace.metadata.dylint]
libraries = [{ path = "utils/tfhe-lints" }]
+
+[profile.debug_lto_off]
+inherits = "dev"
+debug = true
+lto = "off"
+debug-assertions = false
+overflow-checks = false
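The bytemuck change above swaps a caret-style requirement for an explicit upper bound; in Cargo's requirement syntax the two resolve differently:

    bytemuck = "1.14.3"   # caret by default: accepts >=1.14.3, <2.0.0
    bytemuck = "<1.24"    # comparison requirement: accepts any version strictly below 1.24.0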

282 Makefile
@@ -21,11 +21,13 @@ BENCH_OP_FLAVOR?=DEFAULT
BENCH_TYPE?=latency
BENCH_PARAM_TYPE?=classical
BENCH_PARAMS_SET?=default
+BENCH_CUSTOM_COMMAND:=
NODE_VERSION=22.6
BACKWARD_COMPAT_DATA_DIR=utils/tfhe-backward-compat-data
BACKWARD_COMPAT_DATA_GEN_VERSION:=$(TFHE_VERSION)
+CURRENT_TFHE_VERSION:=$(shell grep '^version[[:space:]]*=' tfhe/Cargo.toml | cut -d '=' -f 2 | xargs)
WASM_PACK_VERSION="0.13.1"
-# We are kind of hacking the cut here, the version cannot contain a quote '"'
-WASM_BINDGEN_VERSION:=$(shell grep '^wasm-bindgen[[:space:]]*=' Cargo.toml | cut -d '"' -f 2 | xargs)
+WASM_BINDGEN_VERSION:=$(shell cargo tree --target wasm32-unknown-unknown -e all --prefix none | grep "wasm-bindgen v" | head -n 1 | cut -d 'v' -f2)
WEB_RUNNER_DIR=web-test-runner
WEB_SERVER_DIR=tfhe/web_wasm_parallel_tests
# This is done to avoid forgetting it, we still precise the RUSTFLAGS in the commands to be able to
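The new WASM_BINDGEN_VERSION recipe above reads the resolved dependency version out of cargo tree instead of parsing the requirement string in Cargo.toml. A sketch of what the pipeline sees (output illustrative):

    $ cargo tree --target wasm32-unknown-unknown -e all --prefix none | grep "wasm-bindgen v" | head -n 1
    wasm-bindgen v0.2.101
    # cut -d 'v' -f2 then yields "0.2.101", since "wasm-bindgen " contains no 'v' before the version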

@@ -54,6 +56,7 @@ TFHECUDA_BUILD=$(TFHECUDA_SRC)/build

# tfhe-hpu-backend
HPU_CONFIG=v80
+V80_PCIE_DEV?=01

# Exclude these files from coverage reports
define COVERAGE_EXCLUDED_FILES

@@ -71,6 +74,12 @@ define COVERAGE_EXCLUDED_FILES
	--exclude-files tfhe/examples/utilities/*
endef

+# Prints out recipe name at the beginning of the execution and print it out again at the end if a failure occurs.
+define run_recipe_with_details
+	@echo "Running recipe: $1"
+	@$(MAKE) $1 --no-print-directory || { echo "Recipe '$@' failed"; exit 1; }
+endef
+
.PHONY: rs_check_toolchain # Echo the rust toolchain used for checks
rs_check_toolchain:
	@echo $(RS_CHECK_TOOLCHAIN)
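run_recipe_with_details, defined in the hunk above, is meant to be expanded with $(call ...) from inside a recipe, so each sub-target's name is echoed before it runs and again on failure. A hypothetical aggregate target (not in the Makefile; sub-target names illustrative) showing the intended usage:

    .PHONY: example_checks # hypothetical target illustrating run_recipe_with_details
    example_checks:
    	$(call run_recipe_with_details,check_fmt)
    	$(call run_recipe_with_details,clippy_all)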
|
||||
@@ -86,6 +95,14 @@ install_rs_check_toolchain:
|
||||
( echo "Unable to install $(RS_CHECK_TOOLCHAIN) toolchain, check your rustup installation. \
|
||||
Rustup can be downloaded at https://rustup.rs/" && exit 1 )
|
||||
|
||||
.PHONY: install_rs_latest_nightly_toolchain # Install the nightly toolchain used to build docs using same version as docs.rs
|
||||
# We don't check that it exists, because we always want the latest
|
||||
# and the command below will install/update
|
||||
install_rs_latest_nightly_toolchain:
|
||||
rustup toolchain install --profile default nightly || \
|
||||
( echo "Unable to install nightly toolchain, check your rustup installation. \
|
||||
Rustup can be downloaded at https://rustup.rs/" && exit 1 )
|
||||
|
||||
.PHONY: install_rs_build_toolchain # Install the toolchain used for builds
|
||||
install_rs_build_toolchain:
|
||||
@( rustup toolchain list | grep -q "$(RS_BUILD_TOOLCHAIN)" && \
|
||||
@@ -114,10 +131,6 @@ install_cargo_nextest: install_rs_build_toolchain
|
||||
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install cargo-nextest --locked || \
|
||||
( echo "Unable to install cargo nextest, unknown error." && exit 1 )
|
||||
|
||||
# The installation should use the ^ symbol if the specified version in the root Cargo.toml is of the
|
||||
# form "0.2.96" then we get ^0.2.96 e.g., as we don't lock those dependencies
|
||||
# this allows to get the matching CLI
|
||||
# If a version range is specified no need to add the leading ^
|
||||
.PHONY: install_wasm_bindgen_cli # Install wasm-bindgen-cli to get access to the test runner
install_wasm_bindgen_cli: install_rs_build_toolchain
	cargo +$(RS_BUILD_TOOLCHAIN) install --locked wasm-bindgen-cli --version "$(WASM_BINDGEN_VERSION)"
@@ -160,9 +173,13 @@ install_tarpaulin: install_rs_build_toolchain
	( echo "Unable to install cargo tarpaulin, unknown error." && exit 1 )

.PHONY: install_cargo_dylint # Install custom tfhe-rs lints
install_cargo_dylint:
install_cargo_dylint: install_rs_build_toolchain
	cargo install --locked cargo-dylint dylint-link

.PHONY: install_cargo_audit # Check dependencies
install_cargo_audit: install_rs_build_toolchain
	cargo install --locked cargo-audit

.PHONY: install_typos_checker # Install typos checker
install_typos_checker: install_rs_build_toolchain
	@typos --version > /dev/null 2>&1 || \
@@ -498,7 +515,7 @@ clippy_backward_compat_data: install_rs_check_toolchain # the toolchain is selec
	@# Some old crates are x86 specific, only run in that case
	@if uname -a | grep -q x86; then \
	RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options \
	-C $(BACKWARD_COMPAT_DATA_DIR) clippy --all-targets \
	-C $(BACKWARD_COMPAT_DATA_DIR) clippy --all --all-targets \
	-- --no-deps -D warnings; \
	else \
	echo "Cannot run clippy for backward compat crate on non x86 platform for now."; \
@@ -545,6 +562,10 @@ tfhe_lints: install_cargo_dylint
	RUSTFLAGS="$(RUSTFLAGS)" cargo dylint --all -p tfhe-zk-pok --no-deps -- \
	--features=experimental

.PHONY: audit_dependencies # Run cargo audit to check vulnerable dependencies
audit_dependencies: install_rs_build_toolchain install_cargo_audit
	cargo audit


.PHONY: build_core # Build core_crypto without experimental features
build_core: install_rs_build_toolchain install_rs_check_toolchain
@@ -693,7 +714,7 @@ test_integer_gpu: install_rs_build_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
	--features=integer,gpu -p tfhe -- integer::gpu::server_key:: --test-threads=2
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --doc --profile $(CARGO_PROFILE) \
	--features=integer,gpu -p tfhe -- integer::gpu::server_key::
	--features=integer,gpu -p tfhe -- integer::gpu::server_key:: --test-threads=4

.PHONY: test_integer_gpu_debug # Run the tests of the integer module with Debug flags for CUDA
test_integer_gpu_debug: install_rs_build_toolchain
@@ -741,6 +762,16 @@ test_integer_short_run_gpu: install_rs_check_toolchain install_cargo_nextest
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
	--features=integer,gpu -p tfhe -- integer::gpu::server_key::radix::tests_long_run::test_random_op_sequence integer::gpu::server_key::radix::tests_long_run::test_signed_random_op_sequence --test-threads=1 --nocapture

.PHONY: build_debug_integer_short_run_gpu # Build the long run integer tests for the gpu backend in debug
build_debug_integer_short_run_gpu: install_rs_check_toolchain install_cargo_nextest
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test -vv --no-run --profile debug_lto_off \
	--features=integer,gpu-debug-fake-multi-gpu -p tfhe
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile debug_lto_off \
	--features=integer,gpu-debug-fake-multi-gpu -p tfhe -- integer::gpu::server_key::radix::tests_long_run::test_random_op_sequence::test_gpu_short_random --list
	@echo "To debug fake-multi-gpu short run tests run:"
	@echo "TFHE_RS_TEST_LONG_TESTS_MINIMAL=TRUE <executable> integer::gpu::server_key::radix::tests_long_run::test_random_op_sequence::test_gpu_short_random_op_sequence_param_gpu_multi_bit_group_4_message_2_carry_2_ks_pbs_tuniform_2m128 --nocapture"
	@echo "Where <executable> = the one printed in the () in the 'Running unittests src/lib.rs ()' line above"

.PHONY: test_integer_compression
test_integer_compression: install_rs_build_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
@@ -995,6 +1026,21 @@ test_high_level_api_gpu: install_rs_build_toolchain install_cargo_nextest
	--test-threads=4 --features=integer,internal-keycache,gpu,zk-pok -p tfhe \
	-E "test(/high_level_api::.*gpu.*/)"

test_list_gpu: install_rs_build_toolchain install_cargo_nextest
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) nextest list --cargo-profile $(CARGO_PROFILE) \
	--features=integer,internal-keycache,gpu,zk-pok -p tfhe \
	-E "test(/.*gpu.*/)"

.PHONY: build_one_hl_api_test_gpu
build_one_hl_api_test_gpu: install_rs_build_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --no-run \
	--features=integer,gpu-debug -vv -p tfhe -- "$${TEST}" --test-threads=1 --nocapture

.PHONY: build_one_hl_api_test_fake_multi_gpu
build_one_hl_api_test_fake_multi_gpu: install_rs_build_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --no-run \
	--features=integer,gpu-debug-fake-multi-gpu -vv -p tfhe -- "$${TEST}" --test-threads=1 --nocapture

test_high_level_api_hpu: install_rs_build_toolchain install_cargo_nextest
ifeq ($(HPU_CONFIG), v80)
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) nextest run --cargo-profile $(CARGO_PROFILE) \
@@ -1108,8 +1154,16 @@ test_tfhe_lints: install_cargo_dylint
	rustup toolchain install && \
	cargo test

# The backward compat data repo holds historical binary data but also rust code to generate and load them.
# Here we use the "patch" functionality of Cargo to make sure the repo used for the data is the same as the one used for the code.
# The backward compat data folder holds historical binary data but also rust code to generate and load them.
.PHONY: gen_backward_compat_data # Re-generate backward compatibility data
gen_backward_compat_data: install_rs_check_toolchain # the toolchain is selected with toolchain.toml
	$(BACKWARD_COMPAT_DATA_DIR)/gen_data.sh $(BACKWARD_COMPAT_DATA_GEN_VERSION)

# Instantiate a new backward data crate for the current TFHE-rs version, if it does not already exist
.PHONY: new_backward_compat_crate
new_backward_compat_crate: install_rs_check_toolchain # the toolchain is selected with toolchain.toml
	cd $(BACKWARD_COMPAT_DATA_DIR) && cargo run -p add_new_version -- --tfhe-version $(CURRENT_TFHE_VERSION)

.PHONY: test_backward_compatibility_ci
test_backward_compatibility_ci: install_rs_build_toolchain
	TFHE_BACKWARD_COMPAT_DATA_DIR="../$(BACKWARD_COMPAT_DATA_DIR)" RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
@@ -1123,7 +1177,7 @@ doc: install_rs_check_toolchain
	@# Even though we are not in docs.rs, this allows us to "just" build the doc
	DOCS_RS=1 \
	RUSTDOCFLAGS="--html-in-header katex-header.html" \
	cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" doc \
	cargo +nightly doc \
	--features=boolean,shortint,integer,strings,gpu,internal-keycache,experimental,zk-pok --no-deps -p tfhe

.PHONY: docs # Build rust doc (alias for doc)
@@ -1175,6 +1229,8 @@ check_compile_tests: install_rs_build_toolchain
	--features=experimental,boolean,shortint,integer,internal-keycache \
	-p tfhe

.PHONY: check_compile_tests_c_api # Build C API tests without running them
check_compile_tests_c_api: install_rs_build_toolchain
	@if [[ "$(OS)" == "Linux" || "$(OS)" == "Darwin" ]]; then \
	"$(MAKE)" build_c_api && \
	./scripts/c_api_tests.sh --build-only --cargo-profile "$(CARGO_PROFILE)"; \
@@ -1261,6 +1317,7 @@ dieharder_csprng: install_dieharder build_tfhe_csprng

.PHONY: clippy_bench # Run clippy lints on tfhe-benchmark
clippy_bench: install_rs_check_toolchain
	! (grep --recursive "trivial" tfhe-benchmark && echo "trivial found in benches")
	RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
	--features=boolean,shortint,integer,internal-keycache,nightly-avx512,pbs-stats,zk-pok \
	-p tfhe-benchmark -- --no-deps -D warnings
@@ -1286,65 +1343,73 @@ print_doc_bench_parameters:
bench_integer: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-bench \
	--bench integer \
	--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --

.PHONY: bench_signed_integer # Run benchmarks for signed integer
bench_signed_integer: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-signed-bench \
	--bench integer-signed \
	--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --

.PHONY: bench_integer_gpu # Run benchmarks for integer on GPU backend
bench_integer_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-bench \
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
	--bench integer \
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_signed_integer_gpu # Run benchmarks for signed integer on GPU backend
bench_signed_integer_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-signed-bench \
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
	--bench integer-signed \
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_integer_hpu # Run benchmarks for integer on HPU backend
bench_integer_hpu: install_rs_check_toolchain
	source ./setup_hpu.sh --config $(HPU_CONFIG) -p ; \
	source ./setup_hpu.sh --config $(HPU_CONFIG); \
	export V80_PCIE_DEV=${V80_PCIE_DEV}; \
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-bench \
	--features=integer,internal-keycache,pbs-stats,hpu,hpu-v80 -p tfhe-benchmark --
	--bench integer \
	--features=integer,internal-keycache,pbs-stats,hpu,hpu-v80 -p tfhe-benchmark -- --quick

.PHONY: bench_integer_compression # Run benchmarks for unsigned integer compression
bench_integer_compression: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench glwe_packing_compression-integer-bench \
	--bench integer-glwe_packing_compression \
	--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --

.PHONY: bench_integer_compression_gpu
bench_integer_compression_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench glwe_packing_compression-integer-bench \
	--bench integer-glwe_packing_compression \
	--features=integer,internal-keycache,gpu,pbs-stats -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_integer_compression_128b_gpu
bench_integer_compression_128b_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench glwe_packing_compression_128b-integer-bench \
	--features=integer,internal-keycache,gpu,pbs-stats -p tfhe-benchmark --

.PHONY: bench_integer_zk_gpu
bench_integer_zk_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench zk-pke-bench \
	--features=integer,internal-keycache,gpu,pbs-stats,zk-pok -p tfhe-benchmark --
	--bench integer-zk-pke \
	--features=integer,internal-keycache,gpu,pbs-stats,zk-pok -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_integer_multi_bit # Run benchmarks for unsigned integer using multi-bit parameters
bench_integer_multi_bit: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-bench \
	--bench integer \
	--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --

.PHONY: bench_signed_integer_multi_bit # Run benchmarks for signed integer using multi-bit parameters
@@ -1352,7 +1417,7 @@ bench_signed_integer_multi_bit: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-signed-bench \
	--bench integer-signed \
	--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --

.PHONY: bench_integer_multi_bit_gpu # Run benchmarks for integer on GPU backend using multi-bit parameters
@@ -1360,22 +1425,22 @@ bench_integer_multi_bit_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT \
	__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-bench \
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
	--bench integer \
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_signed_integer_multi_bit_gpu # Run benchmarks for signed integer on GPU backend using multi-bit parameters
bench_signed_integer_multi_bit_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT \
	__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench integer-signed-bench \
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
	--bench integer-signed \
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_integer_zk # Run benchmarks for integer encryption with ZK proofs
bench_integer_zk: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench zk-pke-bench \
	--bench integer-zk-pke \
	--features=integer,internal-keycache,zk-pok,nightly-avx512,pbs-stats \
	-p tfhe-benchmark --

@@ -1383,77 +1448,77 @@ bench_integer_zk: install_rs_check_toolchain
bench_shortint: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench shortint-bench \
	--bench shortint \
	--features=shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark

.PHONY: bench_shortint_oprf # Run benchmarks for shortint OPRF
bench_shortint_oprf: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench oprf-shortint-bench \
	--bench shortint-oprf \
	--features=shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark

.PHONY: bench_boolean # Run benchmarks for boolean
bench_boolean: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench boolean-bench \
	--bench boolean \
	--features=boolean,internal-keycache,nightly-avx512 -p tfhe-benchmark

.PHONY: bench_ks # Run benchmarks for keyswitch
bench_ks: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench ks-bench \
	--bench core_crypto-ks \
	--features=boolean,shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark

.PHONY: bench_ks_gpu # Run benchmarks for keyswitch on GPU backend
bench_ks_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench ks-bench \
	--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark
	--bench core_crypto-ks \
	--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --profile release_lto_off

.PHONY: bench_pbs # Run benchmarks for PBS
bench_pbs: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench pbs-bench \
	--bench core_crypto-pbs \
	--features=boolean,shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark

.PHONY: bench_pbs_gpu # Run benchmarks for PBS on GPU backend
bench_pbs_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench pbs-bench \
	--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark
	--bench core_crypto-pbs \
	--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --profile release_lto_off

.PHONY: bench_ks_pbs # Run benchmarks for KS-PBS
bench_ks_pbs: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench ks-pbs-bench \
	--bench core_crypto-ks-pbs \
	--features=boolean,shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark

.PHONY: bench_ks_pbs_gpu # Run benchmarks for KS-PBS on GPU backend
bench_ks_pbs_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench ks-pbs-bench \
	--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark
	--bench core_crypto-ks-pbs \
	--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --profile release_lto_off

.PHONY: bench_pbs128 # Run benchmarks for PBS using FFT 128 bits
bench_pbs128: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench pbs128-bench \
	--bench core_crypto-pbs128 \
	--features=boolean,shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark

.PHONY: bench_pbs128_gpu # Run benchmarks for PBS using FFT 128 bits on GPU
bench_pbs128_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench pbs128-bench \
	--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark
	--bench core_crypto-pbs128 \
	--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --profile release_lto_off

bench_web_js_api_parallel_chrome: browser_path = "$(WEB_RUNNER_DIR)/chrome/chrome-linux64/chrome"
bench_web_js_api_parallel_chrome: driver_path = "$(WEB_RUNNER_DIR)/chrome/chromedriver-linux64/chromedriver"
@@ -1489,21 +1554,22 @@ bench_web_js_api_parallel_firefox_ci: setup_venv
bench_hlapi: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench hlapi \
	--features=integer,internal-keycache,nightly-avx512 -p tfhe-benchmark --
	--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --

.PHONY: bench_hlapi_gpu # Run benchmarks for integer operations on GPU
bench_hlapi_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench hlapi \
	--features=integer,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --
	--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_hlapi_hpu # Run benchmarks for HLAPI operations on HPU
bench_hlapi_hpu: install_rs_check_toolchain
	source ./setup_hpu.sh --config $(HPU_CONFIG) -p ; \
	source ./setup_hpu.sh --config $(HPU_CONFIG); \
	export V80_PCIE_DEV=${V80_PCIE_DEV}; \
	RUSTFLAGS="$(RUSTFLAGS)" \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench hlapi \
	--features=integer,internal-keycache,hpu,hpu-v80 -p tfhe-benchmark --
	--features=integer,internal-keycache,hpu,hpu-v80,pbs-stats -p tfhe-benchmark --

.PHONY: bench_hlapi_erc20 # Run benchmarks for ERC20 operations
bench_hlapi_erc20: install_rs_check_toolchain
@@ -1515,7 +1581,7 @@ bench_hlapi_erc20: install_rs_check_toolchain
bench_hlapi_erc20_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench hlapi-erc20 \
	--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --
	--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_hlapi_dex # Run benchmarks for DEX operations
bench_hlapi_dex: install_rs_check_toolchain
@@ -1527,15 +1593,16 @@ bench_hlapi_dex: install_rs_check_toolchain
bench_hlapi_dex_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench hlapi-dex \
	--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --
	--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --profile release_lto_off --

.PHONY: bench_hlapi_erc20_hpu # Run benchmarks for ERC20 operations on HPU
bench_hlapi_erc20_hpu: install_rs_check_toolchain
	source ./setup_hpu.sh --config $(HPU_CONFIG) -p ; \
	source ./setup_hpu.sh --config $(HPU_CONFIG); \
	export V80_PCIE_DEV=${V80_PCIE_DEV}; \
	RUSTFLAGS="$(RUSTFLAGS)" \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench hlapi-erc20 \
	--features=integer,internal-keycache,hpu,hpu-v80 -p tfhe-benchmark --
	--features=integer,internal-keycache,hpu,hpu-v80,pbs-stats -p tfhe-benchmark --

.PHONY: bench_tfhe_zk_pok # Run benchmarks for the tfhe_zk_pok crate
bench_tfhe_zk_pok: install_rs_check_toolchain
@@ -1554,7 +1621,12 @@ bench_hlapi_noise_squash_gpu: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
	cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
	--bench hlapi-noise-squash \
	--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --
	--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --profile release_lto_off --


.PHONY: bench_custom # Run benchmarks with a user-defined command
bench_custom: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench -p tfhe-benchmark $(BENCH_CUSTOM_COMMAND)

#
# Utility tools
@@ -1643,22 +1715,100 @@ sha256_bool: install_rs_check_toolchain
	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) run --profile $(CARGO_PROFILE) \
	--example sha256_bool --features=boolean

.PHONY: pcc # pcc stands for pre commit checks (except GPU)
pcc: no_tfhe_typo no_dbg_log check_parameter_export_ok check_fmt check_typos lint_doc \
	check_md_docs_are_tested check_intra_md_links check_doc_paths_use_dash \
	clippy_all check_compile_tests test_tfhe_lints \
	tfhe_lints
.PHONY: pcc # pcc stands for pre commit checks for CPU compilation
pcc: pcc_batch_1 pcc_batch_2 pcc_batch_3 pcc_batch_4 pcc_batch_5 pcc_batch_6 pcc_batch_7

#
# PCC is split into several batches to speed up CI feedback.
# Each batch has roughly the same execution time.
# Durations are given for a GitHub Ubuntu large runner with 16 CPUs.
#

.PHONY: pcc_batch_1 # duration: 6'10''
pcc_batch_1:
	$(call run_recipe_with_details,no_tfhe_typo)
	$(call run_recipe_with_details,no_dbg_log)
	$(call run_recipe_with_details,check_parameter_export_ok)
	$(call run_recipe_with_details,check_fmt)
	$(call run_recipe_with_details,check_typos)
	$(call run_recipe_with_details,lint_doc)
	$(call run_recipe_with_details,check_md_docs_are_tested)
	$(call run_recipe_with_details,check_intra_md_links)
	$(call run_recipe_with_details,check_doc_paths_use_dash)
	$(call run_recipe_with_details,test_tfhe_lints)
	$(call run_recipe_with_details,tfhe_lints)
	$(call run_recipe_with_details,clippy_rustdoc)

.PHONY: pcc_batch_2 # duration: 6'10'' (shortest one, extend it with further checks)
pcc_batch_2:
	$(call run_recipe_with_details,clippy)
	$(call run_recipe_with_details,clippy_all_targets)
	$(call run_recipe_with_details,check_fmt_js)

.PHONY: pcc_batch_3 # duration: 6'50''
pcc_batch_3:
	$(call run_recipe_with_details,clippy_shortint)
	$(call run_recipe_with_details,clippy_integer)

.PHONY: pcc_batch_4 # duration: 7'40''
pcc_batch_4:
	$(call run_recipe_with_details,clippy_core)
	$(call run_recipe_with_details,clippy_js_wasm_api)
	$(call run_recipe_with_details,clippy_ws_tests)
	$(call run_recipe_with_details,clippy_bench)

.PHONY: pcc_batch_5 # duration: 7'20''
pcc_batch_5:
	$(call run_recipe_with_details,clippy_tfhe_lints)
	$(call run_recipe_with_details,check_compile_tests)
	$(call run_recipe_with_details,clippy_backward_compat_data)

.PHONY: pcc_batch_6 # duration: 6'32''
pcc_batch_6:
	$(call run_recipe_with_details,clippy_boolean)
	$(call run_recipe_with_details,clippy_c_api)
	$(call run_recipe_with_details,clippy_tasks)
	$(call run_recipe_with_details,clippy_tfhe_csprng)
	$(call run_recipe_with_details,clippy_zk_pok)
	$(call run_recipe_with_details,clippy_trivium)
	$(call run_recipe_with_details,clippy_versionable)
	$(call run_recipe_with_details,clippy_param_dedup)
	$(call run_recipe_with_details,docs)

.PHONY: pcc_batch_7 # duration: 7'50'' (currently the PCC execution bottleneck)
pcc_batch_7:
	$(call run_recipe_with_details,check_compile_tests_c_api)

.PHONY: pcc_gpu # pcc stands for pre commit checks for GPU compilation
pcc_gpu: check_rust_bindings_did_not_change clippy_rustdoc_gpu \
	clippy_gpu clippy_cuda_backend clippy_bench_gpu check_compile_tests_benches_gpu test_integer_hl_test_gpu_check_warnings
pcc_gpu:
	$(call run_recipe_with_details,check_rust_bindings_did_not_change)
	$(call run_recipe_with_details,clippy_rustdoc_gpu)
	$(call run_recipe_with_details,clippy_gpu)
	$(call run_recipe_with_details,clippy_cuda_backend)
	$(call run_recipe_with_details,clippy_bench_gpu)
	$(call run_recipe_with_details,check_compile_tests_benches_gpu)
	$(call run_recipe_with_details,test_integer_hl_test_gpu_check_warnings)

.PHONY: pcc_hpu # pcc stands for pre commit checks for HPU compilation
pcc_hpu: clippy_hpu clippy_hpu_backend clippy_hpu_mockup test_integer_hpu_mockup_ci_fast
pcc_hpu:
	$(call run_recipe_with_details,clippy_hpu)
	$(call run_recipe_with_details,clippy_hpu_backend)
	$(call run_recipe_with_details,clippy_hpu_mockup)
	$(call run_recipe_with_details,test_integer_hpu_mockup_ci_fast)

.PHONY: fpcc # pcc stands for pre commit checks, the f stands for fast
fpcc: no_tfhe_typo no_dbg_log check_parameter_export_ok check_fmt check_typos lint_doc \
	check_md_docs_are_tested check_intra_md_links check_doc_paths_use_dash clippy_fast check_compile_tests
fpcc:
	$(call run_recipe_with_details,no_tfhe_typo)
	$(call run_recipe_with_details,no_dbg_log)
	$(call run_recipe_with_details,check_parameter_export_ok)
	$(call run_recipe_with_details,check_fmt)
	$(call run_recipe_with_details,check_typos)
	$(call run_recipe_with_details,lint_doc)
	$(call run_recipe_with_details,check_md_docs_are_tested)
	$(call run_recipe_with_details,check_intra_md_links)
	$(call run_recipe_with_details,check_doc_paths_use_dash)
	$(call run_recipe_with_details,clippy_fast)
	$(call run_recipe_with_details,check_compile_tests)

.PHONY: conformance # Automatically fix problems that can be fixed
conformance: fix_newline fmt fmt_js

@@ -45,7 +45,7 @@ production-ready library for all the advanced features of TFHE.
- **Short integer API** that enables exact, unbounded FHE integer arithmetic with up to 8 bits of message space
- **Size-efficient public key encryption**
- **Ciphertext and server key compression** for efficient data transfer
- **Full Rust API, C bindings to the Rust High-Level API, and client-side Javascript API using WASM**.
- **Full Rust API, C bindings to the Rust High-Level API, and client-side JavaScript API using WASM**.

*Learn more about TFHE-rs features in the [documentation](https://docs.zama.ai/tfhe-rs/readme).*
<br></br>
@@ -79,7 +79,7 @@ tfhe = { version = "*", features = ["boolean", "shortint", "integer"] }
```

> [!Note]
> Note: You need to use Rust version >= 1.84 to compile TFHE-rs.
> Note: You need Rust version 1.84 or newer to compile TFHE-rs. You can check your version with `rustc --version`.

> [!Note]
> Note: AArch64-based machines are not supported for Windows as it's currently missing an entropy source to be able to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in TFHE-rs.
@@ -147,7 +147,7 @@ To run this code, use the following command:

> [!Note]
> Note that when running code that uses `TFHE-rs`, it is highly recommended
to run in release mode with cargo's `--release` flag to have the best performances possible.
to run in release mode with cargo's `--release` flag to have the best performance possible.

*Find an example with more explanations in [this part of the documentation](https://docs.zama.ai/tfhe-rs/get-started/quick-start)*

@@ -13,6 +13,7 @@ extend-ignore-identifiers-re = [
# Example in trivium
"C9217BA0D762ACA1",
"0x[0-9a-fA-F]+",
"xrt_coreutil",
]

[files]

@@ -129,7 +129,7 @@ Other sizes than 64 bit are expected to be available in the future.

# FHE shortint Trivium implementation

The same implementation is also available for generic Ciphertexts representing bits (meant to be used with parameters `V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128`).
The same implementation is also available for generic Ciphertexts representing bits (meant to be used with parameters `V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128`).
It uses a lower-level API of tfhe-rs, so the syntax is slightly different. It also implements the `TransCiphering` trait. For optimization purposes, it does not internally run
on the same cryptographic parameters as the high-level API of tfhe-rs. As such, it requires the use of a casting key to switch from one parameter space to another, which makes
its setup a little more intricate.
@@ -138,9 +138,9 @@ Example code:
```rust
use tfhe::shortint::prelude::*;
use tfhe::shortint::parameters::current_params::{
    V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{ConfigBuilder, generate_keys, FheUint64};
use tfhe::prelude::*;
@@ -148,17 +148,17 @@ use tfhe_trivium::TriviumStreamShortint;

fn test_shortint() {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
    let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB".to_string();

@@ -1,9 +1,9 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::parameters::current_params::{
    V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::shortint::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
@@ -11,19 +11,19 @@ use tfhe_trivium::{KreyviumStreamShortint, TransCiphering};

pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) =
        gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
        gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);

    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
@@ -64,19 +64,19 @@ pub fn kreyvium_shortint_warmup(c: &mut Criterion) {

pub fn kreyvium_shortint_gen(c: &mut Criterion) {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) =
        gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
        gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);

    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
@@ -112,19 +112,19 @@ pub fn kreyvium_shortint_gen(c: &mut Criterion) {

pub fn kreyvium_shortint_trans(c: &mut Criterion) {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) =
        gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
        gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);

    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB000000000000".to_string();

@@ -1,9 +1,9 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::parameters::current_params::{
    V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::shortint::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
@@ -11,19 +11,19 @@ use tfhe_trivium::{TransCiphering, TriviumStreamShortint};

pub fn trivium_shortint_warmup(c: &mut Criterion) {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) =
        gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
        gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);

    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB".to_string();
@@ -64,19 +64,19 @@ pub fn trivium_shortint_warmup(c: &mut Criterion) {

pub fn trivium_shortint_gen(c: &mut Criterion) {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) =
        gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
        gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);

    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB".to_string();
@@ -112,19 +112,19 @@ pub fn trivium_shortint_gen(c: &mut Criterion) {

pub fn trivium_shortint_trans(c: &mut Criterion) {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) =
        gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
        gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);

    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB".to_string();

@@ -1,9 +1,9 @@
use crate::{KreyviumStream, KreyviumStreamByte, KreyviumStreamShortint, TransCiphering};
use tfhe::prelude::*;
use tfhe::shortint::parameters::current_params::{
    V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
// Values for these tests come from the github repo renaud1239/Kreyvium,
@@ -221,19 +221,19 @@ use tfhe::shortint::prelude::*;
#[test]
fn kreyvium_test_shortint_long() {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) =
        gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
        gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);

    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB000000000000".to_string();

@@ -1,9 +1,9 @@
use crate::{TransCiphering, TriviumStream, TriviumStreamByte, TriviumStreamShortint};
use tfhe::prelude::*;
use tfhe::shortint::parameters::current_params::{
    V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
    V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
// Values for these tests come from the github repo cantora/avr-crypto-lib, commit 2a5b018,
@@ -357,19 +357,19 @@ use tfhe::shortint::prelude::*;
#[test]
fn trivium_test_shortint_long() {
    let config = ConfigBuilder::default()
        .use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
        .build();
    let (hl_client_key, hl_server_key) = generate_keys(config);
    let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
    let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();

    let (client_key, server_key): (ClientKey, ServerKey) =
        gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
        gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);

    let ksk = KeySwitchingKey::new(
        (&client_key, Some(&server_key)),
        (&underlying_ck, &underlying_sk),
        V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
        V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
    );

    let key_string = "0053A6F94C9FF24598EB".to_string();

@@ -1,6 +1,6 @@
[package]
name = "tfhe-cuda-backend"
version = "0.11.0"
version = "0.12.0"
edition = "2021"
authors = ["Zama team"]
license = "BSD-3-Clause-Clear"
@@ -20,3 +20,4 @@ bindgen = "0.71"
experimental-multi-arch = []
profile = []
debug = []
debug-fake-multi-gpu = []

@@ -48,13 +48,16 @@ fn main() {
    // Conditionally pass the "USE_NVTOOLS" variable to CMake if the feature is enabled
    if cfg!(feature = "profile") {
        cmake_config.define("USE_NVTOOLS", "ON");
        println!("cargo:rustc-link-lib=nvToolsExt");
    } else {
        cmake_config.define("USE_NVTOOLS", "OFF");
    }

    if cfg!(feature = "debug") {
        cmake_config.define("CMAKE_BUILD_TYPE", "Debug");
    } else if cfg!(feature = "debug-fake-multi-gpu") {
        cmake_config.define("CMAKE_BUILD_TYPE", "DebugOnlyCpu");
        cmake_config.define("CMAKE_VERBOSE_MAKEFILE", "ON");
        cmake_config.define("FAKE_MULTI_GPU", "ON");
    }

    // Build the CMake project
@@ -81,6 +84,7 @@ fn main() {
        "cuda/include/ciphertext.h",
        "cuda/include/integer/compression/compression.h",
        "cuda/include/integer/integer.h",
        "cuda/include/aes/aes.h",
        "cuda/include/zk/zk.h",
        "cuda/include/keyswitch/keyswitch.h",
        "cuda/include/keyswitch/ks_enums.h",

@@ -86,6 +86,10 @@ if(CMAKE_BUILD_TYPE_LOWERCASE STREQUAL "debug")
  message("Compiling in Debug mode")
  add_definitions(-DDEBUG)
  set(OPTIMIZATION_FLAGS "${OPTIMIZATION_FLAGS} -O0 -G -g")
  set(USE_NVTOOLS 1)
elseif(CMAKE_BUILD_TYPE_LOWERCASE STREQUAL "debugonlycpu")
  message("Compiling GPU kernels in Release and CPU code in Debug")
  set(OPTIMIZATION_FLAGS "${OPTIMIZATION_FLAGS} -O0 -g")
else()
  # Release mode
  message("Compiling in Release mode")
@@ -98,6 +102,11 @@ if(${USE_NVTOOLS})
  add_definitions(-DUSE_NVTOOLS)
endif()

if(${FAKE_MULTI_GPU})
  message(STATUS "Fake multi-gpu debugging is enabled")
  add_definitions(-DDEBUG_FAKE_MULTI_GPU)
endif()

# In production, use -arch=sm_70 --ptxas-options=-v to see register spills, -lineinfo for better
# debugging, and -lnvToolsExt to use nvtx when profiling
set(CMAKE_CUDA_FLAGS

44 backends/tfhe-cuda-backend/cuda/include/aes/aes.h Normal file
@@ -0,0 +1,44 @@
#ifndef AES_H
#define AES_H
#include "../integer/integer.h"
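
// Calling convention, as suggested by the signatures below: each scratch_*
// entry point prepares the temporary GPU buffers for an operation and returns
// the tracked allocation size, the matching cuda_integer_* call runs the
// operation using those buffers (mem_ptr), and cleanup_* releases them.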
extern "C" {
uint64_t scratch_cuda_integer_aes_encrypt_64(
    CudaStreamsFFI streams, int8_t **mem_ptr, uint32_t glwe_dimension,
    uint32_t polynomial_size, uint32_t lwe_dimension, uint32_t ks_level,
    uint32_t ks_base_log, uint32_t pbs_level, uint32_t pbs_base_log,
    uint32_t grouping_factor, uint32_t message_modulus, uint32_t carry_modulus,
    PBS_TYPE pbs_type, bool allocate_gpu_memory,
    PBS_MS_REDUCTION_T noise_reduction_type, uint32_t num_aes_inputs,
    uint32_t sbox_parallelism);

void cuda_integer_aes_ctr_encrypt_64(CudaStreamsFFI streams,
    CudaRadixCiphertextFFI *output,
    CudaRadixCiphertextFFI const *iv,
    CudaRadixCiphertextFFI const *round_keys,
    const uint64_t *counter_bits_le_all_blocks,
    uint32_t num_aes_inputs, int8_t *mem_ptr,
    void *const *bsks, void *const *ksks);

void cleanup_cuda_integer_aes_encrypt_64(CudaStreamsFFI streams,
    int8_t **mem_ptr_void);

uint64_t scratch_cuda_integer_key_expansion_64(
    CudaStreamsFFI streams, int8_t **mem_ptr, uint32_t glwe_dimension,
    uint32_t polynomial_size, uint32_t lwe_dimension, uint32_t ks_level,
    uint32_t ks_base_log, uint32_t pbs_level, uint32_t pbs_base_log,
    uint32_t grouping_factor, uint32_t message_modulus, uint32_t carry_modulus,
    PBS_TYPE pbs_type, bool allocate_gpu_memory,
    PBS_MS_REDUCTION_T noise_reduction_type);

void cuda_integer_key_expansion_64(CudaStreamsFFI streams,
    CudaRadixCiphertextFFI *expanded_keys,
    CudaRadixCiphertextFFI const *key,
    int8_t *mem_ptr, void *const *bsks,
    void *const *ksks);

void cleanup_cuda_integer_key_expansion_64(CudaStreamsFFI streams,
    int8_t **mem_ptr_void);
}

#endif
445 backends/tfhe-cuda-backend/cuda/include/aes/aes_utilities.h Normal file
@@ -0,0 +1,445 @@
#ifndef AES_UTILITIES
#define AES_UTILITIES
#include "../integer/integer_utilities.h"

/**
 * This structure holds pre-computed LUTs for essential bitwise operations
 * required by the homomorphic AES circuit. Pre-computing these tables allows
 * for efficient application of non-linear functions like AND during the PBS
 * process. It includes LUTs for:
 * - AND: for the non-linear part of the S-Box.
 * - FLUSH: to clear carry bits and isolate the message bit (x -> x & 1).
 * - CARRY: to extract the carry bit for additions (x -> (x >> 1) & 1).
 */
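// Worked on clear values, for intuition only (not part of the GPU path),
// assuming each radix block carries one AES state bit plus a possible carry:
//   AND:   (a, b) -> a & b       e.g. AND(1, 1) = 1, AND(1, 0) = 0
//   FLUSH:  x -> x & 1           e.g. FLUSH(0b11) = 1 (carry cleared, message bit kept)
//   CARRY:  x -> (x >> 1) & 1    e.g. CARRY(0b11) = 1 (carry bit extracted)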
|
||||
template <typename Torus> struct int_aes_lut_buffers {
|
||||
int_radix_lut<Torus> *and_lut;
|
||||
int_radix_lut<Torus> *flush_lut;
|
||||
int_radix_lut<Torus> *carry_lut;
|
||||
|
||||
int_aes_lut_buffers(CudaStreams streams, const int_radix_params ¶ms,
|
||||
bool allocate_gpu_memory, uint32_t num_aes_inputs,
|
||||
uint32_t sbox_parallelism, uint64_t &size_tracker) {
|
||||
|
||||
constexpr uint32_t AES_STATE_BITS = 64;
|
||||
constexpr uint32_t SBOX_MAX_AND_GATES = 18;
|
||||
|
||||
this->and_lut = new int_radix_lut<Torus>(
|
||||
streams, params, 1,
|
||||
SBOX_MAX_AND_GATES * num_aes_inputs * sbox_parallelism,
|
||||
allocate_gpu_memory, size_tracker);
|
||||
std::function<Torus(Torus, Torus)> and_lambda =
|
||||
[](Torus a, Torus b) -> Torus { return a & b; };
|
||||
generate_device_accumulator_bivariate<Torus>(
|
||||
streams.stream(0), streams.gpu_index(0), this->and_lut->get_lut(0, 0),
|
||||
this->and_lut->get_degree(0), this->and_lut->get_max_degree(0),
|
||||
params.glwe_dimension, params.polynomial_size, params.message_modulus,
|
||||
params.carry_modulus, and_lambda, allocate_gpu_memory);
|
||||
auto active_streams_and_lut = streams.active_gpu_subset(
|
||||
SBOX_MAX_AND_GATES * num_aes_inputs * sbox_parallelism);
|
||||
this->and_lut->broadcast_lut(active_streams_and_lut);
|
||||
|
||||
this->flush_lut = new int_radix_lut<Torus>(
|
||||
streams, params, 1, AES_STATE_BITS * num_aes_inputs,
|
||||
allocate_gpu_memory, size_tracker);
|
||||
std::function<Torus(Torus)> flush_lambda = [](Torus x) -> Torus {
|
||||
return x & 1;
|
||||
};
|
||||
generate_device_accumulator(
|
||||
streams.stream(0), streams.gpu_index(0), this->flush_lut->get_lut(0, 0),
|
||||
this->flush_lut->get_degree(0), this->flush_lut->get_max_degree(0),
|
||||
params.glwe_dimension, params.polynomial_size, params.message_modulus,
|
||||
params.carry_modulus, flush_lambda, allocate_gpu_memory);
|
||||
auto active_streams_flush_lut =
|
||||
streams.active_gpu_subset(AES_STATE_BITS * num_aes_inputs);
|
||||
this->flush_lut->broadcast_lut(active_streams_flush_lut);
|
||||
|
||||
this->carry_lut = new int_radix_lut<Torus>(
|
||||
streams, params, 1, num_aes_inputs, allocate_gpu_memory, size_tracker);
|
||||
std::function<Torus(Torus)> carry_lambda = [](Torus x) -> Torus {
|
||||
return (x >> 1) & 1;
|
||||
};
|
||||
generate_device_accumulator(
|
||||
streams.stream(0), streams.gpu_index(0), this->carry_lut->get_lut(0, 0),
|
||||
this->carry_lut->get_degree(0), this->carry_lut->get_max_degree(0),
|
||||
params.glwe_dimension, params.polynomial_size, params.message_modulus,
|
||||
params.carry_modulus, carry_lambda, allocate_gpu_memory);
|
||||
auto active_streams_carry_lut = streams.active_gpu_subset(num_aes_inputs);
|
||||
this->carry_lut->broadcast_lut(active_streams_carry_lut);
|
||||
}
|
||||
|
||||
void release(CudaStreams streams) {
|
||||
this->and_lut->release(streams);
|
||||
delete this->and_lut;
|
||||
this->and_lut = nullptr;
|
||||
|
||||
this->flush_lut->release(streams);
|
||||
delete this->flush_lut;
|
||||
this->flush_lut = nullptr;
|
||||
|
||||
this->carry_lut->release(streams);
|
||||
delete this->carry_lut;
|
||||
this->carry_lut = nullptr;
|
||||
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
|
||||
}
|
||||
};
|
||||
|
||||
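// A quick plaintext illustration (not part of this header) of what the FLUSH
// and CARRY tables compute on a radix block whose layout is `carry|message`,
// e.g. with message_modulus = 2 and carry_modulus = 2. Adding two encrypted
// bits a and b without a PBS gives sum = a + b in {0, 1, 2}:
//
//   sum = 0b10 (a = b = 1)
//   FLUSH: 0b10 & 1        -> 0 = a ^ b  (carry cleared, message bit kept)
//   CARRY: (0b10 >> 1) & 1 -> 1 = a & b  (carry bit extracted)
//
// So one cheap addition plus these two LUTs recovers both the XOR and the
// AND of the input bits.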
/**
 * The operations within an AES round, particularly MixColumns, require
 * intermediate storage. These buffers hold temporary values such as copies
 * of columns or the results of multiplications, so that data still needed
 * later in the same round is not overwritten.
 */
template <typename Torus> struct int_aes_round_workspaces {
  CudaRadixCiphertextFFI *mix_columns_col_copy_buffer;
  CudaRadixCiphertextFFI *mix_columns_mul_workspace_buffer;
  CudaRadixCiphertextFFI *vec_tmp_bit_buffer;

  int_aes_round_workspaces(CudaStreams streams, const int_radix_params &params,
                           bool allocate_gpu_memory, uint32_t num_aes_inputs,
                           uint64_t &size_tracker) {

    constexpr uint32_t BITS_PER_BYTE = 8;
    constexpr uint32_t BYTES_PER_COLUMN = 4;
    constexpr uint32_t BITS_PER_COLUMN = BITS_PER_BYTE * BYTES_PER_COLUMN;
    constexpr uint32_t MIX_COLUMNS_MUL_WORKSPACE_BYTES = BYTES_PER_COLUMN + 1;

    this->mix_columns_col_copy_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0),
        this->mix_columns_col_copy_buffer, BITS_PER_COLUMN * num_aes_inputs,
        params.big_lwe_dimension, size_tracker, allocate_gpu_memory);

    this->mix_columns_mul_workspace_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0),
        this->mix_columns_mul_workspace_buffer,
        MIX_COLUMNS_MUL_WORKSPACE_BYTES * BITS_PER_BYTE * num_aes_inputs,
        params.big_lwe_dimension, size_tracker, allocate_gpu_memory);

    this->vec_tmp_bit_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->vec_tmp_bit_buffer,
        num_aes_inputs, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);
  }

  void release(CudaStreams streams, bool allocate_gpu_memory) {
    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->mix_columns_col_copy_buffer,
                                   allocate_gpu_memory);
    delete this->mix_columns_col_copy_buffer;
    this->mix_columns_col_copy_buffer = nullptr;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->mix_columns_mul_workspace_buffer,
                                   allocate_gpu_memory);
    delete this->mix_columns_mul_workspace_buffer;
    this->mix_columns_mul_workspace_buffer = nullptr;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->vec_tmp_bit_buffer,
                                   allocate_gpu_memory);
    delete this->vec_tmp_bit_buffer;
    this->vec_tmp_bit_buffer = nullptr;
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};

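// Plaintext reference for the GF(2^8) "xtime" step that MixColumns performs
// on each column byte. The homomorphic version works bit by bit inside the
// buffers above; this clear helper is only an illustration of the math:
static inline uint8_t xtime(uint8_t b) {
  // Multiply by x in GF(2^8) modulo the AES polynomial x^8 + x^4 + x^3 + x + 1.
  return (uint8_t)((b << 1) ^ ((b & 0x80) ? 0x1B : 0x00));
}
// MixColumns on one column [a0, a1, a2, a3] then computes, e.g. for row 0:
//   r0 = xtime(a0) ^ (xtime(a1) ^ a1) ^ a2 ^ a3
// which is 2*a0 + 3*a1 + a2 + a3 over GF(2^8).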
/**
 * In CTR mode, a counter is homomorphically added to the encrypted IV. This
 * structure holds the buffers needed for this 128-bit ripple-carry addition,
 * such as the buffer for the carry bit that propagates across the addition
 * chain (`vec_tmp_carry_buffer`).
 */
template <typename Torus> struct int_aes_counter_workspaces {
  CudaRadixCiphertextFFI *vec_tmp_carry_buffer;
  CudaRadixCiphertextFFI *vec_tmp_sum_buffer;
  CudaRadixCiphertextFFI *vec_trivial_b_bits_buffer;
  Torus *h_counter_bits_buffer;
  Torus *d_counter_bits_buffer;

  int_aes_counter_workspaces(CudaStreams streams,
                             const int_radix_params &params,
                             bool allocate_gpu_memory, uint32_t num_aes_inputs,
                             uint64_t &size_tracker) {

    this->vec_tmp_carry_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->vec_tmp_carry_buffer,
        num_aes_inputs, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);

    this->vec_tmp_sum_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->vec_tmp_sum_buffer,
        num_aes_inputs, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);

    this->vec_trivial_b_bits_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0),
        this->vec_trivial_b_bits_buffer, num_aes_inputs,
        params.big_lwe_dimension, size_tracker, allocate_gpu_memory);

    this->h_counter_bits_buffer =
        (Torus *)malloc(num_aes_inputs * sizeof(Torus));
    size_tracker += num_aes_inputs * sizeof(Torus);
    this->d_counter_bits_buffer = (Torus *)cuda_malloc_with_size_tracking_async(
        num_aes_inputs * sizeof(Torus), streams.stream(0),
        streams.gpu_index(0), size_tracker, allocate_gpu_memory);
  }

  void release(CudaStreams streams, bool allocate_gpu_memory) {
    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->vec_tmp_carry_buffer,
                                   allocate_gpu_memory);
    delete this->vec_tmp_carry_buffer;
    this->vec_tmp_carry_buffer = nullptr;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->vec_tmp_sum_buffer,
                                   allocate_gpu_memory);
    delete this->vec_tmp_sum_buffer;
    this->vec_tmp_sum_buffer = nullptr;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->vec_trivial_b_bits_buffer,
                                   allocate_gpu_memory);
    delete this->vec_trivial_b_bits_buffer;
    this->vec_trivial_b_bits_buffer = nullptr;

    if (allocate_gpu_memory) {
      cuda_drop_async(this->d_counter_bits_buffer, streams.stream(0),
                      streams.gpu_index(0));
    }
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
    free(this->h_counter_bits_buffer);
  }
};

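// Clear reference for one step of the ripple-carry addition that these
// buffers support (illustration only). Adding a clear counter bit b to an
// encrypted IV bit a with incoming carry c gives:
//   sum   = a ^ b ^ c
//   carry = (a & b) | (c & (a ^ b))
// Since b is a trivial (clear) bit here, each step degenerates to cheap
// XOR/AND gates, and the per-input state reduces to the single propagating
// carry held in `vec_tmp_carry_buffer`.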
/**
 * This structure allocates the largest memory blocks:
 * - `sbox_internal_workspace`: a large workspace for the complex, parallel
 *   evaluation of the S-Box circuit.
 * - `main_bitsliced_states_buffer`: holds the entire set of AES states in a
 *   bitsliced layout, which is optimal for parallel bitwise operations on
 *   the GPU.
 * - The remaining buffers are used for data layout transformations
 *   (transposition) and for batching small operations into larger, more
 *   efficient launches.
 */
template <typename Torus> struct int_aes_main_workspaces {
  CudaRadixCiphertextFFI *sbox_internal_workspace;
  CudaRadixCiphertextFFI *initial_states_and_jit_key_workspace;
  CudaRadixCiphertextFFI *main_bitsliced_states_buffer;
  CudaRadixCiphertextFFI *tmp_tiled_key_buffer;
  CudaRadixCiphertextFFI *batch_processing_buffer;

  int_aes_main_workspaces(CudaStreams streams, const int_radix_params &params,
                          bool allocate_gpu_memory, uint32_t num_aes_inputs,
                          uint32_t sbox_parallelism, uint64_t &size_tracker) {

    constexpr uint32_t AES_STATE_BITS = 64;
    constexpr uint32_t SBOX_MAX_AND_GATES = 18;
    constexpr uint32_t BATCH_BUFFER_OPERANDS = 3;

    this->sbox_internal_workspace = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->sbox_internal_workspace,
        num_aes_inputs * AES_STATE_BITS * sbox_parallelism,
        params.big_lwe_dimension, size_tracker, allocate_gpu_memory);

    this->initial_states_and_jit_key_workspace = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0),
        this->initial_states_and_jit_key_workspace,
        num_aes_inputs * AES_STATE_BITS, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);

    this->main_bitsliced_states_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0),
        this->main_bitsliced_states_buffer, num_aes_inputs * AES_STATE_BITS,
        params.big_lwe_dimension, size_tracker, allocate_gpu_memory);

    this->tmp_tiled_key_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->tmp_tiled_key_buffer,
        num_aes_inputs * AES_STATE_BITS, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);

    this->batch_processing_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->batch_processing_buffer,
        num_aes_inputs * SBOX_MAX_AND_GATES * BATCH_BUFFER_OPERANDS *
            sbox_parallelism,
        params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
  }

  void release(CudaStreams streams, bool allocate_gpu_memory) {
    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->sbox_internal_workspace,
                                   allocate_gpu_memory);
    delete this->sbox_internal_workspace;
    this->sbox_internal_workspace = nullptr;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->initial_states_and_jit_key_workspace,
                                   allocate_gpu_memory);
    delete this->initial_states_and_jit_key_workspace;
    this->initial_states_and_jit_key_workspace = nullptr;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->main_bitsliced_states_buffer,
                                   allocate_gpu_memory);
    delete this->main_bitsliced_states_buffer;
    this->main_bitsliced_states_buffer = nullptr;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->tmp_tiled_key_buffer,
                                   allocate_gpu_memory);
    delete this->tmp_tiled_key_buffer;
    this->tmp_tiled_key_buffer = nullptr;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->batch_processing_buffer,
                                   allocate_gpu_memory);
    delete this->batch_processing_buffer;
    this->batch_processing_buffer = nullptr;
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};

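// What "bitsliced layout" means here, on clear data (illustration only):
// instead of storing state bytes contiguously, bit plane k gathers bit k of
// every byte, so one homomorphic gate applied to a plane acts on all bytes
// (and all AES inputs) at once.
//
//   bytes:   b0 = 0b0110, b1 = 0b1010   (low 4 bits shown)
//   plane 0: [b0>>0 & 1, b1>>0 & 1] = [0, 0]
//   plane 1: [b0>>1 & 1, b1>>1 & 1] = [1, 1]
//   plane 2: [b0>>2 & 1, b1>>2 & 1] = [1, 0]
//   plane 3: [b0>>3 & 1, b1>>3 & 1] = [0, 1]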
/**
 * This structure is a container holding instances of all the other buffer
 * management structs. It provides a single object that manages the entire
 * lifecycle of the memory needed for a complete AES-CTR encryption
 * operation.
 */
template <typename Torus> struct int_aes_encrypt_buffer {
  int_radix_params params;
  bool allocate_gpu_memory;
  uint32_t num_aes_inputs;
  uint32_t sbox_parallel_instances;

  int_aes_lut_buffers<Torus> *luts;
  int_aes_round_workspaces<Torus> *round_workspaces;
  int_aes_counter_workspaces<Torus> *counter_workspaces;
  int_aes_main_workspaces<Torus> *main_workspaces;

  int_aes_encrypt_buffer(CudaStreams streams, const int_radix_params &params,
                         bool allocate_gpu_memory, uint32_t num_aes_inputs,
                         uint32_t sbox_parallelism, uint64_t &size_tracker) {

    PANIC_IF_FALSE(num_aes_inputs >= 1,
                   "num_aes_inputs should be greater than or equal to 1");

    this->params = params;
    this->allocate_gpu_memory = allocate_gpu_memory;
    this->num_aes_inputs = num_aes_inputs;
    this->sbox_parallel_instances = sbox_parallelism;

    this->luts = new int_aes_lut_buffers<Torus>(
        streams, params, allocate_gpu_memory, num_aes_inputs, sbox_parallelism,
        size_tracker);

    this->round_workspaces = new int_aes_round_workspaces<Torus>(
        streams, params, allocate_gpu_memory, num_aes_inputs, size_tracker);

    this->counter_workspaces = new int_aes_counter_workspaces<Torus>(
        streams, params, allocate_gpu_memory, num_aes_inputs, size_tracker);

    this->main_workspaces = new int_aes_main_workspaces<Torus>(
        streams, params, allocate_gpu_memory, num_aes_inputs, sbox_parallelism,
        size_tracker);
  }

  void release(CudaStreams streams) {
    luts->release(streams);
    delete luts;
    luts = nullptr;

    round_workspaces->release(streams, allocate_gpu_memory);
    delete round_workspaces;
    round_workspaces = nullptr;

    counter_workspaces->release(streams, allocate_gpu_memory);
    delete counter_workspaces;
    counter_workspaces = nullptr;

    main_workspaces->release(streams, allocate_gpu_memory);
    delete main_workspaces;
    main_workspaces = nullptr;
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};

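// Typical lifecycle (a sketch; `streams` and `params` are assumed to come
// from the existing backend setup). Following the size-tracking pattern used
// throughout this codebase, passing allocate_gpu_memory = false should act
// as a dry run that only accumulates the required size in size_tracker,
// which is how the scratch_* entry points can report memory needs:
//
//   uint64_t size_tracker = 0;
//   auto *mem = new int_aes_encrypt_buffer<uint64_t>(
//       streams, params, /*allocate_gpu_memory=*/true,
//       /*num_aes_inputs=*/8, /*sbox_parallelism=*/4, size_tracker);
//   // ... run the homomorphic AES-CTR computation ...
//   mem->release(streams);
//   delete mem;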
/**
 * This structure holds the buffer for the 44 words of the expanded key and
 * temporary storage for word manipulations. It contains its own instance of
 * `int_aes_encrypt_buffer` because the key expansion algorithm itself uses
 * the S-Box. This separation ensures that memory for key expansion can be
 * allocated and freed independently of the main encryption process.
 */
template <typename Torus> struct int_key_expansion_buffer {
  int_radix_params params;
  bool allocate_gpu_memory;

  CudaRadixCiphertextFFI *words_buffer;

  CudaRadixCiphertextFFI *tmp_word_buffer;
  CudaRadixCiphertextFFI *tmp_rotated_word_buffer;

  int_aes_encrypt_buffer<Torus> *aes_encrypt_buffer;

  int_key_expansion_buffer(CudaStreams streams, const int_radix_params &params,
                           bool allocate_gpu_memory, uint64_t &size_tracker) {
    this->params = params;
    this->allocate_gpu_memory = allocate_gpu_memory;

    constexpr uint32_t TOTAL_WORDS = 22;
    constexpr uint32_t BITS_PER_WORD = 32;
    constexpr uint32_t TOTAL_BITS = TOTAL_WORDS * BITS_PER_WORD;

    this->words_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->words_buffer,
        TOTAL_BITS, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);

    this->tmp_word_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->tmp_word_buffer,
        BITS_PER_WORD, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);

    this->tmp_rotated_word_buffer = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), this->tmp_rotated_word_buffer,
        BITS_PER_WORD, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);

    this->aes_encrypt_buffer = new int_aes_encrypt_buffer<Torus>(
        streams, params, allocate_gpu_memory, 1, 4, size_tracker);
  }

  void release(CudaStreams streams) {
    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->words_buffer, allocate_gpu_memory);
    delete this->words_buffer;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->tmp_word_buffer, allocate_gpu_memory);
    delete this->tmp_word_buffer;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   this->tmp_rotated_word_buffer,
                                   allocate_gpu_memory);
    delete this->tmp_rotated_word_buffer;

    this->aes_encrypt_buffer->release(streams);
    delete this->aes_encrypt_buffer;
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};
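
// Clear reference for the AES-128 key schedule that these buffers support
// (illustration only; the homomorphic version applies the same structure bit
// by bit, with SubWord going through the S-Box circuit):
//   i % 4 == 0: w[i] = w[i-4] ^ SubWord(RotWord(w[i-1])) ^ Rcon[i/4]
//   otherwise:  w[i] = w[i-4] ^ w[i-1]
// where RotWord([a0,a1,a2,a3]) = [a1,a2,a3,a0] and SubWord applies the S-Box
// to each byte; `tmp_rotated_word_buffer` matches the RotWord step.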

#endif
@@ -4,9 +4,7 @@
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cuda_runtime.h>
#include <vector>

extern "C" {

@@ -141,4 +139,5 @@ bool cuda_check_support_thread_block_clusters();
template <typename Torus>
void cuda_set_value_async(cudaStream_t stream, uint32_t gpu_index,
                          Torus *d_array, Torus value, Torus n);

#endif
@@ -4,6 +4,8 @@
#include <variant>
#include <vector>

#include "integer/integer.h"

extern std::mutex m;
extern bool p2p_enabled;
extern const int THRESHOLD_MULTI_GPU;
@@ -37,10 +39,237 @@ get_variant_element(const std::variant<std::vector<Torus>, Torus> &variant,
  }
}

int get_active_gpu_count(int num_inputs, int gpu_count);
uint32_t get_active_gpu_count(uint32_t num_inputs, uint32_t gpu_count);

int get_num_inputs_on_gpu(int total_num_inputs, int gpu_index, int gpu_count);

int get_gpu_offset(int total_num_inputs, int gpu_index, int gpu_count);

// A set of GPU streams and associated GPUs.
// Can be constructed from the FFI struct CudaStreamsFFI, which is only used
// to pass the streams/GPUs at the Rust/C interface. This class should only
// be constructed from the FFI struct, through class methods, or through the
// copy constructor. It can also be constructed as an empty set.
struct CudaStreams {
private:
  cudaStream_t const *_streams;
  uint32_t const *_gpu_indexes;
  uint32_t _gpu_count;
  bool _owns_streams;

  // Prevent the construction of a CudaStreams class from user code
  CudaStreams(cudaStream_t const *streams, uint32_t const *gpu_indexes,
              uint32_t gpu_count)
      : _streams(streams), _gpu_indexes(gpu_indexes), _gpu_count(gpu_count),
        _owns_streams(false) {}

public:
  // Construct an empty set. Invalid use of an empty set should raise an
  // error right away through asserts or a nullptr dereference.
  CudaStreams()
      : _streams(nullptr), _gpu_indexes(nullptr), _gpu_count((uint32_t)-1),
        _owns_streams(false) {}

  // Returns a subset of this set as an active subset. An active subset is
  // one that is temporarily used to perform some computation.
  CudaStreams active_gpu_subset(int num_radix_blocks) {
    return CudaStreams(_streams, _gpu_indexes,
                       get_active_gpu_count(num_radix_blocks, _gpu_count));
  }

  // Returns a CudaStreams struct containing only the ith stream
  CudaStreams get_ith(int i) const {
    return CudaStreams(&_streams[i], &_gpu_indexes[i], 1);
  }

  // Synchronize all the streams in the set
  void synchronize() const {
    for (uint32_t i = 0; i < _gpu_count; i++) {
      cuda_synchronize_stream(_streams[i], _gpu_indexes[i]);
    }
  }

  cudaStream_t stream(uint32_t idx) const {
    PANIC_IF_FALSE(idx < _gpu_count, "Invalid GPU index");
    return _streams[idx];
  }
  uint32_t gpu_index(uint32_t idx) const {
    PANIC_IF_FALSE(idx < _gpu_count, "Invalid GPU index");
    return _gpu_indexes[idx];
  }
  uint32_t count() const { return _gpu_count; }

  // Construct from the Rust FFI stream set. Streams are created in Rust
  // using the bindings.
  CudaStreams(CudaStreamsFFI &ffi)
      : _streams((cudaStream_t *)ffi.streams), _gpu_indexes(ffi.gpu_indexes),
        _gpu_count(ffi.gpu_count), _owns_streams(false) {}

  // Create a new set of streams on the same GPUs as those of the current
  // stream set. Can be used to parallelize computation by issuing kernels on
  // multiple streams on the same GPU.
  void create_on_same_gpus(const CudaStreams &other) {
    PANIC_IF_FALSE(_streams == nullptr,
                   "create_on_same_gpus called on a non-empty CudaStreams");

    cudaStream_t *new_streams = new cudaStream_t[other._gpu_count];

    // Sized on other._gpu_count: this instance is still empty here.
    uint32_t *gpu_indexes_clone = new uint32_t[other._gpu_count];
    for (uint32_t i = 0; i < other._gpu_count; ++i) {
      new_streams[i] = cuda_create_stream(other._gpu_indexes[i]);
      gpu_indexes_clone[i] = other._gpu_indexes[i];
    }

    this->_streams = new_streams;
    this->_gpu_indexes = gpu_indexes_clone;
    this->_gpu_count = other._gpu_count;

    // Flag this instance as owning streams so that we can destroy
    // the streams when they aren't needed anymore.
    this->_owns_streams = true;
  }

  // Copy constructor, setting the owning flag to false.
  // Only the initial instance of CudaStreams created with
  // create_on_same_gpus owns streams; copies of it do not.
  CudaStreams(const CudaStreams &src)
      : _streams(src._streams), _gpu_indexes(src._gpu_indexes),
        _gpu_count(src._gpu_count), _owns_streams(false) {}

  CudaStreams &operator=(CudaStreams const &other) {
    PANIC_IF_FALSE(this->_streams == nullptr ||
                       this->_streams == other._streams,
                   "Assigning an already initialized CudaStreams");
    this->_streams = other._streams;
    this->_gpu_indexes = other._gpu_indexes;
    this->_gpu_count = other._gpu_count;

    // Only the initial instance of CudaStreams created with
    // create_on_same_gpus owns streams; copies of it do not.
    this->_owns_streams = false;
    return *this;
  }

  // Destroy the streams if they were created by create_on_same_gpus.
  // We require the developer to call `release` on all instances
  // of cloned streams.
  void release() {
    // If this instance doesn't own streams, there's nothing to do
    // as the streams were created on the Rust side.
    if (_owns_streams) {
      for (uint32_t i = 0; i < _gpu_count; ++i) {
        cuda_destroy_stream(_streams[i], _gpu_indexes[i]);
      }
      delete[] _streams;
      _streams = nullptr;
      delete[] _gpu_indexes;
      _gpu_indexes = nullptr;
    }
  }

  // The destructor checks that streams created with create_on_same_gpus
  // were released manually with `release`.
  ~CudaStreams() {
    // Ensure streams are destroyed
    PANIC_IF_FALSE(
        !_owns_streams || _streams == nullptr,
        "release (this=%p) was not called on a CudaStreams object that "
        "is a clone of another one, %p",
        this, this->_streams);
  }
};

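// Usage sketch (assumes a valid CudaStreamsFFI handed over from Rust): wrap
// the FFI struct, derive an active subset sized for the workload, fork
// worker streams on the same GPUs, and release only what we created.
//
//   CudaStreams streams(ffi);                        // non-owning view
//   CudaStreams active = streams.active_gpu_subset(num_radix_blocks);
//   CudaStreams workers;
//   workers.create_on_same_gpus(active);             // owning clone
//   // ... launch kernels on workers.stream(i) ...
//   workers.synchronize();
//   workers.release();                               // required: owns streams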
struct CudaStreamsBarrier {
private:
  std::vector<cudaEvent_t> _events;
  CudaStreams _streams;

  CudaStreamsBarrier(const CudaStreamsBarrier &) {} // Prevent copy-construction
  CudaStreamsBarrier &operator=(const CudaStreamsBarrier &) {
    return *this;
  } // Prevent assignment
public:
  void create_on(const CudaStreams &streams) {
    _streams = streams;

    GPU_ASSERT(streams.count() > 1,
               "CudaStreamsBarrier: attempted to create on a single GPU");
    _events.resize(streams.count());
    for (uint32_t i = 0; i < streams.count(); i++) {
      _events[i] = cuda_create_event(streams.gpu_index(i));
    }
  }

  CudaStreamsBarrier() {}

  void local_streams_wait_for_stream_0(const CudaStreams &user_streams) {
    GPU_ASSERT(!_events.empty(),
               "CudaStreamsBarrier: must call create_on before use");
    GPU_ASSERT(user_streams.gpu_index(0) == _streams.gpu_index(0),
               "CudaStreamsBarrier: synchronization can only be performed on "
               "the GPUs the barrier was initially created on.");

    cuda_event_record(_events[0], user_streams.stream(0),
                      user_streams.gpu_index(0));
    for (uint32_t j = 1; j < user_streams.count(); j++) {
      GPU_ASSERT(user_streams.gpu_index(j) == _streams.gpu_index(j),
                 "CudaStreamsBarrier: synchronization can only be performed "
                 "on the GPUs the barrier was initially created on.");
      cuda_stream_wait_event(user_streams.stream(j), _events[0],
                             user_streams.gpu_index(j));
    }
  }

  void stream_0_wait_for_local_streams(const CudaStreams &user_streams) {
    GPU_ASSERT(!_events.empty(),
               "CudaStreamsBarrier: must call create_on before use");
    GPU_ASSERT(
        user_streams.count() <= _events.size(),
        "CudaStreamsBarrier: trying to synchronize too many streams. "
        "The barrier was created on a LUT that had %lu active streams, while "
        "the user stream set has %u streams",
        _events.size(), user_streams.count());

    if (user_streams.count() > 1) {
      // Worker GPUs record their events
      for (uint32_t j = 1; j < user_streams.count(); j++) {
        GPU_ASSERT(_streams.gpu_index(j) == user_streams.gpu_index(j),
                   "CudaStreamsBarrier: the user stream "
                   "set GPU[%u]=%u while the LUT stream set GPU[%u]=%u",
                   j, user_streams.gpu_index(j), j, _streams.gpu_index(j));

        cuda_event_record(_events[j], user_streams.stream(j),
                          user_streams.gpu_index(j));
      }

      // GPU 0 waits for all workers
      for (uint32_t j = 1; j < user_streams.count(); j++) {
        cuda_stream_wait_event(user_streams.stream(0), _events[j],
                               user_streams.gpu_index(0));
      }
    }
  }

  void release() {
    for (uint32_t j = 0; j < _streams.count(); j++) {
      cuda_event_destroy(_events[j], _streams.gpu_index(j));
    }

    _events.clear();
  }

  ~CudaStreamsBarrier() {
    GPU_ASSERT(_events.empty(),
               "CudaStreamsBarrier: must "
               "call release before destruction: events size = %lu",
               _events.size());
  }
};

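// Usage sketch for a scatter/gather pattern across GPUs (assumes `streams`
// spans more than one GPU, as create_on requires):
//
//   CudaStreamsBarrier barrier;
//   barrier.create_on(streams);
//   barrier.local_streams_wait_for_stream_0(streams); // fan out after prep
//   // ... each GPU works on its own shard ...
//   barrier.stream_0_wait_for_local_streams(streams); // gather on GPU 0
//   barrier.release();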

#endif
backends/tfhe-cuda-backend/cuda/include/integer/abs.h (new file, 52 lines)
@@ -0,0 +1,52 @@
#pragma once
#include "bitwise_ops.h"
#include "integer_utilities.h"
#include "scalar_shifts.h"

template <typename Torus> struct int_abs_buffer {
  int_radix_params params;

  int_arithmetic_scalar_shift_buffer<Torus> *arithmetic_scalar_shift_mem;
  int_sc_prop_memory<Torus> *scp_mem;
  int_bitop_buffer<Torus> *bitxor_mem;

  CudaRadixCiphertextFFI *mask;
  bool allocate_gpu_memory;

  int_abs_buffer(CudaStreams streams, int_radix_params params,
                 uint32_t num_radix_blocks, bool allocate_gpu_memory,
                 uint64_t &size_tracker) {
    this->params = params;
    this->allocate_gpu_memory = allocate_gpu_memory;
    arithmetic_scalar_shift_mem = new int_arithmetic_scalar_shift_buffer<Torus>(
        streams, SHIFT_OR_ROTATE_TYPE::RIGHT_SHIFT, params, num_radix_blocks,
        allocate_gpu_memory, size_tracker);
    uint32_t requested_flag = outputFlag::FLAG_NONE;
    scp_mem = new int_sc_prop_memory<Torus>(streams, params, num_radix_blocks,
                                            requested_flag,
                                            allocate_gpu_memory, size_tracker);
    bitxor_mem = new int_bitop_buffer<Torus>(streams, BITOP_TYPE::BITXOR,
                                             params, num_radix_blocks,
                                             allocate_gpu_memory, size_tracker);

    mask = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), mask, num_radix_blocks,
        params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
  }

  void release(CudaStreams streams) {
    arithmetic_scalar_shift_mem->release(streams);
    scp_mem->release(streams);
    bitxor_mem->release(streams);

    delete arithmetic_scalar_shift_mem;
    delete scp_mem;
    delete bitxor_mem;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   mask, this->allocate_gpu_memory);
    delete mask;
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};

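// The buffers above match the classic branchless absolute-value identity
// (a plausible reading of this struct; clear-integer illustration only):
//   mask = x >> (BITS - 1);   // arithmetic shift: 0 or all-ones
//   |x|  = (x ^ mask) - mask; // XOR, then a carry-propagated add/sub
// e.g. for int8_t x = -5: mask = 0xFF, x ^ mask = 4, and 4 - (-1) = 5.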
backends/tfhe-cuda-backend/cuda/include/integer/bitwise_ops.h (new file, 107 lines)
@@ -0,0 +1,107 @@
#pragma once
#include "integer_utilities.h"

template <typename Torus> struct int_bitop_buffer {

  int_radix_params params;
  int_radix_lut<Torus> *lut;
  BITOP_TYPE op;
  bool gpu_memory_allocated;

  int_bitop_buffer(CudaStreams streams, BITOP_TYPE op, int_radix_params params,
                   uint32_t num_radix_blocks, bool allocate_gpu_memory,
                   uint64_t &size_tracker) {
    gpu_memory_allocated = allocate_gpu_memory;
    this->op = op;
    this->params = params;
    auto active_streams = streams.active_gpu_subset(num_radix_blocks);
    switch (op) {
    case BITAND:
    case BITOR:
    case BITXOR:
      lut = new int_radix_lut<Torus>(streams, params, 1, num_radix_blocks,
                                     allocate_gpu_memory, size_tracker);
      {
        auto lut_bivariate_f = [op](Torus lhs, Torus rhs) -> Torus {
          if (op == BITOP_TYPE::BITAND) {
            // AND
            return lhs & rhs;
          } else if (op == BITOP_TYPE::BITOR) {
            // OR
            return lhs | rhs;
          } else {
            // XOR
            return lhs ^ rhs;
          }
        };

        generate_device_accumulator_bivariate<Torus>(
            streams.stream(0), streams.gpu_index(0), lut->get_lut(0, 0),
            lut->get_degree(0), lut->get_max_degree(0), params.glwe_dimension,
            params.polynomial_size, params.message_modulus,
            params.carry_modulus, lut_bivariate_f, gpu_memory_allocated);
        lut->broadcast_lut(active_streams);
      }
      break;
    default:
      // Scalar OP
      lut = new int_radix_lut<Torus>(streams, params, params.message_modulus,
                                     num_radix_blocks, allocate_gpu_memory,
                                     size_tracker);

      for (int i = 0; i < params.message_modulus; i++) {
        auto rhs = i;

        auto lut_univariate_scalar_f = [op, rhs](Torus x) -> Torus {
          if (op == BITOP_TYPE::SCALAR_BITAND) {
            // AND
            return x & rhs;
          } else if (op == BITOP_TYPE::SCALAR_BITOR) {
            // OR
            return x | rhs;
          } else {
            // XOR
            return x ^ rhs;
          }
        };
        generate_device_accumulator<Torus>(
            streams.stream(0), streams.gpu_index(0), lut->get_lut(0, i),
            lut->get_degree(i), lut->get_max_degree(i), params.glwe_dimension,
            params.polynomial_size, params.message_modulus,
            params.carry_modulus, lut_univariate_scalar_f,
            gpu_memory_allocated);
        lut->broadcast_lut(active_streams);
      }
    }
  }

  void release(CudaStreams streams) {
    lut->release(streams);
    delete lut;
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};

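// How the scalar branch above is typically used (illustration, not code from
// this diff): one univariate LUT is generated per possible clear-block value
// rhs in [0, message_modulus), and each ciphertext block is dispatched to the
// table matching its clear operand via the LUT index buffer. For
// message_modulus = 4 and a clear operand 0b0110 = 6 split into little-endian
// radix blocks [2, 1], block 0 uses the rhs = 2 table and block 1 the
// rhs = 1 table.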
void update_degrees_after_bitand(uint64_t *output_degrees,
                                 uint64_t *lwe_array_1_degrees,
                                 uint64_t *lwe_array_2_degrees,
                                 uint32_t num_radix_blocks);
void update_degrees_after_bitor(uint64_t *output_degrees,
                                uint64_t *lwe_array_1_degrees,
                                uint64_t *lwe_array_2_degrees,
                                uint32_t num_radix_blocks);
void update_degrees_after_bitxor(uint64_t *output_degrees,
                                 uint64_t *lwe_array_1_degrees,
                                 uint64_t *lwe_array_2_degrees,
                                 uint32_t num_radix_blocks);
void update_degrees_after_scalar_bitand(uint64_t *output_degrees,
                                        uint64_t const *clear_degrees,
                                        uint64_t const *input_degrees,
                                        uint32_t num_clear_blocks);
void update_degrees_after_scalar_bitor(uint64_t *output_degrees,
                                       uint64_t const *clear_degrees,
                                       uint64_t const *input_degrees,
                                       uint32_t num_clear_blocks);
void update_degrees_after_scalar_bitxor(uint64_t *output_degrees,
                                        uint64_t const *clear_degrees,
                                        uint64_t const *input_degrees,
                                        uint32_t num_clear_blocks);
backends/tfhe-cuda-backend/cuda/include/integer/cast.h (new file, 77 lines)
@@ -0,0 +1,77 @@
#pragma once
#include "integer_utilities.h"

template <typename Torus> struct int_extend_radix_with_sign_msb_buffer {

  int_radix_params params;
  bool allocate_gpu_memory;

  int_radix_lut<Torus> *lut = nullptr;

  CudaRadixCiphertextFFI *last_block = nullptr;
  CudaRadixCiphertextFFI *padding_block = nullptr;

  int_extend_radix_with_sign_msb_buffer(CudaStreams streams,
                                        const int_radix_params params,
                                        uint32_t num_radix_blocks,
                                        uint32_t num_additional_blocks,
                                        const bool allocate_gpu_memory,
                                        uint64_t &size_tracker) {

    this->params = params;
    this->allocate_gpu_memory = allocate_gpu_memory;

    if (num_additional_blocks != 0) {
      this->lut = new int_radix_lut<Torus>(
          streams, params, 1, num_radix_blocks, allocate_gpu_memory,
          size_tracker);

      uint32_t bits_per_block = std::log2(params.message_modulus);
      uint32_t msg_modulus = params.message_modulus;

      generate_device_accumulator<Torus>(
          streams.stream(0), streams.gpu_index(0), lut->get_lut(0, 0),
          lut->get_degree(0), lut->get_max_degree(0), params.glwe_dimension,
          params.polynomial_size, params.message_modulus,
          params.carry_modulus,
          [msg_modulus, bits_per_block](Torus x) {
            const auto xm = x % msg_modulus;
            const auto sign_bit = (xm >> (bits_per_block - 1)) & 1;
            return (Torus)((msg_modulus - 1) * sign_bit);
          },
          allocate_gpu_memory);

      auto active_streams = streams.active_gpu_subset(num_radix_blocks);
      lut->broadcast_lut(active_streams);

      this->last_block = new CudaRadixCiphertextFFI;

      create_zero_radix_ciphertext_async<Torus>(
          streams.stream(0), streams.gpu_index(0), last_block, 1,
          params.big_lwe_dimension, size_tracker, allocate_gpu_memory);

      this->padding_block = new CudaRadixCiphertextFFI;

      create_zero_radix_ciphertext_async<Torus>(
          streams.stream(0), streams.gpu_index(0), padding_block, 1,
          params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
    }
  }

  void release(CudaStreams streams) {

    if (lut != nullptr) {
      lut->release(streams);
      delete lut;
    }
    if (last_block != nullptr) {
      release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                     last_block, allocate_gpu_memory);
      delete last_block;
    }
    if (padding_block != nullptr) {
      release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                     padding_block, allocate_gpu_memory);
      delete padding_block;
    }
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};

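// Worked example for the sign-extension LUT above, with message_modulus = 4
// (bits_per_block = 2): the most significant block of a signed radix integer
// carries the sign in its top bit, and every padding block must replicate it.
//   top block xm = 0b10 -> sign_bit = 1 -> padding block = 3 * 1 = 0b11
//   top block xm = 0b01 -> sign_bit = 0 -> padding block = 3 * 0 = 0b00
// This mirrors two's-complement sign extension, one radix block at a time.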
backends/tfhe-cuda-backend/cuda/include/integer/cmux.h (new file, 141 lines)
@@ -0,0 +1,141 @@
#pragma once
#include "integer_utilities.h"

template <typename Torus> struct int_zero_out_if_buffer {

  int_radix_params params;

  CudaRadixCiphertextFFI *tmp;

  bool gpu_memory_allocated;

  int_zero_out_if_buffer(CudaStreams streams, int_radix_params params,
                         uint32_t num_radix_blocks, bool allocate_gpu_memory,
                         uint64_t &size_tracker) {
    gpu_memory_allocated = allocate_gpu_memory;
    this->params = params;
    auto active_streams = streams.active_gpu_subset(num_radix_blocks);

    tmp = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), tmp, num_radix_blocks,
        params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
  }
  void release(CudaStreams streams) {
    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   tmp, gpu_memory_allocated);
    delete tmp;
    tmp = nullptr;
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};

template <typename Torus> struct int_cmux_buffer {
  int_radix_lut<Torus> *predicate_lut;
  int_radix_lut<Torus> *message_extract_lut;

  CudaRadixCiphertextFFI *buffer_in;
  CudaRadixCiphertextFFI *buffer_out;
  CudaRadixCiphertextFFI *condition_array;

  int_radix_params params;
  bool allocate_gpu_memory;
  bool gpu_memory_allocated;
  int_cmux_buffer(CudaStreams streams,
                  std::function<Torus(Torus)> predicate_lut_f,
                  int_radix_params params, uint32_t num_radix_blocks,
                  bool allocate_gpu_memory, uint64_t &size_tracker) {
    gpu_memory_allocated = allocate_gpu_memory;

    this->params = params;
    this->allocate_gpu_memory = allocate_gpu_memory;

    buffer_in = new CudaRadixCiphertextFFI;
    buffer_out = new CudaRadixCiphertextFFI;
    condition_array = new CudaRadixCiphertextFFI;
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), buffer_in,
        2 * num_radix_blocks, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), buffer_out,
        2 * num_radix_blocks, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);
    create_zero_radix_ciphertext_async<Torus>(
        streams.stream(0), streams.gpu_index(0), condition_array,
        2 * num_radix_blocks, params.big_lwe_dimension, size_tracker,
        allocate_gpu_memory);

    auto lut_f = [predicate_lut_f](Torus block, Torus condition) -> Torus {
      return predicate_lut_f(condition) ? 0 : block;
    };
    auto inverted_lut_f = [predicate_lut_f](Torus block,
                                            Torus condition) -> Torus {
      return predicate_lut_f(condition) ? block : 0;
    };
    auto message_extract_lut_f = [params](Torus x) -> Torus {
      return x % params.message_modulus;
    };

    predicate_lut =
        new int_radix_lut<Torus>(streams, params, 2, 2 * num_radix_blocks,
                                 allocate_gpu_memory, size_tracker);

    message_extract_lut =
        new int_radix_lut<Torus>(streams, params, 1, num_radix_blocks,
                                 allocate_gpu_memory, size_tracker);

    generate_device_accumulator_bivariate<Torus>(
        streams.stream(0), streams.gpu_index(0), predicate_lut->get_lut(0, 0),
        predicate_lut->get_degree(0), predicate_lut->get_max_degree(0),
        params.glwe_dimension, params.polynomial_size, params.message_modulus,
        params.carry_modulus, inverted_lut_f, gpu_memory_allocated);

    generate_device_accumulator_bivariate<Torus>(
        streams.stream(0), streams.gpu_index(0), predicate_lut->get_lut(0, 1),
        predicate_lut->get_degree(1), predicate_lut->get_max_degree(1),
        params.glwe_dimension, params.polynomial_size, params.message_modulus,
        params.carry_modulus, lut_f, gpu_memory_allocated);

    generate_device_accumulator<Torus>(
        streams.stream(0), streams.gpu_index(0),
        message_extract_lut->get_lut(0, 0), message_extract_lut->get_degree(0),
        message_extract_lut->get_max_degree(0), params.glwe_dimension,
        params.polynomial_size, params.message_modulus, params.carry_modulus,
        message_extract_lut_f, gpu_memory_allocated);
    Torus *h_lut_indexes = predicate_lut->h_lut_indexes;
    for (int index = 0; index < 2 * num_radix_blocks; index++) {
      if (index < num_radix_blocks) {
        h_lut_indexes[index] = 0;
      } else {
        h_lut_indexes[index] = 1;
      }
    }
    cuda_memcpy_with_size_tracking_async_to_gpu(
        predicate_lut->get_lut_indexes(0, 0), h_lut_indexes,
        2 * num_radix_blocks * sizeof(Torus), streams.stream(0),
        streams.gpu_index(0), allocate_gpu_memory);
    auto active_streams_pred = streams.active_gpu_subset(2 * num_radix_blocks);
    predicate_lut->broadcast_lut(active_streams_pred);
    auto active_streams_msg = streams.active_gpu_subset(num_radix_blocks);
    message_extract_lut->broadcast_lut(active_streams_msg);
  }

  void release(CudaStreams streams) {
    predicate_lut->release(streams);
    delete predicate_lut;
    message_extract_lut->release(streams);
    delete message_extract_lut;

    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   buffer_in, gpu_memory_allocated);
    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   buffer_out, gpu_memory_allocated);
    release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
                                   condition_array, gpu_memory_allocated);
    delete buffer_in;
    delete buffer_out;
    delete condition_array;
    cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
  }
};
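// How the two predicate LUTs combine into a cmux (clear-data illustration):
// the first num_radix_blocks entries of the packed input pair the "true"
// branch with the condition and keep it when predicate(condition) holds
// (index 0, inverted_lut_f); the second half keeps the "false" branch when
// it does not (index 1, lut_f). Adding the two halves then yields
//   result = predicate(c) ? lhs : rhs
// since exactly one of the two zero-out passes lets its block through, and
// message_extract_lut reduces the sum back to a clean message block.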
Some files were not shown because too many files have changed in this diff.