Mirror of https://github.com/zama-ai/tfhe-rs.git, synced 2026-01-12 16:18:52 -05:00

Compare commits: 24 commits, branch hw-team/de...
Commits (SHA1; author and date columns were empty in the mirrored table):

- 65045be63f
- 696f964ecf
- a5323d1edf
- 2d500d0de6
- b1657876fb
- d2a570bdd6
- 122ef489fd
- ed84387bba
- 1f4ba33a50
- e645ee3397
- 569abd9a3b
- 917bb5e1ef
- 509aadcad2
- e20aea90df
- e8ab448454
- 50f6773c82
- 1eb8270812
- 0fab6324b9
- bb1c215951
- 70a0021cbf
- 36b6376cc4
- 62d0d16f6d
- c86deec683
- 4d42425f4f
@@ -80,7 +80,7 @@ jobs:
       - name: Retrieve data from cache
         id: retrieve-data-cache
-        uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
+        uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
         with:
           path: |
             utils/tfhe-backward-compat-data/**/*.cbor
@@ -109,7 +109,7 @@ jobs:
       - name: Store data in cache
         if: steps.retrieve-data-cache.outputs.cache-hit != 'true'
         continue-on-error: true
-        uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
+        uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
         with:
           path: |
             utils/tfhe-backward-compat-data/**/*.cbor
.github/workflows/aws_tfhe_fast_tests.yml (vendored): 6 changed lines

@@ -71,7 +71,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             dependencies:
@@ -219,7 +219,7 @@ jobs:
       - name: Node cache restoration
         id: node-cache
-        uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
+        uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
         with:
           path: |
             ~/.nvm
@@ -232,7 +232,7 @@ jobs:
           make install_node

       - name: Node cache save
-        uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
+        uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
         if: steps.node-cache.outputs.cache-hit != 'true'
         with:
           path: |
.github/workflows/aws_tfhe_integer_tests.yml (vendored): 2 changed lines

@@ -58,7 +58,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             integer:

@@ -59,7 +59,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             integer:
.github/workflows/aws_tfhe_tests.yml (vendored): 2 changed lines

@@ -80,7 +80,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             dependencies:
.github/workflows/aws_tfhe_wasm_tests.yml (vendored): 4 changed lines

@@ -80,7 +80,7 @@ jobs:
       - name: Node cache restoration
         id: node-cache
-        uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
+        uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
         with:
           path: |
             ~/.nvm
@@ -93,7 +93,7 @@ jobs:
           make install_node

       - name: Node cache save
-        uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
+        uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
         if: steps.node-cache.outputs.cache-hit != 'true'
         with:
           path: |
.github/workflows/benchmark_cpu_common.yml (vendored): 2 changed lines

@@ -223,7 +223,7 @@ jobs:
           results_type: ${{ inputs.additional_results_type }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ matrix.bench_type }}_${{ matrix.params_type }}
           path: ${{ env.RESULTS_FILENAME }}
.github/workflows/benchmark_ct_key_sizes.yml (vendored): 2 changed lines

@@ -99,7 +99,7 @@ jobs:
           --append-results

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_ct_key_sizes
           path: ${{ env.RESULTS_FILENAME }}

@@ -185,7 +185,7 @@ jobs:
           persist-credentials: 'false'

       - name: Download SVG tables
-        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
         with:
           path: svg_tables
           merge-multiple: 'true'
@@ -203,7 +203,7 @@ jobs:
           echo "date=$(date '+%g_%m_%d_%Hh%Mm%Ss')" >> "${GITHUB_OUTPUT}"

       - name: Create pull-request
-        uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
+        uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
         with:
           sign-commits: true # Commit will be signed by github-actions bot
           add-paths: ${{ env.PATH_TO_DOC_ASSETS }}/*.svg
.github/workflows/benchmark_gpu_4090.yml (vendored): 4 changed lines

@@ -89,7 +89,7 @@ jobs:
           REF_NAME: ${{ github.ref_name }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_integer_multi_bit_gpu_default
           path: ${{ env.RESULTS_FILENAME }}
@@ -173,7 +173,7 @@ jobs:
           REF_NAME: ${{ github.ref_name }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_core_crypto
           path: ${{ env.RESULTS_FILENAME }}
.github/workflows/benchmark_gpu_common.yml (vendored): 2 changed lines

@@ -281,7 +281,7 @@ jobs:
           BENCH_TYPE: ${{ matrix.bench_type }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ inputs.profile }}_${{ matrix.bench_type }}_${{ matrix.params_type }}
           path: ${{ env.RESULTS_FILENAME }}

@@ -192,10 +192,10 @@ jobs:
           cargo install sqlx-cli

       - name: Install foundry
-        uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e
+        uses: foundry-rs/foundry-toolchain@8b0419c685ef46cb79ec93fbdc131174afceb730

       - name: Cache cargo
-        uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
+        uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
         with:
           path: |
             ~/.cargo/registry
@@ -262,7 +262,7 @@ jobs:
       - name: Upload profile artifact
         env:
           REPORT_NAME: ${{ steps.nsys_profile_name.outputs.profile }}
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ env.REPORT_NAME }}
           path: fhevm/coprocessor/fhevm-engine/tfhe-worker/${{ env.REPORT_NAME }}
@@ -293,7 +293,7 @@ jobs:
         working-directory: fhevm/

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${COMMIT_SHA}_${BENCHMARKS}_${{ needs.parse-inputs.outputs.profile }}
           path: fhevm/$${{ env.RESULTS_FILENAME }}
.github/workflows/benchmark_hpu_common.yml (vendored): 2 changed lines

@@ -185,7 +185,7 @@ jobs:
           BENCH_TYPE: ${{ matrix.bench_type }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_${{ matrix.bench_type }}_integer_benchmarks
           path: ${{ env.RESULTS_FILENAME }}
@@ -280,7 +280,7 @@ jobs:
           BENCH_TYPE: ${{ env.__TFHE_RS_BENCH_TYPE }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_regression_${{ env.RESULTS_FILE_SHA }} # RESULT_FILE_SHA is needed to avoid collision between matrix.command runs
           path: ${{ env.RESULTS_FILENAME }}
.github/workflows/benchmark_tfhe_fft.yml (vendored): 2 changed lines

@@ -96,7 +96,7 @@ jobs:
           REF_NAME: ${{ github.ref_name }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_fft
           path: ${{ env.RESULTS_FILENAME }}
.github/workflows/benchmark_tfhe_ntt.yml (vendored): 2 changed lines

@@ -96,7 +96,7 @@ jobs:
           REF_NAME: ${{ github.ref_name }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_ntt
           path: ${{ env.RESULTS_FILENAME }}
.github/workflows/benchmark_wasm_client.yml (vendored): 14 changed lines

@@ -47,7 +47,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             wasm_bench:
@@ -119,7 +119,7 @@ jobs:
       - name: Node cache restoration
         id: node-cache
-        uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
+        uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
         with:
           path: |
             ~/.nvm
@@ -132,7 +132,7 @@ jobs:
           make install_node

       - name: Node cache save
-        uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
+        uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
         if: steps.node-cache.outputs.cache-hit != 'true'
         with:
           path: |
@@ -153,6 +153,12 @@ jobs:
         env:
           BROWSER: ${{ matrix.browser }}

+      - name: Run benchmarks (unsafe coop)
+        run: |
+          make bench_web_js_api_unsafe_coop_"${BROWSER}"_ci
+        env:
+          BROWSER: ${{ matrix.browser }}
+
       - name: Parse results
         run: |
           make parse_wasm_benchmarks
@@ -169,7 +175,7 @@ jobs:
           REF_NAME: ${{ github.ref_name }}

       - name: Upload parsed results artifact
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
           name: ${{ github.sha }}_wasm_${{ matrix.browser }}
           path: ${{ env.RESULTS_FILENAME }}
.github/workflows/cargo_test_fft.yml (vendored): 2 changed lines

@@ -37,7 +37,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             fft:
.github/workflows/cargo_test_ntt.yml (vendored): 2 changed lines

@@ -39,7 +39,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             ntt:
.github/workflows/ci_lint.yml (vendored): 2 changed lines

@@ -50,7 +50,7 @@ jobs:
           version: ${{ steps.get_zizmor.outputs.version }}

       - name: Ensure SHA pinned actions
-        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@9e9574ef04ea69da568d6249bd69539ccc704e74 # v4.0.0
+        uses: zgosalvez/github-actions-ensure-sha-pinned-actions@6124774845927d14c601359ab8138699fa5b70c3 # v4.0.1
         with:
           allowlist: |
             slsa-framework/slsa-github-generator
.github/workflows/code_coverage.yml (vendored): 6 changed lines

@@ -62,7 +62,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             tfhe:
@@ -92,7 +92,7 @@ jobs:
           make test_shortint_cov

       - name: Upload tfhe coverage to Codecov
-        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
+        uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de
         if: steps.changed-files.outputs.tfhe_any_changed == 'true'
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
@@ -106,7 +106,7 @@ jobs:
           make test_integer_cov

       - name: Upload tfhe coverage to Codecov
-        uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
+        uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de
         if: steps.changed-files.outputs.tfhe_any_changed == 'true'
         with:
           token: ${{ secrets.CODECOV_TOKEN }}
.github/workflows/generate_svg_common.yml (vendored): 16 changed lines

@@ -75,6 +75,15 @@ jobs:
           DATA_EXTRACTOR_DATABASE_HOST: ${{ secrets.DATA_EXTRACTOR_DATABASE_HOST }}
           DATA_EXTRACTOR_DATABASE_PASSWORD: ${{ secrets.DATA_EXTRACTOR_DATABASE_PASSWORD }}

+      - name: Upload tables
+        if: inputs.backend_comparison == false
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
+        with:
+          name: ${{ github.sha }}_${{ inputs.backend }}_${{ inputs.layer }}_${{ inputs.pbs_kind }}_${{ inputs.bench_type }}_tables
+          # This will upload all the file generated
+          path: ${{ inputs.output_filename }}*.svg
+          retention-days: 60
+
       - name: Produce backends comparison table from database
         if: inputs.backend_comparison == true
         run: |
@@ -90,10 +99,11 @@ jobs:
           DATA_EXTRACTOR_DATABASE_HOST: ${{ secrets.DATA_EXTRACTOR_DATABASE_HOST }}
           DATA_EXTRACTOR_DATABASE_PASSWORD: ${{ secrets.DATA_EXTRACTOR_DATABASE_PASSWORD }}

-      - name: Upload tables
-        uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+      - name: Upload comparison tables
+        if: inputs.backend_comparison == true
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
         with:
-          name: ${{ github.sha }}_${{ inputs.backend }}_${{ inputs.layer }}_${{ inputs.pbs_kind }}_${{ inputs.bench_type }}_tables
+          name: ${{ github.sha }}_backends_comparison_tables
           # This will upload all the file generated
           path: ${{ inputs.output_filename }}*.svg
           retention-days: 60
.github/workflows/gpu_fast_h100_tests.yml (vendored): 2 changed lines

@@ -48,7 +48,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:
.github/workflows/gpu_fast_tests.yml (vendored): 2 changed lines

@@ -47,7 +47,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:

@@ -48,7 +48,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:

@@ -48,7 +48,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:

@@ -48,7 +48,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:

@@ -49,7 +49,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:

@@ -48,7 +48,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:

@@ -48,7 +48,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:

@@ -49,7 +49,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             gpu:
.github/workflows/hpu_hlapi_tests.yml (vendored): 2 changed lines

@@ -40,7 +40,7 @@ jobs:
       - name: Check for file changes
         id: changed-files
-        uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
+        uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
         with:
           files_yaml: |
             hpu:
.github/workflows/make_release_common.yml (vendored): 4 changed lines

@@ -62,7 +62,7 @@ jobs:
           PACKAGE: ${{ inputs.package-name }}
         run: |
           cargo package -p "${PACKAGE}"
-      - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+      - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: crate-${{ inputs.package-name }}
          path: target/package/*.crate
@@ -100,7 +100,7 @@ jobs:
           token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

       - name: Download artifact
-        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
         with:
           name: crate-${{ inputs.package-name }}
           path: target/package
.github/workflows/make_release_cuda.yml (vendored): 4 changed lines

@@ -104,7 +104,7 @@ jobs:
         run: |
           cargo package -p tfhe-cuda-backend

-      - uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+      - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: crate-tfhe-cuda-backend
          path: target/package/*.crate
@@ -174,7 +174,7 @@ jobs:
           GCC_VERSION: ${{ matrix.gcc }}

       - name: Download artifact
-        uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
+        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
         with:
           name: crate-tfhe-cuda-backend
           path: target/package

@@ -36,6 +36,7 @@ rayon = "1.11"
 serde = { version = "1.0", default-features = false }
 wasm-bindgen = "0.2.101"
+getrandom = "0.2.8"
 bincode = "=1.3.3"

 [profile.bench]
 lto = "fat"
Makefile: 38 changed lines

@@ -1300,13 +1300,14 @@ run_web_js_api_parallel: build_web_js_api_parallel setup_venv
 		--browser-path $(browser_path) \
 		--driver-path $(driver_path) \
 		--browser-kind $(browser_kind) \
-		--server-cmd "npm run server" \
+		--server-cmd $(server_cmd) \
 		--server-workdir "$(WEB_SERVER_DIR)" \
 		--id-pattern $(filter)

 test_web_js_api_parallel_chrome: browser_path = "$(WEB_RUNNER_DIR)/chrome/chrome-linux64/chrome"
 test_web_js_api_parallel_chrome: driver_path = "$(WEB_RUNNER_DIR)/chrome/chromedriver-linux64/chromedriver"
 test_web_js_api_parallel_chrome: browser_kind = chrome
+test_web_js_api_parallel_chrome: server_cmd = "npm run server:multithreaded"
 test_web_js_api_parallel_chrome: filter = Test

 .PHONY: test_web_js_api_parallel_chrome # Run tests for the web wasm api on Chrome
@@ -1322,6 +1323,7 @@ test_web_js_api_parallel_chrome_ci: setup_venv
 test_web_js_api_parallel_firefox: browser_path = "$(WEB_RUNNER_DIR)/firefox/firefox/firefox"
 test_web_js_api_parallel_firefox: driver_path = "$(WEB_RUNNER_DIR)/firefox/geckodriver"
 test_web_js_api_parallel_firefox: browser_kind = firefox
+test_web_js_api_parallel_firefox: server_cmd = "npm run server:multithreaded"
 test_web_js_api_parallel_firefox: filter = Test

 .PHONY: test_web_js_api_parallel_firefox # Run tests for the web wasm api on Firefox
@@ -1571,6 +1573,7 @@ bench_pbs128_gpu: install_rs_check_toolchain
 bench_web_js_api_parallel_chrome: browser_path = "$(WEB_RUNNER_DIR)/chrome/chrome-linux64/chrome"
 bench_web_js_api_parallel_chrome: driver_path = "$(WEB_RUNNER_DIR)/chrome/chromedriver-linux64/chromedriver"
 bench_web_js_api_parallel_chrome: browser_kind = chrome
+bench_web_js_api_parallel_chrome: server_cmd = "npm run server:multithreaded"
 bench_web_js_api_parallel_chrome: filter = Bench

 .PHONY: bench_web_js_api_parallel_chrome # Run benchmarks for the web wasm api
@@ -1586,6 +1589,7 @@ bench_web_js_api_parallel_chrome_ci: setup_venv
 bench_web_js_api_parallel_firefox: browser_path = "$(WEB_RUNNER_DIR)/firefox/firefox/firefox"
 bench_web_js_api_parallel_firefox: driver_path = "$(WEB_RUNNER_DIR)/firefox/geckodriver"
 bench_web_js_api_parallel_firefox: browser_kind = firefox
+bench_web_js_api_parallel_firefox: server_cmd = "npm run server:multithreaded"
 bench_web_js_api_parallel_firefox: filter = Bench

 .PHONY: bench_web_js_api_parallel_firefox # Run benchmarks for the web wasm api
@@ -1598,6 +1602,38 @@ bench_web_js_api_parallel_firefox_ci: setup_venv
 	nvm use $(NODE_VERSION) && \
 	$(MAKE) bench_web_js_api_parallel_firefox

+bench_web_js_api_unsafe_coop_chrome: browser_path = "$(WEB_RUNNER_DIR)/chrome/chrome-linux64/chrome"
+bench_web_js_api_unsafe_coop_chrome: driver_path = "$(WEB_RUNNER_DIR)/chrome/chromedriver-linux64/chromedriver"
+bench_web_js_api_unsafe_coop_chrome: browser_kind = chrome
+bench_web_js_api_unsafe_coop_chrome: server_cmd = "npm run server:unsafe-coop"
+bench_web_js_api_unsafe_coop_chrome: filter = ZeroKnowledgeBench # Only bench zk with unsafe coop
+
+.PHONY: bench_web_js_api_unsafe_coop_chrome # Run benchmarks for the web wasm api without cross-origin isolation
+bench_web_js_api_unsafe_coop_chrome: run_web_js_api_parallel
+
+.PHONY: bench_web_js_api_unsafe_coop_chrome_ci # Run benchmarks for the web wasm api without cross-origin isolation
+bench_web_js_api_unsafe_coop_chrome_ci: setup_venv
+	source ~/.nvm/nvm.sh && \
+	nvm install $(NODE_VERSION) && \
+	nvm use $(NODE_VERSION) && \
+	$(MAKE) bench_web_js_api_unsafe_coop_chrome
+
+bench_web_js_api_unsafe_coop_firefox: browser_path = "$(WEB_RUNNER_DIR)/firefox/firefox/firefox"
+bench_web_js_api_unsafe_coop_firefox: driver_path = "$(WEB_RUNNER_DIR)/firefox/geckodriver"
+bench_web_js_api_unsafe_coop_firefox: browser_kind = firefox
+bench_web_js_api_unsafe_coop_firefox: server_cmd = "npm run server:unsafe-coop"
+bench_web_js_api_unsafe_coop_firefox: filter = ZeroKnowledgeBench # Only bench zk with unsafe coop
+
+.PHONY: bench_web_js_api_unsafe_coop_firefox # Run benchmarks for the web wasm api without cross-origin isolation
+bench_web_js_api_unsafe_coop_firefox: run_web_js_api_parallel
+
+.PHONY: bench_web_js_api_unsafe_coop_firefox_ci # Run benchmarks for the web wasm api without cross-origin isolation
+bench_web_js_api_unsafe_coop_firefox_ci: setup_venv
+	source ~/.nvm/nvm.sh && \
+	nvm install $(NODE_VERSION) && \
+	nvm use $(NODE_VERSION) && \
+	$(MAKE) bench_web_js_api_unsafe_coop_firefox
+
 .PHONY: bench_hlapi # Run benchmarks for integer operations
 bench_hlapi: install_rs_check_toolchain
 	RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_BIT_SIZES_SET=$(BIT_SIZES_SET) \
WORKFLOWS.md (new file): 704 lines

@@ -0,0 +1,704 @@
# 🔄 TFHE-rs GitHub Workflows Documentation

This document provides a comprehensive overview of all GitHub Actions workflows in the TFHE-rs project, organized by category with visual diagrams showing their triggers and purposes.

## 📊 Workflow Overview

The project contains **71 workflows** organized into the following categories:

- **Testing & Validation** (31 workflows) - AWS CPU (7), GPU (16), HPU (1), M1 (1), special tests (4), cargo tests (2)
- **Benchmarking** (17 workflows) - CPU, GPU, HPU, WASM, specialized benchmarks
- **Building & Compilation** (4 workflows) - Cargo builds
- **Release Management** (9 workflows) - Publishing to crates.io and npm
- **CI/CD & Maintenance** (10 workflows) - Linting, PR management, security

---

## 🔍 Workflow Trigger Types

```mermaid
graph LR
    A[Workflow Triggers] --> B[Pull Request]
    A --> C[Push to main]
    A --> D[Schedule/Cron]
    A --> E[Workflow Dispatch]
    A --> F[Label Events]

    B --> B1[On PR Open]
    B --> B2[On PR Approval]
    F --> F1[approved label]
    F --> F2[m1_test label]
    D --> D1[Daily]
    D --> D2[Weekly]
    D --> D3[Nightly]
```
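These trigger types map directly onto the `on:` blocks of the workflow files. As a hedged illustration (the cron expression and the label-gating condition below are representative assumptions, not copied from a specific workflow):

```yaml
on:
  pull_request:
  push:
    branches: [main]
  schedule:
    - cron: "0 1 * * 1-5" # nightly on weekdays (illustrative schedule)
  workflow_dispatch:

jobs:
  heavy-tests:
    # Gate the expensive suite on the "approved" label, as the AWS/GPU suites do
    if: contains(github.event.pull_request.labels.*.name, 'approved')
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: make test
```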
---

## 🧪 Testing & Validation Workflows

### CPU Testing Workflows

```mermaid
flowchart TB
    subgraph "CPU Test Workflows"
        AWS[aws_tfhe_tests]
        FAST[aws_tfhe_fast_tests]
        INT[aws_tfhe_integer_tests]
        SIGN[aws_tfhe_signed_integer_tests]
        BACK[aws_tfhe_backward_compat_tests]
        WASM[aws_tfhe_wasm_tests]
        NOISE[aws_tfhe_noise_checks]
        M1[m1_tests]
    end

    subgraph "Triggers"
        PR[Pull Request<br/>+ approved label]
        SCHED[Schedule<br/>Nightly Mon-Fri]
        DISP[Workflow Dispatch]
        M1LABEL[m1_test label]
    end

    PR --> AWS
    SCHED --> AWS
    DISP --> AWS

    PR --> FAST
    PR --> INT
    PR --> SIGN
    PR --> BACK
    PR --> WASM

    DISP --> M1
    M1LABEL --> M1
    SCHED --> M1
```

| Workflow | Trigger | Purpose | Runner |
|----------|---------|---------|--------|
| **aws_tfhe_tests** | PR (approved) / Nightly / Manual | Comprehensive CPU tests (csprng, zk-pok, core_crypto, boolean, shortint, strings, high-level API, C API, examples, apps) | AWS cpu-big |
| **aws_tfhe_fast_tests** | PR (approved) / Manual | Fast subset of tests for quick validation | AWS cpu-small |
| **aws_tfhe_integer_tests** | PR (approved) / Manual | Integer operations testing | AWS cpu-big |
| **aws_tfhe_signed_integer_tests** | PR (approved) / Manual | Signed integer operations testing | AWS cpu-big |
| **aws_tfhe_backward_compat_tests** | PR (approved) / Manual | Backward compatibility validation | AWS cpu-small |
| **aws_tfhe_wasm_tests** | PR (approved) / Manual | WebAssembly tests | AWS cpu-small |
| **aws_tfhe_noise_checks** | PR (approved) / Manual | Cryptographic noise validation | AWS cpu-small |
| **m1_tests** | Manual / Schedule (10pm daily) / m1_test label | Tests on Apple M1 architecture | Self-hosted M1 Mac |

---

### GPU Testing Workflows

```mermaid
flowchart TB
    subgraph "GPU Test Workflows"
        GFAST[gpu_fast_tests]
        G4090[gpu_4090_tests]
        GH100F[gpu_fast_h100_tests]
        GH100[gpu_full_h100_tests]
        GMULTI[gpu_full_multi_gpu_tests]
        GVAL[gpu_code_validation_tests]
        GMEM[gpu_memory_sanitizer]
        GMEMH[gpu_memory_sanitizer_h100]
        GUINT[gpu_unsigned_integer_tests]
        GSINT[gpu_signed_integer_tests]
        GUINTC[gpu_unsigned_integer_classic_tests]
        GSINTC[gpu_signed_integer_classic_tests]
        GUINTH[gpu_unsigned_integer_h100_tests]
        GSINTH[gpu_signed_integer_h100_tests]
        GLONG[gpu_integer_long_run_tests]
        GPCC[gpu_pcc]
    end

    subgraph "Triggers"
        PR[Pull Request]
        DISP[Workflow Dispatch]
        APPR[PR approved label]
    end

    PR --> GFAST
    DISP --> GFAST

    DISP --> G4090
    DISP --> GH100F
    DISP --> GH100
    DISP --> GMULTI
    DISP --> GVAL

    APPR --> GMEM
    APPR --> GMEMH
    APPR --> GUINT
    APPR --> GSINT
    APPR --> GPCC
```

| Workflow | Trigger | Purpose | GPU |
|----------|---------|---------|-----|
| **gpu_fast_tests** | PR / Manual | Quick GPU validation tests | Hyperstack GPU |
| **gpu_4090_tests** | Manual | Tests on RTX 4090 hardware | RTX 4090 |
| **gpu_fast_h100_tests** | Manual | Fast tests on H100 GPU | H100 |
| **gpu_full_h100_tests** | Manual | Comprehensive H100 tests | H100 |
| **gpu_full_multi_gpu_tests** | Manual | Multi-GPU testing | Multiple GPUs |
| **gpu_code_validation_tests** | Manual | GPU code validation | GPU |
| **gpu_memory_sanitizer** | PR (approved) / Manual | Memory leak detection | GPU |
| **gpu_memory_sanitizer_h100** | PR (approved) / Manual | Memory sanitizer on H100 | H100 |
| **gpu_unsigned_integer_tests** | PR (approved) / Manual | Unsigned integer GPU tests | GPU |
| **gpu_signed_integer_tests** | PR (approved) / Manual | Signed integer GPU tests | GPU |
| **gpu_unsigned_integer_classic_tests** | Manual | Classic unsigned integer tests | GPU |
| **gpu_signed_integer_classic_tests** | Manual | Classic signed integer tests | GPU |
| **gpu_unsigned_integer_h100_tests** | Manual | Unsigned integer tests on H100 | H100 |
| **gpu_signed_integer_h100_tests** | Manual | Signed integer tests on H100 | H100 |
| **gpu_integer_long_run_tests** | Manual | Long-running integer tests | GPU |
| **gpu_pcc** | PR (approved) / Manual | GPU pre-commit checks (pcc) | GPU |

---

### HPU Testing Workflows

```mermaid
flowchart LR
    HPU[hpu_hlapi_tests]
    DISP[Workflow Dispatch] --> HPU
    HPU --> |Tests on|INTEL[Intel HPU Hardware]
```

| Workflow | Trigger | Purpose | Hardware |
|----------|---------|---------|----------|
| **hpu_hlapi_tests** | Manual | High-level API tests on Intel HPU | Intel HPU |

---

### Special Testing Workflows

```mermaid
flowchart TB
    subgraph "Special Tests"
        COV[code_coverage]
        CSPRNG[csprng_randomness_tests]
        LONG[integer_long_run_tests]
        PARAMS[parameters_check]
    end

    subgraph "Cargo Tests"
        TESTFFT[cargo_test_fft]
        TESTNTT[cargo_test_ntt]
    end

    DISP[Workflow Dispatch] --> COV
    DISP --> CSPRNG
    DISP --> LONG

    APPR[PR approved label] --> CSPRNG

    PUSH[Push to main] --> PARAMS
    PR[PR on specific paths] --> PARAMS
    DISP --> PARAMS

    PR --> TESTFFT
    PR --> TESTNTT
```

| Workflow | Trigger | Purpose |
|----------|---------|---------|
| **code_coverage** | Manual | Generate code coverage reports and upload to Codecov |
| **csprng_randomness_tests** | Manual / PR (approved) | Dieharder randomness test suite for the CSPRNG |
| **integer_long_run_tests** | Manual | Extended integer testing |
| **parameters_check** | Push to main / PR (specific paths) / Manual | Security check on cryptographic parameters using the lattice estimator |
| **cargo_test_fft** | PR | Run tfhe-fft tests |
| **cargo_test_ntt** | PR | Run tfhe-ntt tests |

---

## 🏗️ Building & Compilation Workflows (4 workflows)

```mermaid
flowchart TB
    subgraph "Build Workflows"
        BUILD[cargo_build]
        COMMON[cargo_build_common]
        FFT[cargo_build_tfhe_fft]
        NTT[cargo_build_tfhe_ntt]
    end

    subgraph "Build Jobs"
        PCC[Parallel PCC CPU]
        PCCHPU[PCC HPU]
        FULL[Build TFHE Full]
        LAYERS[Build Layers]
        CAPI[Build C API]
    end

    PR[Pull Request] --> BUILD
    BUILD --> PCC
    BUILD --> PCCHPU
    BUILD --> FULL
    BUILD --> LAYERS
    BUILD --> CAPI

    PR --> FFT
    PR --> NTT
```

| Workflow | Trigger | Purpose |
|----------|---------|---------|
| **cargo_build** | PR | Main build workflow - coordinates all build jobs |
| **cargo_build_common** | Reusable | Shared build logic for different targets |
| **cargo_build_tfhe_fft** | PR | Build and validate the tfhe-fft crate |
| **cargo_build_tfhe_ntt** | PR | Build and validate the tfhe-ntt crate |

**Build Targets:**
- ✅ Parallel PCC (pre-commit checks) for CPU
- ✅ PCC for HPU
- ✅ Full TFHE build (Linux, macOS M1, Windows)
- ✅ Layer-by-layer builds
- ✅ C API builds

---

## 📊 Benchmarking Workflows (17 workflows)

All benchmark workflows are **triggered manually** via workflow_dispatch.

```mermaid
flowchart TB
    subgraph "CPU Benchmarks - 3 workflows"
        BCPU[benchmark_cpu<br/>Main CPU benchmarks]
        BCPUW[benchmark_cpu_weekly<br/>Weekly CPU benchmarks]
        BCPUC[benchmark_cpu_common<br/>Reusable workflow]
    end

    subgraph "GPU Benchmarks - 5 workflows"
        BGPU[benchmark_gpu<br/>Main GPU benchmarks]
        BGPUW[benchmark_gpu_weekly<br/>Weekly GPU benchmarks]
        BGPUC[benchmark_gpu_common<br/>Reusable workflow]
        BGPU4090[benchmark_gpu_4090<br/>RTX 4090 specific]
        BGPUCOP[benchmark_gpu_coprocessor<br/>Coprocessor mode]
    end

    subgraph "HPU Benchmarks - 2 workflows"
        BHPU[benchmark_hpu<br/>Intel HPU benchmarks]
        BHPUC[benchmark_hpu_common<br/>Reusable workflow]
    end

    subgraph "Specialized Benchmarks - 7 workflows"
        BWASM[benchmark_wasm_client<br/>WebAssembly client]
        BCT[benchmark_ct_key_sizes<br/>Ciphertext & key sizes]
        BFFT[benchmark_tfhe_fft<br/>FFT performance]
        BNTT[benchmark_tfhe_ntt<br/>NTT performance]
        BWHITE[benchmark_whitepaper<br/>Whitepaper params]
        BREG[benchmark_perf_regression<br/>Regression detection]
        BDOC[benchmark_documentation<br/>Generate docs]
    end

    DISP[Workflow Dispatch<br/>Manual Trigger] --> BCPU
    DISP --> BCPUW
    DISP --> BGPU
    DISP --> BGPUW
    DISP --> BHPU
    DISP --> BWASM
    DISP --> BCT
    DISP --> BFFT
    DISP --> BNTT
    DISP --> BWHITE
    DISP --> BREG
    DISP --> BDOC
    DISP --> BGPU4090
    DISP --> BGPUCOP
```

### CPU Benchmarks (3 workflows)

| Workflow | Purpose | Operations Tested |
|----------|---------|-------------------|
| **benchmark_cpu** | Main CPU performance benchmarks | integer, signed_integer, integer_compression, integer_zk, shortint, shortint_oprf, hlapi, hlapi_erc20, hlapi_dex, hlapi_noise_squash, tfhe_zk_pok, boolean, pbs, pbs128, ks, ks_pbs |
| **benchmark_cpu_weekly** | Weekly scheduled CPU benchmarks | Similar to benchmark_cpu |
| **benchmark_cpu_common** | Reusable workflow for CPU benchmarks | Shared logic |

### GPU Benchmarks (5 workflows)

| Workflow | Purpose | Hardware |
|----------|---------|----------|
| **benchmark_gpu** | Main GPU performance benchmarks | Standard GPU |
| **benchmark_gpu_weekly** | Weekly scheduled GPU benchmarks | Standard GPU |
| **benchmark_gpu_4090** | Benchmarks on RTX 4090 | RTX 4090 |
| **benchmark_gpu_coprocessor** | GPU coprocessor mode benchmarks | GPU |
| **benchmark_gpu_common** | Reusable workflow for GPU benchmarks | Shared logic |

### HPU Benchmarks (2 workflows)

| Workflow | Purpose | Hardware |
|----------|---------|----------|
| **benchmark_hpu** | Intel HPU performance benchmarks | Intel HPU |
| **benchmark_hpu_common** | Reusable workflow for HPU benchmarks | Shared logic |

### Specialized Benchmarks (7 workflows)

| Workflow | Purpose | Focus |
|----------|---------|-------|
| **benchmark_wasm_client** | WebAssembly client performance | WASM execution |
| **benchmark_ct_key_sizes** | Measure ciphertext and key sizes | Memory footprint |
| **benchmark_tfhe_fft** | FFT library performance | tfhe-fft crate |
| **benchmark_tfhe_ntt** | NTT library performance | tfhe-ntt crate |
| **benchmark_whitepaper** | Whitepaper parameter validation | Research params |
| **benchmark_perf_regression** | Detect performance regressions | Regression testing |
| **benchmark_documentation** | Generate benchmark documentation | Documentation |

### Benchmark Configuration Options

The lists below summarize the dispatch options; a sketch of the corresponding `workflow_dispatch` inputs follows them.

**📏 Operation Flavors:**
- `default` - Standard operations
- `fast_default` - Fast variant operations
- `smart` - Smart operations (with automatic PBS)
- `unchecked` - Unchecked operations (no PBS)
- `misc` - Miscellaneous operations

**🎯 Precision Sets:**
- `fast` - Quick validation subset
- `all` - All supported bit precisions
- `documentation` - Precisions for documentation

**⏱️ Benchmark Types:**
- `latency` - Single operation timing
- `throughput` - Operations per second
- `both` - Both latency and throughput

**🔧 Parameter Types:**
- `classical` - Classical parameters
- `multi_bit` - Multi-bit parameters
- `classical + multi_bit` - Both parameter sets
- `classical_documentation` - Classical for docs
- `multi_bit_documentation` - Multi-bit for docs
- `classical_documentation + multi_bit_documentation` - Both for docs
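To make these options concrete, a benchmark workflow's dispatch block might look like the following sketch; the input names echo the `matrix.op_flavor`, `matrix.bench_type`, and `matrix.params_type` values visible in the hunks above, but the exact declarations and defaults are assumptions:

```yaml
on:
  workflow_dispatch:
    inputs:
      op_flavor:
        description: "Operation flavor"
        type: choice
        options: [default, fast_default, smart, unchecked, misc]
        default: default
      bench_type:
        description: "Benchmark type"
        type: choice
        options: [latency, throughput, both]
        default: latency
      params_type:
        description: "Parameter set"
        type: choice
        options: [classical, multi_bit, "classical + multi_bit"]
        default: multi_bit
```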
---

## 📦 Release Management Workflows (9 workflows)

```mermaid
flowchart TB
    subgraph "Release Workflows"
        RTFHE[make_release_tfhe]
        RCUDA[make_release_cuda]
        RHPU[make_release_hpu]
        RFFT[make_release_tfhe_fft]
        RNTT[make_release_tfhe_ntt]
        RCSPRNG[make_release_tfhe_csprng]
        RZK[make_release_zk_pok]
        RVER[make_release_tfhe_versionable]
        RCOMMON[make_release_common]
    end

    DISP[Workflow Dispatch<br/>Manual Only] --> RTFHE
    DISP --> RCUDA
    DISP --> RHPU
    DISP --> RFFT
    DISP --> RNTT
    DISP --> RCSPRNG
    DISP --> RZK
    DISP --> RVER

    RTFHE --> |Publishes to|CRATES[crates.io]
    RTFHE --> |Publishes to|NPM[npm registry]

    style RTFHE fill:#ff6b6b
    style DISP fill:#ffd93d
```

| Workflow | Purpose | Platforms |
|----------|---------|-----------|
| **make_release_tfhe** | Release main TFHE library | crates.io, npm (web & node packages) |
| **make_release_cuda** | Release CUDA backend | crates.io |
| **make_release_hpu** | Release HPU backend | crates.io |
| **make_release_tfhe_fft** | Release FFT library | crates.io |
| **make_release_tfhe_ntt** | Release NTT library | crates.io |
| **make_release_tfhe_csprng** | Release CSPRNG library | crates.io |
| **make_release_zk_pok** | Release Zero-Knowledge Proof of Knowledge library | crates.io |
| **make_release_tfhe_versionable** | Release versionable trait library | crates.io |
| **make_release_common** | Shared release logic | Reusable workflow |

**Release Options:**
- 🧪 Dry-run mode
- 📦 Push to crates.io
- 🌐 Push web JS package
- 📱 Push Node.js package
- 🏷️ Set NPM latest tag

---

## 🛠️ CI/CD & Maintenance Workflows (10 workflows)

```mermaid
flowchart TB
    subgraph "Code Quality"
        LINT[ci_lint]
        COMMIT[check_commit]
        AUDIT[cargo_audit]
    end

    subgraph "PR Management"
        APPROVE[approve_label]
        UNVER[unverified_prs]
        VERIFY[verify_triggering_actor]
    end

    subgraph "Repository Sync"
        SYNC[sync_on_push]
    end

    subgraph "SVG Generation"
        SVG[generate_svgs]
        SVGC[generate_svg_common]
    end

    PR[Pull Request] --> LINT
    PR --> COMMIT
    PR --> APPROVE

    SCHED1[Daily 4am UTC] --> AUDIT
    SCHED2[Daily 1:30am UTC] --> UNVER

    PUSH[Push to main] --> SYNC

    DISP[Workflow Dispatch] --> SVG
    DISP --> AUDIT
    DISP --> SYNC
```

| Workflow | Trigger | Purpose |
|----------|---------|---------|
| **ci_lint** | PR | Lint workflows with actionlint & check security with zizmor |
| **check_commit** | PR | Validate commit message format, line length, and signatures |
| **approve_label** | PR / PR Review | Auto-manage the "approved" label on PRs |
| **cargo_audit** | Daily 4am UTC / Manual | Check dependencies for security vulnerabilities |
| **unverified_prs** | Daily 1:30am UTC | Close PRs without a CLA signature after 2 days |
| **verify_triggering_actor** | Various | Verify actor permissions for sensitive workflows |
| **sync_on_push** | Push to main / Manual | Sync repository to internal mirror |
| **generate_svgs** | Manual | Generate parameter curve SVG visualizations |
| **generate_svg_common** | Reusable | Common SVG generation logic |
| **placeholder_workflow** | N/A | Template workflow |

---

## 🔐 Security & Quality Workflows

```mermaid
flowchart LR
    subgraph "Security Checks"
        A[commit signatures]
        B[dependency audit]
        C[zizmor security scan]
        D[parameters security]
    end

    subgraph "Quality Checks"
        E[commit format]
        F[actionlint]
        G[code coverage]
        H[randomness tests]
    end

    COMMIT[check_commit] --> A
    COMMIT --> E

    AUDIT[cargo_audit] --> B

    LINT[ci_lint] --> C
    LINT --> F

    PARAMS[parameters_check] --> D

    COV[code_coverage] --> G

    CSPRNG[csprng_randomness_tests] --> H
```

---

## 📈 Workflow Statistics

### By Trigger Type

| Trigger | Count | Examples |
|---------|-------|----------|
| **Workflow Dispatch** (Manual) | 65 | All benchmarks, releases, most tests |
| **Pull Request** | 18 | Build, lint, fast tests, GPU tests |
| **Pull Request (approved label)** | 12 | AWS tests, GPU memory tests |
| **Schedule/Cron** | 5 | Nightly tests, audit, unverified PRs |
| **Push to main** | 2 | Sync, parameters check |
| **Label Events** | 3 | M1 tests, approve workflow |

### By Runner Type

| Runner | Count | Purpose |
|--------|-------|---------|
| **AWS CPU** | 15 | Main testing infrastructure |
| **Hyperstack GPU** | 13 | GPU testing and benchmarks |
| **Self-hosted M1 Mac** | 1 | Apple Silicon testing |
| **Intel HPU** | 2 | HPU testing and benchmarks |
| **Ubuntu Latest** | 25 | CI/CD, builds, coordination |
| **Windows** | 1 | Windows builds |

---

## 🎯 Key Workflow Patterns

### 1. Instance Management Pattern

Many workflows follow this pattern for cost optimization:

```mermaid
sequenceDiagram
    participant GitHub
    participant Setup
    participant Runner
    participant Tests
    participant Teardown

    GitHub->>Setup: Trigger workflow
    Setup->>Runner: Start AWS/GPU instance
    Runner->>Tests: Execute tests
    Tests->>Teardown: Complete (success/fail)
    Teardown->>Runner: Stop instance
    Teardown->>GitHub: Send Slack notification
```

**Workflows using this pattern** (a YAML sketch follows this list):
- All `aws_tfhe_*` workflows
- All `gpu_*` workflows
- `hpu_hlapi_tests`
- `code_coverage`
- `parameters_check`
- `csprng_randomness_tests`
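A skeletal version of this pattern, with placeholder commands standing in for the real instance provisioning (the actual workflows use a dedicated setup action and runner labels not reproduced here):

```yaml
jobs:
  setup-instance:
    runs-on: ubuntu-latest
    steps:
      - name: Start remote instance
        run: echo "start AWS/GPU instance via provider API" # placeholder

  tests:
    needs: setup-instance
    runs-on: [self-hosted, linux] # label of the started instance (assumption)
    steps:
      - uses: actions/checkout@v4
      - run: make test

  teardown-instance:
    # Always stop the instance, even if tests failed or were cancelled
    needs: [setup-instance, tests]
    if: always()
    runs-on: ubuntu-latest
    steps:
      - name: Stop remote instance
        run: echo "stop instance via provider API" # placeholder
```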
### 2. Branch Protection Rules (BPR)

Workflows marked with `(bpr)` are required for PRs to be merged:

- ✅ `cargo_build/cargo-builds (bpr)`
- ✅ `ci_lint/lint-check (bpr)`
- ✅ `check_commit/check-commit-pr (bpr)`

### 3. File Change Detection

Many workflows use `tj-actions/changed-files` to conditionally run tests based on changed files, optimizing CI time and resources, as in the sketch below.
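The hunks in this diff show the idiom directly; a minimal sketch combining them (the `files_yaml` key `gpu` and the `*_any_changed` output follow the usage visible above, while the path filter and make target are illustrative):

```yaml
- name: Check for file changes
  id: changed-files
  uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
  with:
    files_yaml: |
      gpu:
        - backends/tfhe-cuda-backend/** # illustrative path filter

- name: Run GPU tests
  if: steps.changed-files.outputs.gpu_any_changed == 'true'
  run: make test_gpu # illustrative target
```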
---

## 🔄 Workflow Dependencies

Reusable workflows are invoked by thin parent workflows, as sketched after the diagram below.

```mermaid
graph TD
    subgraph "Reusable Workflows"
        COMMON[cargo_build_common]
        BENCH_CPU_C[benchmark_cpu_common]
        BENCH_GPU_C[benchmark_gpu_common]
        BENCH_HPU_C[benchmark_hpu_common]
        REL_COMMON[make_release_common]
        SVG_COMMON[generate_svg_common]
    end

    subgraph "Parent Workflows"
        BUILD[cargo_build]
        BENCH_CPU[benchmark_cpu]
        BENCH_GPU[benchmark_gpu]
        BENCH_HPU[benchmark_hpu]
        RELEASES[make_release_*]
        SVG[generate_svgs]
    end

    BUILD --> COMMON
    BENCH_CPU --> BENCH_CPU_C
    BENCH_GPU --> BENCH_GPU_C
    BENCH_HPU --> BENCH_HPU_C
    RELEASES --> REL_COMMON
    SVG --> SVG_COMMON
```
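The parent-to-common relationship uses GitHub's standard reusable-workflow mechanism. A sketch of both sides, with the input name assumed for illustration:

```yaml
# benchmark_cpu_common.yml: declare the workflow as callable
on:
  workflow_call:
    inputs:
      bench_type:
        type: string
        required: true

# benchmark_cpu.yml: delegate to the reusable workflow
jobs:
  cpu-benchmarks:
    uses: ./.github/workflows/benchmark_cpu_common.yml
    with:
      bench_type: latency
    secrets: inherit
```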
---

## 📝 Workflow Naming Convention

```
<category>_<component>_<type>
```

Examples:
- `aws_tfhe_tests` - AWS infrastructure, TFHE component, tests type
- `gpu_fast_tests` - GPU infrastructure, fast variant, tests type
- `benchmark_cpu_weekly` - Benchmark category, CPU target, weekly schedule
- `make_release_tfhe` - Make/release action, TFHE component

---

## 🚀 Quick Reference

### Running Tests on PR

1. **Quick validation**: Automatic on PR creation
   - `cargo_build` - Build checks
   - `ci_lint` - Linting
   - `check_commit` - Commit format
   - `gpu_fast_tests` - Basic GPU tests

2. **Full test suite**: After PR approval (add the "approved" label)
   - `aws_tfhe_tests` - Comprehensive CPU tests
   - `gpu_memory_sanitizer` - Memory checks
   - GPU integer tests

3. **Special hardware**: Manual label addition
   - Add the `m1_test` label for M1 Mac tests

### Running Benchmarks

All benchmarks are **manual only** via workflow dispatch. Choose:
- Target: CPU, GPU, HPU, or WASM
- Operation flavor: default, smart, unchecked
- Precision set: fast, all, documentation
- Benchmark type: latency, throughput, both

### Creating a Release

1. Run the appropriate `make_release_*` workflow
2. Configure the options (dry-run, push to crates.io, npm packages)
3. The workflow handles versioning, building, and publishing
4. Includes provenance and SLSA attestation

---

## 🔔 Notification System

All critical workflows send Slack notifications on:
- ❌ Failure
- 🚫 Cancellation (non-PR events)
- ⚠️ Instance teardown failures

Notifications include (a minimal step sketch follows this list):
- Job status
- Pull request link (if applicable)
- Action run URL
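A minimal notification step in this spirit, assuming a `SLACK_WEBHOOK` secret and a plain webhook call rather than whichever pinned Slack action the workflows actually use:

```yaml
- name: Slack notification
  if: failure() || (cancelled() && github.event_name != 'pull_request')
  env:
    SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} # assumed secret name
  run: |
    curl -sS -X POST -H 'Content-type: application/json' \
      --data "{\"text\":\"${{ github.workflow }} failed: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}\"}" \
      "$SLACK_WEBHOOK"
```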
---

## 📚 Additional Resources

- **Workflow Files**: `.github/workflows/`
- **Reusable Actions**: `.github/actions/`
- **Configuration**: `ci/slab.toml`
- **Scripts**: `scripts/` directory

---

## ✅ Verification Summary

**Total Workflows: 71**

Count by category:
- Testing & Validation: **31 workflows** (7 AWS CPU + 16 GPU + 1 HPU + 1 M1 + 4 special + 2 cargo tests)
- Benchmarking: **17 workflows** (3 CPU + 5 GPU + 2 HPU + 7 specialized)
- Building & Compilation: **4 workflows**
- Release Management: **9 workflows**
- CI/CD & Maintenance: **10 workflows**

**Verification:** 31 + 17 + 4 + 9 + 10 = **71** ✅

*Last Updated: 2026-01-08*
@@ -1,24 +1,32 @@
+08f31a47c29cc4d72ad32c0b5411fa20b3deef5b84558dd2fb892d3cdf90528a data/toy_params/glwe_after_id_br_karatsuba.cbor
 29b6e3e7d27700004b70dca24d225816500490e2d6ee49b9af05837fd421896b data/valid_params_128/lwe_after_spec_pbs.cbor
 2c70d1d78cc3760733850a353ace2b9c4705e840141b75841739e90e51247e18 data/valid_params_128/small_lwe_secret_key.cbor
+2fb4bb45c259b8383da10fc8f9459c40a6972c49b1696eb107f0a75640724be5 data/toy_params/lwe_after_id_pbs_karatsuba.cbor
 36c9080b636475fcacca503ce041bbfeee800fd3e1890dee559ea18defff9fe8 data/toy_params/glwe_after_id_br.cbor
 377761beeb4216cf5aa2624a8b64b8259f5a75c32d28e850be8bced3a0cdd6f5 data/toy_params/ksk.cbor
 59dba26d457f96478eda130cab5301fce86f23c6a8807de42f2a1e78c4985ca7 data/valid_params_128/lwe_ks.cbor
+5d80dd93fefae4f4f89484dfcd65bbe99cc32e7e3b0a90c33dd0d77516c0a023 data/valid_params_128/glwe_after_id_br_karatsuba.cbor
 656f0009c7834c5bcb61621e222047516054b9bc5d0593d474ab8f1c086b67a6 data/valid_params_128/lwe_after_id_pbs.cbor
 699580ca92b9c2f9e1f57fb1e312c9e8cb29714f7acdef9d2ba05f798546751f data/toy_params/lwe_sum.cbor
 6e54ab41056984595b077baff70236d934308cf5c0c33b4482fbfb129b3756c6 data/valid_params_128/glwe_after_id_br.cbor
 70f5e5728822de05b49071efb5ec28551b0f5cc87aa709a455d8e7f04b9c96ee data/toy_params/lwe_after_id_pbs.cbor
+76a5c52cab7fec1dc167da676c6cd39479cda6b2bb9f4e0573cb7d99c2692faa data/valid_params_128/lwe_after_id_pbs_karatsuba.cbor
 7cc6803f5fbc3d5a1bf597f2b979ce17eecd3d6baca12183dea21022a7b65c52 data/toy_params/bsk.cbor
 7f3c40a134623b44779a556212477fea26eaed22450f3b6faeb8721d63699972 data/valid_params_128/lwe_sum.cbor
 837b3bd3245d4d0534ed255fdef896fb4fa6998a258a14543dfdadd0bfc9b6dd data/toy_params/lwe_prod.cbor
+9ece8ca9c1436258b94e8c5e629b8722f9b18fdd415dd5209b6167a9dde8491c data/toy_params/glwe_after_spec_br_karatsuba.cbor
 aa44aea29efd6d9e4d35a21a625d9cba155672e3f7ed3eddee1e211e62ad146b data/valid_params_128/lwe_ms.cbor
 b7a037b9eaa88d6385167579b93e26a0cb6976d9b8967416fd1173e113bda199 data/valid_params_128/large_lwe_secret_key.cbor
+b7b8e3586128887bd682120f3e3a43156139bce5e3fe0b03284f8753a864d647 data/toy_params/lwe_after_spec_pbs_karatsuba.cbor
 bd00a8ae7494e400de5753029552ee1647efe7e17409b863a26a13b081099b8c data/toy_params/lwe_after_spec_pbs.cbor
 c6df98676de04fe54b5ffc2eb30a82ebb706c9d7d5a4e0ed509700fec88761f7 data/toy_params/lwe_ms.cbor
 c7d5a864d5616a7d8ad50bbf40416e41e6c9b60c546dc14d4aa8fc40a418baa7 data/toy_params/large_lwe_secret_key.cbor
 c806533b325b1009db38be2f9bef5f3b2fad6b77b4c71f2855ccc9d3b4162e98 data/valid_params_128/lwe_b.cbor
 c9eb75bd2993639348a679cf48c06e3c38d1a513f48e5b0ce0047cea8cff6bbc data/toy_params/lwe_a.cbor
+d3391969acf26dc69de0927ba279139d8d79999944069addc8ff469ad6c5ae2d data/valid_params_128/lwe_after_spec_pbs_karatsuba.cbor
 d6da5baef0e787f6be56e218d8354e26904652602db964844156fdff08350ce6 data/toy_params/lwe_ks.cbor
 e591ab9af1b6a0aede273f9a3abb65a4c387feb5fa06a6959e9314058ca0f7e5 data/valid_params_128/ksk.cbor
+e59b002df3a9b01ad321ec51cf076fa35131ab9dbef141d1c54b717d61426c92 data/valid_params_128/glwe_after_spec_br_karatsuba.cbor
 e628354c81508a2d888016e8282df363dd12f1e19190b6475d4eb9d7ab8ae007 data/valid_params_128/glwe_after_spec_br.cbor
 e69d2d2c064fc8c0460b39191ca65338146990349954f5ec5ebd01d93610e7eb data/valid_params_128/lwe_a.cbor
 e76c24b2a0c9a842ad13dda35473c2514f9e7d20983b5ea0759c4521a91626d9 data/valid_params_128/lwe_prod.cbor
@@ -39,6 +39,9 @@ The following values are generated:
| `glwe_after_spec_br` | The glwe returned by the application of the spec blind rotation on the mod switched ciphertexts. | `GlweCiphertext<Vec<u64>>` | rot spec LUT |
| `lwe_after_spec_pbs` | The lwe returned by the application of the sample extract operation on the output of the spec blind rotation. | `LweCiphertext<Vec<u64>>` | `spec(A)` |

Ciphertexts with the `_karatsuba` suffix are generated using the Karatsuba polynomial multiplication algorithm in the blind rotation, while default ciphertexts are generated using an FFT multiplication. Since the Karatsuba multiplication works on exact integer arithmetic rather than floating-point FFTs, this makes it easier to reproduce bit-exact results, as sketched below.
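
The sketch below shows how the two variants of the same LUT are produced (it mirrors the calls in `generate_test_vectors` further down; `encoding`, `glwe_dimension`, `polynomial_size`, `modswitched`, `fourier_bsk` and `bsk` are assumed to be in scope as they are there):

```rust
// Hedged sketch: generate the FFT and Karatsuba variants of the identity
// blind rotation, mirroring `generate_test_vectors` below.
let mut lut_fft = encoding.encode_lut(glwe_dimension, polynomial_size, ID_LUT);
let mut lut_karatsuba = lut_fft.clone();

// FFT-based rotation: fast, but floating-point rounding can vary across platforms.
blind_rotate_assign(&modswitched, &mut lut_fft, &fourier_bsk);
// Karatsuba rotation: exact integer arithmetic, hence bit-exact and reproducible.
blind_rotate_karatsuba_assign(&modswitched, &mut lut_karatsuba, &bsk);
```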

### Encodings

#### Non-native encoding

Warning: TFHE-rs uses a specific encoding for non-native (i.e. smaller than the native u32/u64 width) power-of-two ciphertext moduli. This encoding puts the encoded value in the high bits of the native integer, as illustrated below.
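
A minimal sketch of that bit placement (plain Rust, not TFHE-rs API; the 2^32 modulus and the value are arbitrary examples):

```rust
// A non-native power-of-two modulus 2^32, with ciphertexts stored in native u64:
// a value v mod 2^32 is represented as v << 32, i.e. placed in the high 32 bits.
let modulus_log: u32 = 32;
let v: u64 = 0x1234_5678;
let encoded = v << (u64::BITS - modulus_log);
assert_eq!(encoded >> (u64::BITS - modulus_log), v);
```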
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:08f31a47c29cc4d72ad32c0b5411fa20b3deef5b84558dd2fb892d3cdf90528a
size 4679
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9ece8ca9c1436258b94e8c5e629b8722f9b18fdd415dd5209b6167a9dde8491c
size 4679
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2fb4bb45c259b8383da10fc8f9459c40a6972c49b1696eb107f0a75640724be5
size 2365
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b7b8e3586128887bd682120f3e3a43156139bce5e3fe0b03284f8753a864d647
size 2365
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d80dd93fefae4f4f89484dfcd65bbe99cc32e7e3b0a90c33dd0d77516c0a023
size 36935
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e59b002df3a9b01ad321ec51cf076fa35131ab9dbef141d1c54b717d61426c92
size 36935
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:76a5c52cab7fec1dc167da676c6cd39479cda6b2bb9f4e0573cb7d99c2692faa
size 18493
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d3391969acf26dc69de0927ba279139d8d79999944069addc8ff469ad6c5ae2d
size 18493
@@ -265,6 +265,7 @@ fn generate_test_vectors<P: AsRef<Path>>(

    let mut id_lut = encoding.encode_lut(glwe_dimension, polynomial_size, ID_LUT);
    assert_data_not_zero(&id_lut);
    let mut id_lut_karatsuba = id_lut.clone();

    blind_rotate_assign(&modswitched, &mut id_lut, &fourier_bsk);
    assert_data_not_zero(&id_lut);
@@ -287,8 +288,32 @@ fn generate_test_vectors<P: AsRef<Path>>(
    assert_data_not_zero(&lwe_pbs_id);
    store_data(path, &lwe_pbs_id, "lwe_after_id_pbs");

    blind_rotate_karatsuba_assign(&modswitched, &mut id_lut_karatsuba, &bsk);
    store_data(path, &id_lut_karatsuba, "glwe_after_id_br_karatsuba");

    let mut lwe_pbs_karatsuba_id = LweCiphertext::new(
        0u64,
        glwe_dimension
            .to_equivalent_lwe_dimension(polynomial_size)
            .to_lwe_size(),
        encoding.ciphertext_modulus,
    );

    extract_lwe_sample_from_glwe_ciphertext(
        &id_lut_karatsuba,
        &mut lwe_pbs_karatsuba_id,
        MonomialDegree(0),
    );

    let decrypted_pbs_id = decrypt_lwe_ciphertext(&large_lwe_secret_key, &lwe_pbs_karatsuba_id);
    let res = encoding.decode(decrypted_pbs_id);

    assert_eq!(res, MSG_A);
    store_data(path, &lwe_pbs_karatsuba_id, "lwe_after_id_pbs_karatsuba");

    let mut spec_lut = encoding.encode_lut(glwe_dimension, polynomial_size, SPEC_LUT);
    assert_data_not_zero(&spec_lut);
    let mut spec_lut_karatsuba = spec_lut.clone();

    blind_rotate_assign(&modswitched, &mut spec_lut, &fourier_bsk);
    assert_data_not_zero(&spec_lut);
@@ -310,6 +335,33 @@ fn generate_test_vectors<P: AsRef<Path>>(
    assert_eq!(res, SPEC_LUT(MSG_A));
    assert_data_not_zero(&lwe_pbs_spec);
    store_data(path, &lwe_pbs_spec, "lwe_after_spec_pbs");

    blind_rotate_karatsuba_assign(&modswitched, &mut spec_lut_karatsuba, &bsk);
    store_data(path, &spec_lut_karatsuba, "glwe_after_spec_br_karatsuba");

    let mut lwe_pbs_karatsuba_spec = LweCiphertext::new(
        0u64,
        glwe_dimension
            .to_equivalent_lwe_dimension(polynomial_size)
            .to_lwe_size(),
        encoding.ciphertext_modulus,
    );

    extract_lwe_sample_from_glwe_ciphertext(
        &spec_lut_karatsuba,
        &mut lwe_pbs_karatsuba_spec,
        MonomialDegree(0),
    );

    let decrypted_pbs_spec = decrypt_lwe_ciphertext(&large_lwe_secret_key, &lwe_pbs_karatsuba_spec);
    let res = encoding.decode(decrypted_pbs_spec);

    assert_eq!(res, SPEC_LUT(MSG_A));
    store_data(
        path,
        &lwe_pbs_karatsuba_spec,
        "lwe_after_spec_pbs_karatsuba",
    );
}

fn rm_dir_except_readme<P: AsRef<Path>>(dir: P) {
@@ -61,8 +61,8 @@ bitvec = { version = "1.0", optional = true }
serde_json = { version = "1.0", optional = true }

# Dependencies used for v80 pdi handling
bincode ={ version = "1.3", optional = true}
serde_derive ={ version = "1.0", optional = true}
bincode = { workspace = true, optional = true }
serde_derive = { version = "1.0", optional = true }

# Binary for manual debugging
# Enable to access Hpu register and drive some custom sequence by hand
@@ -239,7 +239,12 @@ pub fn iop_erc_20(prog: &mut Program) {
pub fn iop_erc_20_simd(prog: &mut Program) {
    // Add Comment header
    prog.push_comment("ERC_20_SIMD (new_from, new_to) <- (from, to, amount)".to_string());
    simd(prog, crate::asm::iop::SIMD_N, fw_impl::llt::iop_erc_20_rtl, None);
    simd(
        prog,
        crate::asm::iop::SIMD_N,
        fw_impl::llt::iop_erc_20_rtl,
        None,
    );
}

#[instrument(level = "trace", skip(prog))]
@@ -430,7 +435,8 @@ pub fn iop_erc_20_rtl(prog: &mut Program, batch_index: u8, kogge_blk_w: Option<u
    if let Some(blk_w) = kogge_blk_w {
        kogge::add(prog, dst_to, src_to, src_amount.clone(), None, blk_w)
            + kogge::sub(prog, dst_from, src_from, src_amount, blk_w)
    } else { // Default to ripple carry
    } else {
        // Default to ripple carry
        kogge::ripple_add(dst_to, src_to, src_amount.clone(), None)
            + kogge::ripple_sub(prog, dst_from, src_from, src_amount)
    }
@@ -367,6 +367,8 @@ def dump_benchmark_results(results, browser_kind):
    """
    Dump benchmark results as JSON into a file.
    If `results` is an empty dict then this function is a no-op.
    If the file already exists, new results are merged with the existing ones,
    overwriting keys that already exist.

    :param results: benchmark results as :class:`dict`
    :param browser_kind: browser as :class:`BrowserKind`
@@ -376,7 +378,15 @@ def dump_benchmark_results(results, browser_kind):
        key.replace("mean", "_".join((browser_kind.name, "mean"))): val
        for key, val in results.items()
    }
    pathlib.Path("tfhe-benchmark/wasm_benchmark_results.json").write_text(json.dumps(results))
    results_path = pathlib.Path("tfhe-benchmark/wasm_benchmark_results.json")
    existing_results = {}
    if results_path.exists():
        try:
            existing_results = json.loads(results_path.read_text())
        except json.JSONDecodeError:
            pass
    existing_results.update(results)
    results_path.write_text(json.dumps(existing_results))


def start_web_server(
@@ -33,7 +33,11 @@ RUSTFLAGS="$RUSTFLAGS" cargo nextest list --cargo-profile "${CARGO_PROFILE}" \
    --features=integer,internal-keycache,gpu-debug,zk-pok -p tfhe &> /tmp/test_list.txt

if [[ "${RUN_VALGRIND}" == "1" ]]; then
    TESTS_TO_RUN=$(sed -e $'s/\x1b\[[0-9;]*m//g' < /tmp/test_list.txt | grep -E 'high_level_api::.*gpu.*' | grep -v 'array' | grep -v 'flip')
    # The tests are filtered using grep to keep only the HL GPU tests.
    # When its output is redirected to a file, nextest prints a list of `<executable name> <test name>`,
    # so the `grep -o '[^ ]\+$'` filter keeps only the test name; the `tfhe` executable is assumed.
    # Changes might be needed to sanitize tests coming from another executable.
    TESTS_TO_RUN=$(sed -e $'s/\x1b\[[0-9;]*m//g' < /tmp/test_list.txt | grep -E 'high_level_api::.*gpu.*' | grep -v 'array' | grep -v 'flip' | grep -o '[^ ]\+$')

    # Build the tests but don't run them
    RUSTFLAGS="$RUSTFLAGS" cargo test --no-run --profile "${CARGO_PROFILE}" \
@@ -56,7 +60,11 @@ if [[ "${RUN_VALGRIND}" == "1" ]]; then
fi

if [[ "${RUN_COMPUTE_SANITIZER}" == "1" ]]; then
    TESTS_TO_RUN=$(sed -e $'s/\x1b\[[0-9;]*m//g' < /tmp/test_list.txt | grep -E 'high_level_api::.*gpu.*|core_crypto::.*gpu.*' | grep -v 'array' | grep -v 'modulus_switch' | grep -v '3_3' | grep -v 'noise_distribution' | grep -v 'flip')
    # The tests are filtered using grep to keep only the HL / core_crypto GPU tests.
    # When its output is redirected to a file, nextest prints a list of `<executable name> <test name>`,
    # so the `grep -o '[^ ]\+$'` filter keeps only the test name; the `tfhe` executable is assumed.
    # Changes might be needed to sanitize tests coming from another executable.
    TESTS_TO_RUN=$(sed -e $'s/\x1b\[[0-9;]*m//g' < /tmp/test_list.txt | grep -E 'high_level_api::.*gpu.*|core_crypto::.*gpu.*' | grep -v 'array' | grep -v 'modulus_switch' | grep -v '3_3' | grep -v 'noise_distribution' | grep -v 'flip' | grep -o '[^ ]\+$')
    # Build the tests but don't run them
    RUSTFLAGS="$RUSTFLAGS" cargo test --no-run --profile "${CARGO_PROFILE}" \
        --features=integer,internal-keycache,gpu,zk-pok -p tfhe
@@ -15,7 +15,7 @@ name = "benchmark"
path = "src/lib.rs"

[dependencies]
bincode = "1.3.3"
bincode = { workspace = true }
# clap has to be pinned as its minimum supported rust version
# changes often between minor releases, which breaks our CI
clap = { version = "=4.5.30", features = ["derive"] }
@@ -28,12 +28,22 @@ pub fn transfer_whitepaper<FheType>(
    amount: &FheType,
) -> (FheType, FheType)
where
    FheType: Add<Output = FheType> + for<'a> FheOrd<&'a FheType>,
    FheBool: IfThenZero<FheType>,
    FheType: Add<Output = FheType> + for<'a> FheOrd<&'a FheType> + FheTrivialEncrypt<u64>,
    FheBool: IfThenZero<FheType> + IfThenElse<FheType>,
    for<'a> &'a FheType: Add<Output = FheType> + Sub<Output = FheType>,
{
    let has_enough_funds = (from_amount).ge(amount);
    let amount_to_transfer = has_enough_funds.if_then_zero(amount);
    let amount_to_transfer = {
        #[cfg(not(feature = "hpu"))]
        {
            let zero_amount = FheType::encrypt_trivial(0u64);
            has_enough_funds.select(amount, &zero_amount)
        }
        #[cfg(feature = "hpu")]
        {
            has_enough_funds.if_then_zero(amount)
        }
    };

    let new_to_amount = to_amount + &amount_to_transfer;
    let new_from_amount = from_amount - &amount_to_transfer;
@@ -50,13 +60,21 @@ pub fn par_transfer_whitepaper<FheType>(
where
    FheType:
        Add<Output = FheType> + for<'a> FheOrd<&'a FheType> + Send + Sync + FheTrivialEncrypt<u64>,
    FheBool: IfThenZero<FheType>,
    FheBool: IfThenZero<FheType> + IfThenElse<FheType>,
    for<'a> &'a FheType: Add<Output = FheType> + Sub<Output = FheType>,
{
    let has_enough_funds = (from_amount).ge(amount);
    //let zero_amount = FheType::encrypt_trivial(0u64);
    //let amount_to_transfer = has_enough_funds.select(amount, &zero_amount);
    let amount_to_transfer = has_enough_funds.if_then_zero(amount);
    let amount_to_transfer = {
        #[cfg(feature = "gpu")]
        {
            let zero_amount = FheType::encrypt_trivial(0u64);
            has_enough_funds.select(amount, &zero_amount)
        }
        #[cfg(not(feature = "gpu"))]
        {
            has_enough_funds.if_then_zero(amount)
        }
    };

    let (new_to_amount, new_from_amount) = rayon::join(
        || to_amount + &amount_to_transfer,
@@ -630,7 +630,7 @@ mod integer_params {
        #[cfg(feature = "hpu")]
        let params = vec![BENCH_HPU_PARAM_MESSAGE_2_CARRY_2_KS32_PBS_TUNIFORM_2M128.into()];
        #[cfg(not(feature = "hpu"))]
        let params = vec![BENCH_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M128.into()];
        let params = vec![BENCH_PARAM_MESSAGE_2_CARRY_2_KS32_PBS.into()];

        let params_and_bit_sizes = iproduct!(params, env_config.bit_sizes());
        Self {
@@ -1,12 +1,10 @@
#[cfg(any(feature = "shortint", feature = "integer"))]
pub mod shortint_params_aliases {
    use tfhe::shortint::parameters::current_params::*;
    #[cfg(feature = "hpu")]
    use tfhe::shortint::parameters::KeySwitch32PBSParameters;
    use tfhe::shortint::parameters::{
        ClassicPBSParameters, CompactPublicKeyEncryptionParameters, CompressionParameters,
        MultiBitPBSParameters, NoiseSquashingCompressionParameters, NoiseSquashingParameters,
        ShortintKeySwitchingParameters,
        KeySwitch32PBSParameters, MultiBitPBSParameters, NoiseSquashingCompressionParameters,
        NoiseSquashingParameters, ShortintKeySwitchingParameters,
    };

    // KS PBS Gaussian
@@ -42,6 +40,8 @@ pub mod shortint_params_aliases {
        V1_5_PARAM_MESSAGE_4_CARRY_4_KS_PBS_TUNIFORM_2M128;
    pub const BENCH_PARAM_MESSAGE_2_CARRY_2_KS_PBS: ClassicPBSParameters =
        V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M128;
    pub const BENCH_PARAM_MESSAGE_2_CARRY_2_KS32_PBS: KeySwitch32PBSParameters =
        V1_5_PARAM_MESSAGE_2_CARRY_2_KS32_PBS_TUNIFORM_2M128;

    pub const BENCH_ALL_CLASSIC_PBS_PARAMETERS: [(&ClassicPBSParameters, &str); 141] =
        VEC_ALL_CLASSIC_PBS_PARAMETERS;
@@ -30,7 +30,7 @@ serde = ["dep:serde", "num-complex/serde"]
[dev-dependencies]
rustfft = "6.0"
rand = { workspace = true }
bincode = "1.3"
bincode = { workspace = true }
more-asserts = "0.3.1"
serde_json = "1.0.96"
dyn-stack = { workspace = true, features = ["alloc"] }
@@ -31,7 +31,7 @@ experimental = []
[dev-dependencies]
serde_json = "~1.0"
itertools = { workspace = true }
bincode = "1.3.3"
bincode = { workspace = true }
criterion = "0.5.1"

[[bench]]
@@ -58,7 +58,7 @@ tfhe-csprng = { version = "0.8.0", path = "../tfhe-csprng", features = [
] }
serde = { workspace = true, features = ["default", "derive"] }
rayon = { workspace = true }
bincode = "1.3.3"
bincode = { workspace = true }
tfhe-fft = { version = "0.10.0", path = "../tfhe-fft", features = [
    "serde",
    "fft128",
@@ -0,0 +1,415 @@
use aligned_vec::CACHELINE_ALIGN;
use dyn_stack::{PodStack, StackReq};

use crate::core_crypto::commons::traits::*;
use crate::core_crypto::commons::utils::izip_eq;
use crate::core_crypto::entities::*;
use crate::core_crypto::fft_impl::fft64::crypto::ggsw::collect_next_term;
use crate::core_crypto::fft_impl::fft64::math::decomposition::TensorSignedDecompositionLendingIter;
use crate::core_crypto::prelude::polynomial_algorithms::*;
use crate::core_crypto::prelude::{
    extract_lwe_sample_from_glwe_ciphertext, lwe_ciphertext_modulus_switch, ComputationBuffers,
    DecompositionBaseLog, DecompositionLevelCount, GlweSize, ModulusSwitchedLweCiphertext,
    MonomialDegree, PolynomialSize, SignedDecomposer,
};

pub fn programmable_bootstrap_karatsuba_lwe_ciphertext_mem_optimized_requirement<Scalar>(
    glwe_size: GlweSize,
    polynomial_size: PolynomialSize,
) -> StackReq {
    StackReq::all_of(&[
        // local accumulator
        StackReq::new_aligned::<Scalar>(glwe_size.0 * polynomial_size.0, CACHELINE_ALIGN),
        // blind rotation
        blind_rotate_karatsuba_assign_scratch::<Scalar>(glwe_size, polynomial_size),
    ])
}

/// Return the required memory for [`blind_rotate_karatsuba_assign`].
pub fn blind_rotate_karatsuba_assign_scratch<Scalar>(
    glwe_size: GlweSize,
    polynomial_size: PolynomialSize,
) -> StackReq {
    StackReq::any_of(&[
        // tmp_poly allocation
        StackReq::new_aligned::<Scalar>(polynomial_size.0, CACHELINE_ALIGN),
        StackReq::all_of(&[
            // ct1 allocation
            StackReq::new_aligned::<Scalar>(glwe_size.0 * polynomial_size.0, CACHELINE_ALIGN),
            // external product
            karatsuba_add_external_product_assign_scratch::<Scalar>(glwe_size, polynomial_size),
        ]),
    ])
}

/// Return the required memory for [`karatsuba_add_external_product_assign`].
pub fn karatsuba_add_external_product_assign_scratch<Scalar>(
    glwe_size: GlweSize,
    polynomial_size: PolynomialSize,
) -> StackReq {
    StackReq::all_of(&[
        // Output buffer
        StackReq::new_aligned::<Scalar>(glwe_size.0 * polynomial_size.0, CACHELINE_ALIGN),
        // decomposition
        StackReq::new_aligned::<Scalar>(glwe_size.0 * polynomial_size.0, CACHELINE_ALIGN),
        // decomposition term
        StackReq::new_aligned::<Scalar>(glwe_size.0 * polynomial_size.0, CACHELINE_ALIGN),
    ])
}

/// Perform a programmable bootstrap given an input [`LWE ciphertext`](`LweCiphertext`), a
/// look-up table passed as a [`GLWE ciphertext`](`GlweCiphertext`) and an [`LWE bootstrap
/// key`](`LweBootstrapKey`) using the Karatsuba polynomial multiplication. The result is written in
/// the provided output [`LWE ciphertext`](`LweCiphertext`).
///
/// If you want to manage the computation memory manually you can use
/// [`programmable_bootstrap_karatsuba_lwe_ciphertext_mem_optimized`].
///
/// # Warning
/// For a more efficient implementation of the programmable bootstrap, see
/// [`programmable_bootstrap_lwe_ciphertext`](super::programmable_bootstrap_lwe_ciphertext)
pub fn programmable_bootstrap_karatsuba_lwe_ciphertext<InputCont, OutputCont, AccCont, KeyCont>(
    input: &LweCiphertext<InputCont>,
    output: &mut LweCiphertext<OutputCont>,
    accumulator: &GlweCiphertext<AccCont>,
    bsk: &LweBootstrapKey<KeyCont>,
) where
    InputCont: Container<Element = u64>,
    OutputCont: ContainerMut<Element = u64>,
    AccCont: Container<Element = u64>,
    KeyCont: Container<Element = u64>,
{
    assert!(
        input.ciphertext_modulus().is_power_of_two(),
        "This operation requires the input to have a power of two modulus."
    );
    assert_eq!(
        output.ciphertext_modulus(),
        accumulator.ciphertext_modulus()
    );

    let mut buffers = ComputationBuffers::new();

    buffers.resize(
        programmable_bootstrap_karatsuba_lwe_ciphertext_mem_optimized_requirement::<u64>(
            bsk.glwe_size(),
            bsk.polynomial_size(),
        )
        .unaligned_bytes_required(),
    );

    programmable_bootstrap_karatsuba_lwe_ciphertext_mem_optimized(
        input,
        output,
        accumulator,
        bsk,
        buffers.stack(),
    );
}

/// Perform a programmable bootstrap given an input [`LWE ciphertext`](`LweCiphertext`), a
/// look-up table passed as a [`GLWE ciphertext`](`GlweCiphertext`) and an [`LWE bootstrap
/// key`](`LweBootstrapKey`) using the Karatsuba polynomial multiplication. The result is written in
/// the provided output [`LWE ciphertext`](`LweCiphertext`).
///
/// # Warning
/// For a more efficient implementation of the programmable bootstrap, see
/// [`programmable_bootstrap_lwe_ciphertext_mem_optimized`](super::programmable_bootstrap_lwe_ciphertext_mem_optimized)
pub fn programmable_bootstrap_karatsuba_lwe_ciphertext_mem_optimized<
    InputCont,
    OutputCont,
    AccCont,
    KeyCont,
>(
    input: &LweCiphertext<InputCont>,
    output: &mut LweCiphertext<OutputCont>,
    accumulator: &GlweCiphertext<AccCont>,
    bsk: &LweBootstrapKey<KeyCont>,
    stack: &mut PodStack,
) where
    InputCont: Container<Element = u64>,
    OutputCont: ContainerMut<Element = u64>,
    AccCont: Container<Element = u64>,
    KeyCont: Container<Element = u64>,
{
    assert_eq!(
        output.ciphertext_modulus(),
        accumulator.ciphertext_modulus()
    );
    assert_eq!(accumulator.ciphertext_modulus(), bsk.ciphertext_modulus());

    let (local_accumulator_data, stack) =
        stack.collect_aligned(CACHELINE_ALIGN, accumulator.as_ref().iter().copied());
    let mut local_accumulator = GlweCiphertextMutView::from_container(
        &mut *local_accumulator_data,
        accumulator.polynomial_size(),
        accumulator.ciphertext_modulus(),
    );

    let log_modulus = accumulator
        .polynomial_size()
        .to_blind_rotation_input_modulus_log();

    let msed = lwe_ciphertext_modulus_switch(input.as_view(), log_modulus);

    blind_rotate_karatsuba_assign_mem_optimized(&msed, &mut local_accumulator, bsk, stack);

    extract_lwe_sample_from_glwe_ciphertext(&local_accumulator, output, MonomialDegree(0));
}

/// Perform a blind rotation given an input [`modulus switched LWE
/// ciphertext`](`ModulusSwitchedLweCiphertext`), modifying a look-up table passed as a [`GLWE
/// ciphertext`](`GlweCiphertext`) and an [`LWE bootstrap key`](`LweBootstrapKey`) using the
/// Karatsuba polynomial multiplication.
///
/// If you want to manage the computation memory manually you can use
/// [`blind_rotate_karatsuba_assign_mem_optimized`].
///
/// # Warning
/// For a more efficient implementation of the blind rotation, see
/// [`blind_rotate_assign`](super::blind_rotate_assign)
pub fn blind_rotate_karatsuba_assign<OutputScalar, OutputCont, KeyCont>(
    msed_input: &impl ModulusSwitchedLweCiphertext<usize>,
    lut: &mut GlweCiphertext<OutputCont>,
    bsk: &LweBootstrapKey<KeyCont>,
) where
    OutputScalar: UnsignedTorus + CastInto<usize>,
    OutputCont: ContainerMut<Element = OutputScalar>,
    KeyCont: Container<Element = OutputScalar>,
    GlweCiphertext<OutputCont>: PartialEq<GlweCiphertext<OutputCont>>,
{
    let mut buffers = ComputationBuffers::new();

    buffers.resize(
        blind_rotate_karatsuba_assign_scratch::<u64>(bsk.glwe_size(), bsk.polynomial_size())
            .unaligned_bytes_required(),
    );

    blind_rotate_karatsuba_assign_mem_optimized(msed_input, lut, bsk, buffers.stack())
}

/// Perform a blind rotation given an input [`modulus switched LWE
/// ciphertext`](`ModulusSwitchedLweCiphertext`), modifying a look-up table passed as a [`GLWE
/// ciphertext`](`GlweCiphertext`) and an [`LWE bootstrap key`](`LweBootstrapKey`) using the
/// Karatsuba polynomial multiplication.
///
/// # Warning
/// For a more efficient implementation of the blind rotation, see
/// [`blind_rotate_assign`](super::blind_rotate_assign)
pub fn blind_rotate_karatsuba_assign_mem_optimized<OutputScalar, OutputCont, KeyCont>(
    msed_input: &impl ModulusSwitchedLweCiphertext<usize>,
    lut: &mut GlweCiphertext<OutputCont>,
    bsk: &LweBootstrapKey<KeyCont>,
    stack: &mut PodStack,
) where
    OutputScalar: UnsignedTorus + CastInto<usize>,
    OutputCont: ContainerMut<Element = OutputScalar>,
    KeyCont: Container<Element = OutputScalar>,
    GlweCiphertext<OutputCont>: PartialEq<GlweCiphertext<OutputCont>>,
{
    assert!(lut.ciphertext_modulus().is_power_of_two());

    assert_eq!(
        bsk.input_lwe_dimension(),
        msed_input.lwe_dimension(),
        "Mismatched input LweDimension. \
        LweBootstrapKey input LweDimension: {:?}, input LweCiphertext LweDimension {:?}.",
        bsk.input_lwe_dimension(),
        msed_input.lwe_dimension(),
    );
    assert_eq!(
        bsk.glwe_size(),
        lut.glwe_size(),
        "Mismatched GlweSize. \
        LweBootstrapKey GlweSize: {:?}, lut GlweSize {:?}.",
        bsk.glwe_size(),
        lut.glwe_size(),
    );
    assert_eq!(
        lut.polynomial_size(),
        bsk.polynomial_size(),
        "Mismatched PolynomialSize. \
        LweBootstrapKey PolynomialSize: {:?}, lut PolynomialSize {:?}.",
        bsk.polynomial_size(),
        lut.polynomial_size(),
    );

    let msed_lwe_mask = msed_input.mask();

    let msed_lwe_body = msed_input.body();

    let monomial_degree = MonomialDegree(msed_lwe_body.cast_into());

    let lut_poly_size = lut.polynomial_size();
    let ciphertext_modulus = lut.ciphertext_modulus();
    assert!(ciphertext_modulus.is_compatible_with_native_modulus());

    lut.as_mut_polynomial_list()
        .iter_mut()
        .for_each(|mut poly| {
            let (tmp_poly, _) = stack.make_aligned_raw(poly.as_ref().len(), CACHELINE_ALIGN);

            let mut tmp_poly = Polynomial::from_container(&mut *tmp_poly);
            tmp_poly.as_mut().copy_from_slice(poly.as_ref());
            polynomial_wrapping_monic_monomial_div(&mut poly, &tmp_poly, monomial_degree);
        });

    // We initialize the ct_0 used for the successive cmuxes
    let ct0 = lut;
    let (ct1, stack) = stack.make_aligned_raw(ct0.as_ref().len(), CACHELINE_ALIGN);
    let mut ct1 =
        GlweCiphertextMutView::from_container(&mut *ct1, lut_poly_size, ciphertext_modulus);

    for (lwe_mask_element, bootstrap_key_ggsw) in izip_eq!(msed_lwe_mask, bsk.iter()) {
        if lwe_mask_element != 0 {
            let monomial_degree = MonomialDegree(lwe_mask_element);

            // we effectively inline the body of cmux here, merging the initial subtraction
            // operation with the monic polynomial multiplication, then performing the
            // external product manually

            // We rotate ct_1 and subtract ct_0 (first step of cmux) by performing
            // ct_1 <- (ct_0 * X^a_i) - ct_0
            for (mut ct1_poly, ct0_poly) in izip_eq!(
                ct1.as_mut_polynomial_list().iter_mut(),
                ct0.as_polynomial_list().iter(),
            ) {
                polynomial_wrapping_monic_monomial_mul_and_subtract(
                    &mut ct1_poly,
                    &ct0_poly,
                    monomial_degree,
                );
            }

            // second step of cmux:
            // ct_0 <- ct_0 + ct_1 * s_i
            // with ct_0 + ct_1 * s_i = ct_0 + ((ct_0 * X^a_i) - ct_0) * s_i
            //                        = ct_0           if s_i = 0
            //                          ct_0 * X^a_i   otherwise
            //                        = ct_0 * X^(a_i * s_i)
            //
            // as_mut_view is required to keep borrow rules consistent
            karatsuba_add_external_product_assign(
                ct0.as_mut_view(),
                bootstrap_key_ggsw,
                ct1.as_view(),
                stack,
            );
        }
    }

    if !ciphertext_modulus.is_native_modulus() {
        let signed_decomposer = SignedDecomposer::new(
            DecompositionBaseLog(ciphertext_modulus.get_custom_modulus().ilog2() as usize),
            DecompositionLevelCount(1),
        );
        ct0.as_mut()
            .iter_mut()
            .for_each(|x| *x = signed_decomposer.closest_representable(*x));
    }
}

/// Perform the external product of `ggsw` and `glwe`, and add the result to `out`.
#[cfg_attr(feature = "__profiling", inline(never))]
pub fn karatsuba_add_external_product_assign<Scalar>(
    mut out: GlweCiphertextMutView<'_, Scalar>,
    ggsw: GgswCiphertextView<Scalar>,
    glwe: GlweCiphertextView<Scalar>,
    stack: &mut PodStack,
) where
    Scalar: UnsignedTorus,
{
    // we check that the polynomial sizes match
    debug_assert_eq!(ggsw.polynomial_size(), glwe.polynomial_size());
    debug_assert_eq!(ggsw.polynomial_size(), out.polynomial_size());
    // we check that the glwe sizes match
    debug_assert_eq!(ggsw.glwe_size(), glwe.glwe_size());
    debug_assert_eq!(ggsw.glwe_size(), out.glwe_size());

    let align = CACHELINE_ALIGN;
    let poly_size = ggsw.polynomial_size().0;

    // we round the input mask and body
    let decomposer = SignedDecomposer::<Scalar>::new(
        ggsw.decomposition_base_log(),
        ggsw.decomposition_level_count(),
    );

    let (output_buffer, substack0) =
        stack.make_aligned_raw::<Scalar>(poly_size * ggsw.glwe_size().0, align);
    // output_buffer is initially uninitialized, considered to be implicitly zero, to avoid
    // the cost of filling it up with zeros. `is_output_uninit` is set to `false` once
    // it has been fully initialized for the first time.
    let output_buffer = &mut *output_buffer;
    let mut is_output_uninit = true;

    let (mut decomposition, substack1) = TensorSignedDecompositionLendingIter::new(
        glwe.as_ref()
            .iter()
            .map(|s| decomposer.init_decomposer_state(*s)),
        DecompositionBaseLog(decomposer.base_log),
        DecompositionLevelCount(decomposer.level_count),
        substack0,
    );

    // We loop through the levels
    for ggsw_decomp_matrix in ggsw.iter() {
        // We retrieve the decomposition of this level.
        let (_glwe_level, glwe_decomp_term, _substack2) =
            collect_next_term(&mut decomposition, substack1, align);
        let glwe_decomp_term = GlweCiphertextView::from_container(
            &*glwe_decomp_term,
            ggsw.polynomial_size(),
            out.ciphertext_modulus(),
        );

        // For each level we have to add the result of the vector-matrix product between the
        // decomposition of the glwe, and the ggsw level matrix to the output. To do so, we
        // iteratively add to the output, the product between every line of the matrix, and
        // the corresponding (scalar) polynomial in the glwe decomposition:
        //
        //                ggsw_mat                         ggsw_mat
        //   glwe_dec   | - - - - |  <       glwe_dec   | - - - - |
        //  | - - - | x | - - - - |         | - - - | x | - - - - |  <
        //      ^       | - - - - |             ^       | - - - - |
        //
        //        t = 1                           t = 2               ...

        for (ggsw_row, glwe_poly) in izip_eq!(
            ggsw_decomp_matrix.as_glwe_list().iter(),
            glwe_decomp_term.as_polynomial_list().iter()
        ) {
            let row_as_poly_list = ggsw_row.as_polynomial_list();
            if is_output_uninit {
                for (mut output_poly, row_poly) in output_buffer
                    .chunks_exact_mut(poly_size)
                    .map(Polynomial::from_container)
                    .zip(row_as_poly_list.iter())
                {
                    polynomial_wrapping_mul(&mut output_poly, &row_poly, &glwe_poly);
                }
            } else {
                for (mut output_poly, row_poly) in output_buffer
                    .chunks_exact_mut(poly_size)
                    .map(Polynomial::from_container)
                    .zip(row_as_poly_list.iter())
                {
                    polynomial_wrapping_add_mul_assign(&mut output_poly, &row_poly, &glwe_poly);
                }
            }

            is_output_uninit = false;
        }
    }

    // We iterate over the polynomials in the output.
    if !is_output_uninit {
        izip_eq!(
            out.as_mut_polynomial_list().iter_mut(),
            output_buffer
                .into_chunks(poly_size)
                .map(Polynomial::from_container),
        )
        .for_each(|(mut out, res)| polynomial_wrapping_add_assign(&mut out, &res));
    }
}
@@ -1,10 +1,12 @@
pub mod fft128_pbs;
pub mod fft64_pbs;
pub mod karatsuba_pbs;
pub mod ntt64_bnf_pbs;
pub mod ntt64_pbs;

pub use fft128_pbs::*;
pub use fft64_pbs::*;
pub use karatsuba_pbs::*;
pub use ntt64_bnf_pbs::*;
pub use ntt64_pbs::*;
@@ -1161,3 +1161,91 @@ fn lwe_encrypt_pbs_ntt64_bnf_decrypt(params: ClassicTestParams<u64>) {
create_parameterized_test!(lwe_encrypt_pbs_ntt64_bnf_decrypt {
    TEST_PARAMS_3_BITS_SOLINAS_U64
});

fn lwe_encrypt_pbs_karatsuba_decrypt_custom_mod(params: ClassicTestParams<u64>) {
    let lwe_noise_distribution = params.lwe_noise_distribution;
    let ciphertext_modulus = params.ciphertext_modulus;
    let message_modulus_log = params.message_modulus_log;
    let msg_modulus = 1 << (message_modulus_log.0);
    let encoding_with_padding = get_encoding_with_padding(ciphertext_modulus);
    let glwe_dimension = params.glwe_dimension;
    let polynomial_size = params.polynomial_size;

    let mut rsc = TestResources::new();

    let f = |x: u64| x;

    let delta: u64 = encoding_with_padding / msg_modulus;
    let mut msg = msg_modulus;

    let accumulator = generate_programmable_bootstrap_glwe_lut(
        polynomial_size,
        glwe_dimension.to_glwe_size(),
        msg_modulus.cast_into(),
        ciphertext_modulus,
        delta,
        f,
    );

    assert!(check_encrypted_content_respects_mod(
        &accumulator,
        ciphertext_modulus
    ));

    while msg != 0 {
        msg = msg.wrapping_sub(1);

        let mut keys_gen = |params| generate_keys(params, &mut rsc);
        let keys = gen_keys_or_get_from_cache_if_enabled(params, &mut keys_gen);
        let (input_lwe_secret_key, output_lwe_secret_key, bsk) =
            (keys.small_lwe_sk, keys.big_lwe_sk, keys.bsk);

        for _ in 0..NB_TESTS {
            let plaintext = Plaintext(msg * delta);

            let lwe_ciphertext_in = allocate_and_encrypt_new_lwe_ciphertext(
                &input_lwe_secret_key,
                plaintext,
                lwe_noise_distribution,
                ciphertext_modulus,
                &mut rsc.encryption_random_generator,
            );

            assert!(check_encrypted_content_respects_mod(
                &lwe_ciphertext_in,
                ciphertext_modulus
            ));

            let mut out_pbs_ct = LweCiphertext::new(
                0,
                output_lwe_secret_key.lwe_dimension().to_lwe_size(),
                ciphertext_modulus,
            );

            programmable_bootstrap_karatsuba_lwe_ciphertext(
                &lwe_ciphertext_in,
                &mut out_pbs_ct,
                &accumulator,
                &bsk,
            );

            assert!(check_encrypted_content_respects_mod(
                &out_pbs_ct,
                ciphertext_modulus
            ));

            let decrypted = decrypt_lwe_ciphertext(&output_lwe_secret_key, &out_pbs_ct);

            let decoded = round_decode(decrypted.0, delta) % msg_modulus;

            assert_eq!(decoded, f(msg));
        }

        // In coverage, we break after one while-loop iteration; changing message values does not
        // yield higher coverage
        #[cfg(tarpaulin)]
        break;
    }
}

create_parameterized_test!(lwe_encrypt_pbs_karatsuba_decrypt_custom_mod);
@@ -386,6 +386,12 @@ fn test_if_then_else() {
    super::test_case_if_then_else(&client_key);
}

#[test]
fn test_if_then_zero() {
    let client_key = setup_default_cpu();
    super::test_case_if_then_zero(&client_key);
}

#[test]
fn test_flip() {
    let client_key = setup_default_cpu();
@@ -89,6 +89,12 @@ fn test_case_if_then_else_hpu() {
    super::test_case_if_then_else(&client_key);
}

#[test]
fn test_case_if_then_zero_hpu() {
    let client_key = setup_default_hpu();
    super::test_case_if_then_zero(&client_key);
}

#[test]
fn test_case_flip_hpu() {
    let client_key = setup_default_hpu();
@@ -568,6 +568,28 @@ fn test_case_if_then_else(client_key: &ClientKey) {
    );
}

fn test_case_if_then_zero(client_key: &ClientKey) {
    let clear_a = 42u8;
    let clear_b = 128u8;

    let a = FheUint8::encrypt(clear_a, client_key);
    let b = FheUint8::encrypt(clear_b, client_key);

    let result = a.le(&b).if_then_zero(&a);
    let decrypted_result: u8 = result.decrypt(client_key);
    assert_eq!(
        decrypted_result,
        if clear_a <= clear_b { clear_a } else { 0 }
    );

    let result = a.ge(&b).if_then_zero(&a);
    let decrypted_result: u8 = result.decrypt(client_key);
    assert_eq!(
        decrypted_result,
        if clear_a >= clear_b { clear_a } else { 0 }
    );
}

fn test_case_flip(client_key: &ClientKey) {
    let clear_a = rand::random::<u32>();
    let clear_b = rand::random::<u32>();
@@ -9,9 +9,9 @@
pub use crate::high_level_api::traits::{
    BitSlice, CiphertextList, DivRem, FheDecrypt, FheEncrypt, FheEq, FheKeyswitch, FheMax, FheMin,
    FheOrd, FheTrivialEncrypt, FheTryEncrypt, FheTryTrivialEncrypt, FheWait, Flip, IfThenElse,
    OverflowingAdd, OverflowingMul, OverflowingNeg, OverflowingSub, ReRandomize, RotateLeft,
    RotateLeftAssign, RotateRight, RotateRightAssign, ScalarIfThenElse, SquashNoise, Tagged,
    IfThenZero,
    IfThenZero, OverflowingAdd, OverflowingMul, OverflowingNeg, OverflowingSub, ReRandomize,
    RotateLeft, RotateLeftAssign, RotateRight, RotateRightAssign, ScalarIfThenElse, SquashNoise,
    Tagged,
};
#[cfg(feature = "hpu")]
pub use crate::high_level_api::traits::{FheHpu, HpuHandle};
Binary file not shown. (image size: 252 KiB before, 3.2 KiB after)
@@ -12,14 +12,22 @@ function setButtonsDisabledState(buttonIds, state) {

async function setup() {
  let supportsThreads = await threads();
  if (!supportsThreads) {
    console.error("This browser does not support threads");
    return;
  // `crossOriginIsolated` is true when the page is served with the `serve.multithreaded.json` config
  if (crossOriginIsolated) {
    if (supportsThreads) {
      console.info("Running in multithreaded mode");
    } else {
      console.error("This browser does not support threads");
      return;
    }
  } else {
    console.warn("Running in unsafe coop mode");
  }

  const worker = new Worker(new URL("worker.js", import.meta.url), {
    type: "module",
  });

  const demos = await Comlink.wrap(worker).demos;

  const demoNames = [
@@ -5,7 +5,9 @@
  "main": "index.js",
  "scripts": {
    "build": "cp -r ../../tfhe/pkg ./ && webpack build ./index.js --mode production -o dist --output-filename index.js && cp index.html dist/ && cp favicon.ico dist/",
    "server": "serve --config ../serve.json dist/",
    "server": "npm run server:multithreaded",
    "server:multithreaded": "serve --config ../serve.multithreaded.json dist/",
    "server:unsafe-coop": "serve --config ../serve.unsafe-coop.json dist/",
    "format": "prettier . --write",
    "check-format": "prettier . --check"
  },
11
tfhe/web_wasm_parallel_tests/serve.unsafe-coop.json
Normal file
@@ -0,0 +1,11 @@
{
  "headers": [
    {
      "source": "**/*.@(js|html)",
      "headers": [
        { "key": "Cross-Origin-Embedder-Policy", "value": "unsafe-none" },
        { "key": "Cross-Origin-Opener-Policy", "value": "unsafe-none" }
      ]
    }
  ]
}
@@ -1,4 +1,5 @@
import * as Comlink from "comlink";
import { threads } from "wasm-feature-detect";
import init, {
  initThreadPool,
  init_panic_hook,
@@ -753,7 +754,10 @@ async function compactPublicKeyZeroKnowledgeBench() {

async function main() {
  await init();
  await initThreadPool(navigator.hardwareConcurrency);
  let supportsThreads = await threads();
  if (supportsThreads) {
    await initThreadPool(navigator.hardwareConcurrency);
  }
  await init_panic_hook();

  return Comlink.proxy({
@@ -8,7 +8,7 @@ license = "BSD-3-Clause-Clear"
serde = { version = "1.0", features = ["derive"] }
ron = { version = "0.8", features = ["integer128"] }
ciborium = "0.2"
bincode = "1.3"
bincode = { workspace = true }
strum = { version = "0.26", features = ["derive"] }
semver = { version = "1.0" }
clap = { version = "4.5", features = ["derive"] }
@@ -17,7 +17,7 @@ static_assertions = "1.1"
trybuild = { version = "1", features = ["diff"] }

# used to test various serialization formats
bincode = "1.3"
bincode = { workspace = true }
serde_json = "1.0"
ciborium = "0.2"
rmp-serde = "1.3"