Compare commits


1 Commit

Author          SHA1        Message                          Date
Enzo Di Maria   b86a282931  chore(gpu): some tests on erc20  2025-12-16 16:49:01 +01:00
162 changed files with 1011 additions and 4573 deletions

View File

@@ -2,8 +2,6 @@
ignore = [
# Ignoring unmaintained 'paste' advisory as it is a widely used, low-risk build dependency.
"RUSTSEC-2024-0436",
# Ignoring unmaintained 'bincode' crate. Getting rid of it would be too complex on the short term.
"RUSTSEC-2025-0141",
]
[output]

View File

@@ -23,8 +23,6 @@ runs:
echo "${CMAKE_SCRIPT_SHA} cmake-${CMAKE_VERSION}-linux-x86_64.sh" > checksum
sha256sum -c checksum
sudo bash cmake-"${CMAKE_VERSION}"-linux-x86_64.sh --skip-license --prefix=/usr/ --exclude-subdir
sudo apt-get clean
sudo rm -rf /var/lib/apt/lists/*
sudo apt update
sudo apt remove -y unattended-upgrades
sudo apt install -y cmake-format libclang-dev

View File

@@ -66,7 +66,7 @@ jobs:
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'true' # Needed to pull lfs data
token: ${{ env.CHECKOUT_TOKEN }}
@@ -80,7 +80,7 @@ jobs:
- name: Retrieve data from cache
id: retrieve-data-cache
uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
utils/tfhe-backward-compat-data/**/*.cbor
@@ -109,7 +109,7 @@ jobs:
- name: Store data in cache
if: steps.retrieve-data-cache.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
utils/tfhe-backward-compat-data/**/*.cbor

View File

@@ -63,7 +63,7 @@ jobs:
any_file_changed: ${{ env.IS_PULL_REQUEST == 'false' || steps.aggregated-changes.outputs.any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -71,7 +71,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
dependencies:
@@ -171,7 +171,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
@@ -219,7 +219,7 @@ jobs:
- name: Node cache restoration
id: node-cache
uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
~/.nvm
@@ -232,7 +232,7 @@ jobs:
make install_node
- name: Node cache save
uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |

View File

@@ -50,7 +50,7 @@ jobs:
steps.changed-files.outputs.integer_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -58,7 +58,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
integer:
@@ -112,7 +112,7 @@ jobs:
timeout-minutes: 480 # 8 hours
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: "false"
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -60,7 +60,7 @@ jobs:
timeout-minutes: 1440
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -51,7 +51,7 @@ jobs:
steps.changed-files.outputs.integer_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -59,7 +59,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
integer:
@@ -112,7 +112,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: "false"
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -72,7 +72,7 @@ jobs:
any_file_changed: ${{ env.IS_PULL_REQUEST == 'false' || steps.aggregated-changes.outputs.any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -80,7 +80,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
dependencies:
@@ -182,7 +182,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -64,7 +64,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
@@ -80,7 +80,7 @@ jobs:
- name: Node cache restoration
id: node-cache
uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
~/.nvm
@@ -93,7 +93,7 @@ jobs:
make install_node
- name: Node cache save
uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |

View File

@@ -149,7 +149,7 @@ jobs:
params_type: ${{ fromJSON(needs.prepare-matrix.outputs.params_type) }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -223,13 +223,13 @@ jobs:
results_type: ${{ inputs.additional_results_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ matrix.bench_type }}_${{ matrix.params_type }}
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab

View File

@@ -49,7 +49,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -99,13 +99,13 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_ct_key_sizes
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab

View File

@@ -8,13 +8,8 @@ on:
description: "Run CPU benchmarks"
type: boolean
default: true
# GPU benchmarks are split because of resource scarcity.
run-gpu-integer-benchmarks:
description: "Run GPU integer benchmarks"
type: boolean
default: true
run-gpu-core-crypto-benchmarks:
description: "Run GPU core-crypto benchmarks"
run-gpu-benchmarks:
description: "Run GPU benchmarks"
type: boolean
default: true
run-hpu-benchmarks:
@@ -57,7 +52,7 @@ jobs:
run-benchmarks-gpu-integer:
name: benchmark_documentation/run-benchmarks-gpu-integer
uses: ./.github/workflows/benchmark_gpu_common.yml
if: inputs.run-gpu-integer-benchmarks
if: inputs.run-gpu-benchmarks
with:
profile: multi-h100-sxm5
hardware_name: n3-H100-SXM5x8
@@ -118,7 +113,7 @@ jobs:
run-benchmarks-gpu-core-crypto:
name: benchmark_documentation/run-benchmarks-gpu-core-crypto
uses: ./.github/workflows/benchmark_gpu_common.yml
if: inputs.run-gpu-core-crypto-benchmarks
if: inputs.run-gpu-benchmarks
with:
profile: multi-h100-sxm5
hardware_name: n3-H100-SXM5x8
@@ -138,7 +133,7 @@ jobs:
generate-svgs-with-benchmarks-run:
name: benchmark-documentation/generate-svgs-with-benchmarks-run
if: ${{ always() &&
(inputs.run-cpu-benchmarks || inputs.run-gpu-integer-benchmarks || inputs.run-gpu-core-crypto-benchmarks ||inputs.run-hpu-benchmarks) &&
(inputs.run-cpu-benchmarks || inputs.run-gpu-benchmarks ||inputs.run-hpu-benchmarks) &&
inputs.generate-svgs }}
needs: [
run-benchmarks-cpu-integer, run-benchmarks-gpu-integer, run-benchmarks-hpu-integer,
@@ -148,7 +143,7 @@ jobs:
with:
time_span_days: 5
generate-cpu-svgs: ${{ inputs.run-cpu-benchmarks }}
generate-gpu-svgs: ${{ inputs.run-gpu-integer-benchmarks || inputs.run-gpu-core-crypto-benchmarks }}
generate-gpu-svgs: ${{ inputs.run-gpu-benchmarks }}
generate-hpu-svgs: ${{ inputs.run-hpu-benchmarks }}
secrets:
DATA_EXTRACTOR_DATABASE_USER: ${{ secrets.DATA_EXTRACTOR_DATABASE_USER }}
@@ -157,7 +152,7 @@ jobs:
generate-svgs-without-benchmarks-run:
name: benchmark-documentation/generate-svgs-without-benchmarks-run
if: ${{ !(inputs.run-cpu-benchmarks || inputs.run-gpu-integer-benchmarks || inputs.run-gpu-core-crypto-benchmarks || inputs.run-hpu-benchmarks) &&
if: ${{ !(inputs.run-cpu-benchmarks || inputs.run-gpu-benchmarks || inputs.run-hpu-benchmarks) &&
inputs.generate-svgs }}
uses: ./.github/workflows/generate_svgs.yml
with:
@@ -180,12 +175,12 @@ jobs:
PATH_TO_DOC_ASSETS: tfhe/docs/.gitbook/assets
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
- name: Download SVG tables
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
path: svg_tables
merge-multiple: 'true'
@@ -203,7 +198,7 @@ jobs:
echo "date=$(date '+%g_%m_%d_%Hh%Mm%Ss')" >> "${GITHUB_OUTPUT}"
- name: Create pull-request
uses: peter-evans/create-pull-request@98357b18bf14b5342f975ff684046ec3b2a07725 # v8.0.0
uses: peter-evans/create-pull-request@84ae59a2cdc2258d6fa0732dd66352dddae2a412 # v7.0.9
with:
sign-commits: true # Commit will be signed by github-actions bot
add-paths: ${{ env.PATH_TO_DOC_ASSETS }}/*.svg

View File

@@ -40,7 +40,7 @@ jobs:
timeout-minutes: 1440 # 24 hours
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -63,7 +63,7 @@ jobs:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab
@@ -89,7 +89,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_integer_multi_bit_gpu_default
path: ${{ env.RESULTS_FILENAME }}
@@ -123,7 +123,7 @@ jobs:
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -146,7 +146,7 @@ jobs:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab
@@ -173,7 +173,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_core_crypto
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -175,7 +175,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -209,7 +209,7 @@ jobs:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -281,13 +281,13 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ inputs.profile }}_${{ matrix.bench_type }}_${{ matrix.params_type }}
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab

View File

@@ -130,7 +130,7 @@ jobs:
git lfs install
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
path: tfhe-rs
persist-credentials: false
@@ -141,7 +141,7 @@ jobs:
ls
- name: Checkout fhevm
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
repository: zama-ai/fhevm
persist-credentials: 'false'
@@ -192,10 +192,10 @@ jobs:
cargo install sqlx-cli
- name: Install foundry
uses: foundry-rs/foundry-toolchain@8b0419c685ef46cb79ec93fbdc131174afceb730
uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e
- name: Cache cargo
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: |
~/.cargo/registry
@@ -223,7 +223,7 @@ jobs:
working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker
- name: Use Node.js
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with:
node-version: 20.x
@@ -262,7 +262,7 @@ jobs:
- name: Upload profile artifact
env:
REPORT_NAME: ${{ steps.nsys_profile_name.outputs.profile }}
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ env.REPORT_NAME }}
path: fhevm/coprocessor/fhevm-engine/tfhe-worker/${{ env.REPORT_NAME }}
@@ -293,13 +293,13 @@ jobs:
working-directory: fhevm/
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${COMMIT_SHA}_${BENCHMARKS}_${{ needs.parse-inputs.outputs.profile }}
path: fhevm/$${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab

View File

@@ -126,7 +126,7 @@ jobs:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -185,13 +185,13 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.bench_type }}_${{ matrix.command }}_benchmarks
name: ${{ github.sha }}_${{ matrix.bench_type }}_integer_benchmarks
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab

View File

@@ -50,7 +50,7 @@ jobs:
pull-requests: write # Needed to write a comment in a pull-request
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
@@ -164,7 +164,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
@@ -191,7 +191,7 @@ jobs:
command: ${{ fromJson(needs.prepare-benchmarks.outputs.commands) }}
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0 # Needed to get commit hash
persist-credentials: 'false'
@@ -245,7 +245,7 @@ jobs:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab
@@ -280,7 +280,7 @@ jobs:
BENCH_TYPE: ${{ env.__TFHE_RS_BENCH_TYPE }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_regression_${{ env.RESULTS_FILE_SHA }} # RESULT_FILE_SHA is needed to avoid collision between matrix.command runs
path: ${{ env.RESULTS_FILENAME }}
@@ -305,7 +305,7 @@ jobs:
REF_NAME: ${{ github.head_ref || github.ref_name }}
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

View File

@@ -55,7 +55,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -96,13 +96,13 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_fft
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab

View File

@@ -55,7 +55,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -96,13 +96,13 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_ntt
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab

View File

@@ -39,7 +39,7 @@ jobs:
wasm_bench: ${{ steps.changed-files.outputs.wasm_bench_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -47,7 +47,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
wasm_bench:
@@ -91,7 +91,7 @@ jobs:
browser: [ chrome, firefox ]
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -119,7 +119,7 @@ jobs:
- name: Node cache restoration
id: node-cache
uses: actions/cache/restore@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
~/.nvm
@@ -132,7 +132,7 @@ jobs:
make install_node
- name: Node cache save
uses: actions/cache/save@9255dc7a253b0ccc959486e2bca901246202afeb #v5.0.1
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |
@@ -153,12 +153,6 @@ jobs:
env:
BROWSER: ${{ matrix.browser }}
- name: Run benchmarks (unsafe coop)
run: |
make bench_web_js_api_unsafe_coop_"${BROWSER}"_ci
env:
BROWSER: ${{ matrix.browser }}
- name: Parse results
run: |
make parse_wasm_benchmarks
@@ -175,13 +169,13 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_wasm_${{ matrix.browser }}
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: zama-ai/slab
path: slab

View File

@@ -26,7 +26,7 @@ jobs:
name: cargo_audit/audit
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -24,7 +24,7 @@ jobs:
outputs:
matrix_command: ${{ steps.set-pcc-commands-matrix.outputs.commands }}
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: "false"
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -140,7 +140,7 @@ jobs:
result: ${{ steps.set_builds_result.outputs.result }}
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -26,7 +26,7 @@ jobs:
fail-fast: false
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -24,7 +24,7 @@ jobs:
os: [ubuntu-latest, macos-latest, windows-latest]
fail-fast: false
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -29,7 +29,7 @@ jobs:
fft_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.fft_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -37,7 +37,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
fft:
@@ -56,7 +56,7 @@ jobs:
runner_type: [ ubuntu-latest, macos-latest, windows-latest ]
fail-fast: false
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
@@ -92,7 +92,7 @@ jobs:
if: needs.should-run.outputs.fft_test == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -31,7 +31,7 @@ jobs:
ntt_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.ntt_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: "false"
@@ -39,7 +39,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
ntt:
@@ -87,7 +87,7 @@ jobs:
os: ${{fromJson(needs.setup-instance.outputs.matrix_os)}}
fail-fast: false
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: "false"
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -20,7 +20,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
@@ -50,7 +50,7 @@ jobs:
version: ${{ steps.get_zizmor.outputs.version }}
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@6124774845927d14c601359ab8138699fa5b70c3 # v4.0.1
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@9e9574ef04ea69da568d6249bd69539ccc704e74 # v4.0.0
with:
allowlist: |
slsa-framework/slsa-github-generator

View File

@@ -50,7 +50,7 @@ jobs:
timeout-minutes: 5760 # 4 days
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
@@ -62,7 +62,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
tfhe:
@@ -92,7 +92,7 @@ jobs:
make test_shortint_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
@@ -106,7 +106,7 @@ jobs:
make test_integer_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -62,7 +62,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -43,7 +43,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
@@ -75,22 +75,13 @@ jobs:
DATA_EXTRACTOR_DATABASE_HOST: ${{ secrets.DATA_EXTRACTOR_DATABASE_HOST }}
DATA_EXTRACTOR_DATABASE_PASSWORD: ${{ secrets.DATA_EXTRACTOR_DATABASE_PASSWORD }}
- name: Upload tables
if: inputs.backend_comparison == false
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
with:
name: ${{ github.sha }}_${{ inputs.backend }}_${{ inputs.layer }}_${{ inputs.pbs_kind }}_${{ inputs.bench_type }}_tables
# This will upload all the file generated
path: ${{ inputs.output_filename }}*.svg
retention-days: 60
- name: Produce backends comparison table from database
if: inputs.backend_comparison == true
run: |
python3 -m pip install -r ci/data_extractor/requirements.txt
python3 ci/data_extractor/src/data_extractor.py "${OUTPUT_FILENAME}" \
--generate-svg \
--backends-comparison \
--backend-comparison\
--time-span-days "${TIME_SPAN}"
env:
OUTPUT_FILENAME: ${{ inputs.output_filename }}
@@ -99,11 +90,10 @@ jobs:
DATA_EXTRACTOR_DATABASE_HOST: ${{ secrets.DATA_EXTRACTOR_DATABASE_HOST }}
DATA_EXTRACTOR_DATABASE_PASSWORD: ${{ secrets.DATA_EXTRACTOR_DATABASE_PASSWORD }}
- name: Upload comparison tables
if: inputs.backend_comparison == true
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f
- name: Upload tables
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_backends_comparison_tables
name: ${{ github.sha }}_${{ inputs.backend }}_${{ inputs.layer }}_${{ inputs.pbs_kind }}_${{ inputs.bench_type }}_tables
# This will upload all the file generated
path: ${{ inputs.output_filename }}*.svg
retention-days: 60

View File

@@ -41,7 +41,7 @@ jobs:
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -23,8 +23,8 @@ on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
schedule:
# every month
- cron: "0 0 1 * *"
# every 3 months
- cron: "0 0 1 */3 *"
permissions:
contents: read
@@ -50,7 +50,7 @@ jobs:
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: single-h100
profile: gpu-test
# This instance will be spawned especially for pull-request from forked repository
- name: Start GitHub instance
@@ -79,7 +79,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -40,7 +40,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -48,7 +48,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -129,7 +129,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -39,7 +39,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -47,7 +47,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -114,7 +114,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -68,7 +68,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

View File

@@ -40,7 +40,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -48,7 +48,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -116,7 +116,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -65,7 +65,7 @@ jobs:
timeout-minutes: 4320 # 72 hours
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -78,7 +78,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -78,7 +78,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -74,7 +74,7 @@ jobs:
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -40,7 +40,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -48,7 +48,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -116,7 +116,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -40,7 +40,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -48,7 +48,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -129,7 +129,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -41,7 +41,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -49,7 +49,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -117,7 +117,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -40,7 +40,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -48,7 +48,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -116,7 +116,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -40,7 +40,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -48,7 +48,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -129,7 +129,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -41,7 +41,7 @@ jobs:
gpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.gpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -49,7 +49,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -117,7 +117,7 @@ jobs:
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -32,7 +32,7 @@ jobs:
hpu_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.hpu_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -40,7 +40,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@e0021407031f5be11a464abee9a0776171c79891 # v47.0.1
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
hpu:
@@ -83,7 +83,7 @@ jobs:
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -53,7 +53,7 @@ jobs:
timeout-minutes: 4320 # 72 hours
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

View File

@@ -41,7 +41,7 @@ jobs:
timeout-minutes: 720
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
- uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: "false"
token: ${{ env.CHECKOUT_TOKEN }}

View File

@@ -52,7 +52,7 @@ jobs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -62,7 +62,7 @@ jobs:
PACKAGE: ${{ inputs.package-name }}
run: |
cargo package -p "${PACKAGE}"
- uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: crate-${{ inputs.package-name }}
path: target/package/*.crate
@@ -93,14 +93,14 @@ jobs:
id-token: write # Needed for OIDC token exchange on crates.io
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: crate-${{ inputs.package-name }}
path: target/package

View File

@@ -64,7 +64,7 @@ jobs:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0
persist-credentials: "false"
@@ -104,7 +104,7 @@ jobs:
run: |
cargo package -p tfhe-cuda-backend
- uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: crate-tfhe-cuda-backend
path: target/package/*.crate
@@ -174,7 +174,7 @@ jobs:
GCC_VERSION: ${{ matrix.gcc }}
- name: Download artifact
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: crate-tfhe-cuda-backend
path: target/package

View File

@@ -68,7 +68,7 @@ jobs:
id-token: write # also needed for OIDC token exchange on crates.io and npmjs.com
steps:
- name: Checkout
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
@@ -85,7 +85,7 @@ jobs:
make build_web_js_api_parallel
- name: Authenticate on NPM
uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with:
node-version: '24'
registry-url: 'https://registry.npmjs.org'

View File

@@ -60,7 +60,7 @@ jobs:
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
@@ -71,7 +71,7 @@ jobs:
toolchain: stable
- name: Checkout lattice-estimator
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3
with:
repository: malb/lattice-estimator
path: lattice_estimator

View File

@@ -17,7 +17,7 @@ jobs:
issues: read # Needed to fetch all issues
pull-requests: write # Needed to write message and close the PR
steps:
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
with:
stale-pr-message: 'This PR is unverified and has been open for 2 days, it will now be closed. If you want to contribute please sign the CLA as indicated by the bot.'
days-before-stale: 2

.gitignore (vendored)
View File

@@ -10,7 +10,6 @@ target/
**/*.rmeta
**/Cargo.lock
**/*.bin
**/.DS_Store
# Some of our bench outputs
/tfhe/benchmarks_parameters

View File

@@ -11,7 +11,7 @@
/tfhe/src/core_crypto/gpu @agnesLeroy
/tfhe/src/core_crypto/hpu @zama-ai/hardware
/tfhe/src/shortint/ @mayeul-zama @nsarlin-zama
/tfhe/src/shortint/ @mayeul-zama
/tfhe/src/integer/ @tmontaigu
/tfhe/src/integer/gpu @agnesLeroy
@@ -19,12 +19,8 @@
/tfhe/src/high_level_api/ @tmontaigu
/tfhe-zk-pok/ @nsarlin-zama
/tfhe-benchmark/ @soonum
/utils/ @nsarlin-zama
/Makefile @IceTDrinker @soonum
/mockups/tfhe-hpu-mockup @zama-ai/hardware

View File

@@ -36,8 +36,6 @@ rayon = "1.11"
serde = { version = "1.0", default-features = false }
wasm-bindgen = "0.2.101"
getrandom = "0.2.8"
# The project maintainers consider that this is the last version of the 1.3 branch, any newer version should not be trusted
bincode = "=1.3.3"
[profile.bench]
lto = "fat"

View File

@@ -1300,14 +1300,13 @@ run_web_js_api_parallel: build_web_js_api_parallel setup_venv
--browser-path $(browser_path) \
--driver-path $(driver_path) \
--browser-kind $(browser_kind) \
--server-cmd $(server_cmd) \
--server-cmd "npm run server" \
--server-workdir "$(WEB_SERVER_DIR)" \
--id-pattern $(filter)
test_web_js_api_parallel_chrome: browser_path = "$(WEB_RUNNER_DIR)/chrome/chrome-linux64/chrome"
test_web_js_api_parallel_chrome: driver_path = "$(WEB_RUNNER_DIR)/chrome/chromedriver-linux64/chromedriver"
test_web_js_api_parallel_chrome: browser_kind = chrome
test_web_js_api_parallel_chrome: server_cmd = "npm run server:multithreaded"
test_web_js_api_parallel_chrome: filter = Test
.PHONY: test_web_js_api_parallel_chrome # Run tests for the web wasm api on Chrome
@@ -1323,7 +1322,6 @@ test_web_js_api_parallel_chrome_ci: setup_venv
test_web_js_api_parallel_firefox: browser_path = "$(WEB_RUNNER_DIR)/firefox/firefox/firefox"
test_web_js_api_parallel_firefox: driver_path = "$(WEB_RUNNER_DIR)/firefox/geckodriver"
test_web_js_api_parallel_firefox: browser_kind = firefox
test_web_js_api_parallel_firefox: server_cmd = "npm run server:multithreaded"
test_web_js_api_parallel_firefox: filter = Test
.PHONY: test_web_js_api_parallel_firefox # Run tests for the web wasm api on Firefox
@@ -1573,7 +1571,6 @@ bench_pbs128_gpu: install_rs_check_toolchain
bench_web_js_api_parallel_chrome: browser_path = "$(WEB_RUNNER_DIR)/chrome/chrome-linux64/chrome"
bench_web_js_api_parallel_chrome: driver_path = "$(WEB_RUNNER_DIR)/chrome/chromedriver-linux64/chromedriver"
bench_web_js_api_parallel_chrome: browser_kind = chrome
bench_web_js_api_parallel_chrome: server_cmd = "npm run server:multithreaded"
bench_web_js_api_parallel_chrome: filter = Bench
.PHONY: bench_web_js_api_parallel_chrome # Run benchmarks for the web wasm api
@@ -1589,7 +1586,6 @@ bench_web_js_api_parallel_chrome_ci: setup_venv
bench_web_js_api_parallel_firefox: browser_path = "$(WEB_RUNNER_DIR)/firefox/firefox/firefox"
bench_web_js_api_parallel_firefox: driver_path = "$(WEB_RUNNER_DIR)/firefox/geckodriver"
bench_web_js_api_parallel_firefox: browser_kind = firefox
bench_web_js_api_parallel_firefox: server_cmd = "npm run server:multithreaded"
bench_web_js_api_parallel_firefox: filter = Bench
.PHONY: bench_web_js_api_parallel_firefox # Run benchmarks for the web wasm api
@@ -1602,38 +1598,6 @@ bench_web_js_api_parallel_firefox_ci: setup_venv
nvm use $(NODE_VERSION) && \
$(MAKE) bench_web_js_api_parallel_firefox
bench_web_js_api_unsafe_coop_chrome: browser_path = "$(WEB_RUNNER_DIR)/chrome/chrome-linux64/chrome"
bench_web_js_api_unsafe_coop_chrome: driver_path = "$(WEB_RUNNER_DIR)/chrome/chromedriver-linux64/chromedriver"
bench_web_js_api_unsafe_coop_chrome: browser_kind = chrome
bench_web_js_api_unsafe_coop_chrome: server_cmd = "npm run server:unsafe-coop"
bench_web_js_api_unsafe_coop_chrome: filter = ZeroKnowledgeBench # Only bench zk with unsafe coop
.PHONY: bench_web_js_api_unsafe_coop_chrome # Run benchmarks for the web wasm api without cross-origin isolation
bench_web_js_api_unsafe_coop_chrome: run_web_js_api_parallel
.PHONY: bench_web_js_api_unsafe_coop_chrome_ci # Run benchmarks for the web wasm api without cross-origin isolation
bench_web_js_api_unsafe_coop_chrome_ci: setup_venv
source ~/.nvm/nvm.sh && \
nvm install $(NODE_VERSION) && \
nvm use $(NODE_VERSION) && \
$(MAKE) bench_web_js_api_unsafe_coop_chrome
bench_web_js_api_unsafe_coop_firefox: browser_path = "$(WEB_RUNNER_DIR)/firefox/firefox/firefox"
bench_web_js_api_unsafe_coop_firefox: driver_path = "$(WEB_RUNNER_DIR)/firefox/geckodriver"
bench_web_js_api_unsafe_coop_firefox: browser_kind = firefox
bench_web_js_api_unsafe_coop_firefox: server_cmd = "npm run server:unsafe-coop"
bench_web_js_api_unsafe_coop_firefox: filter = ZeroKnowledgeBench # Only bench zk with unsafe coop
.PHONY: bench_web_js_api_unsafe_coop_firefox # Run benchmarks for the web wasm api without cross-origin isolation
bench_web_js_api_unsafe_coop_firefox: run_web_js_api_parallel
.PHONY: bench_web_js_api_unsafe_coop_firefox_ci # Run benchmarks for the web wasm api without cross-origin isolation
bench_web_js_api_unsafe_coop_firefox_ci: setup_venv
source ~/.nvm/nvm.sh && \
nvm install $(NODE_VERSION) && \
nvm use $(NODE_VERSION) && \
$(MAKE) bench_web_js_api_unsafe_coop_firefox
.PHONY: bench_hlapi # Run benchmarks for integer operations
bench_hlapi: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_BIT_SIZES_SET=$(BIT_SIZES_SET) \

View File

@@ -1,32 +1,24 @@
08f31a47c29cc4d72ad32c0b5411fa20b3deef5b84558dd2fb892d3cdf90528a data/toy_params/glwe_after_id_br_karatsuba.cbor
29b6e3e7d27700004b70dca24d225816500490e2d6ee49b9af05837fd421896b data/valid_params_128/lwe_after_spec_pbs.cbor
2c70d1d78cc3760733850a353ace2b9c4705e840141b75841739e90e51247e18 data/valid_params_128/small_lwe_secret_key.cbor
2fb4bb45c259b8383da10fc8f9459c40a6972c49b1696eb107f0a75640724be5 data/toy_params/lwe_after_id_pbs_karatsuba.cbor
36c9080b636475fcacca503ce041bbfeee800fd3e1890dee559ea18defff9fe8 data/toy_params/glwe_after_id_br.cbor
377761beeb4216cf5aa2624a8b64b8259f5a75c32d28e850be8bced3a0cdd6f5 data/toy_params/ksk.cbor
59dba26d457f96478eda130cab5301fce86f23c6a8807de42f2a1e78c4985ca7 data/valid_params_128/lwe_ks.cbor
5d80dd93fefae4f4f89484dfcd65bbe99cc32e7e3b0a90c33dd0d77516c0a023 data/valid_params_128/glwe_after_id_br_karatsuba.cbor
656f0009c7834c5bcb61621e222047516054b9bc5d0593d474ab8f1c086b67a6 data/valid_params_128/lwe_after_id_pbs.cbor
699580ca92b9c2f9e1f57fb1e312c9e8cb29714f7acdef9d2ba05f798546751f data/toy_params/lwe_sum.cbor
6e54ab41056984595b077baff70236d934308cf5c0c33b4482fbfb129b3756c6 data/valid_params_128/glwe_after_id_br.cbor
70f5e5728822de05b49071efb5ec28551b0f5cc87aa709a455d8e7f04b9c96ee data/toy_params/lwe_after_id_pbs.cbor
76a5c52cab7fec1dc167da676c6cd39479cda6b2bb9f4e0573cb7d99c2692faa data/valid_params_128/lwe_after_id_pbs_karatsuba.cbor
7cc6803f5fbc3d5a1bf597f2b979ce17eecd3d6baca12183dea21022a7b65c52 data/toy_params/bsk.cbor
7f3c40a134623b44779a556212477fea26eaed22450f3b6faeb8721d63699972 data/valid_params_128/lwe_sum.cbor
837b3bd3245d4d0534ed255fdef896fb4fa6998a258a14543dfdadd0bfc9b6dd data/toy_params/lwe_prod.cbor
9ece8ca9c1436258b94e8c5e629b8722f9b18fdd415dd5209b6167a9dde8491c data/toy_params/glwe_after_spec_br_karatsuba.cbor
aa44aea29efd6d9e4d35a21a625d9cba155672e3f7ed3eddee1e211e62ad146b data/valid_params_128/lwe_ms.cbor
b7a037b9eaa88d6385167579b93e26a0cb6976d9b8967416fd1173e113bda199 data/valid_params_128/large_lwe_secret_key.cbor
b7b8e3586128887bd682120f3e3a43156139bce5e3fe0b03284f8753a864d647 data/toy_params/lwe_after_spec_pbs_karatsuba.cbor
bd00a8ae7494e400de5753029552ee1647efe7e17409b863a26a13b081099b8c data/toy_params/lwe_after_spec_pbs.cbor
c6df98676de04fe54b5ffc2eb30a82ebb706c9d7d5a4e0ed509700fec88761f7 data/toy_params/lwe_ms.cbor
c7d5a864d5616a7d8ad50bbf40416e41e6c9b60c546dc14d4aa8fc40a418baa7 data/toy_params/large_lwe_secret_key.cbor
c806533b325b1009db38be2f9bef5f3b2fad6b77b4c71f2855ccc9d3b4162e98 data/valid_params_128/lwe_b.cbor
c9eb75bd2993639348a679cf48c06e3c38d1a513f48e5b0ce0047cea8cff6bbc data/toy_params/lwe_a.cbor
d3391969acf26dc69de0927ba279139d8d79999944069addc8ff469ad6c5ae2d data/valid_params_128/lwe_after_spec_pbs_karatsuba.cbor
d6da5baef0e787f6be56e218d8354e26904652602db964844156fdff08350ce6 data/toy_params/lwe_ks.cbor
e591ab9af1b6a0aede273f9a3abb65a4c387feb5fa06a6959e9314058ca0f7e5 data/valid_params_128/ksk.cbor
e59b002df3a9b01ad321ec51cf076fa35131ab9dbef141d1c54b717d61426c92 data/valid_params_128/glwe_after_spec_br_karatsuba.cbor
e628354c81508a2d888016e8282df363dd12f1e19190b6475d4eb9d7ab8ae007 data/valid_params_128/glwe_after_spec_br.cbor
e69d2d2c064fc8c0460b39191ca65338146990349954f5ec5ebd01d93610e7eb data/valid_params_128/lwe_a.cbor
e76c24b2a0c9a842ad13dda35473c2514f9e7d20983b5ea0759c4521a91626d9 data/valid_params_128/lwe_prod.cbor

View File

@@ -39,9 +39,6 @@ The following values are generated:
| `glwe_after_spec_br` | The glwe returned by the application of the spec blind rotation on the mod switched ciphertexts. | `GlweCiphertext<Vec<u64>>` | rot spec LUT |
| `lwe_after_spec_pbs` | The lwe returned by the application of the sample extract operation on the output of the spec blind rotation | `LweCiphertext<Vec<u64>>` | `spec(A)` |
Ciphertexts with the `_karatsuba` suffix are generated using the Karatsuba polynomial multiplication algorithm in the blind rotation, while default ciphertexts are generated using an FFT multiplication.
This makes it easier to reproduce bit exact results.
### Encodings
#### Non native encoding
Warning: TFHE-rs uses a specific encoding for non native (ie: u32, u64) power of two ciphertext modulus. This encoding puts the encoded value in the high bits of the native integer.
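A minimal sketch of the high-bits encoding described in that warning, assuming a u64 native integer and a power-of-two ciphertext modulus 2^q (illustrative only, not code from this commit):

// Illustrative sketch: for a non-native power-of-two ciphertext modulus 2^q
// held in a native u64, the encoded value occupies the top q bits.
fn encode_high_bits(value: u64, log2_modulus: u32) -> u64 {
    assert!(log2_modulus > 0 && log2_modulus < u64::BITS);
    value << (u64::BITS - log2_modulus)
}

fn decode_high_bits(encoded: u64, log2_modulus: u32) -> u64 {
    encoded >> (u64::BITS - log2_modulus)
}

// Example: with q = 32, the value 3 is stored as 3 << 32 in the u64.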

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:08f31a47c29cc4d72ad32c0b5411fa20b3deef5b84558dd2fb892d3cdf90528a
size 4679

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9ece8ca9c1436258b94e8c5e629b8722f9b18fdd415dd5209b6167a9dde8491c
size 4679

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2fb4bb45c259b8383da10fc8f9459c40a6972c49b1696eb107f0a75640724be5
size 2365

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b7b8e3586128887bd682120f3e3a43156139bce5e3fe0b03284f8753a864d647
size 2365

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d80dd93fefae4f4f89484dfcd65bbe99cc32e7e3b0a90c33dd0d77516c0a023
size 36935

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e59b002df3a9b01ad321ec51cf076fa35131ab9dbef141d1c54b717d61426c92
size 36935

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:76a5c52cab7fec1dc167da676c6cd39479cda6b2bb9f4e0573cb7d99c2692faa
size 18493

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d3391969acf26dc69de0927ba279139d8d79999944069addc8ff469ad6c5ae2d
size 18493

View File

@@ -265,7 +265,6 @@ fn generate_test_vectors<P: AsRef<Path>>(
let mut id_lut = encoding.encode_lut(glwe_dimension, polynomial_size, ID_LUT);
assert_data_not_zero(&id_lut);
let mut id_lut_karatsuba = id_lut.clone();
blind_rotate_assign(&modswitched, &mut id_lut, &fourier_bsk);
assert_data_not_zero(&id_lut);
@@ -288,32 +287,8 @@ fn generate_test_vectors<P: AsRef<Path>>(
assert_data_not_zero(&lwe_pbs_id);
store_data(path, &lwe_pbs_id, "lwe_after_id_pbs");
blind_rotate_karatsuba_assign(&modswitched, &mut id_lut_karatsuba, &bsk);
store_data(path, &id_lut_karatsuba, "glwe_after_id_br_karatsuba");
let mut lwe_pbs_karatsuba_id = LweCiphertext::new(
0u64,
glwe_dimension
.to_equivalent_lwe_dimension(polynomial_size)
.to_lwe_size(),
encoding.ciphertext_modulus,
);
extract_lwe_sample_from_glwe_ciphertext(
&id_lut_karatsuba,
&mut lwe_pbs_karatsuba_id,
MonomialDegree(0),
);
let decrypted_pbs_id = decrypt_lwe_ciphertext(&large_lwe_secret_key, &lwe_pbs_karatsuba_id);
let res = encoding.decode(decrypted_pbs_id);
assert_eq!(res, MSG_A);
store_data(path, &lwe_pbs_karatsuba_id, "lwe_after_id_pbs_karatsuba");
let mut spec_lut = encoding.encode_lut(glwe_dimension, polynomial_size, SPEC_LUT);
assert_data_not_zero(&spec_lut);
let mut spec_lut_karatsuba = spec_lut.clone();
blind_rotate_assign(&modswitched, &mut spec_lut, &fourier_bsk);
assert_data_not_zero(&spec_lut);
@@ -335,33 +310,6 @@ fn generate_test_vectors<P: AsRef<Path>>(
assert_eq!(res, SPEC_LUT(MSG_A));
assert_data_not_zero(&lwe_pbs_spec);
store_data(path, &lwe_pbs_spec, "lwe_after_spec_pbs");
blind_rotate_karatsuba_assign(&modswitched, &mut spec_lut_karatsuba, &bsk);
store_data(path, &spec_lut_karatsuba, "glwe_after_spec_br_karatsuba");
let mut lwe_pbs_karatsuba_spec = LweCiphertext::new(
0u64,
glwe_dimension
.to_equivalent_lwe_dimension(polynomial_size)
.to_lwe_size(),
encoding.ciphertext_modulus,
);
extract_lwe_sample_from_glwe_ciphertext(
&spec_lut_karatsuba,
&mut lwe_pbs_karatsuba_spec,
MonomialDegree(0),
);
let decrypted_pbs_spec = decrypt_lwe_ciphertext(&large_lwe_secret_key, &lwe_pbs_karatsuba_spec);
let res = encoding.decode(decrypted_pbs_spec);
assert_eq!(res, SPEC_LUT(MSG_A));
store_data(
path,
&lwe_pbs_karatsuba_spec,
"lwe_after_spec_pbs_karatsuba",
);
}
fn rm_dir_except_readme<P: AsRef<Path>>(dir: P) {
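As a reading aid for the assertions removed above: both the FFT and Karatsuba paths are checked against the same functional identity (a sketch in the notation of the documentation above, where $c$ is an encryption of $m$, $f$ is the LUT function, and $\mathsf{SampleExtract}_0$ extracts the constant coefficient):

$$\mathrm{decode}\Big(\mathrm{decrypt}\big(\mathsf{SampleExtract}_0\big(\mathsf{BlindRotate}(\mathrm{ModSwitch}(c),\ \mathrm{LUT}_f)\big)\big)\Big) = f(m)$$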

View File

@@ -35,8 +35,7 @@ template <typename Torus> struct int_aes_lut_buffers {
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, and_lambda, allocate_gpu_memory);
auto active_streams_and_lut = streams.active_gpu_subset(
SBOX_MAX_AND_GATES * num_aes_inputs * sbox_parallelism,
params.pbs_type);
SBOX_MAX_AND_GATES * num_aes_inputs * sbox_parallelism);
this->and_lut->broadcast_lut(active_streams_and_lut);
this->and_lut->setup_gemm_batch_ks_temp_buffers(size_tracker);
@@ -51,8 +50,8 @@ template <typename Torus> struct int_aes_lut_buffers {
this->flush_lut->get_degree(0), this->flush_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, flush_lambda, allocate_gpu_memory);
auto active_streams_flush_lut = streams.active_gpu_subset(
AES_STATE_BITS * num_aes_inputs, params.pbs_type);
auto active_streams_flush_lut =
streams.active_gpu_subset(AES_STATE_BITS * num_aes_inputs);
this->flush_lut->broadcast_lut(active_streams_flush_lut);
this->flush_lut->setup_gemm_batch_ks_temp_buffers(size_tracker);
@@ -66,8 +65,7 @@ template <typename Torus> struct int_aes_lut_buffers {
this->carry_lut->get_degree(0), this->carry_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, carry_lambda, allocate_gpu_memory);
auto active_streams_carry_lut =
streams.active_gpu_subset(num_aes_inputs, params.pbs_type);
auto active_streams_carry_lut = streams.active_gpu_subset(num_aes_inputs);
this->carry_lut->broadcast_lut(active_streams_carry_lut);
this->carry_lut->setup_gemm_batch_ks_temp_buffers(size_tracker);
}

View File

@@ -8,8 +8,7 @@
extern std::mutex m;
extern bool p2p_enabled;
extern const int THRESHOLD_MULTI_GPU_WITH_MULTI_BIT_PARAMS;
extern const int THRESHOLD_MULTI_GPU_WITH_CLASSICAL_PARAMS;
extern const int THRESHOLD_MULTI_GPU;
extern "C" {
int32_t cuda_setup_multi_gpu(int device_0_id);
@@ -40,8 +39,7 @@ get_variant_element(const std::variant<std::vector<Torus>, Torus> &variant,
}
}
uint32_t get_active_gpu_count(uint32_t num_inputs, uint32_t gpu_count,
PBS_TYPE pbs_type);
uint32_t get_active_gpu_count(uint32_t num_inputs, uint32_t gpu_count);
int get_num_inputs_on_gpu(int total_num_inputs, int gpu_index, int gpu_count);
@@ -75,10 +73,9 @@ public:
// Returns a subset of this set as an active subset. An active subset is one
// that is temporarily used to perform some computation
CudaStreams active_gpu_subset(int num_radix_blocks, PBS_TYPE pbs_type) {
return CudaStreams(
_streams, _gpu_indexes,
get_active_gpu_count(num_radix_blocks, _gpu_count, pbs_type));
CudaStreams active_gpu_subset(int num_radix_blocks) {
return CudaStreams(_streams, _gpu_indexes,
get_active_gpu_count(num_radix_blocks, _gpu_count));
}
// Returns a CudaStreams struct containing only the ith stream
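Most of the remaining hunks in this diff repeat the same mechanical change: `active_gpu_subset` loses its `PBS_TYPE` argument because the two per-PBS-type thresholds collapse into the single `THRESHOLD_MULTI_GPU`. The toy model below shows the shape of the new one-argument query; the splitting policy and the threshold value are illustrative assumptions, not the backend's actual heuristic:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Toy stand-in for the new query: the active GPU count now depends only on
// the workload size and the available GPUs, with one threshold for all PBS
// types. The real rule lives in the CUDA backend.
static const uint32_t THRESHOLD_MULTI_GPU = 128; // hypothetical value

uint32_t get_active_gpu_count(uint32_t num_inputs, uint32_t gpu_count) {
  // Assumed policy: one GPU per THRESHOLD_MULTI_GPU inputs,
  // clamped to [1, gpu_count].
  uint32_t wanted = (num_inputs + THRESHOLD_MULTI_GPU - 1) / THRESHOLD_MULTI_GPU;
  return std::max(1u, std::min(wanted, gpu_count));
}

int main() {
  printf("%u\n", get_active_gpu_count(64, 8));   // light workload -> 1 GPU
  printf("%u\n", get_active_gpu_count(2048, 8)); // heavy workload -> 8 GPUs
  return 0;
}
```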

View File

@@ -20,8 +20,7 @@ template <typename Torus> struct boolean_bitop_buffer {
gpu_memory_allocated = allocate_gpu_memory;
this->op = op;
this->params = params;
auto active_streams =
streams.active_gpu_subset(lwe_ciphertext_count, params.pbs_type);
auto active_streams = streams.active_gpu_subset(lwe_ciphertext_count);
this->unchecked = is_unchecked;
switch (op) {
case BITAND:
@@ -120,8 +119,7 @@ template <typename Torus> struct int_bitop_buffer {
gpu_memory_allocated = allocate_gpu_memory;
this->op = op;
this->params = params;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
switch (op) {
case BITAND:
case BITOR:
@@ -218,8 +216,7 @@ template <typename Torus> struct boolean_bitnot_buffer {
message_extract_lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
lut_f_message_extract, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(lwe_ciphertext_count, params.pbs_type);
auto active_streams = streams.active_gpu_subset(lwe_ciphertext_count);
message_extract_lut->broadcast_lut(active_streams);
}
}

View File

@@ -39,8 +39,7 @@ template <typename Torus> struct int_extend_radix_with_sign_msb_buffer {
},
allocate_gpu_memory);
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
lut->broadcast_lut(active_streams);
this->last_block = new CudaRadixCiphertextFFI;

View File

@@ -14,8 +14,7 @@ template <typename Torus> struct int_zero_out_if_buffer {
uint64_t &size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
this->params = params;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
tmp = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
@@ -115,11 +114,9 @@ template <typename Torus> struct int_cmux_buffer {
predicate_lut->get_lut_indexes(0, 0), h_lut_indexes,
2 * num_radix_blocks * sizeof(Torus), streams.stream(0),
streams.gpu_index(0), allocate_gpu_memory);
auto active_streams_pred =
streams.active_gpu_subset(2 * num_radix_blocks, params.pbs_type);
auto active_streams_pred = streams.active_gpu_subset(2 * num_radix_blocks);
predicate_lut->broadcast_lut(active_streams_pred);
auto active_streams_msg =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams_msg = streams.active_gpu_subset(num_radix_blocks);
message_extract_lut->broadcast_lut(active_streams_msg);
}

View File

@@ -52,8 +52,7 @@ template <typename Torus> struct int_are_all_block_true_buffer {
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, is_max_value_f, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(max_chunks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(max_chunks);
is_max_value->broadcast_lut(active_streams);
}
@@ -109,8 +108,7 @@ template <typename Torus> struct int_comparison_eq_buffer {
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, is_non_zero_lut_f, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
is_non_zero_lut->broadcast_lut(active_streams);
// Scalar may have up to num_radix_blocks blocks
@@ -240,8 +238,7 @@ template <typename Torus> struct int_tree_sign_reduction_buffer {
tree_inner_leaf_lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
block_selector_f, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
tree_inner_leaf_lut->broadcast_lut(active_streams);
}
@@ -393,8 +390,7 @@ template <typename Torus> struct int_comparison_buffer {
this->op = op;
this->is_signed = is_signed;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
identity_lut_f = [](Torus x) -> Torus { return x; };
@@ -527,7 +523,7 @@ template <typename Torus> struct int_comparison_buffer {
signed_lut->get_degree(0), signed_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, signed_lut_f, gpu_memory_allocated);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
signed_lut->broadcast_lut(active_streams);
}
preallocated_h_lut = (Torus *)malloc(

View File

@@ -65,16 +65,6 @@ void cleanup_cuda_integer_compress_radix_ciphertext_128(CudaStreamsFFI streams,
void cleanup_cuda_integer_decompress_radix_ciphertext_128(
CudaStreamsFFI streams, int8_t **mem_ptr_void);
void cuda_integer_extract_glwe_128(
CudaStreamsFFI streams, void *glwe_array_out,
CudaPackedGlweCiphertextListFFI const *glwe_list,
uint32_t const glwe_index);
void cuda_integer_extract_glwe_64(
CudaStreamsFFI streams, void *glwe_array_out,
CudaPackedGlweCiphertextListFFI const *glwe_list,
uint32_t const glwe_index);
}
#endif

View File

@@ -116,8 +116,7 @@ template <typename Torus> struct int_decompression {
effective_compression_carry_modulus,
encryption_params.message_modulus, encryption_params.carry_modulus,
decompression_rescale_f, gpu_memory_allocated);
auto active_streams = streams.active_gpu_subset(
num_blocks_to_decompress, decompression_rescale_lut->params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_blocks_to_decompress);
decompression_rescale_lut->broadcast_lut(active_streams);
}
}

View File

@@ -356,8 +356,7 @@ template <typename Torus> struct unsigned_int_div_rem_2_2_memory {
luts[j]->get_degree(0), luts[j]->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, lut_f_message_extract, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_blocks);
luts[j]->broadcast_lut(active_streams);
}
}
@@ -1013,7 +1012,7 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
masking_luts_1[i]->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
lut_f_masking, gpu_memory_allocated);
auto active_streams_1 = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams_1 = streams.active_gpu_subset(1);
masking_luts_1[i]->broadcast_lut(active_streams_1);
generate_device_accumulator<Torus>(
@@ -1022,8 +1021,7 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
masking_luts_2[i]->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
lut_f_masking, gpu_memory_allocated);
auto active_streams_2 =
streams.active_gpu_subset(num_blocks, params.pbs_type);
auto active_streams_2 = streams.active_gpu_subset(num_blocks);
masking_luts_2[i]->broadcast_lut(active_streams_2);
}
@@ -1042,8 +1040,7 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
int_radix_lut<Torus> *luts[2] = {message_extract_lut_1,
message_extract_lut_2};
auto active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_blocks);
for (int j = 0; j < 2; j++) {
generate_device_accumulator<Torus>(
streams.stream(0), streams.gpu_index(0), luts[j]->get_lut(0, 0),
@@ -1131,8 +1128,7 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
// merge_overflow_flags_luts
merge_overflow_flags_luts = new int_radix_lut<Torus> *[num_bits_in_message];
auto active_gpu_count_for_bits =
streams.active_gpu_subset(1, params.pbs_type);
auto active_gpu_count_for_bits = streams.active_gpu_subset(1);
for (int i = 0; i < num_bits_in_message; i++) {
auto lut_f_bit = [i](Torus x, Torus y) -> Torus {
return (x == 0 && y == 0) << i;
@@ -1156,8 +1152,7 @@ template <typename Torus> struct unsigned_int_div_rem_memory {
uint32_t num_blocks, bool allocate_gpu_memory,
uint64_t &size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
auto active_streams =
streams.active_gpu_subset(2 * num_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(2 * num_blocks);
this->params = params;
if (params.message_modulus == 4 && params.carry_modulus == 4 &&
@@ -1478,8 +1473,7 @@ template <typename Torus> struct int_div_rem_memory {
bool allocate_gpu_memory, uint64_t &size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->params = params;
this->is_signed = is_signed;
@@ -1565,7 +1559,7 @@ template <typename Torus> struct int_div_rem_memory {
params.polynomial_size, params.message_modulus, params.carry_modulus,
f_compare_extracted_signed_bits, gpu_memory_allocated);
auto active_gpu_count_cmp =
streams.active_gpu_subset(1, params.pbs_type); // only 1 block needed
streams.active_gpu_subset(1); // only 1 block needed
compare_signed_bits_lut->broadcast_lut(active_gpu_count_cmp);
}
}

View File

@@ -20,8 +20,7 @@ template <typename Torus> struct int_prepare_count_of_consecutive_bits_buffer {
this->allocate_gpu_memory = allocate_gpu_memory;
this->direction = direction;
this->bit_value = bit_value;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
this->univ_lut_mem =
new int_radix_lut<Torus>(streams, params, 1, num_radix_blocks,
allocate_gpu_memory, size_tracker);
@@ -247,8 +246,7 @@ template <typename Torus> struct int_ilog2_buffer {
params.glwe_dimension, params.polynomial_size,
params.message_modulus, params.carry_modulus,
lut_message_lambda, allocate_gpu_memory);
auto active_streams =
streams.active_gpu_subset(counter_num_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(counter_num_blocks);
lut_message_not->broadcast_lut(active_streams);
this->lut_carry_not =

View File

@@ -371,8 +371,7 @@ struct int_radix_lut_custom_input_output {
this->num_input_blocks = num_input_blocks;
this->gpu_memory_allocated = allocate_gpu_memory;
this->active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_radix_blocks);
}
void setup_degrees() {
@@ -383,18 +382,14 @@ struct int_radix_lut_custom_input_output {
void allocate_pbs_buffers(int_radix_params params, uint32_t num_radix_blocks,
bool allocate_gpu_memory, uint64_t &size_tracker) {
int threshold = (params.pbs_type == PBS_TYPE::MULTI_BIT)
? THRESHOLD_MULTI_GPU_WITH_MULTI_BIT_PARAMS
: THRESHOLD_MULTI_GPU_WITH_CLASSICAL_PARAMS;
for (uint i = 0; i < active_streams.count(); i++) {
cuda_set_device(active_streams.gpu_index(i));
int8_t *gpu_pbs_buffer;
auto num_blocks_on_gpu = std::min(
(int)num_radix_blocks,
std::max(threshold, get_num_inputs_on_gpu(num_radix_blocks, i,
active_streams.count())));
auto num_blocks_on_gpu =
std::min((int)num_radix_blocks,
std::max(THRESHOLD_MULTI_GPU,
get_num_inputs_on_gpu(num_radix_blocks, i,
active_streams.count())));
uint64_t size = 0;
execute_scratch_pbs<OutputTorus>(
@@ -429,22 +424,18 @@ struct int_radix_lut_custom_input_output {
/// back to the original indexing
multi_gpu_alloc_lwe_async(active_streams, lwe_array_in_vec,
num_radix_blocks, params.big_lwe_dimension + 1,
size_tracker, params.pbs_type,
allocate_gpu_memory);
size_tracker, allocate_gpu_memory);
multi_gpu_alloc_lwe_async(active_streams, lwe_after_ks_vec,
num_radix_blocks, params.small_lwe_dimension + 1,
size_tracker, params.pbs_type,
allocate_gpu_memory);
size_tracker, allocate_gpu_memory);
if (num_many_lut > 1) {
multi_gpu_alloc_lwe_many_lut_output_async(
active_streams, lwe_after_pbs_vec, num_radix_blocks, num_many_lut,
params.big_lwe_dimension + 1, size_tracker, params.pbs_type,
allocate_gpu_memory);
params.big_lwe_dimension + 1, size_tracker, allocate_gpu_memory);
} else {
multi_gpu_alloc_lwe_async(active_streams, lwe_after_pbs_vec,
num_radix_blocks, params.big_lwe_dimension + 1,
size_tracker, params.pbs_type,
allocate_gpu_memory);
size_tracker, allocate_gpu_memory);
}
multi_gpu_alloc_array_async(active_streams, lwe_trivial_indexes_vec,
num_radix_blocks, size_tracker,
@@ -460,14 +451,12 @@ struct int_radix_lut_custom_input_output {
}
void setup_gemm_batch_ks_temp_buffers(uint64_t &size_tracker) {
int threshold = (params.pbs_type == PBS_TYPE::MULTI_BIT)
? THRESHOLD_MULTI_GPU_WITH_MULTI_BIT_PARAMS
: THRESHOLD_MULTI_GPU_WITH_CLASSICAL_PARAMS;
auto inputs_on_gpu = std::min(
(int)num_input_blocks,
std::max(threshold, get_num_inputs_on_gpu(num_input_blocks, 0,
active_streams.count())));
auto inputs_on_gpu =
std::min((int)num_input_blocks,
std::max(THRESHOLD_MULTI_GPU,
get_num_inputs_on_gpu(num_input_blocks, 0,
active_streams.count())));
if (inputs_on_gpu >= get_threshold_ks_gemm()) {
for (auto i = 0; i < active_streams.count(); ++i) {
@@ -809,20 +798,16 @@ struct int_radix_lut_custom_input_output {
void allocate_lwe_vector_for_non_trivial_indexes(
CudaStreams streams, uint64_t max_num_radix_blocks,
uint64_t &size_tracker, bool allocate_gpu_memory) {
int threshold = (params.pbs_type == PBS_TYPE::MULTI_BIT)
? THRESHOLD_MULTI_GPU_WITH_MULTI_BIT_PARAMS
: THRESHOLD_MULTI_GPU_WITH_CLASSICAL_PARAMS;
// We need to create the auxiliary array only in GPU 0
if (active_streams.count() > 1) {
lwe_aligned_vec.resize(active_streams.count());
for (uint i = 0; i < active_streams.count(); i++) {
uint64_t size_tracker_on_array_i = 0;
auto inputs_on_gpu = std::min(
(int)max_num_radix_blocks,
std::max(threshold, get_num_inputs_on_gpu(max_num_radix_blocks, i,
active_streams.count())));
auto inputs_on_gpu =
std::min((int)max_num_radix_blocks,
std::max(THRESHOLD_MULTI_GPU,
get_num_inputs_on_gpu(max_num_radix_blocks, i,
active_streams.count())));
InputTorus *d_array =
(InputTorus *)cuda_malloc_with_size_tracking_async(
inputs_on_gpu * (params.big_lwe_dimension + 1) *
@@ -1013,8 +998,8 @@ template <typename Torus> struct int_bit_extract_luts_buffer {
num_radix_blocks * bits_per_block * sizeof(Torus), streams.stream(0),
streams.gpu_index(0), allocate_gpu_memory);
auto active_streams = streams.active_gpu_subset(
bits_per_block * num_radix_blocks, params.pbs_type);
auto active_streams =
streams.active_gpu_subset(bits_per_block * num_radix_blocks);
lut->broadcast_lut(active_streams);
/**
@@ -1281,8 +1266,7 @@ template <typename Torus> struct int_sum_ciphertexts_vec_memory {
luts_message_carry->get_max_degree(1), params.glwe_dimension,
params.polynomial_size, message_modulus, params.carry_modulus,
lut_f_carry, gpu_memory_allocated);
auto active_gpu_count_mc =
streams.active_gpu_subset(pbs_count, params.pbs_type);
auto active_gpu_count_mc = streams.active_gpu_subset(pbs_count);
luts_message_carry->broadcast_lut(active_gpu_count_mc);
}
}
@@ -1452,8 +1436,7 @@ template <typename Torus> struct int_seq_group_prop_memory {
cuda_memcpy_with_size_tracking_async_to_gpu(
seq_lut_indexes, h_seq_lut_indexes, num_seq_luts * sizeof(Torus),
streams.stream(0), streams.gpu_index(0), allocate_gpu_memory);
auto active_streams =
streams.active_gpu_subset(num_seq_luts, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_seq_luts);
lut_sequential_algorithm->broadcast_lut(active_streams);
free(h_seq_lut_indexes);
};
@@ -1507,8 +1490,7 @@ template <typename Torus> struct int_hs_group_prop_memory {
lut_hillis_steele->get_max_degree(0), glwe_dimension, polynomial_size,
message_modulus, carry_modulus, f_lut_hillis_steele,
gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(num_groups, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_groups);
lut_hillis_steele->broadcast_lut(active_streams);
};
void release(CudaStreams streams) {
@@ -1685,8 +1667,7 @@ template <typename Torus> struct int_shifted_blocks_and_states_memory {
lut_indexes, h_lut_indexes, lut_indexes_size, streams.stream(0),
streams.gpu_index(0), allocate_gpu_memory);
// Do I need to do something else for the multi-gpu?
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
luts_array_first_step->broadcast_lut(active_streams);
};
void release(CudaStreams streams) {
@@ -1951,8 +1932,7 @@ template <typename Torus> struct int_prop_simu_group_carries_memory {
scalar_array_cum_sum, h_scalar_array_cum_sum,
num_radix_blocks * sizeof(Torus), streams.stream(0),
streams.gpu_index(0), allocate_gpu_memory);
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
luts_array_second_step->broadcast_lut(active_streams);
if (use_sequential_algorithm_to_resolve_group_carries) {
@@ -1977,8 +1957,7 @@ template <typename Torus> struct int_prop_simu_group_carries_memory {
cuda_memcpy_with_size_tracking_async_gpu_to_gpu(
lut_indexes, new_lut_indexes, new_num_blocks * sizeof(Torus),
streams.stream(0), streams.gpu_index(0), gpu_memory_allocated);
auto new_active_streams = streams.active_gpu_subset(
new_num_blocks, luts_array_second_step->params.pbs_type);
auto new_active_streams = streams.active_gpu_subset(new_num_blocks);
// We just need to update the lut indexes so we use false here
luts_array_second_step->broadcast_lut(new_active_streams, false);
@@ -2145,7 +2124,7 @@ template <typename Torus> struct int_sc_prop_memory {
polynomial_size, message_modulus, carry_modulus, f_overflow_fp,
gpu_memory_allocated);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
lut_overflow_flag_prep->broadcast_lut(active_streams);
}
@@ -2217,8 +2196,7 @@ template <typename Torus> struct int_sc_prop_memory {
(num_radix_blocks + 1) * sizeof(Torus), streams.stream(0),
streams.gpu_index(0), allocate_gpu_memory);
}
auto active_streams =
streams.active_gpu_subset(num_radix_blocks + 1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks + 1);
lut_message_extract->broadcast_lut(active_streams);
};
@@ -2415,8 +2393,7 @@ template <typename Torus> struct int_shifted_blocks_and_borrow_states_memory {
lut_indexes, h_lut_indexes, lut_indexes_size, streams.stream(0),
streams.gpu_index(0), allocate_gpu_memory);
// Do I need to do something else for the multi-gpu?
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
luts_array_first_step->broadcast_lut(active_streams);
};
@@ -2427,8 +2404,7 @@ template <typename Torus> struct int_shifted_blocks_and_borrow_states_memory {
cuda_memcpy_with_size_tracking_async_gpu_to_gpu(
lut_indexes, new_lut_indexes, new_num_blocks * sizeof(Torus),
streams.stream(0), streams.gpu_index(0), gpu_memory_allocated);
auto new_active_streams = streams.active_gpu_subset(
new_num_blocks, luts_array_first_step->params.pbs_type);
auto new_active_streams = streams.active_gpu_subset(new_num_blocks);
// We just need to update the lut indexes so we use false here
luts_array_first_step->broadcast_lut(new_active_streams, false);
}
@@ -2523,8 +2499,7 @@ template <typename Torus> struct int_borrow_prop_memory {
lut_message_extract->get_max_degree(0), glwe_dimension, polynomial_size,
message_modulus, carry_modulus, f_message_extract,
gpu_memory_allocated);
active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
active_streams = streams.active_gpu_subset(num_radix_blocks);
lut_message_extract->broadcast_lut(active_streams);
@@ -2545,8 +2520,7 @@ template <typename Torus> struct int_borrow_prop_memory {
lut_borrow_flag->broadcast_lut(active_streams);
}
active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
active_streams = streams.active_gpu_subset(num_radix_blocks);
internal_streams.create_internal_cuda_streams_on_same_gpus(active_streams,
2);
};
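In `allocate_pbs_buffers`, `setup_gemm_batch_ks_temp_buffers`, and `allocate_lwe_vector_for_non_trivial_indexes` above, the per-GPU block count follows the same clamp in both the old and new code; only the threshold constant changed. A standalone sketch with hypothetical numbers:

```cpp
#include <algorithm>
#include <cassert>

// num_blocks_on_gpu = min(num_radix_blocks,
//                         max(THRESHOLD_MULTI_GPU, inputs_assigned_to_gpu)):
// a GPU never receives more than the whole problem, and never sizes its
// scratch below the multi-GPU threshold.
int clamp_blocks(int num_radix_blocks, int inputs_on_gpu, int threshold) {
  return std::min(num_radix_blocks, std::max(threshold, inputs_on_gpu));
}

int main() {
  // Hypothetical numbers: 40 blocks split over 4 GPUs, threshold 16.
  assert(clamp_blocks(40, 10, 16) == 16); // small share rounded up to threshold
  assert(clamp_blocks(40, 48, 16) == 40); // capped at the total block count
  assert(clamp_blocks(8, 2, 16) == 8);    // tiny problems stay whole
  return 0;
}
```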

View File

@@ -45,8 +45,7 @@ template <typename Torus> struct int_mul_memory {
params.polynomial_size, params.message_modulus, params.carry_modulus,
zero_out_predicate_lut_f, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
zero_out_predicate_lut->broadcast_lut(active_streams);
zero_out_mem = new int_zero_out_if_buffer<Torus>(
@@ -123,8 +122,7 @@ template <typename Torus> struct int_mul_memory {
streams.stream(0), streams.gpu_index(0),
luts_array->get_lut_indexes(0, lsb_vector_block_count), 1,
msb_vector_block_count);
auto active_streams =
streams.active_gpu_subset(total_block_count, params.pbs_type);
auto active_streams = streams.active_gpu_subset(total_block_count);
luts_array->broadcast_lut(active_streams);
// create memory object for sum ciphertexts
sum_ciphertexts_mem = new int_sum_ciphertexts_vec_memory<Torus>(

View File

@@ -126,8 +126,7 @@ template <typename Torus> struct int_grouped_oprf_memory {
luts->get_lut_indexes(0, 0), this->h_lut_indexes,
num_blocks_to_process * sizeof(Torus), streams.stream(0),
streams.gpu_index(0), allocate_gpu_memory);
auto active_streams =
streams.active_gpu_subset(num_blocks_to_process, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_blocks_to_process);
luts->broadcast_lut(active_streams);
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));

View File

@@ -91,8 +91,7 @@ template <typename Torus> struct int_logical_scalar_shift_buffer {
cur_lut_bivariate->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
shift_lut_f, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
cur_lut_bivariate->broadcast_lut(active_streams);
lut_buffers_bivariate.push_back(cur_lut_bivariate);
@@ -178,8 +177,7 @@ template <typename Torus> struct int_logical_scalar_shift_buffer {
cur_lut_bivariate->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
shift_lut_f, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
cur_lut_bivariate->broadcast_lut(active_streams);
lut_buffers_bivariate.push_back(cur_lut_bivariate);
@@ -222,7 +220,7 @@ template <typename Torus> struct int_arithmetic_scalar_shift_buffer {
uint64_t &size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
// In the arithmetic shift, a PBS has to be applied to the last rotated
// block twice: once to shift it, once to compute the padding block to be
// copied onto all blocks to the left of the last rotated block
@@ -278,8 +276,7 @@ template <typename Torus> struct int_arithmetic_scalar_shift_buffer {
shift_last_block_lut_univariate->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, last_block_lut_f, gpu_memory_allocated);
auto active_streams_shift_last =
streams.active_gpu_subset(1, params.pbs_type);
auto active_streams_shift_last = streams.active_gpu_subset(1);
shift_last_block_lut_univariate->broadcast_lut(active_streams_shift_last);
lut_buffers_univariate.push_back(shift_last_block_lut_univariate);
@@ -305,7 +302,7 @@ template <typename Torus> struct int_arithmetic_scalar_shift_buffer {
padding_block_lut_univariate->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
padding_block_lut_f, gpu_memory_allocated);
// auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
// auto active_streams = streams.active_gpu_subset(1);
padding_block_lut_univariate->broadcast_lut(active_streams);
lut_buffers_univariate.push_back(padding_block_lut_univariate);
@@ -347,7 +344,7 @@ template <typename Torus> struct int_arithmetic_scalar_shift_buffer {
params.polynomial_size, params.message_modulus, params.carry_modulus,
blocks_lut_f, gpu_memory_allocated);
auto active_streams_shift_blocks =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
streams.active_gpu_subset(num_radix_blocks);
shift_blocks_lut_bivariate->broadcast_lut(active_streams_shift_blocks);
lut_buffers_bivariate.push_back(shift_blocks_lut_bivariate);
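The comment in `int_arithmetic_scalar_shift_buffer` above (one PBS to shift the last block, one to compute the padding block) mirrors how arithmetic shifts behave on plain integers; a minimal C++ analogue, with hypothetical values:

```cpp
#include <cstdint>
#include <cstdio>

// Plain-integer analogue of the comment above: an arithmetic right shift
// fills the vacated high positions with copies of the sign bit; in the
// radix scheme, that replicated sign material is the "padding block" a PBS
// computes once and copies onto every block left of the last rotated one.
int main() {
  int8_t x = -42;                        // 0xD6 in two's complement
  int8_t shifted = x >> 3;               // arithmetic shift: sign propagates
  printf("%d >> 3 = %d\n", x, shifted);  // prints: -42 >> 3 = -6
  return 0;
}
```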

View File

@@ -119,8 +119,8 @@ template <typename Torus> struct int_shift_and_rotate_buffer {
mux_lut->get_degree(0), mux_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, mux_lut_f, gpu_memory_allocated);
auto active_gpu_count_mux = streams.active_gpu_subset(
bits_per_block * num_radix_blocks, params.pbs_type);
auto active_gpu_count_mux =
streams.active_gpu_subset(bits_per_block * num_radix_blocks);
mux_lut->broadcast_lut(active_gpu_count_mux);
auto cleaning_lut_f = [params](Torus x) -> Torus {
@@ -132,7 +132,7 @@ template <typename Torus> struct int_shift_and_rotate_buffer {
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, cleaning_lut_f, gpu_memory_allocated);
auto active_gpu_count_cleaning =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
streams.active_gpu_subset(num_radix_blocks);
cleaning_lut->broadcast_lut(active_gpu_count_cleaning);
}

View File

@@ -108,8 +108,7 @@ template <typename Torus> struct int_overflowing_sub_memory {
glwe_dimension, polynomial_size, message_modulus, carry_modulus,
f_message_acc, gpu_memory_allocated);
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
luts_array->broadcast_lut(active_streams);
luts_borrow_propagation_sum->broadcast_lut(active_streams);
message_acc->broadcast_lut(active_streams);

View File

@@ -38,8 +38,7 @@ template <typename Torus> struct int_unchecked_all_eq_slices_buffer {
num_streams_to_use = 1;
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
uint32_t num_gpus = active_streams.count();

View File

@@ -40,8 +40,7 @@ template <typename Torus> struct int_equality_selectors_buffer {
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams_to_use);
@@ -155,8 +154,7 @@ template <typename Torus> struct int_possible_results_buffer {
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams_to_use);
@@ -209,8 +207,7 @@ template <typename Torus> struct int_possible_results_buffer {
params.message_modulus, params.carry_modulus, fns,
allocate_gpu_memory);
current_lut->broadcast_lut(
streams.active_gpu_subset(1, params.pbs_type));
current_lut->broadcast_lut(streams.active_gpu_subset(1));
stream_luts[lut_count++] = current_lut;
lut_value_start += luts_in_this_call;
}
@@ -285,8 +282,7 @@ template <typename Torus> struct int_aggregate_one_hot_buffer {
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams);
@@ -304,8 +300,7 @@ template <typename Torus> struct int_aggregate_one_hot_buffer {
params.polynomial_size, params.message_modulus, params.carry_modulus,
id_fn, allocate_gpu_memory);
lut->broadcast_lut(
streams.active_gpu_subset(num_blocks, params.pbs_type));
lut->broadcast_lut(streams.active_gpu_subset(num_blocks));
this->stream_identity_luts[i] = lut;
}
@@ -326,7 +321,7 @@ template <typename Torus> struct int_aggregate_one_hot_buffer {
params.polynomial_size, params.message_modulus, params.carry_modulus,
msg_fn, allocate_gpu_memory);
this->message_extract_lut->broadcast_lut(
streams.active_gpu_subset(num_blocks, params.pbs_type));
streams.active_gpu_subset(num_blocks));
this->carry_extract_lut = new int_radix_lut<Torus>(
streams, params, 1, num_blocks, allocate_gpu_memory, size_tracker);
@@ -338,7 +333,7 @@ template <typename Torus> struct int_aggregate_one_hot_buffer {
params.polynomial_size, params.message_modulus, params.carry_modulus,
carry_fn, allocate_gpu_memory);
this->carry_extract_lut->broadcast_lut(
streams.active_gpu_subset(num_blocks, params.pbs_type));
streams.active_gpu_subset(num_blocks));
this->partial_aggregated_vectors =
new CudaRadixCiphertextFFI *[num_streams];
@@ -633,8 +628,7 @@ template <typename Torus> struct int_unchecked_contains_buffer {
num_streams_to_use = 1;
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams_to_use);
@@ -709,8 +703,7 @@ template <typename Torus> struct int_unchecked_contains_clear_buffer {
num_streams_to_use = 1;
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams_to_use);
@@ -1101,8 +1094,7 @@ template <typename Torus> struct int_unchecked_first_index_of_clear_buffer {
num_streams_to_use = 1;
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams_to_use);
@@ -1192,8 +1184,7 @@ template <typename Torus> struct int_unchecked_first_index_of_clear_buffer {
this->prefix_sum_lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
prefix_sum_fn, allocate_gpu_memory);
this->prefix_sum_lut->broadcast_lut(
streams.active_gpu_subset(num_inputs, params.pbs_type));
this->prefix_sum_lut->broadcast_lut(streams.active_gpu_subset(num_inputs));
auto cleanup_fn = [ALREADY_SEEN, params](Torus x) -> Torus {
Torus val = x % params.message_modulus;
@@ -1209,8 +1200,7 @@ template <typename Torus> struct int_unchecked_first_index_of_clear_buffer {
this->cleanup_lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
cleanup_fn, allocate_gpu_memory);
this->cleanup_lut->broadcast_lut(
streams.active_gpu_subset(num_inputs, params.pbs_type));
this->cleanup_lut->broadcast_lut(streams.active_gpu_subset(num_inputs));
}
void release(CudaStreams streams) {
@@ -1302,8 +1292,7 @@ template <typename Torus> struct int_unchecked_first_index_of_buffer {
num_streams_to_use = 1;
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams_to_use);
@@ -1383,8 +1372,7 @@ template <typename Torus> struct int_unchecked_first_index_of_buffer {
this->prefix_sum_lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
prefix_sum_fn, allocate_gpu_memory);
this->prefix_sum_lut->broadcast_lut(
streams.active_gpu_subset(num_inputs, params.pbs_type));
this->prefix_sum_lut->broadcast_lut(streams.active_gpu_subset(num_inputs));
auto cleanup_fn = [ALREADY_SEEN, params](Torus x) -> Torus {
Torus val = x % params.message_modulus;
@@ -1400,8 +1388,7 @@ template <typename Torus> struct int_unchecked_first_index_of_buffer {
this->cleanup_lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
cleanup_fn, allocate_gpu_memory);
this->cleanup_lut->broadcast_lut(
streams.active_gpu_subset(num_inputs, params.pbs_type));
this->cleanup_lut->broadcast_lut(streams.active_gpu_subset(num_inputs));
}
void release(CudaStreams streams) {
@@ -1475,8 +1462,7 @@ template <typename Torus> struct int_unchecked_index_of_buffer {
num_streams_to_use = 1;
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams_to_use);
@@ -1537,8 +1523,7 @@ template <typename Torus> struct int_unchecked_index_of_clear_buffer {
num_streams_to_use = 1;
this->num_streams = num_streams_to_use;
this->active_streams =
streams.active_gpu_subset(num_blocks, params.pbs_type);
this->active_streams = streams.active_gpu_subset(num_blocks);
this->internal_cuda_streams.create_internal_cuda_streams_on_same_gpus(
active_streams, num_streams_to_use);

View File

@@ -47,7 +47,7 @@ template <typename Torus> struct compact_lwe_list {
template <typename Torus> struct flattened_compact_lwe_lists {
Torus *d_ptr;
Torus **ptr_array_to_d_compact_list;
Torus **d_ptr_to_compact_list;
const uint32_t *h_num_lwes_per_compact_list;
uint32_t num_compact_lists;
uint32_t lwe_dimension;
@@ -59,13 +59,13 @@ template <typename Torus> struct flattened_compact_lwe_lists {
uint32_t lwe_dimension)
: d_ptr(d_ptr), h_num_lwes_per_compact_list(h_num_lwes_per_compact_list),
num_compact_lists(num_compact_lists), lwe_dimension(lwe_dimension) {
ptr_array_to_d_compact_list =
static_cast<Torus **>(malloc(num_compact_lists * sizeof(Torus *)));
d_ptr_to_compact_list =
static_cast<Torus **>(malloc(num_compact_lists * sizeof(Torus **)));
total_num_lwes = 0;
auto curr_list = d_ptr;
for (auto i = 0; i < num_compact_lists; ++i) {
total_num_lwes += h_num_lwes_per_compact_list[i];
ptr_array_to_d_compact_list[i] = curr_list;
d_ptr_to_compact_list[i] = curr_list;
curr_list += lwe_dimension + h_num_lwes_per_compact_list[i];
}
}
@@ -75,12 +75,10 @@ template <typename Torus> struct flattened_compact_lwe_lists {
PANIC("index out of range in flattened_compact_lwe_lists::get");
}
return compact_lwe_list(ptr_array_to_d_compact_list[compact_list_index],
return compact_lwe_list(d_ptr_to_compact_list[compact_list_index],
lwe_dimension,
h_num_lwes_per_compact_list[compact_list_index]);
}
void release() { free(ptr_array_to_d_compact_list); }
};
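The pointer bookkeeping above relies on the flattened layout of compact LWE lists: as the stride in the constructor suggests, list `i` occupies `lwe_dimension + num_lwes[i]` elements (a shared mask followed by one body per LWE). A minimal sketch of the offset computation, with hypothetical sizes:

```cpp
#include <cstdint>
#include <cstdio>

// Each compact list i spans (lwe_dimension + num_lwes[i]) Torus elements,
// so consecutive list start offsets advance by exactly that amount,
// mirroring 'curr_list += lwe_dimension + h_num_lwes_per_compact_list[i]'.
int main() {
  const uint32_t lwe_dimension = 4;        // hypothetical, tiny for clarity
  const uint32_t num_lwes[3] = {2, 5, 1};  // hypothetical list sizes
  uint64_t offset = 0;
  for (int i = 0; i < 3; ++i) {
    printf("list %d starts at element %llu\n", i, (unsigned long long)offset);
    offset += lwe_dimension + num_lwes[i];
  }
  // Printed offsets: 0, 6, 15.
  return 0;
}
```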
/*
@@ -123,6 +121,7 @@ template <typename Torus> struct zk_expand_mem {
: computing_params(computing_params), casting_params(casting_params),
num_compact_lists(num_compact_lists),
casting_key_type(casting_key_type) {
gpu_memory_allocated = allocate_gpu_memory;
// We copy num_lwes_per_compact_list so we get protection against
@@ -290,8 +289,7 @@ template <typename Torus> struct zk_expand_mem {
lut_indexes, h_lut_indexes, num_packed_msgs * num_lwes * sizeof(Torus),
streams.stream(0), streams.gpu_index(0), allocate_gpu_memory);
auto active_streams =
streams.active_gpu_subset(2 * num_lwes, params.pbs_type);
auto active_streams = streams.active_gpu_subset(2 * num_lwes);
message_and_carry_extract_luts->broadcast_lut(active_streams);
message_and_carry_extract_luts->allocate_lwe_vector_for_non_trivial_indexes(
@@ -315,6 +313,7 @@ template <typename Torus> struct zk_expand_mem {
}
void release(CudaStreams streams) {
message_and_carry_extract_luts->release(streams);
delete message_and_carry_extract_luts;

View File

@@ -153,8 +153,7 @@ __host__ void are_all_comparisons_block_true(
cuda_memcpy_async_to_gpu(is_max_value_lut->get_lut_indexes(0, 0),
h_lut_indexes, num_chunks * sizeof(Torus),
streams.stream(0), streams.gpu_index(0));
auto active_streams =
streams.active_gpu_subset(num_chunks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_chunks);
is_max_value_lut->broadcast_lut(active_streams);
}
lut = is_max_value_lut;
@@ -173,8 +172,8 @@ __host__ void are_all_comparisons_block_true(
is_max_value_lut->h_lut_indexes,
is_max_value_lut->num_blocks * sizeof(Torus),
streams.stream(0), streams.gpu_index(0));
auto active_gpu_count_is_max = streams.active_gpu_subset(
is_max_value_lut->num_blocks, params.pbs_type);
auto active_gpu_count_is_max =
streams.active_gpu_subset(is_max_value_lut->num_blocks);
is_max_value_lut->broadcast_lut(active_gpu_count_is_max, false);
reset_radix_ciphertext_blocks(lwe_array_out, 1);
@@ -489,7 +488,7 @@ tree_sign_reduction(CudaStreams streams, CudaRadixCiphertextFFI *lwe_array_out,
polynomial_size, message_modulus, carry_modulus, f, true,
tree_buffer->preallocated_h_lut);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
last_lut->broadcast_lut(active_streams);
// Last leaf

View File

@@ -155,24 +155,3 @@ void cleanup_cuda_integer_decompress_radix_ciphertext_128(
delete mem_ptr;
*mem_ptr_void = nullptr;
}
void cuda_integer_extract_glwe_128(
CudaStreamsFFI streams, void *glwe_array_out,
CudaPackedGlweCiphertextListFFI const *glwe_list,
uint32_t const glwe_index) {
CudaStreams _streams = CudaStreams(streams);
host_extract<__uint128_t>(_streams.stream(0), _streams.gpu_index(0),
(__uint128_t *)glwe_array_out, glwe_list,
glwe_index);
}
void cuda_integer_extract_glwe_64(
CudaStreamsFFI streams, void *glwe_array_out,
CudaPackedGlweCiphertextListFFI const *glwe_list,
uint32_t const glwe_index) {
CudaStreams _streams = CudaStreams(streams);
host_extract<__uint64_t>(_streams.stream(0), _streams.gpu_index(0),
(__uint64_t *)glwe_array_out, glwe_list, glwe_index);
}

View File

@@ -339,9 +339,7 @@ host_integer_decompress(CudaStreams streams,
/// dimension to a big LWE dimension
auto encryption_params = h_mem_ptr->encryption_params;
auto lut = h_mem_ptr->decompression_rescale_lut;
auto active_streams = streams.active_gpu_subset(
num_blocks_to_decompress,
h_mem_ptr->decompression_rescale_lut->params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_blocks_to_decompress);
if (active_streams.count() == 1) {
execute_pbs_async<Torus, Torus>(
active_streams, (Torus *)d_lwe_array_out->ptr, lut->lwe_indexes_out,

View File

@@ -542,8 +542,7 @@ __host__ void integer_radix_apply_univariate_lookup_table(
std::vector<Torus *> lwe_after_pbs_vec = lut->lwe_after_pbs_vec;
std::vector<Torus *> lwe_trivial_indexes_vec = lut->lwe_trivial_indexes_vec;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
if (active_streams.count() == 1) {
execute_keyswitch_async<Torus>(
streams.get_ith(0), lwe_after_ks_vec[0], lwe_trivial_indexes_vec[0],
@@ -646,8 +645,7 @@ __host__ void integer_radix_apply_many_univariate_lookup_table(
std::vector<Torus *> lwe_after_pbs_vec = lut->lwe_after_pbs_vec;
std::vector<Torus *> lwe_trivial_indexes_vec = lut->lwe_trivial_indexes_vec;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
if (active_streams.count() == 1) {
execute_keyswitch_async<Torus>(
streams.get_ith(0), lwe_after_ks_vec[0], lwe_trivial_indexes_vec[0],
@@ -766,8 +764,7 @@ __host__ void integer_radix_apply_bivariate_lookup_table(
std::vector<Torus *> lwe_after_pbs_vec = lut->lwe_after_pbs_vec;
std::vector<Torus *> lwe_trivial_indexes_vec = lut->lwe_trivial_indexes_vec;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
if (active_streams.count() == 1) {
execute_keyswitch_async<Torus>(
streams.get_ith(0), lwe_after_ks_vec[0], lwe_trivial_indexes_vec[0],
@@ -1815,8 +1812,7 @@ uint64_t scratch_cuda_apply_univariate_lut(
(params.glwe_dimension + 1) * params.polynomial_size * sizeof(Torus),
streams.stream(0), streams.gpu_index(0), allocate_gpu_memory);
*(*mem_ptr)->get_degree(0) = lut_degree;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
(*mem_ptr)->broadcast_lut(active_streams);
POP_RANGE()
return size_tracker;
@@ -1851,8 +1847,7 @@ uint64_t scratch_cuda_apply_many_univariate_lut(
(params.glwe_dimension + 1) * params.polynomial_size * sizeof(Torus),
streams.stream(0), streams.gpu_index(0), allocate_gpu_memory);
*(*mem_ptr)->get_degree(0) = lut_degree;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
(*mem_ptr)->broadcast_lut(active_streams);
POP_RANGE()
return size_tracker;
@@ -1888,8 +1883,7 @@ uint64_t scratch_cuda_apply_bivariate_lut(
(params.glwe_dimension + 1) * params.polynomial_size * sizeof(Torus),
streams.stream(0), streams.gpu_index(0), allocate_gpu_memory);
*(*mem_ptr)->get_degree(0) = lut_degree;
auto active_streams =
streams.active_gpu_subset(num_radix_blocks, params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
(*mem_ptr)->broadcast_lut(active_streams);
POP_RANGE()
return size_tracker;
@@ -2342,8 +2336,8 @@ integer_radix_apply_noise_squashing(CudaStreams streams,
// Since the radix ciphertexts are packed, we have to use the num_radix_blocks
// from the output ct
auto active_streams = streams.active_gpu_subset(
lwe_array_out->num_radix_blocks, params.pbs_type);
auto active_streams =
streams.active_gpu_subset(lwe_array_out->num_radix_blocks);
if (active_streams.count() == 1) {
execute_keyswitch_async<InputTorus>(
streams.get_ith(0), lwe_after_ks_vec[0], lwe_trivial_indexes_vec[0],

View File

@@ -388,8 +388,7 @@ __host__ void host_integer_partial_sum_ciphertexts_vec(
current_columns.next_accumulation(total_ciphertexts, total_messages,
needs_processing);
auto active_streams =
streams.active_gpu_subset(total_ciphertexts, mem_ptr->params.pbs_type);
auto active_streams = streams.active_gpu_subset(total_ciphertexts);
GPU_ASSERT(total_ciphertexts <= mem_ptr->luts_message_carry->num_blocks,
"SUM CT");
@@ -443,8 +442,7 @@ __host__ void host_integer_partial_sum_ciphertexts_vec(
streams.stream(0), streams.gpu_index(0), current_blocks,
num_radix_blocks, num_radix_blocks + 1);
auto active_streams = streams.active_gpu_subset(2 * num_radix_blocks,
mem_ptr->params.pbs_type);
auto active_streams = streams.active_gpu_subset(2 * num_radix_blocks);
if (active_streams.count() == 1) {
execute_keyswitch_async<Torus>(

View File

@@ -29,8 +29,7 @@ void host_integer_grouped_oprf(CudaStreams streams,
int_grouped_oprf_memory<Torus> *mem_ptr,
void *const *bsks) {
auto active_streams = streams.active_gpu_subset(num_blocks_to_process,
mem_ptr->params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_blocks_to_process);
auto lut = mem_ptr->luts;
if (active_streams.count() == 1) {

View File

@@ -34,49 +34,49 @@ void cuda_rerand_64(
switch (rerand_buffer->params.big_lwe_dimension) {
case 256:
host_rerand_inplace<uint64_t, AmortizedDegree<256>>(
rerand_inplace<uint64_t, AmortizedDegree<256>>(
streams, static_cast<uint64_t *>(lwe_array),
static_cast<const uint64_t *>(
lwe_flattened_encryptions_of_zero_compact_array_in),
(uint64_t **)(ksk), rerand_buffer);
break;
case 512:
host_rerand_inplace<uint64_t, AmortizedDegree<512>>(
rerand_inplace<uint64_t, AmortizedDegree<512>>(
streams, static_cast<uint64_t *>(lwe_array),
static_cast<const uint64_t *>(
lwe_flattened_encryptions_of_zero_compact_array_in),
(uint64_t **)(ksk), rerand_buffer);
break;
case 1024:
host_rerand_inplace<uint64_t, AmortizedDegree<1024>>(
rerand_inplace<uint64_t, AmortizedDegree<1024>>(
streams, static_cast<uint64_t *>(lwe_array),
static_cast<const uint64_t *>(
lwe_flattened_encryptions_of_zero_compact_array_in),
(uint64_t **)(ksk), rerand_buffer);
break;
case 2048:
host_rerand_inplace<uint64_t, AmortizedDegree<2048>>(
rerand_inplace<uint64_t, AmortizedDegree<2048>>(
streams, static_cast<uint64_t *>(lwe_array),
static_cast<const uint64_t *>(
lwe_flattened_encryptions_of_zero_compact_array_in),
(uint64_t **)(ksk), rerand_buffer);
break;
case 4096:
host_rerand_inplace<uint64_t, AmortizedDegree<4096>>(
rerand_inplace<uint64_t, AmortizedDegree<4096>>(
streams, static_cast<uint64_t *>(lwe_array),
static_cast<const uint64_t *>(
lwe_flattened_encryptions_of_zero_compact_array_in),
(uint64_t **)(ksk), rerand_buffer);
break;
case 8192:
host_rerand_inplace<uint64_t, AmortizedDegree<8192>>(
rerand_inplace<uint64_t, AmortizedDegree<8192>>(
streams, static_cast<uint64_t *>(lwe_array),
static_cast<const uint64_t *>(
lwe_flattened_encryptions_of_zero_compact_array_in),
(uint64_t **)(ksk), rerand_buffer);
break;
case 16384:
host_rerand_inplace<uint64_t, AmortizedDegree<16384>>(
rerand_inplace<uint64_t, AmortizedDegree<16384>>(
streams, static_cast<uint64_t *>(lwe_array),
static_cast<const uint64_t *>(
lwe_flattened_encryptions_of_zero_compact_array_in),
@@ -88,6 +88,9 @@ void cuda_rerand_64(
" in the interval [256..16384].");
break;
}
cuda_synchronize_stream(static_cast<cudaStream_t>(streams.streams[0]),
streams.gpu_indexes[0]);
}
void cleanup_cuda_rerand(CudaStreamsFFI streams, int8_t **mem_ptr_void) {

View File

@@ -10,7 +10,7 @@
#include "zk/zk_utilities.h"
template <typename Torus, class params>
void host_rerand_inplace(
void rerand_inplace(
CudaStreams const streams, Torus *lwe_array,
const Torus *lwe_flattened_encryptions_of_zero_compact_array_in,
Torus *const *ksk, int_rerand_mem<Torus> *mem_ptr) {
@@ -73,7 +73,6 @@ void host_rerand_inplace(
message_modulus, carry_modulus);
release_cpu_radix_ciphertext_async(&lwes_ffi);
release_cpu_radix_ciphertext_async(&ksed_zero_lwes_ffi);
compact_lwe_lists.release();
}
template <typename Torus>

View File

@@ -45,8 +45,7 @@ host_scalar_bitop(CudaStreams streams, CudaRadixCiphertextFFI *output,
cuda_memcpy_async_gpu_to_gpu(lut->get_lut_indexes(0, 0), clear_blocks,
num_clear_blocks * sizeof(Torus),
streams.stream(0), streams.gpu_index(0));
auto active_streams = streams.active_gpu_subset(
num_clear_blocks, mem_ptr->lut->params.pbs_type);
auto active_streams = streams.active_gpu_subset(num_clear_blocks);
lut->broadcast_lut(active_streams, false);
integer_radix_apply_univariate_lookup_table<Torus>(

View File

@@ -146,7 +146,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check(
lut->get_degree(0), lut->get_max_degree(0), glwe_dimension,
polynomial_size, message_modulus, carry_modulus, scalar_last_leaf_lut_f,
true, mem_ptr->diff_buffer->tree_buffer->preallocated_h_lut);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
lut->broadcast_lut(active_streams);
integer_radix_apply_univariate_lookup_table<Torus>(
@@ -240,7 +240,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check(
polynomial_size, message_modulus, carry_modulus,
scalar_bivariate_last_leaf_lut_f, true,
mem_ptr->diff_buffer->tree_buffer->preallocated_h_lut);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
lut->broadcast_lut(active_streams);
integer_radix_apply_bivariate_lookup_table<Torus>(
@@ -274,7 +274,7 @@ __host__ void integer_radix_unsigned_scalar_difference_check(
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, one_block_lut_f, true,
mem_ptr->preallocated_h_lut);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
one_block_lut->broadcast_lut(active_streams);
integer_radix_apply_univariate_lookup_table<Torus>(
@@ -419,7 +419,7 @@ __host__ void integer_radix_signed_scalar_difference_check(
polynomial_size, message_modulus, carry_modulus,
scalar_bivariate_last_leaf_lut_f, true,
mem_ptr->diff_buffer->tree_buffer->preallocated_h_lut);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
lut->broadcast_lut(active_streams);
integer_radix_apply_bivariate_lookup_table<Torus>(
@@ -521,7 +521,7 @@ __host__ void integer_radix_signed_scalar_difference_check(
signed_msb_lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
lut_f, true, mem_ptr->preallocated_h_lut);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
signed_msb_lut->broadcast_lut(active_streams);
CudaRadixCiphertextFFI sign_block;
@@ -567,7 +567,7 @@ __host__ void integer_radix_signed_scalar_difference_check(
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, one_block_lut_f, true,
mem_ptr->preallocated_h_lut);
auto active_streams = streams.active_gpu_subset(1, params.pbs_type);
auto active_streams = streams.active_gpu_subset(1);
one_block_lut->broadcast_lut(active_streams);
integer_radix_apply_univariate_lookup_table<Torus>(
@@ -785,8 +785,8 @@ __host__ void host_scalar_equality_check(
num_halved_scalar_blocks * sizeof(Torus), lsb_streams.stream(0),
lsb_streams.gpu_index(0));
}
auto active_streams = lsb_streams.active_gpu_subset(
num_halved_scalar_blocks, params.pbs_type);
auto active_streams =
lsb_streams.active_gpu_subset(num_halved_scalar_blocks);
// We use false because we only broadcast the indexes
scalar_comparison_luts->broadcast_lut(active_streams, false);

View File

@@ -30,7 +30,7 @@ __global__ void __launch_bounds__(params::degree / params::opt)
Torus *global_accumulator, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t grouping_factor, uint32_t lwe_offset,
uint32_t lwe_chunk_size, uint64_t keybundle_size_per_input,
uint64_t lwe_chunk_size, uint64_t keybundle_size_per_input,
int8_t *device_mem, uint64_t device_memory_size_per_block,
uint32_t num_many_lut, uint32_t lut_stride) {
@@ -321,9 +321,8 @@ __host__ void execute_cg_external_product_loop(
lwe_chunk_size * level_count * (glwe_dimension + 1) *
(glwe_dimension + 1) * (polynomial_size / 2);
uint32_t chunk_size = (uint32_t)(std::min(
lwe_chunk_size,
(uint64_t)(lwe_dimension / grouping_factor) - lwe_offset));
uint64_t chunk_size = std::min(
lwe_chunk_size, (uint64_t)(lwe_dimension / grouping_factor) - lwe_offset);
auto d_mem = buffer->d_mem_acc_cg;
auto keybundle_fft = buffer->keybundle_fft;
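The widened `chunk_size` above is the usual tail-of-loop computation: full chunks while enough key material remains, then one shorter final chunk. A standalone sketch with hypothetical multi-bit parameters:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// chunk_size = min(lwe_chunk_size, lwe_dimension / grouping_factor - lwe_offset)
int main() {
  const uint64_t lwe_dimension = 920, grouping_factor = 4, lwe_chunk_size = 64;
  const uint64_t total = lwe_dimension / grouping_factor;  // 230 keybundles
  for (uint64_t lwe_offset = 0; lwe_offset < total; lwe_offset += lwe_chunk_size) {
    uint64_t chunk_size = std::min(lwe_chunk_size, total - lwe_offset);
    printf("offset %3llu -> chunk %llu\n", (unsigned long long)lwe_offset,
           (unsigned long long)chunk_size);  // chunks: 64, 64, 64, 38
  }
  return 0;
}
```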

Some files were not shown because too many files have changed in this diff.