Compare commits

...

143 Commits

Author SHA1 Message Date
tmontaigu
f5fa60ecc3 feat(hlapi): bind string [r]split_once 2025-03-26 15:09:50 +01:00
Agnes Leroy
6e158cd109 chore(gpu): use template for first/last iter in split classical PBS 2025-03-26 10:01:39 +01:00
Agnes Leroy
cdcf00af45 chore(gpu): detect if we are in first or last iter with template argument for split kernel multi-bit PBS 2025-03-25 12:16:51 +01:00
Agnes Leroy
78638a24d2 chore(gpu): reduce test threads for 4090 tests to avoid out of mem error 2025-03-25 09:42:58 +01:00
dependabot[bot]
84c12cca56 chore(deps): bump actions/download-artifact from 4.1.9 to 4.2.1
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.1.9 to 4.2.1.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](cc20338598...95815c38cf)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 17:52:27 +01:00
dependabot[bot]
7d05a427a5 chore(deps): bump actions/upload-artifact from 4.6.1 to 4.6.2
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.1 to 4.6.2.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](4cec3d8aa0...ea165f8d65)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 17:52:17 +01:00
dependabot[bot]
c7bc981f7f chore(deps): bump tj-actions/changed-files from 46.0.1 to 46.0.2
Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 46.0.1 to 46.0.2.
- [Release notes](https://github.com/tj-actions/changed-files/releases)
- [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md)
- [Commits](2f7c5bfce2...26a38635fc)

---
updated-dependencies:
- dependency-name: tj-actions/changed-files
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 17:52:04 +01:00
dependabot[bot]
f7210c80a9 chore(deps): bump actions/cache from 4.2.2 to 4.2.3
Bumps [actions/cache](https://github.com/actions/cache) from 4.2.2 to 4.2.3.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](d4323d4df1...5a3ec84eff)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 17:50:01 +01:00
Arthur Meyre
a11584f905 refactor(integer): provide recompose_unsigned, recompose_signed functions
- to decrypt values and make it flexible wrt input type for the
recomposition, e.g. using u128 for noise squashed primitives
2025-03-24 17:12:18 +01:00
Arthur Meyre
f326aaf2f2 feat(shortint): add conformance for noise squashing keys 2025-03-24 17:12:18 +01:00
Arthur Meyre
3a63b96b77 refactor: make bootstrapping key conformance param generic over Scalar 2025-03-24 17:12:18 +01:00
Arthur Meyre
bbf30e5227 feat(shortint): add CompressedNoiseSquashingKey
- add decompressed key in noise squashing and check output from it as well
number of loops /2 as a result
2025-03-24 17:12:18 +01:00
Arthur Meyre
9f5266a56d fix: fix Named implementation for compression keys in integer
- for now keep the alias for backward compatibility
2025-03-24 17:12:18 +01:00
Arthur Meyre
065f863ba2 refactor(hl): rename some structs related to inner FheInt representations 2025-03-24 17:12:18 +01:00
Arthur Meyre
93dd4cf61a refactor(shortint): more sensible API for noise squashing private keygen 2025-03-24 17:12:18 +01:00
Arthur Meyre
3331ce2548 chore: expose noise squashing parameters and use in shortint test 2025-03-24 17:12:18 +01:00
Arthur Meyre
7e30816fe8 feat(integer): add raw parts APIs for compressed compression keys 2025-03-24 17:12:18 +01:00
Agnes Leroy
765d2b6dbe chore(gpu): relax too strict condition in copy slice 2025-03-24 14:58:45 +01:00
Beka Barbakadze
5207e55684 refactor(gpu): remove lwe_input_indexes, lwe_output_indexes and lut_vector_indexes for pbs128 2025-03-24 14:32:41 +01:00
David Testé
a4bd78912b chore: bump tfhe and tfhe-cuda-backend version to alpha.0 2025-03-24 13:18:46 +01:00
Arthur Meyre
c59fa4c479 chore(ci): make version formatting more resilient 2025-03-24 13:18:46 +01:00
tmontaigu
bbcad438fc feat: add trivial enc/dec for strings 2025-03-24 10:16:15 +01:00
tmontaigu
89016d7a07 feat(integer): add scalar cmux
Add variants of CMUX where one or two of the possible
output values are clear
2025-03-24 10:15:52 +01:00
tmontaigu
d454e67b89 fix(integer): block_rotate
encrypted block_rotate/shift family of functions had a few bugs

* It disallowed the use of 1_1 parameters even though it could support it
(given that another slight fix explained below was done)
* The offset at which shift bits were extracted was hard coded for 2_2
* Directions were inverted, i.e, block_rotate_left would rotate_right
2025-03-24 10:15:24 +01:00
tmontaigu
f1cf021d18 refactor: move bit shift/rotate tests 2025-03-24 10:15:24 +01:00
Agnes Leroy
b1008824e2 chore(gpu): suppress warnings in pcc_gpu 2025-03-21 18:02:07 +01:00
Agnes Leroy
4928b1354e chore(gpu): add an alias for GPU compression parameters 2025-03-21 17:17:51 +01:00
Agnes Leroy
7d2a296d4d chore(gpu): reduce testing time after parameter update 2025-03-21 15:43:37 +01:00
Arthur Meyre
11cbffb3f2 chore(ci): fix size benchmark
- don't expand, we have tests for that, the server key would be required
with the "new" parameters we are interested in
2025-03-21 14:37:34 +01:00
Agnes Leroy
a7111014e8 fix(gpu): fix corner case in sum ctxt 2025-03-21 10:14:38 +01:00
Arthur Meyre
7d3cdbf466 chore: bump tfhe-cuda-backend to version 0.9.0 2025-03-20 17:47:18 +01:00
Arthur Meyre
dc9afe1146 chore: bump to 1.1 and add V1_1 parameters
- add aliases for tests to avoid having to upgrade too many locations
2025-03-20 17:47:18 +01:00
David Testé
8287f59ebd chore(ci): update aws ami for cpu
This is done to update python modules: pip, wheel and setuptools.
2025-03-20 15:47:17 +01:00
David Testé
9282dc49bf chore(ci): cache backward compatibility data
Git LFS transfers use a lot of bandwidth. Since data used to test
backward compatibility won't change every day, we can leverage
GitHub cache action.
2025-03-20 15:47:17 +01:00
Agnes Leroy
71d8bcff89 fix(gpu): fix signed scalar comparison tests 2025-03-19 18:12:26 +01:00
Agnes Leroy
30319452a4 chore(gpu): remove last memcpy_to_cpu 2025-03-19 18:12:26 +01:00
Agnes Leroy
fb0e9a3b35 chore(gpu): pass scalars on cpu to c++ to avoid calling copy_to_cpu 2025-03-19 18:12:26 +01:00
Agnes Leroy
dbcbea78a5 chore(gpu): pass host scalar to scalar add to avoid overhead due to copy_to_cpu 2025-03-19 18:12:26 +01:00
Agnes Leroy
c9b9fc52d8 chore(gpu): store h_lut_indexes in buffer to avoid regression in perf 2025-03-19 18:12:26 +01:00
Agnes Leroy
f404e8f10d chore(gpu): avoid syncing too much in release 2025-03-19 18:12:26 +01:00
Agnes Leroy
31de8d52d8 fix(gpu): fix bug introduced when reworking host_compare_with_zero
Bug introduced in this commit: 5258acc08f
2025-03-19 17:17:55 +01:00
Arthur Meyre
fd866d18fe chore(ci): pin changed files action to a sha1 corresponding to a tag 2025-03-19 09:25:20 +01:00
Arthur Meyre
56572d0223 fix: a shortint docstring had values not matching the explanation 2025-03-17 17:55:23 +01:00
dependabot[bot]
f3e14dc311 chore(deps): bump dtolnay/rust-toolchain
Bumps [dtolnay/rust-toolchain](https://github.com/dtolnay/rust-toolchain) from a54c7afa936fefeb4456b2dd8068152669aa8203 to 888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1.
- [Release notes](https://github.com/dtolnay/rust-toolchain/releases)
- [Commits](a54c7afa93...888c2e1ea6)

---
updated-dependencies:
- dependency-name: dtolnay/rust-toolchain
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 17:55:09 +01:00
Mayeul@Zama
1600f8c995 chore: remove trivium from main workspace 2025-03-17 14:22:17 +01:00
Agnes Leroy
5258acc08f fix(gpu): fix the logic of host_compare_with_zero_equality in Cuda to match the CPU 2025-03-17 10:26:38 +01:00
David Testé
912af0e87e chore(ci): install dependencies as standalone job
Installing dependencies several times, due to matrix strategy, leads to job failure.
Now, if the workflow uses the remote instance, the dependencies will be installed only once.
2025-03-14 17:51:47 +01:00
tmontaigu
0fdda14495 fix: upcasting of signed integer when block decomposing
Some parts of the code did not use the correct way to
decompose a clear integer into blocks which could be encrypted
or used in scalar ops.

The sign extension was not always properly done, leading for example
in the encryption of a negative integer stored on a i8 to a
SignedRadixCiphertext with a num_blocks greater than i8 to be incorrect:

```
let ct = cks.encrypt_signed(-1i8, 16) // 2_2 parameters
let d: i32 = cks.decrypt_signed(&ct);
assert_eq!(d, i32::from(-1i8)); // Fails
```

To fix, a BlockDecomposer::with_block_count function is added and used
This function will properly do the sign extension when needed
2025-03-14 17:40:13 +01:00
Carl-Zama
9886256242 feat(core): add glwe keyswitch 2025-03-13 14:45:31 +01:00
Carl-Zama
4b89766011 feat(core): add SliceSignedDecompositionIter 2025-03-13 14:45:31 +01:00
Agnes Leroy
5d3b4438d5 chore(gpu): fix cuda ks_pbs bench and rename workflow files 2025-03-13 14:11:51 +01:00
Agnes Leroy
e62710de12 chore(gpu): add benchmark for gpu pbs128 2025-03-13 14:11:51 +01:00
Agnes Leroy
a2f1825691 fix(gpu): fix bug in mul introduced during noise/degree refactor 2025-03-13 13:26:07 +01:00
Agnes Leroy
dcec10b0cf fix(gpu): fix scalar eq bug 2025-03-13 13:24:33 +01:00
Nicolas Sarlin
573ce0c803 chore(bench): add pbs-stats required feature 2025-03-13 09:34:00 +01:00
Beka Barbakadze
459969e9d2 feat(gpu): Implement 128 bit classic pbs 2025-03-12 22:13:22 +04:00
David Testé
8dadb626f2 chore(ci): add pull-request url to slack notification message
This adds context to Zama developers on slack to quickly go to pull-request if the run emitted from one.
2025-03-12 17:00:30 +01:00
Agnes Leroy
ba1235059a chore(gpu): update error messages about device index in integer/gpu/mod.rs 2025-03-12 12:16:00 +01:00
Agnes Leroy
d53db210de chore(gpu): fix multi-gpu integer throughput bench 2025-03-12 12:16:00 +01:00
Arthur Meyre
2258aa0cbe feat(shortint): add noise squashing capabilities
- noise squashing consists in running a PBS over a large modulus like
2^128 with parameters which ensure a big gap between the plaintext
and the noise, like 50+ bits; this can allow running a noise flooding
step in MPC to protect against certain key recovery attacks
2025-03-12 10:24:20 +01:00
Arthur Meyre
54a7d4b57c feat(shortint): make encoding generic over Scalar to use it for u128 2025-03-12 10:24:20 +01:00
Arthur Meyre
18db93f8fa feat(core)!: support mixed scalar bootstrapping key generation
- make Numeric CastFrom<Self>, this is not breaking as it's equivalent to
From<Self> in rust which is blanket implemented
- mark CastFrom<Self> inline(always) for the implementations I could find
- update APIs for bootstrapping key generation to support having mixed
integer types for both secret keys, i.e. having a u64 input key and
a u128 output key

BREAKING: this change is technically breaking for core
2025-03-12 10:24:20 +01:00
Arthur Meyre
268b5892b7 refactor(core): rename files to avoid potential conflicts with exports 2025-03-12 10:24:20 +01:00
Arthur Meyre
a2beabf003 feat: make PBS 128 implems more flexible with respect to input 2025-03-12 10:24:20 +01:00
Arthur Meyre
311d666042 chore: fix a warning for gpu strings
- we don't currently have strings on GPU and so don't run clippy for them
2025-03-12 10:24:20 +01:00
Arthur Meyre
464f4ef9cf test(shortint): enable some standalone tests using ci_run_filter 2025-03-12 10:24:20 +01:00
David Testé
f8e56c104e chore(ci): fix slack notification message
There was a leftover from first iteration of external contribution management.
2025-03-11 14:20:26 +01:00
Agnes Leroy
adfd8e8c86 fix(gpu): fix ilog2 result when input is 0
This commit reverts ilog2 back to what it was before 00037f3b14.
The implementation on GPU differs from the CPU one though, we need to
dig further.
2025-03-11 13:52:03 +01:00
Agnes Leroy
473a4e383e chore(gpu): add C++ functions to pop/push/insert in radix ciphertext 2025-03-11 12:49:49 +01:00
Agnes Leroy
fca0cca071 chore(gpu): refactor div to track noise level & degree 2025-03-11 12:49:49 +01:00
David Testé
b7d33e6b3f docs: change svg benchmark tables appearance for pbs 2025-03-07 15:46:22 +01:00
Arthur Meyre
b0d7bb9f95 chore: pre-generate keyswitching keys for shortint tests
- we run in a cross process race condition which fucks up the key file
- no rust crate seems to help and linux locks are just a fucking mess
- also avoid truncating file when we are going to write to it, get a lock
first
2025-03-07 13:27:35 +01:00
Nicolas Sarlin
396f30ff5d feat(c_api): add new integer types 2025-03-07 11:07:19 +01:00
Nicolas Sarlin
10b82141eb chore(hl): add a feature for extended types 2025-03-07 11:07:19 +01:00
Nicolas Sarlin
e6e7081c7c feat(js): add new integer types 2025-03-07 11:07:19 +01:00
Nicolas Sarlin
20421747ed feat(hl): add new integer types 2025-03-07 11:07:19 +01:00
Agnes Leroy
59bb7ba35c chore(gpu): do not send slack message for external contributions for signed gpu tests 2025-03-06 13:38:54 +01:00
Agnes Leroy
80a1109260 chore(gpu): fix condition to trigger unsigned gpu test 2025-03-06 13:38:54 +01:00
David Testé
54396370a1 chore(ci): use new heuristic for throughput benchmarks
This is done to load benchmarks machine in smarter way. This makes
sure to saturate compute load of the benchmark machine while
keeping execution time reasonable.

iter_batched() criterion method is used instead of iter() so that
benchmarks are compatible with other flavors of operations
(unchecked_* or smart_*).
2025-03-06 13:26:23 +01:00
Agnes Leroy
3621d12c42 chore(ci): add hourly cost for sxm5 vms 2025-03-06 10:19:49 +01:00
Nicolas Sarlin
1f2e1537fa chore(ci): update tfhe-lints for newer compiler version 2025-03-06 09:48:18 +01:00
Arthur Meyre
52a1191474 chore(ci): force installation of toolchain for tfhe-lints
- also update toolchain.txt to match the tfhe-lint toolchain
2025-03-06 09:48:18 +01:00
Nicolas Sarlin
d06e8d1e87 chore(ci): re-enable tfhe_lints 2025-03-06 09:48:18 +01:00
David Testé
863234d134 docs: change svg benchmark tables appearance
Reduce number of FheUint types displayed in the integer benchmark
tables. Increase policy size and better columns fitting.
Remove link to enlarge image.
2025-03-05 18:41:33 +01:00
David Testé
fcfb77a8c5 chore(ci): fix permanent instance selection condition
Due to 'continue-on-error' directive 'use-permanent-instance' step could not rely on failure() function.
2025-03-05 18:13:20 +01:00
tmontaigu
98d58ada7a fix: BlockDecomposer
The BlockDecomposer gave the possibility when the number of bits per
block was not a multiple of the number of bits in the original integer
to force the extra bits of the last block to a particular value.

However, the way this was done could only work when setting these bits
to 1, when wanting to set them to 0 it would not work.

Good news is that we actually never wanted to set them to 0,
but it should still be fixed for completeness, and allow other
feature to be added without bugs
2025-03-05 14:27:56 +01:00
Agnes Leroy
8962d1f925 chore(gpu): refactor full propagation to track noise / degree 2025-03-05 11:06:30 +01:00
Arthur Meyre
f7655cc749 fix(shortint): make noise_level field of Ciphertext private again
- this is required to make sure we have correctness checks on noise_level
updates if we enable them
2025-03-05 10:16:17 +01:00
Nicolas Sarlin
371e8238db chore(ci): disable dylint until rustup issue is fixed 2025-03-04 15:57:58 +01:00
Beka Barbakadze
c1d534efa4 refactor(gpu): refactor double2 operators to use cuda intrinsics 2025-03-03 17:29:39 +01:00
David Testé
47589ea9a7 chore(bench): run core_crypto benchmarks on all parameters p-fail
This also add KS-PBS benchmarks.
2025-03-03 16:01:17 +01:00
Agnes Leroy
ce327b7b27 chore(gpu): refactor mul/scalar mul to track noise/degree 2025-03-03 13:51:00 +01:00
Arthur Meyre
877d0234ac fix: fix the atomic pattern used to cast in trivium and a test in shortint
- parameters are optimized for a clean ciphertext, the ciphertext being
keyswitched was noisy
2025-03-03 13:10:11 +01:00
dependabot[bot]
f457ac40e5 chore(deps): bump codecov/codecov-action from 5.3.1 to 5.4.0
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 5.3.1 to 5.4.0.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md)
- [Commits](13ce06bfc6...0565863a31)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 11:53:07 +01:00
dependabot[bot]
d9feb57b92 chore(deps): bump slsa-framework/slsa-github-generator
Bumps [slsa-framework/slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) from 2.0.0 to 2.1.0.
- [Release notes](https://github.com/slsa-framework/slsa-github-generator/releases)
- [Changelog](https://github.com/slsa-framework/slsa-github-generator/blob/main/CHANGELOG.md)
- [Commits](https://github.com/slsa-framework/slsa-github-generator/compare/v2.0.0...v2.1.0)

---
updated-dependencies:
- dependency-name: slsa-framework/slsa-github-generator
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 11:52:56 +01:00
dependabot[bot]
fa41fb3ad4 chore(deps): bump actions/cache from 4.2.1 to 4.2.2
Bumps [actions/cache](https://github.com/actions/cache) from 4.2.1 to 4.2.2.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](0c907a75c2...d4323d4df1)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 11:52:45 +01:00
dependabot[bot]
375a482d0b chore(deps): bump actions/download-artifact from 4.1.8 to 4.1.9
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.1.8 to 4.1.9.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](fa0a91b85d...cc20338598)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 11:52:37 +01:00
Beka Barbakadze
7e941b29c1 refactor(gpu): use hexes to initialize twiddles for 64 bit fft 2025-03-03 14:44:12 +04:00
David Testé
3897137a3f chore(ci): fallback on permanent h100 instance on shortage
When a shortage occurs on n3-H100x1 instances on Hyperstack, we'll
fall back on the permanent one registered on GitHub.
This can be done by using 'h100x1' as runner label to run a job on
it.
2025-03-03 11:38:32 +01:00
Beka Barbakadze
3988c85d6b feat(gpu): Implement fft128 in cuda backend 2025-03-03 12:27:46 +04:00
Agnes Leroy
c1bf43eac1 feat(gpu): add a function to set a CudaLweList to 0 2025-02-28 16:46:17 +01:00
Agnes Leroy
95863e1e36 chore(gpu): plug in signed gpu tests in the hl api 2025-02-28 13:42:52 +01:00
Pedro Alves
a508f4cadc fix(gpu): enforce tighter bounds on compression output 2025-02-28 07:12:36 -03:00
Agnes Leroy
dad278cdd3 chore(gpu): fix typo in doc 2025-02-28 11:12:17 +01:00
tmontaigu
699e24f735 docs: rename to README as its needed for link to work 2025-02-28 10:23:46 +01:00
Agnes Leroy
12ed899b34 chore(gpu): trigger long run tests every evening, edit workflow name 2025-02-27 17:22:02 +01:00
David Testé
8565b79a28 chore(ci): switch environment and add fallback for gpu profiles
Switch n3-H100-SXM5x8 to US-1 as CANADA is out of stock on this
instance.
Also L40 instances fallback on n3-RTX-A6000x1 to mitigate
resource shortages issues.
2025-02-27 16:59:04 +01:00
Agnes Leroy
1d7f9f1152 chore(gpu): refactor comparisons to track noise/degree 2025-02-27 16:57:24 +01:00
tmontaigu
3ecdd0d1bc fix(c-api): add missing casts
cast_into FheUint{12, 512, 1024, 2048} were missing from the C API
2025-02-27 16:30:51 +01:00
J-B Orfila
14517ca111 docs: add link in the README 2025-02-27 15:09:41 +01:00
Agnes Leroy
a2eceabd82 fix(gpu): fix scalar comparisons with 1 block 2025-02-27 13:11:36 +01:00
Guillermo Oyarzun
968ab31f27 fix(cpu): fix corner case when estimating the num blocks required 2025-02-27 11:38:17 +01:00
Agnes Leroy
74d5a88f1b chore(gpu): replace asserts with panic 2025-02-27 11:36:59 +01:00
Agnes Leroy
e18ce00f63 chore(gpu): increase 4090 test timeout 2025-02-27 11:27:55 +01:00
tmontaigu
7ec8f901da docs(js): update JS example
The example was still using CompactFheUint32List
which has been removed in favor of the more generic CompactCiphertextList
2025-02-27 10:54:08 +01:00
Arthur Meyre
610406ac27 chore: link CONTRIBUTING.md in the documentation 2025-02-26 16:07:44 +01:00
J-B Orfila
4162ff5b64 docs: security disclaimer updated 2025-02-26 16:07:31 +01:00
J-B Orfila
efd06c5b43 docs: correcting parameter section 2025-02-26 16:07:31 +01:00
Nicolas Sarlin
bd2a488f13 chore(doc): add a doc page about parameters 2025-02-26 16:07:31 +01:00
David Testé
9f48db2a90 chore(ci): fix workflow concurrency condition
Referencing current branch using github.head_ref is a leftover
from handling pull_request_target event. This event being removed,
there is no need to be specific and we can instead use
'github.workflow_ref' which is more robust.
2025-02-26 14:11:42 +01:00
Pedro Alves
f962716fa5 feat(gpu): refactor the sample extract entry point so the user can pass how many LWEs should be extracted per GLWE 2025-02-26 11:58:47 +01:00
Arthur Meyre
ec3f3a1b52 chore(docs): use tilde requirements to minimize breakage on users' end 2025-02-25 17:59:23 +01:00
Arthur Meyre
ab36f36116 chore: update README 2025-02-25 17:59:23 +01:00
David Testé
06638c33d7 chore(ci): add contributing guidance 2025-02-25 17:21:42 +01:00
David Testé
e583212e6d docs: refactor and update benchmarks pages
Benchmarks tables are rendered as descriptive SVG images.
Sort results by backend to have a clearer view in tree of content.
PBS benchmarks now display results for various p-fail and several
precisions.
2025-02-25 12:47:12 +01:00
David Testé
486ec9f053 chore(ci): update cpu aws ami and install git-lfs
Several network errors occurred while trying to install git-lfs
from within backward compatibility tests workflow. Having git-lfs
installed directly in the Amazon Machine Image fixes this issue.
2025-02-25 12:45:47 +01:00
Arthur Meyre
0216e640bf test: make the bound on the base variance check a bit looser
We have seen failures, we need proper confidence intervals on these tests
2025-02-24 17:47:30 +01:00
David Testé
d00224caa3 chore(ci): add should-run to tfhe-fft and tfhe-ntt tests
This is done to avoid testing tfhe-fft/ntt crates if nothing
changes in their source files.
However, these tests would be run unconditionally on each push on
main branch.
2025-02-24 16:35:31 +01:00
dependabot[bot]
bd06971680 chore(deps): bump actions/cache from 4.2.0 to 4.2.1
Bumps [actions/cache](https://github.com/actions/cache) from 4.2.0 to 4.2.1.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](1bd1e32a3b...0c907a75c2)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-24 11:46:53 +01:00
dependabot[bot]
58688cd401 chore(deps): bump actions/upload-artifact from 4.6.0 to 4.6.1
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.0 to 4.6.1.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](65c4c4a1dd...4cec3d8aa0)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-24 11:46:44 +01:00
Agnes Leroy
2757f7209a chore(gpu): update backend readme 2025-02-24 11:22:14 +01:00
Mayeul@Zama
b38b119746 chore(docs): add HL strings documentation 2025-02-24 10:58:29 +01:00
Pedro Alves
219c755a77 fix(gpu): fix wrong number of blocks used in cast 2025-02-21 20:09:54 -03:00
Mayeul@Zama
fc4abd5fb1 chore: update toolchain 2025-02-21 15:03:23 +01:00
Guillermo Oyarzun
5de1445cbf fix(gpu): fix wrong assert in division 2025-02-21 11:27:03 +01:00
Yuxi Zhao
6b21bff1e8 chore(docs): improve navigation 2025-02-20 17:29:36 +01:00
Arthur Meyre
a1dc260fb2 chore(ci): make md doctest checker a bit more versatile on user errors 2025-02-20 17:29:36 +01:00
David Testé
5d9af12f6e chore(ci): fix release workflow for tfhe-versionable
tfhe-versionable crate depends on tfhe-versionable-derive.
The workflow now ensures that the derive crate is published before
attempting to package tfhe-versionable.

Dry-run option is removed since it cannot be used correctly due to
the reason aforementioned.
2025-02-20 11:44:58 +01:00
Guillermo Oyarzun
32c93876d7 feat(gpu): enable division in high level api 2025-02-20 10:33:07 +01:00
Guillermo Oyarzun
bede76be82 feat(gpu): enable if then else for boolean ciphertexts in hlapi 2025-02-19 12:50:38 +01:00
Guillermo Oyarzun
508713f926 fix(gpu): enable large integers for the classical pbs flavors 2025-02-19 06:52:49 -03:00
Guillermo Oyarzun
6d7b32dd0a fix(gpu): enable large integers other multi bit pbs 2025-02-19 06:52:49 -03:00
Pedro Alves
15f7ba20aa fix(gpu): Remove unnecessary and incorrect bound check for decompression
Removed unnecessary bounds check for the number of LWEs against polynomial size.
2025-02-19 06:17:11 -03:00
607 changed files with 63865 additions and 18304 deletions

View File

@@ -11,6 +11,8 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -51,7 +53,7 @@ jobs:
name: Backward compatibility tests
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
@@ -62,18 +64,14 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
- name: Install git-lfs
run: |
sudo apt update && sudo apt -y install git-lfs
- name: Use specific data branch
if: ${{ contains(github.event.pull_request.labels.*.name, 'data_PR') }}
env:
PR_BRANCH: ${{ github.head_ref || github.ref_name }}
PR_BRANCH: ${{ github.ref_name }}
run: |
echo "BACKWARD_COMPAT_DATA_BRANCH=${PR_BRANCH}" >> "${GITHUB_ENV}"
@@ -83,7 +81,23 @@ jobs:
BRANCH="$(make backward_compat_branch)"
echo "branch=${BRANCH}" >> "${GITHUB_OUTPUT}"
- name: Get backward compat branch head SHA
id: backward_compat_sha
env:
REPO_URL: "https://github.com/zama-ai/tfhe-backward-compat-data"
run: |
SHA=$(git ls-remote ${{ env.REPO_URL }} refs/heads/${{ steps.backward_compat_branch.outputs.branch }} | awk '{print $1}')
echo "sha=${SHA}" >> "${GITHUB_OUTPUT}"
- name: Retrieve data from cache
id: retrieve-data-cache
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
with:
path: tests/tfhe-backward-compat-data
key: ${{ steps.backward_compat_branch.outputs.branch }}_${{ steps.backward_compat_sha.outputs.sha }}
- name: Clone test data
if: steps.retrieve-data-cache.outputs.cache-hit != 'true'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
persist-credentials: 'false'
@@ -96,13 +110,26 @@ jobs:
run: |
make test_backward_compatibility_ci
- name: Store data in cache
if: steps.retrieve-data-cache.outputs.cache-hit != 'true'
continue-on-error: true
uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
with:
path: tests/tfhe-backward-compat-data
key: ${{ steps.backward_compat_branch.outputs.branch }}_${{ steps.backward_compat_sha.outputs.sha }}
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Backward compatibility tests finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Backward compatibility tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (backward-compat-tests)
@@ -127,4 +154,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (backward-compat-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (backward-compat-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,7 +11,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -63,7 +65,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
dependencies:
@@ -158,7 +160,7 @@ jobs:
name: Fast CPU tests
needs: [ should-run, setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
@@ -169,7 +171,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -209,7 +211,7 @@ jobs:
- name: Node cache restoration
id: node-cache
uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 #v4.2.0
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
with:
path: |
~/.nvm
@@ -222,7 +224,7 @@ jobs:
make install_node
- name: Node cache save
uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 #v4.2.0
uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |
@@ -264,13 +266,18 @@ jobs:
run: |
make test_zk
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Slack Notification
if: ${{ failure() && env.SECRETS_AVAILABLE == 'true' }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Fast AWS tests finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Fast AWS tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (fast-tests)
@@ -295,4 +302,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (fast-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (fast-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -10,6 +10,8 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
PULL_REQUEST_MD_LINK: ""
# We clear the cache to reduce memory pressure because of the numerous processes of cargo
# nextest
TFHE_RS_CLEAR_IN_MEMORY_KEY_CACHE: "1"
@@ -50,7 +52,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
integer:
@@ -98,7 +100,7 @@ jobs:
name: Unsigned integer tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
@@ -109,7 +111,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -134,13 +136,18 @@ jobs:
run: |
AVX512_SUPPORT=ON NO_BIG_PARAMS=${{ env.NO_BIG_PARAMS }} BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_ci
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Unsigned Integer tests finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Unsigned Integer tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (unsigned-integer-tests)
@@ -165,4 +172,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (unsigned-integer-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (unsigned-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -10,6 +10,8 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
PULL_REQUEST_MD_LINK: ""
# We clear the cache to reduce memory pressure because of the numerous processes of cargo
# nextest
TFHE_RS_CLEAR_IN_MEMORY_KEY_CACHE: "1"
@@ -51,7 +53,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
integer:
@@ -99,7 +101,7 @@ jobs:
name: Signed integer tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
@@ -110,7 +112,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -139,13 +141,18 @@ jobs:
run: |
AVX512_SUPPORT=ON NO_BIG_PARAMS=${{ env.NO_BIG_PARAMS }} BIG_TESTS_INSTANCE=TRUE make test_signed_integer_ci
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Signed Integer tests finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Signed Integer tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (signed-integer-tests)
@@ -170,4 +177,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (signed-integer-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (signed-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -10,7 +10,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -72,7 +74,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
dependencies:
@@ -169,7 +171,7 @@ jobs:
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
needs: [ should-run, setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{github.event_name}}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}_${{github.event_name}}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
@@ -180,7 +182,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -246,13 +248,18 @@ jobs:
make test_trivium
make test_kreyvium
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CPU tests finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "CPU tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cpu-tests)
@@ -277,4 +284,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cpu-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cpu-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -10,6 +10,8 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -52,7 +54,7 @@ jobs:
name: WASM tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
@@ -63,7 +65,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -73,7 +75,7 @@ jobs:
- name: Node cache restoration
id: node-cache
uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 #v4.2.0
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
with:
path: |
~/.nvm
@@ -86,7 +88,7 @@ jobs:
make install_node
- name: Node cache save
uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 #v4.2.0
uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |
@@ -115,13 +117,18 @@ jobs:
run: |
make test_zk_wasm_x86_compat_ci
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "WASM tests finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "WASM tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (wasm-tests)
@@ -146,4 +153,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (wasm-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (wasm-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -43,7 +43,7 @@ jobs:
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
steps:
@@ -63,7 +63,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -94,7 +94,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_boolean
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -43,7 +43,7 @@ jobs:
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
steps:
- name: Checkout tfhe-rs repo with tags
@@ -62,12 +62,13 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
- name: Run benchmarks with AVX512
run: |
make bench_ks_pbs
make bench_pbs
make bench_pbs128
make bench_ks
@@ -85,7 +86,7 @@ jobs:
--walk-subdirs
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_core_crypto
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -43,7 +43,7 @@ jobs:
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
timeout-minutes: 720 # 12 hours
@@ -64,7 +64,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -99,7 +99,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_erc20
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -26,6 +26,7 @@ on:
- integer_multi_bit
- integer_compression
- pbs
- pbs128
- ks
op_flavor:
description: "Operations set to run"
@@ -68,7 +69,7 @@ jobs:
run-benchmarks:
name: Run benchmarks
needs: parse-inputs
uses: ./.github/workflows/benchmark_gpu_integer_common.yml
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: ${{ needs.parse-inputs.outputs.profile }}
hardware_name: ${{ needs.parse-inputs.outputs.hardware_name }}

View File

@@ -29,7 +29,7 @@ jobs:
github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs' ||
contains(github.event.label.name, '4090_bench') }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}_cuda_integer_bench
group: ${{ github.workflow_ref }}_cuda_integer_bench
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ["self-hosted", "4090-desktop"]
timeout-minutes: 1440 # 24 hours
@@ -51,7 +51,7 @@ jobs:
echo "FAST_BENCH=TRUE" >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -80,7 +80,7 @@ jobs:
--walk-subdirs
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_integer_multi_bit_gpu_default
path: ${{ env.RESULTS_FILENAME }}
@@ -97,14 +97,14 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Integer RTX 4090 full benchmarks finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Integer RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
cuda-core-crypto-benchmarks:
name: Cuda core crypto benchmarks (RTX 4090)
if: ${{ github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' || contains(github.event.label.name, '4090_bench') }}
needs: cuda-integer-benchmarks
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}_cuda_core_crypto_bench
group: ${{ github.workflow_ref }}_cuda_core_crypto_bench
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ["self-hosted", "4090-desktop"]
timeout-minutes: 1440 # 24 hours
@@ -126,7 +126,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -157,7 +157,7 @@ jobs:
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_core_crypto
path: ${{ env.RESULTS_FILENAME }}
@@ -182,7 +182,7 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Core crypto RTX 4090 full benchmarks finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Core crypto RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
remove_github_label:
name: Remove 4090 bench label

View File

@@ -1,4 +1,4 @@
# Run integer benchmarks on CUDA instance and return parsed results to Slab CI bot.
# Run benchmarks on CUDA instance and return parsed results to Slab CI bot.
name: Cuda benchmarks - common
on:
@@ -114,10 +114,16 @@ jobs:
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
# Use permanent remote instance label first as on-demand remote instance label output is set before the end of start-remote-instance step.
# If the latter fails due to a failed GitHub action runner set up, we have to fallback on the permanent instance.
# Since the on-demand remote label is set before failure, we have to do the logical OR in this order,
# otherwise we'll try to run the next job on a non-existing on-demand instance.
runner-name: ${{ steps.use-permanent-instance.outputs.runner_group || steps.start-remote-instance.outputs.label }}
remote-instance-outcome: ${{ steps.start-remote-instance.outcome }}
steps:
- name: Start instance
id: start-instance
- name: Start remote instance
id: start-remote-instance
continue-on-error: true
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
@@ -127,9 +133,43 @@ jobs:
backend: ${{ inputs.backend }}
profile: ${{ inputs.profile }}
# This will allow to fallback on permanent instances running on Hyperstack.
- name: Use permanent remote instance
id: use-permanent-instance
if: steps.start-remote-instance.outcome == 'failure' &&
inputs.profile == 'single-h100'
run: |
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
# Install dependencies only once since cuda-benchmarks uses a matrix strategy, thus running multiple times.
install-dependencies:
name: Install dependencies
needs: [ setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
matrix:
# explicit include-based build matrix, of known valid options
include:
- cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
if: needs.setup-instance.outputs.remote-instance-outcome == 'success'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
gcc-version: ${{ matrix.gcc }}
cuda-benchmarks:
name: Cuda benchmarks (${{ inputs.profile }})
needs: [ prepare-matrix, setup-instance ]
needs: [ prepare-matrix, setup-instance, install-dependencies ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440 # 24 hours
continue-on-error: true
@@ -140,10 +180,8 @@ jobs:
command: ${{ fromJSON(needs.prepare-matrix.outputs.command) }}
op_flavor: ${{ fromJSON(needs.prepare-matrix.outputs.op_flavor) }}
bench_type: ${{ fromJSON(needs.prepare-matrix.outputs.bench_type) }}
# explicit include-based build matrix, of known valid options
include:
- os: ubuntu-22.04
cuda: "12.2"
- cuda: "12.2"
gcc: 11
steps:
- name: Checkout tfhe-rs repo with tags
@@ -153,12 +191,6 @@ jobs:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
gcc-version: ${{ matrix.gcc }}
- name: Get benchmark details
run: |
{
@@ -167,8 +199,28 @@ jobs:
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
# Re-export environment variables as dependencies setup perform this task in the previous job.
# Local env variables are cleaned at the end of each job.
- name: Export CUDA variables
shell: bash
run: |
CUDA_PATH=/usr/local/cuda-${{ matrix.cuda }}
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "PATH=$PATH:$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib64:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDA_MODULE_LOADER=EAGER" >> "${GITHUB_ENV}"
- name: Export gcc and g++ variables
shell: bash
run: |
{
echo "CC=/usr/bin/gcc-${{ matrix.gcc }}";
echo "CXX=/usr/bin/g++-${{ matrix.gcc }}";
echo "CUDAHOSTCXX=/usr/bin/g++-${{ matrix.gcc }}";
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -196,7 +248,7 @@ jobs:
--bench-type ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ inputs.profile }}
path: ${{ env.RESULTS_FILENAME }}
@@ -230,7 +282,7 @@ jobs:
teardown-instance:
name: Teardown instance (cuda-${{ inputs.profile }}-benchmarks)
if: ${{ always() && needs.setup-instance.result == 'success' }}
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-benchmarks, slack-notify ]
runs-on: ubuntu-latest
steps:

View File

@@ -1,5 +1,5 @@
# Run core crypto benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Core crypto GPU benchmarks
name: Cuda - Core crypto benchmarks
on:
workflow_dispatch:
@@ -23,10 +23,16 @@ jobs:
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
# Use permanent remote instance label first as on-demand remote instance label output is set before the end of start-remote-instance step.
# If the latter fails due to a failed GitHub action runner set up, we have to fallback on the permanent instance.
# Since the on-demand remote label is set before failure, we have to do the logical OR in this order,
# otherwise we'll try to run the next job on a non-existing on-demand instance.
runner-name: ${{ steps.use-permanent-instance.outputs.runner_group || steps.start-remote-instance.outputs.label }}
remote-instance-outcome: ${{ steps.start-remote-instance.outcome }}
steps:
- name: Start instance
id: start-instance
- name: Start remote instance
id: start-remote-instance
continue-on-error: true
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
@@ -36,6 +42,13 @@ jobs:
backend: hyperstack
profile: single-h100
# This will allow to fallback on permanent instances running on Hyperstack.
- name: Use permanent remote instance
id: use-permanent-instance
if: env.SECRETS_AVAILABLE == 'true' && steps.start-remote-instance.outcome == 'failure'
run: |
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
cuda-core-crypto-benchmarks:
name: Execute GPU core crypto benchmarks
needs: setup-instance
@@ -57,6 +70,7 @@ jobs:
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
if: needs.setup-instance.outputs.remote-instance-outcome == 'success'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
@@ -71,13 +85,15 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
- name: Run benchmarks with AVX512
run: |
make bench_ks_pbs_gpu
make bench_pbs_gpu
make bench_pbs128_gpu
make bench_ks_gpu
- name: Parse results
@@ -94,7 +110,7 @@ jobs:
--walk-subdirs
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_core_crypto
path: ${{ env.RESULTS_FILENAME }}
@@ -128,7 +144,7 @@ jobs:
teardown-instance:
name: Teardown instance (cuda-integer-full-benchmarks)
if: ${{ always() && needs.setup-instance.result == 'success' }}
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-core-crypto-benchmarks, slack-notify ]
runs-on: ubuntu-latest
steps:

View File

@@ -50,10 +50,16 @@ jobs:
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
# Use permanent remote instance label first as on-demand remote instance label output is set before the end of start-remote-instance step.
# If the latter fails due to a failed GitHub action runner set up, we have to fallback on the permanent instance.
# Since the on-demand remote label is set before failure, we have to do the logical OR in this order,
# otherwise we'll try to run the next job on a non-existing on-demand instance.
runner-name: ${{ steps.use-permanent-instance.outputs.runner_group || steps.start-remote-instance.outputs.label }}
remote-instance-outcome: ${{ steps.start-remote-instance.outcome }}
steps:
- name: Start instance
id: start-instance
- name: Start remote instance
id: start-remote-instance
continue-on-error: true
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
@@ -63,6 +69,15 @@ jobs:
backend: ${{ inputs.backend }}
profile: ${{ inputs.profile }}
# This will allow to fallback on permanent instances running on Hyperstack.
- name: Use permanent remote instance
id: use-permanent-instance
if: env.SECRETS_AVAILABLE == 'true' &&
steps.start-remote-instance.outcome == 'failure' &&
inputs.profile == 'single-h100'
run: |
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
cuda-erc20-benchmarks:
name: Cuda ERC20 benchmarks (${{ inputs.profile }})
needs: setup-instance
@@ -84,6 +99,7 @@ jobs:
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
if: needs.setup-instance.outputs.remote-instance-outcome == 'success'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
@@ -98,7 +114,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -120,7 +136,7 @@ jobs:
--name-suffix avx512
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_erc20_${{ inputs.profile }}
path: ${{ env.RESULTS_FILENAME }}
@@ -154,7 +170,7 @@ jobs:
teardown-instance:
name: Teardown instance (cuda-erc20-${{ inputs.profile }}-benchmarks)
if: ${{ always() && needs.setup-instance.result == 'success' }}
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-erc20-benchmarks, slack-notify ]
runs-on: ubuntu-latest
steps:

View File

@@ -10,7 +10,7 @@ jobs:
run-benchmarks-1-h100:
name: Run benchmarks (1xH100)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_integer_common.yml
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: single-h100
hardware_name: n3-H100x1
@@ -23,7 +23,7 @@ jobs:
run-benchmarks-2-h100:
name: Run benchmarks (2xH100)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_integer_common.yml
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: 2-h100
hardware_name: n3-H100x2
@@ -36,7 +36,7 @@ jobs:
run-benchmarks-8-h100:
name: Run benchmarks (8xH100)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_integer_common.yml
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: multi-h100
hardware_name: n3-H100x8
@@ -49,7 +49,7 @@ jobs:
run-benchmarks-l40:
name: Run benchmarks (L40)
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_integer_common.yml
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
profile: l40
hardware_name: n3-L40x1

View File

@@ -104,7 +104,7 @@ jobs:
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
timeout-minutes: 1440 # 24 hours
@@ -131,7 +131,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -172,7 +172,7 @@ jobs:
--bench-type ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ matrix.bench_type }}
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -70,7 +70,7 @@ jobs:
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
strategy:
@@ -94,7 +94,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -138,7 +138,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_shortint_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -104,7 +104,7 @@ jobs:
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
continue-on-error: true
timeout-minutes: 1440 # 24 hours
@@ -131,7 +131,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -166,7 +166,7 @@ jobs:
--bench-type ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ matrix.bench_type }}
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -45,7 +45,7 @@ jobs:
name: Execute FFT benchmarks in EC2
needs: setup-ec2
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:
@@ -84,7 +84,7 @@ jobs:
--name-suffix avx512
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_fft
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -45,7 +45,7 @@ jobs:
name: Execute NTT benchmarks in EC2
needs: setup-ec2
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
steps:
@@ -84,7 +84,7 @@ jobs:
--name-suffix avx512
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_ntt
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -45,7 +45,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
zk_pok:
@@ -80,7 +80,7 @@ jobs:
if: needs.setup-instance.result != 'skipped'
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{github.event_name}}_${{ github.ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow_ref }}_${{github.event_name}}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
@@ -100,7 +100,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -132,7 +132,7 @@ jobs:
--bench-type ${{ env.BENCH_TYPE }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_tfhe_zk_pok
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -41,7 +41,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
wasm_bench:
@@ -100,7 +100,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -110,7 +110,7 @@ jobs:
- name: Node cache restoration
id: node-cache
uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 #v4.2.0
uses: actions/cache/restore@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
with:
path: |
~/.nvm
@@ -123,7 +123,7 @@ jobs:
make install_node
- name: Node cache save
uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 #v4.2.0
uses: actions/cache/save@5a3ec84eff668545956fd18022155c47e93e2684 #v4.2.3
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |
@@ -167,7 +167,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_wasm_${{ matrix.browser }}
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -48,7 +48,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
zk_pok:
@@ -118,7 +118,7 @@ jobs:
if: needs.setup-instance.result != 'skipped'
needs: [ prepare-matrix, setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{github.event_name}}_${{ github.ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
group: ${{ github.workflow_ref }}_${{github.event_name}}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -142,7 +142,7 @@ jobs:
} >> "${GITHUB_ENV}"
- name: Install rust
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: nightly
@@ -179,7 +179,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
with:
name: ${{ github.sha }}_integer_zk
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -28,7 +28,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable

View File

@@ -3,16 +3,46 @@ name: Cargo Test tfhe-fft
on:
pull_request:
push:
branches:
- main
env:
CARGO_TERM_COLOR: always
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: read
outputs:
fft_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.fft_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 0
persist-credentials: 'false'
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
fft:
- tfhe/Cargo.toml
- Makefile
- tfhe-fft/**
- '.github/workflows/cargo_test_fft.yml'
cargo-tests-fft:
needs: should-run
if: needs.should-run.outputs.fft_test == 'true'
runs-on: ${{ matrix.runner_type }}
strategy:
matrix:
@@ -39,6 +69,8 @@ jobs:
make test_fft_no_std
cargo-tests-fft-nightly:
needs: should-run
if: needs.should-run.outputs.fft_test == 'true'
runs-on: ${{ matrix.runner_type }}
strategy:
matrix:
@@ -61,7 +93,9 @@ jobs:
make test_fft_no_std_nightly
cargo-tests-fft-node-js:
runs-on: "ubuntu-latest"
needs: should-run
if: needs.should-run.outputs.fft_test == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
@@ -69,3 +103,30 @@ jobs:
run: |
make install_node
make test_fft_node_js_ci
cargo-tests-fft-successful:
needs: [ should-run, cargo-tests-fft, cargo-tests-fft-nightly, cargo-tests-fft-node-js ]
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
- name: Tests do not need to run
if: needs.should-run.outputs.fft_test == 'false'
run: |
echo "tfhe-fft files haven't changed tests don't need to run"
- name: Check all tests passed
if: needs.should-run.outputs.fft_test == 'true' &&
needs.cargo-tests-fft.result == 'success' &&
needs.cargo-tests-fft-nightly.result == 'success' &&
needs.cargo-tests-fft-node-js.result == 'success'
run: |
echo "All tfhe-fft test passed"
- name: Check tests failure
if: needs.should-run.outputs.fft_test == 'true' &&
(needs.cargo-tests-fft.result != 'success' ||
needs.cargo-tests-fft-nightly.result != 'success' ||
needs.cargo-tests-fft-node-js.result != 'success')
run: |
echo "Some tfhe-fft tests failed"
exit 1

View File

@@ -3,16 +3,46 @@ name: Cargo Test tfhe-ntt
on:
pull_request:
push:
branches:
- main
env:
CARGO_TERM_COLOR: always
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
jobs:
jobs:
should-run:
runs-on: ubuntu-latest
permissions:
pull-requests: read
outputs:
ntt_test: ${{ env.IS_PULL_REQUEST == 'false' || steps.changed-files.outputs.ntt_any_changed }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
with:
fetch-depth: 0
persist-credentials: 'false'
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
ntt:
- tfhe/Cargo.toml
- Makefile
- tfhe-ntt/**
- '.github/workflows/cargo_test_ntt.yml'
cargo-tests-ntt:
needs: should-run
if: needs.should-run.outputs.ntt_test == 'true'
runs-on: ${{ matrix.os }}
strategy:
matrix:
@@ -34,6 +64,8 @@ jobs:
run: make test_ntt_no_std
cargo-tests-ntt-nightly:
needs: should-run
if: needs.should-run.outputs.ntt_test == 'true'
runs-on: ${{ matrix.os }}
strategy:
matrix:
@@ -52,3 +84,28 @@ jobs:
- name: Test no-std nightly
run: make test_ntt_no_std_nightly
cargo-tests-ntt-successful:
needs: [ should-run, cargo-tests-ntt, cargo-tests-ntt-nightly ]
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
- name: Tests do not need to run
if: needs.should-run.outputs.ntt_test == 'false'
run: |
echo "tfhe-ntt files haven't changed tests don't need to run"
- name: Check all tests success
if: needs.should-run.outputs.ntt_test == 'true' &&
needs.cargo-tests-ntt.result == 'success' &&
needs.cargo-tests-ntt-nightly.result == 'success'
run: |
echo "All tfhe-ntt tests passed"
- name: Check tests failure
if: needs.should-run.outputs.ntt_test == 'true' &&
(needs.cargo-tests-ntt.result != 'success' ||
needs.cargo-tests-ntt-nightly.result != 'success')
run: |
echo "Some tfhe-ntt tests failed"
exit 1

View File

@@ -38,7 +38,7 @@ jobs:
name: Code coverage tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.event_name }}_${{ github.ref }}
group: ${{ github.workflow_ref }}_${{ github.event_name }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 5760 # 4 days
@@ -47,13 +47,13 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
tfhe:
@@ -83,7 +83,7 @@ jobs:
make test_shortint_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@13ce06bfc6bbe3ecf90edbbf1bc32fe5978ca1d3
uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
@@ -97,7 +97,7 @@ jobs:
make test_integer_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@13ce06bfc6bbe3ecf90edbbf1bc32fe5978ca1d3
uses: codecov/codecov-action@0565863a31f2c772f9f0395002a31e3f06189574
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}

View File

@@ -52,7 +52,7 @@ jobs:
name: CSPRNG randomness tests
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
@@ -63,7 +63,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -77,7 +77,7 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-csprng randomness check finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "tfhe-csprng randomness check finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (csprng-randomness-tests)
@@ -102,4 +102,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (csprng-randomness-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (csprng-randomness-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -8,7 +8,7 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
PR_BRANCH: ${{ github.head_ref || github.ref_name }}
PR_BRANCH: ${{ github.ref_name }}
CLOSE_TYPE: ${{ github.event.pull_request.merged && 'merge' || 'close' }}
# only trigger on pull request closed events

View File

@@ -29,9 +29,10 @@ jobs:
contains(github.event.label.name, '4090_test') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ["self-hosted", "4090-desktop"]
timeout-minutes: 1440 # 24 hours
steps:
- name: Checkout tfhe-rs
@@ -41,7 +42,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -81,4 +82,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CUDA RTX 4090 tests finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "CUDA RTX 4090 tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,7 +11,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -40,7 +42,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -68,11 +70,17 @@ jobs:
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.gpu_test == 'true')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
# Use permanent remote instance label first as on-demand remote instance label output is set before the end of start-remote-instance step.
# If the latter fails due to a failed GitHub action runner set up, we have to fallback on the permanent instance.
# Since the on-demand remote label is set before failure, we have to do the logical OR in this order,
# otherwise we'll try to run the next job on a non-existing on-demand instance.
runner-name: ${{ steps.use-permanent-instance.outputs.runner_group || steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
remote-instance-outcome: ${{ steps.start-remote-instance.outcome }}
steps:
- name: Start remote instance
id: start-remote-instance
if: env.SECRETS_AVAILABLE == 'true'
continue-on-error: true
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
@@ -82,6 +90,13 @@ jobs:
backend: hyperstack
profile: single-h100
# This will allow to fallback on permanent instances running on Hyperstack.
- name: Use permanent remote instance
id: use-permanent-instance
if: env.SECRETS_AVAILABLE == 'true' && steps.start-remote-instance.outcome == 'failure'
run: |
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
# This instance will be spawned especially for pull-request from forked repository
- name: Start GitHub instance
id: start-github-instance
@@ -95,7 +110,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -114,6 +129,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
if: needs.setup-instance.outputs.remote-instance-outcome == 'success'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
@@ -121,7 +137,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -150,16 +166,21 @@ jobs:
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Fast H100 tests finished with status: ${{ needs.cuda-tests-linux.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Fast H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
if: ${{ always() && needs.setup-instance.result == 'success' }}
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
@@ -180,4 +201,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,7 +11,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -39,7 +41,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -93,7 +95,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -119,7 +121,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -148,12 +150,17 @@ jobs:
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Base GPU tests finished with status: ${{ needs.cuda-tests-linux.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Base GPU tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
@@ -178,4 +185,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -20,10 +20,16 @@ jobs:
name: Setup instance (cuda-h100-tests)
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
# Use permanent remote instance label first as on-demand remote instance label output is set before the end of start-remote-instance step.
# If the latter fails due to a failed GitHub action runner set up, we have to fallback on the permanent instance.
# Since the on-demand remote label is set before failure, we have to do the logical OR in this order,
# otherwise we'll try to run the next job on a non-existing on-demand instance.
runner-name: ${{ steps.use-permanent-instance.outputs.runner_group || steps.start-remote-instance.outputs.label }}
remote-instance-outcome: ${{ steps.start-remote-instance.outcome }}
steps:
- name: Start instance
id: start-instance
- name: Start remote instance
id: start-remote-instance
continue-on-error: true
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
@@ -33,11 +39,18 @@ jobs:
backend: hyperstack
profile: single-h100
# This will allow to fallback on permanent instances running on Hyperstack.
- name: Use permanent remote instance
id: use-permanent-instance
if: env.SECRETS_AVAILABLE == 'true' && steps.start-remote-instance.outcome == 'failure'
run: |
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA H100 tests
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -68,13 +81,14 @@ jobs:
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
if: needs.setup-instance.outputs.remote-instance-outcome == 'success'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
gcc-version: ${{ matrix.gcc }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -105,10 +119,11 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Full H100 tests finished with status: ${{ needs.cuda-tests-linux.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Full H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
@@ -128,4 +143,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,7 +11,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -40,7 +42,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -95,7 +97,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -121,7 +123,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -153,12 +155,17 @@ jobs:
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Multi-GPU tests finished with status: ${{ needs.cuda-tests-linux.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Multi-GPU tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests-multi-gpu)
@@ -183,4 +190,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-tests-multi-gpu) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-tests-multi-gpu) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,4 +1,4 @@
name: Long Run Tests on GPU
name: Cuda - Long Run Tests on GPU
env:
CARGO_TERM_COLOR: always
@@ -15,8 +15,8 @@ on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
schedule:
# Weekly tests will be triggered each Friday at 9p.m.
- cron: "0 21 * * 5"
# Nightly tests will be triggered each evening 8p.m.
- cron: "0 20 * * *"
jobs:
setup-instance:
@@ -42,7 +42,7 @@ jobs:
name: Long run GPU tests
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{github.event_name}}_${{ github.ref }}
group: ${{ github.workflow_ref }}_${{github.event_name}}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -65,7 +65,7 @@ jobs:
gcc-version: ${{ matrix.gcc }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable

View File

@@ -11,6 +11,8 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -49,7 +51,7 @@ jobs:
name: CUDA post-commit checks
needs: setup-instance
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -81,7 +83,7 @@ jobs:
sudo apt -y install "cuda-toolkit-${TOOLKIT_VERSION}" cmake-format
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -111,13 +113,18 @@ jobs:
run: |
make pcc_gpu
- name: Set pull-request URL
if: ${{ failure() && github.event_name == 'pull_request' }}
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Slack Notification
if: ${{ failure() && env.SECRETS_AVAILABLE == 'true' }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "CUDA AWS post-commit checks finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "CUDA AWS post-commit checks finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-pcc)
@@ -142,4 +149,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-pcc) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-pcc) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,7 +11,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -40,7 +42,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -95,7 +97,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -121,7 +123,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -136,12 +138,17 @@ jobs:
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Integer GPU signed integer tests with classical PBS finished with status: ${{ needs.cuda-tests-linux.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Integer GPU signed integer tests with classical PBS finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-signed-classic-tests)
@@ -166,4 +173,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-signed-classic-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-signed-classic-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,7 +11,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -41,7 +43,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -69,11 +71,17 @@ jobs:
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.gpu_test == 'true')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
# Use permanent remote instance label first as on-demand remote instance label output is set before the end of start-remote-instance step.
# If the latter fails due to a failed GitHub action runner set up, we have to fallback on the permanent instance.
# Since the on-demand remote label is set before failure, we have to do the logical OR in this order,
# otherwise we'll try to run the next job on a non-existing on-demand instance.
runner-name: ${{ steps.use-permanent-instance.outputs.runner_group || steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
remote-instance-outcome: ${{ steps.start-remote-instance.outcome }}
steps:
- name: Start remote instance
id: start-remote-instance
if: env.SECRETS_AVAILABLE == 'true'
continue-on-error: true
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
@@ -83,6 +91,13 @@ jobs:
backend: hyperstack
profile: single-h100
# This will allow to fallback on permanent instances running on Hyperstack.
- name: Use permanent remote instance
id: use-permanent-instance
if: env.SECRETS_AVAILABLE == 'true' && steps.start-remote-instance.outcome == 'failure'
run: |
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
# This instance will be spawned especially for pull-request from forked repository
- name: Start GitHub instance
id: start-github-instance
@@ -96,7 +111,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -115,6 +130,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
if: needs.setup-instance.outputs.remote-instance-outcome == 'success'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
@@ -122,7 +138,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -137,16 +153,21 @@ jobs:
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
if: ${{ always() && needs.setup-instance.result == 'success' }}
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
@@ -167,4 +188,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,9 +11,11 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
FAST_TESTS: TRUE
NIGHTLY_TESTS: FALSE
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -44,7 +46,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -63,6 +65,7 @@ jobs:
- '.github/workflows/gpu_signed_integer_tests.yml'
- scripts/integer-tests.sh
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-signed-integer-tests)
runs-on: ubuntu-latest
@@ -98,7 +101,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -108,7 +111,7 @@ jobs:
include:
- os: ubuntu-22.04
cuda: "12.2"
gcc: 11
gcc: 11
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
@@ -124,7 +127,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -147,11 +150,17 @@ jobs:
if: ${{ always() && needs.cuda-signed-integer-tests.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-signed-integer-tests.result }}
SLACK_MESSAGE: "Base GPU tests finished with status: ${{ needs.cuda-signed-integer-tests.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Signed GPU tests finished with status: ${{ needs.cuda-signed-integer-tests.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
@@ -176,4 +185,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-signed-integer-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-signed-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,7 +11,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -41,7 +43,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -96,7 +98,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -122,7 +124,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -137,12 +139,17 @@ jobs:
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Unsigned integer GPU classic tests finished with status: ${{ needs.cuda-tests-linux.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Unsigned integer GPU classic tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-unsigned-classic-tests)
@@ -167,4 +174,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-unsigned-classic-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-unsigned-classic-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,7 +11,9 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -40,7 +42,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -68,11 +70,17 @@ jobs:
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.gpu_test == 'true')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
# Use permanent remote instance label first as on-demand remote instance label output is set before the end of start-remote-instance step.
# If the latter fails due to a failed GitHub action runner set up, we have to fallback on the permanent instance.
# Since the on-demand remote label is set before failure, we have to do the logical OR in this order,
# otherwise we'll try to run the next job on a non-existing on-demand instance.
runner-name: ${{ steps.use-permanent-instance.outputs.runner_group || steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
remote-instance-outcome: ${{ steps.start-remote-instance.outcome }}
steps:
- name: Start remote instance
id: start-remote-instance
if: env.SECRETS_AVAILABLE == 'true'
continue-on-error: true
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
@@ -82,6 +90,13 @@ jobs:
backend: hyperstack
profile: single-h100
# This will allow to fallback on permanent instances running on Hyperstack.
- name: Use permanent remote instance
id: use-permanent-instance
if: env.SECRETS_AVAILABLE == 'true' && steps.start-remote-instance.outcome == 'failure'
run: |
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
# This instance will be spawned especially for pull-request from forked repository
- name: Start GitHub instance
id: start-github-instance
@@ -95,7 +110,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -114,6 +129,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
if: needs.setup-instance.outputs.remote-instance-outcome == 'success'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
@@ -121,7 +137,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -136,16 +152,21 @@ jobs:
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-tests-linux.result }}
SLACK_MESSAGE: "Unsigned integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Unsigned integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
if: ${{ always() && needs.setup-instance.result == 'success' }}
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
steps:
@@ -166,4 +187,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-h100-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -11,8 +11,11 @@ env:
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
FAST_TESTS: TRUE
NIGHTLY_TESTS: FALSE
IS_PULL_REQUEST: ${{ github.event_name == 'pull_request' }}
PULL_REQUEST_MD_LINK: ""
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
# Secrets will be available only to zama-ai organization members
SECRETS_AVAILABLE: ${{ secrets.JOB_SECRET != '' }}
@@ -22,7 +25,6 @@ on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
types: [ labeled ]
schedule:
# Nightly tests @ 1AM after each work day
- cron: "0 1 * * MON-FRI"
@@ -44,7 +46,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@dcc7a0cba800f454d79fff4b993e8c3555bcc0a8
uses: tj-actions/changed-files@26a38635fc1173cc5820336ce97be6188d0de9f5 # v46.0.2
with:
files_yaml: |
gpu:
@@ -66,11 +68,11 @@ jobs:
setup-instance:
name: Setup instance (cuda-unsigned-integer-tests)
runs-on: ubuntu-latest
needs: should-run
if: (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
github.event_name == 'workflow_dispatch' ||
needs.should-run.outputs.gpu_test == 'true'
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
steps:
@@ -99,7 +101,7 @@ jobs:
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -125,7 +127,7 @@ jobs:
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -148,12 +150,17 @@ jobs:
if: ${{ always() && needs.cuda-unsigned-integer-tests.result != 'skipped' && failure() }}
continue-on-error: true
steps:
- name: Set pull-request URL
if: env.SECRETS_AVAILABLE == 'true' && github.event_name == 'pull_request'
run: |
echo "PULL_REQUEST_MD_LINK=[pull-request](${{ vars.PR_BASE_URL }}${{ github.event.pull_request.number }}), " >> "${GITHUB_ENV}"
- name: Send message
if: env.SECRETS_AVAILABLE == 'true'
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ needs.cuda-unsigned-integer-tests.result }}
SLACK_MESSAGE: "Unsigned integer GPU tests finished with status: ${{ needs.cuda-unsigned-integer-tests.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Unsigned integer GPU tests finished with status: ${{ needs.cuda-unsigned-integer-tests.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
@@ -178,4 +185,4 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (cuda-unsigned-integer-tests) finished with status: ${{ job.status }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (cuda-unsigned-integer-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -42,7 +42,7 @@ jobs:
name: Long run CPU tests
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow }}_${{github.event_name}}_${{ github.ref }}
group: ${{ github.workflow_ref }}_${{github.event_name}}
cancel-in-progress: true
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 4320 # 72 hours
@@ -54,7 +54,7 @@ jobs:
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable

View File

@@ -24,7 +24,7 @@ env:
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
concurrency:
group: ${{ github.workflow }}_${{ github.head_ref || github.ref }}
group: ${{ github.workflow_ref }}
cancel-in-progress: true
jobs:
@@ -43,7 +43,7 @@ jobs:
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -195,7 +195,7 @@ jobs:
SLACK_COLOR: ${{ needs.cargo-builds-m1.result }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "M1 tests finished with status: ${{ needs.cargo-builds-m1.result }} on '${{ env.BRANCH }}'. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "M1 tests finished with status: ${{ needs.cargo-builds-m1.result }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
MSG_MINIMAL: event,action url,commit

View File

@@ -51,7 +51,7 @@ jobs:
- name: Prepare package
run: |
cargo package -p tfhe
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate
path: target/package/*.crate
@@ -62,7 +62,7 @@ jobs:
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -94,7 +94,7 @@ jobs:
run: |
echo "NPM_TAG=latest" >> "${GITHUB_ENV}"
- name: Download artifact
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
name: crate
path: target/package

View File

@@ -65,7 +65,7 @@ jobs:
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable
@@ -99,7 +99,7 @@ jobs:
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -127,7 +127,7 @@ jobs:
CUDA_PATH: /usr/local/cuda-${{ matrix.cuda }}
steps:
- name: Install latest stable
uses: dtolnay/rust-toolchain@a54c7afa936fefeb4456b2dd8068152669aa8203
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1
with:
toolchain: stable

View File

@@ -30,7 +30,7 @@ jobs:
- name: Prepare package
run: |
cargo package -p tfhe-csprng
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate-tfhe-csprng
path: target/package/*.crate
@@ -42,7 +42,7 @@ jobs:
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -66,7 +66,7 @@ jobs:
fetch-depth: 0
token: ${{ secrets.FHE_ACTIONS_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
name: crate-tfhe-csprng
path: target/package

View File

@@ -33,7 +33,7 @@ jobs:
- name: Prepare package
run: |
cargo package -p tfhe-fft
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate
path: target/package/*.crate
@@ -44,7 +44,7 @@ jobs:
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read

View File

@@ -33,7 +33,7 @@ jobs:
- name: Prepare package
run: |
cargo package -p tfhe-ntt
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate
path: target/package/*.crate
@@ -44,7 +44,7 @@ jobs:
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read

View File

@@ -2,14 +2,13 @@ name: Publish tfhe-versionable release
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
jobs:
verify_tag:
@@ -19,6 +18,7 @@ jobs:
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
package-derive:
name: Package tfhe-versionable-derive Release
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.hash.outputs.hash }}
@@ -30,7 +30,7 @@ jobs:
- name: Prepare package
run: |
cargo package -p tfhe-versionable-derive
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate-tfhe-versionable-derive
path: target/package/*.crate
@@ -40,7 +40,7 @@ jobs:
provenance-derive:
needs: [package-derive]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -53,8 +53,8 @@ jobs:
base64-subjects: ${{ needs.package-derive.outputs.hash }}
publish_release-derive:
name: Publish tfhe-versionable Release
needs: [verify_tag, package-derive] # for comparing hashes
name: Publish tfhe-versionable-derive Release
needs: [ verify_tag, package-derive ] # for comparing hashes
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -64,7 +64,7 @@ jobs:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
name: crate-tfhe-versionable-derive
path: target/package
@@ -72,7 +72,7 @@ jobs:
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: |
cargo publish -p tfhe-versionable-derive --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
cargo publish -p tfhe-versionable-derive --token ${{ env.CRATES_TOKEN }}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
@@ -82,24 +82,18 @@ jobs:
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990 # v2.3.2
env:
SLACK_COLOR: failure
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "SLSA tfhe-versionable-derive - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990 # v2.3.2
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "tfhe-versionable-derive release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
package:
name: Package tfhe-versionable Release
needs: publish_release-derive
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.hash.outputs.hash }}
@@ -111,7 +105,7 @@ jobs:
- name: Prepare package
run: |
cargo package -p tfhe-versionable
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate-tfhe-versionable
path: target/package/*.crate
@@ -120,8 +114,8 @@ jobs:
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
needs: package
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -135,7 +129,7 @@ jobs:
publish_release:
name: Publish tfhe-versionable Release
needs: [package] # for comparing hashes
needs: package # for comparing hashes
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -143,7 +137,7 @@ jobs:
with:
fetch-depth: 0
- name: Download artifact
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
name: crate-tfhe-versionable
path: target/package
@@ -151,32 +145,21 @@ jobs:
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
run: |
cargo publish -p tfhe-versionable --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
cargo publish -p tfhe-versionable --token ${{ env.CRATES_TOKEN }}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990 # v2.3.2
env:
SLACK_COLOR: failure
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "SLSA tfhe-versionable - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@c33737706dea87cd7784c687dadc9adf1be59990 # v2.3.2
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "tfhe-versionable release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -24,7 +24,7 @@ jobs:
- name: Prepare package
run: |
cargo package -p tfhe-zk-pok
- uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate-zk-pok
path: target/package/*.crate
@@ -34,7 +34,7 @@ jobs:
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.0.0
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -64,7 +64,7 @@ jobs:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
with:
name: crate-zk-pok
path: target/package

1
.gitignore vendored
View File

@@ -37,3 +37,4 @@ package-lock.json
# Dir used for backward compatibility test data
tests/tfhe-backward-compat-data/
ci/

233
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,233 @@
# Contributing to TFHE-rs
This document provides guidance on how to contribute to **TFHE-rs**.
There are two ways to contribute:
- **Report issues:** Open issues on GitHub to report bugs, suggest improvements, or note typos.
- **Submit code:** To become an official contributor, you must sign our Contributor License Agreement (CLA). Our CLA-bot will guide you through this process when you open your first pull request.
## 1. Setting up the project
Start by [forking](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo) the **TFHE-rs** repository.
{% hint style="info" %}
- **Rust version**: Ensure that you use a Rust version >= 1.81 to compile **TFHE-rs**.
- **Incompatibility**: AArch64-based machines are not yet supported for Windows as it's currently missing an entropy source to be able to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in **TFHE-rs**.
- **Performance**: For optimal performance, it is highly recommended to run **TFHE-rs** code in release mode with cargo's `--release` flag.
{% endhint %}
To get more details about the library, please refer to the [documentation](https://docs.zama.ai/tfhe-rs).
## 2. Creating a new branch
When creating your branch, make sure to use the following format:
```
git checkout -b {feat|fix|docs|chore…}/short_description
```
For example:
```
git checkout -b feat/new_feature_X
```
## 3. Before committing
### 3.1 Linting
Each commit to **TFHE-rs** should conform to the standards of the project. In particular, all source code, Docker, and workflow files should be linted to prevent programmatic and stylistic errors.
- Rust source code linters: `clippy`
- Typescript/Javascript source code linters: `eslint`, `prettier`
To apply automatic code formatting, run:
```
make fmt
```
You can perform linting of all Cargo targets with:
```
make clippy_all_targets
```
### 3.2 Testing
Your contributions must include comprehensive documentation and tests without breaking existing tests. To run pre-commit checks, execute:
```
make pcc
```
This command ensures that all the targets in the library build correctly.
For a faster check, use:
```
make fpcc
```
If you're contributing to GPU code, also run:
```
make pcc_gpu
```
Unit test suites are heavy and can require a lot of computing power and RAM.
While tests run automatically in the continuous integration pipeline, you can also run them locally.
All unit tests have a command formatted as:
```
make test_*
```
Run `make help` to display a list of all the commands available.
To quickly test your changes locally, follow these steps:
1. Locate where the code has changed.
2. Add (or modify) a Cargo test filter to the corresponding `make` target in Makefile.
3. Run the target.
{% hint style="success" %}
`make test_<something>` will print the underlying cargo command in STDOUT. You can quickly test your changes by copy/pasting the command and then modify it to suit your needs.
{% endhint %}
For example, if you made changes in `tfhe/src/integer/*`, you can test them with the following steps:
1. In the `test_integer` target, replace the filter `-- integer::` with `-- my_new_test`.
2. Run `make test_integer`.
## 4. Committing
**TFHE-rs** follows the conventional commit specification to maintain a consistent commit history, essential for Semantic Versioning ([semver.org](https://semver.org/)).
Commit messages are automatically checked in CI and will be rejected if they do not comply, so make sure that you follow the commit conventions detailed on [this page](https://www.conventionalcommits.org/en/v1.0.0/).
## 5. Rebasing
Before creating a pull request, rebase your branch on the repository's `main` branch. Merge commits are not permitted, thus rebasing ensures fewer conflicts and a smoother PR review process.
## 6. Opening a Pull Request
Once your changes are ready, open a pull request.
For instructions on creating a PR from a fork, refer to GitHub's [official documentation](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork).
## 7. Continuous integration
Before a pull request can be merged, several test suites run automatically. Below is an overview of the CI process:
```mermaid
---
title: Continuous Integration Process
---
sequenceDiagram
autonumber
participant Contributor
participant GitHub
participant Reviewer
participant CI-pipeline
Contributor ->> GitHub: Open pull-request
GitHub -->> Contributor: Ask for CLA signing (once)
loop
Reviewer ->> GitHub: Review code
Reviewer ->> CI-pipeline: Approve workflows (short-run)
CI-pipeline -->> GitHub: Send checks results
Contributor ->> GitHub: Make changes
end
Reviewer ->> GitHub: Pull-request approval
Reviewer ->> CI-pipeline: Approve workflows (long-run)
CI-pipeline -->> GitHub: Send checks results
Reviewer -->> GitHub: Merge if pipeline green
```
> [!Note]
>Useful details:
>* pipeline is triggered by humans
>* review team is located in Paris timezone, pipeline launch will most likely happen during office hours
>* direct changes to CI related files are not allowed for external contributors
>* run `make pcc` to fix any build errors before pushing commits
## 8. Data versioning
Data serialized with TFHE-rs must remain backward compatible. This is done using the [tfhe-versionable](https://crates.io/crates/tfhe-versionable) crate.
If you modify a type that derives `Versionize` in a backward-incompatible way, an upgrade implementation must be provided.
For example, these changes are data breaking:
* Adding a field to a struct.
* Changing the order of the fields within a struct or the variants within an enum.
* Renaming a field of a struct or a variant of an enum.
* Changing the type of field in a struct or a variant in an enum.
On the contrary, these changes are *not* data breaking:
* Renaming a type (unless it implements the `Named` trait).
* Adding a variant to the end of an enum.
## Example: adding a field
Suppose you want to add an i32 field to a type named `MyType`. The original type is defined as:
```rust
#[derive(Serialize, Deserialize, Versionize)]
#[versionize(MyTypeVersions)]
struct MyType {
val: u64,
}
```
And you want to change it to:
```rust
#[derive(Serialize, Deserialize, Versionize)]
#[versionize(MyTypeVersions)]
struct MyType {
val: u64,
other_val: i32
}
```
Follow these steps:
1. Navigate to the definition of the dispatch enum of this type. This is the type inside the `#[versionize(MyTypeVersions)]` macro attribute. In general, this type has the same name as the base type with a `Versions` suffix. You should find something like
```rust
#[derive(VersionsDispatch)]
enum MyTypeVersions {
V0(MyTypeV0),
V1(MyType)
}
```
2. Add a new variant to the enum to preserve the previous version of the type. You can simply copy and paste the previous definition of the type and add a version suffix:
```rust
#[derive(Version)]
struct MyTypeV1 {
val: u64,
}
#[derive(VersionsDispatch)]
enum MyTypeVersions {
V0(MyTypeV0),
V1(MyTypeV1),
V2(MyType) // Here this points to your modified type
}
```
3. Implement the `Upgrade` trait to define how we should go from the previous version to the current version:
```rust
impl Upgrade<MyType> for MyTypeV1 {
type Error = Infallible;
fn upgrade(self) -> Result<MyType, Self::Error> {
Ok(MyType {
val: self.val,
other_val: 0
})
}
}
```
4. Fix the upgrade target of the previous version. In this example, `impl Upgrade<MyType> for MyTypeV0 {` should simply be changed to `impl Upgrade<MyTypeV1> for MyTypeV0 {`

View File

@@ -6,7 +6,6 @@ members = [
"tfhe-ntt",
"tfhe-zk-pok",
"tasks",
"apps/trivium",
"tfhe-csprng",
"backends/tfhe-cuda-backend",
"utils/tfhe-versionable",
@@ -14,14 +13,18 @@ members = [
"tests",
]
exclude = ["tests/backward_compatibility_tests", "utils/tfhe-lints"]
exclude = [
"tests/backward_compatibility_tests",
"utils/tfhe-lints",
"apps/trivium",
]
[workspace.dependencies]
aligned-vec = { version = "0.6", default-features = false }
bytemuck = "1.14.3"
dyn-stack = { version = "0.11", default-features = false }
itertools = "0.14"
num-complex = "0.4"
pulp = { version = "0.20", default-features = false }
pulp = { version = "0.21", default-features = false }
rand = "0.8"
rayon = "1"
serde = { version = "1.0", default-features = false }

101
Makefile
View File

@@ -18,6 +18,8 @@ FAST_BENCH?=FALSE
NIGHTLY_TESTS?=FALSE
BENCH_OP_FLAVOR?=DEFAULT
BENCH_TYPE?=latency
BENCH_PARAM_TYPE?=classical
BENCH_PARAMS_SET?=default
NODE_VERSION=22.6
BACKWARD_COMPAT_DATA_URL=https://github.com/zama-ai/tfhe-backward-compat-data.git
BACKWARD_COMPAT_DATA_BRANCH?=$(shell ./scripts/backward_compat_data_version.py)
@@ -282,14 +284,14 @@ check_typos: install_typos_checker
.PHONY: clippy_gpu # Run clippy lints on tfhe with "gpu" enabled
clippy_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=boolean,shortint,integer,internal-keycache,gpu \
--features=boolean,shortint,integer,internal-keycache,gpu,pbs-stats \
--all-targets \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: check_gpu # Run check on tfhe with "gpu" enabled
check_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" check \
--features=boolean,shortint,integer,internal-keycache,gpu \
--features=boolean,shortint,integer,internal-keycache,gpu,pbs-stats \
--all-targets \
-p $(TFHE_SPEC)
@@ -348,6 +350,9 @@ clippy_integer: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=integer,experimental \
-p $(TFHE_SPEC) -- --no-deps -D warnings
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=integer,experimental,extended-types \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: clippy # Run clippy lints enabling the boolean, shortint, integer
clippy: install_rs_check_toolchain
@@ -380,16 +385,16 @@ clippy_rustdoc_gpu: install_rs_check_toolchain
.PHONY: clippy_c_api # Run clippy lints enabling the boolean, shortint and the C API
clippy_c_api: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=boolean-c-api,shortint-c-api,high-level-c-api \
--features=boolean-c-api,shortint-c-api,high-level-c-api,extended-types \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: clippy_js_wasm_api # Run clippy lints enabling the boolean, shortint, integer and the js wasm API
clippy_js_wasm_api: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,high-level-client-js-wasm-api,zk-pok \
--features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,high-level-client-js-wasm-api,zk-pok,extended-types \
-p $(TFHE_SPEC) -- --no-deps -D warnings
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,high-level-client-js-wasm-api \
--features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,high-level-client-js-wasm-api,extended-types \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: clippy_tasks # Run clippy lints on helper tasks crate.
@@ -399,16 +404,16 @@ clippy_tasks: install_rs_check_toolchain
.PHONY: clippy_trivium # Run clippy lints on Trivium app
clippy_trivium: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
cd apps/trivium; RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
-p tfhe-trivium -- --no-deps -D warnings
.PHONY: clippy_all_targets # Run clippy lints on all targets (benches, examples, etc.)
clippy_all_targets: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
--features=boolean,shortint,integer,internal-keycache,zk-pok,strings \
--features=boolean,shortint,integer,internal-keycache,zk-pok,strings,pbs-stats,extended-types \
-p $(TFHE_SPEC) -- --no-deps -D warnings
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
--features=boolean,shortint,integer,internal-keycache,zk-pok,strings,experimental \
--features=boolean,shortint,integer,internal-keycache,zk-pok,strings,pbs-stats,extended-types,experimental \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: clippy_tfhe_csprng # Run clippy lints on tfhe-csprng
@@ -431,6 +436,7 @@ clippy_versionable: install_rs_check_toolchain
.PHONY: clippy_tfhe_lints # Run clippy lints on tfhe-lints
clippy_tfhe_lints: install_cargo_dylint # the toolchain is selected with toolchain.toml
cd utils/tfhe-lints && \
rustup toolchain install && \
cargo clippy --all-targets -- --no-deps -D warnings
.PHONY: clippy_all # Run all clippy targets
@@ -506,13 +512,13 @@ build_tfhe_coverage: install_rs_build_toolchain
.PHONY: build_c_api # Build the C API for boolean, shortint and integer
build_c_api: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=boolean-c-api,shortint-c-api,high-level-c-api,zk-pok \
--features=boolean-c-api,shortint-c-api,high-level-c-api,zk-pok,extended-types \
-p $(TFHE_SPEC)
.PHONY: build_c_api_gpu # Build the C API for boolean, shortint and integer
build_c_api_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=boolean-c-api,shortint-c-api,high-level-c-api,zk-pok,gpu \
--features=boolean-c-api,shortint-c-api,high-level-c-api,zk-pok,extended-types,gpu \
-p $(TFHE_SPEC)
.PHONY: build_c_api_experimental_deterministic_fft # Build the C API for boolean, shortint and integer with experimental deterministic FFT
@@ -526,7 +532,7 @@ build_web_js_api: install_rs_build_toolchain install_wasm_pack
cd tfhe && \
RUSTFLAGS="$(WASM_RUSTFLAGS)" rustup run "$(RS_BUILD_TOOLCHAIN)" \
wasm-pack build --release --target=web \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,zk-pok
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,zk-pok,extended-types
.PHONY: build_web_js_api_parallel # Build the js API targeting the web browser with parallelism support
build_web_js_api_parallel: install_rs_check_toolchain install_wasm_pack
@@ -534,7 +540,7 @@ build_web_js_api_parallel: install_rs_check_toolchain install_wasm_pack
rustup component add rust-src --toolchain $(RS_CHECK_TOOLCHAIN) && \
RUSTFLAGS="$(WASM_RUSTFLAGS) -C target-feature=+atomics,+bulk-memory" rustup run $(RS_CHECK_TOOLCHAIN) \
wasm-pack build --release --target=web \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,parallel-wasm-api,zk-pok \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,parallel-wasm-api,zk-pok,extended-types \
-Z build-std=panic_abort,std && \
find pkg/snippets -type f -iname workerHelpers.js -exec sed -i "s|const pkg = await import('..\/..\/..');|const pkg = await import('..\/..\/..\/tfhe.js');|" {} \;
jq '.files += ["snippets"]' tfhe/pkg/package.json > tmp_pkg.json && mv -f tmp_pkg.json tfhe/pkg/package.json
@@ -544,7 +550,7 @@ build_node_js_api: install_rs_build_toolchain install_wasm_pack
cd tfhe && \
RUSTFLAGS="$(WASM_RUSTFLAGS)" rustup run "$(RS_BUILD_TOOLCHAIN)" \
wasm-pack build --release --target=nodejs \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,zk-pok
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api,zk-pok,extended-types
.PHONY: build_tfhe_csprng # Build tfhe_csprng
build_tfhe_csprng: install_rs_build_toolchain
@@ -596,7 +602,7 @@ test_core_crypto_gpu: install_rs_build_toolchain
.PHONY: test_integer_gpu # Run the tests of the integer module including experimental on the gpu backend
test_integer_gpu: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=integer,gpu -p $(TFHE_SPEC) -- integer::gpu::server_key:: --test-threads=6
--features=integer,gpu -p $(TFHE_SPEC) -- integer::gpu::server_key:: --test-threads=4
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --doc --profile $(CARGO_PROFILE) \
--features=integer,gpu -p $(TFHE_SPEC) -- integer::gpu::server_key::
@@ -868,12 +874,12 @@ test_examples: test_sha256_bool test_regex_engine
.PHONY: test_trivium # Run tests for trivium
test_trivium: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
cd apps/trivium; RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
-p tfhe-trivium -- --test-threads=1 trivium::
.PHONY: test_kreyvium # Run tests for kreyvium
test_kreyvium: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
cd apps/trivium; RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
-p tfhe-trivium -- --test-threads=1 kreyvium::
.PHONY: test_tfhe_csprng # Run tfhe-csprng tests
@@ -907,6 +913,7 @@ test_versionable: install_rs_build_toolchain
.PHONY: test_tfhe_lints # Run test on tfhe-lints
test_tfhe_lints: install_cargo_dylint
cd utils/tfhe-lints && \
rustup toolchain install && \
cargo test
# The backward compat data repo holds historical binary data but also rust code to generate and load them.
@@ -1071,35 +1078,42 @@ bench_integer: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=integer,internal-keycache,nightly-avx512 -p $(TFHE_SPEC) --
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_signed_integer # Run benchmarks for signed integer
bench_signed_integer: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-signed-bench \
--features=integer,internal-keycache,nightly-avx512 -p $(TFHE_SPEC) --
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_integer_gpu # Run benchmarks for integer on GPU backend
bench_integer_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=integer,gpu,internal-keycache,nightly-avx512 -p $(TFHE_SPEC) --
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_signed_integer_gpu # Run benchmarks for signed integer on GPU backend
bench_signed_integer_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-signed-bench \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_integer_compression # Run benchmarks for unsigned integer compression
bench_integer_compression: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench glwe_packing_compression-integer-bench \
--features=integer,internal-keycache,nightly-avx512 -p $(TFHE_SPEC) --
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_integer_compression_gpu
bench_integer_compression_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench glwe_packing_compression-integer-bench \
--features=integer,internal-keycache,gpu -p $(TFHE_SPEC) --
--features=integer,internal-keycache,gpu,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_integer_multi_bit # Run benchmarks for unsigned integer using multi-bit parameters
bench_integer_multi_bit: install_rs_check_toolchain
@@ -1107,7 +1121,7 @@ bench_integer_multi_bit: install_rs_check_toolchain
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=integer,internal-keycache,nightly-avx512 -p $(TFHE_SPEC) --
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_signed_integer_multi_bit # Run benchmarks for signed integer using multi-bit parameters
bench_signed_integer_multi_bit: install_rs_check_toolchain
@@ -1115,7 +1129,7 @@ bench_signed_integer_multi_bit: install_rs_check_toolchain
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-signed-bench \
--features=integer,internal-keycache,nightly-avx512 -p $(TFHE_SPEC) --
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_integer_multi_bit_gpu # Run benchmarks for integer on GPU backend using multi-bit parameters
bench_integer_multi_bit_gpu: install_rs_check_toolchain
@@ -1123,22 +1137,22 @@ bench_integer_multi_bit_gpu: install_rs_check_toolchain
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=integer,gpu,internal-keycache,nightly-avx512 -p $(TFHE_SPEC) --
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_unsigned_integer_multi_bit_gpu # Run benchmarks for unsigned integer on GPU backend using multi-bit parameters
bench_unsigned_integer_multi_bit_gpu: install_rs_check_toolchain
.PHONY: bench_signed_integer_multi_bit_gpu # Run benchmarks for signed integer on GPU backend using multi-bit parameters
bench_signed_integer_multi_bit_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT \
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=integer,gpu,internal-keycache,nightly-avx512 -p $(TFHE_SPEC) -- ::unsigned
--bench integer-signed-bench \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p $(TFHE_SPEC) --
.PHONY: bench_integer_zk # Run benchmarks for integer encryption with ZK proofs
bench_integer_zk: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench zk-pke-bench \
--features=integer,internal-keycache,zk-pok,nightly-avx512 \
--features=integer,internal-keycache,zk-pok,nightly-avx512,pbs-stats \
-p $(TFHE_SPEC) --
.PHONY: bench_shortint # Run benchmarks for shortint
@@ -1171,31 +1185,52 @@ bench_boolean: install_rs_check_toolchain
.PHONY: bench_pbs # Run benchmarks for PBS
bench_pbs: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs-bench \
--features=boolean,shortint,internal-keycache,nightly-avx512 -p $(TFHE_SPEC)
.PHONY: bench_ks_pbs # Run benchmarks for KS-PBS
bench_ks_pbs: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ks-pbs-bench \
--features=boolean,shortint,internal-keycache,nightly-avx512 -p $(TFHE_SPEC)
.PHONY: bench_ks_pbs_gpu # Run benchmarks for KS-PBS on GPU backend
bench_ks_pbs_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ks-pbs-bench \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p $(TFHE_SPEC)
.PHONY: bench_pbs128 # Run benchmarks for PBS using FFT 128 bits
bench_pbs128: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs128-bench \
--features=boolean,shortint,internal-keycache,nightly-avx512 -p $(TFHE_SPEC)
.PHONY: bench_pbs128_gpu # Run benchmarks for PBS using FFT 128 bits on GPU
bench_pbs128_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs128-bench \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p $(TFHE_SPEC)
.PHONY: bench_pbs_gpu # Run benchmarks for PBS on GPU backend
bench_pbs_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_FAST_BENCH=$(FAST_BENCH) cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs-bench \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p $(TFHE_SPEC)
.PHONY: bench_ks # Run benchmarks for keyswitch
bench_ks: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ks-bench \
--features=boolean,shortint,internal-keycache,nightly-avx512 -p $(TFHE_SPEC)
.PHONY: bench_ks_gpu # Run benchmarks for PBS on GPU backend
bench_ks_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ks-bench \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p $(TFHE_SPEC)

View File

@@ -10,7 +10,7 @@
<hr/>
<p align="center">
<a href="https://docs.zama.ai/tfhe-rs"> 📒 Documentation</a> | <a href="https://zama.ai/community"> 💛 Community support</a> | <a href="https://github.com/zama-ai/awesome-zama"> 📚 FHE resources by Zama</a>
<a href="https://github.com/zama-ai/tfhe-rs-handbook/blob/main/tfhe-rs-handbook.pdf"> 📃 Read Handbook</a> |<a href="https://docs.zama.ai/tfhe-rs"> 📒 Documentation</a> | <a href="https://zama.ai/community"> 💛 Community support</a> | <a href="https://github.com/zama-ai/awesome-zama"> 📚 FHE resources by Zama</a>
</p>
@@ -67,6 +67,9 @@ production-ready library for all the advanced features of TFHE.
## Getting started
> [!Important]
> **TFHE-rs** released its first stable version v1.0.0 in February 2025, stabilizing the high-level API for the x86 CPU backend.
### Cargo.toml configuration
To use the latest version of `TFHE-rs` in your project, you first need to add it as a dependency in your `Cargo.toml`:
@@ -75,13 +78,13 @@ tfhe = { version = "*", features = ["boolean", "shortint", "integer"] }
```
> [!Note]
> Note: You need to use a Rust version >= 1.81 to compile TFHE-rs.
> Note: You need to use Rust version >= 1.84 to compile TFHE-rs.
> [!Note]
> Note: aarch64-based machines are not yet supported for Windows as it's currently missing an entropy source to be able to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in TFHE-rs.
> Note: AArch64-based machines are not supported for Windows as it's currently missing an entropy source to be able to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in TFHE-rs.
<p align="right">
<a href="#about" > ↑ Back to top </a>
<a href="#about" > ↑ Back to top </a>
</p>
### A simple example
@@ -138,7 +141,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
}
```
To run this code, use the following command:
To run this code, use the following command:
<p align="center"> <code> cargo run --release </code> </p>
> [!Note]
@@ -148,12 +151,15 @@ to run in release mode with cargo's `--release` flag to have the best performanc
*Find an example with more explanations in [this part of the documentation](https://docs.zama.ai/tfhe-rs/get-started/quick_start)*
<p align="right">
<a href="#about" > ↑ Back to top </a>
<a href="#about" > ↑ Back to top </a>
</p>
## Resources
## Resources
### TFHE-rs Handbook
A document containing scientific and technical details about algorithms implemented into the library is available here: [TFHE-rs: A (Practical) Handbook](https://github.com/zama-ai/tfhe-rs-handbook/blob/main/tfhe-rs-handbook.pdf).
### TFHE deep dive
- [TFHE Deep Dive - Part I - Ciphertext types](https://www.zama.ai/post/tfhe-deep-dive-part-1)
@@ -176,7 +182,7 @@ to run in release mode with cargo's `--release` flag to have the best performanc
Full, comprehensive documentation is available here: [https://docs.zama.ai/tfhe-rs](https://docs.zama.ai/tfhe-rs).
<p align="right">
<a href="#about" > ↑ Back to top </a>
<a href="#about" > ↑ Back to top </a>
</p>
@@ -194,9 +200,13 @@ When a new update is published in the Lattice Estimator, we update parameters ac
### Security model
The default parameters for the TFHE-rs library are chosen considering the IND-CPA security model, and are selected with a bootstrapping failure probability fixed at p_error = $2^{-64}$. In particular, it is assumed that the results of decrypted computations are not shared by the secret key owner with any third parties, as such an action can lead to leakage of the secret encryption key. If you are designing an application where decryptions must be shared, you will need to craft custom encryption parameters which are chosen in consideration of the IND-CPA^D security model [1].
By default, the parameter sets used in the High-Level API with the x86 CPU backend have a failure probability $\le 2^{-128}$ to securely work in the IND-CPA^D model using the algorithmic techniques provided in our code base [1].
If you want to work within the IND-CPA security model, which is less strict than the IND-CPA-D model, the parameter sets can easily be changed and would have slightly better performance. More details can be found in the [TFHE-rs documentation](https://docs.zama.ai/tfhe-rs).
[1] Li, Baiyu, et al. "Securing approximate homomorphic encryption using differential privacy." Annual International Cryptology Conference. Cham: Springer Nature Switzerland, 2022. https://eprint.iacr.org/2022/816.pdf
The default parameters used in the High-Level API with the GPU backend are chosen considering the IND-CPA security model, and are selected with a bootstrapping failure probability fixed at $p_{error} \le 2^{-64}$. In particular, it is assumed that the results of decrypted computations are not shared by the secret key owner with any third parties, as such an action can lead to leakage of the secret encryption key. If you are designing an application where decryptions must be shared, you will need to craft custom encryption parameters which are chosen in consideration of the IND-CPA^D security model [2].
[1] Bernard, Olivier, et al. "Drifting Towards Better Error Probabilities in Fully Homomorphic Encryption Schemes". https://eprint.iacr.org/2024/1718.pdf
[2] Li, Baiyu, et al. "Securing approximate homomorphic encryption using differential privacy." Annual International Cryptology Conference. Cham: Springer Nature Switzerland, 2022. https://eprint.iacr.org/2022/816.pdf
#### Side-channel attacks
@@ -245,7 +255,7 @@ This software is distributed under the **BSD-3-Clause-Clear** license. Read [thi
>We are open to collaborating and advancing the FHE space with our partners. If you have specific needs, please email us at hello@zama.ai.
<p align="right">
<a href="#about" > ↑ Back to top </a>
<a href="#about" > ↑ Back to top </a>
</p>
@@ -259,8 +269,8 @@ This software is distributed under the **BSD-3-Clause-Clear** license. Read [thi
</picture>
</a>
🌟 If you find this project helpful or interesting, please consider giving it a star on GitHub! Your support helps to grow the community and motivates further development.
🌟 If you find this project helpful or interesting, please consider giving it a star on GitHub! Your support helps to grow the community and motivates further development.
<p align="right">
<a href="#about" > ↑ Back to top </a>
<a href="#about" > ↑ Back to top </a>
</p>

View File

@@ -13,3 +13,9 @@ extend-ignore-identifiers-re = [
# Example in trivium
"C9217BA0D762ACA1"
]
[files]
extend-exclude = [
"backends/tfhe-cuda-backend/cuda/src/fft128/twiddles.cu",
"backends/tfhe-cuda-backend/cuda/src/fft/twiddles.cu",
]

View File

@@ -6,11 +6,17 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
rayon = { workspace = true }
tfhe = { path = "../../tfhe", features = [ "boolean", "shortint", "integer" ] }
rayon = "1"
tfhe = { path = "../../tfhe", features = ["boolean", "shortint", "integer"] }
[dev-dependencies]
criterion = { version = "0.5.1", features = [ "html_reports" ]}
criterion = { version = "0.5.1", features = ["html_reports"] }
[profile.devo]
inherits = "dev"
opt-level = 3
lto = "off"
debug-assertions = false
[[bench]]
name = "trivium"

View File

@@ -129,7 +129,7 @@ Other sizes than 64 bit are expected to be available in the future.
# FHE shortint Trivium implementation
The same implementation is also available for generic Ciphertexts representing bits (meant to be used with parameters `V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128`).
The same implementation is also available for generic Ciphertexts representing bits (meant to be used with parameters `V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128`).
It uses a lower level API of tfhe-rs, so the syntax is a little bit different. It also implements the `TransCiphering` trait. For optimization purposes, it does not internally run
on the same cryptographic parameters as the high level API of tfhe-rs. As such, it requires the usage of a casting key, to switch from one parameter space to another, which makes
its setup a little more intricate.
@@ -137,10 +137,10 @@ its setup a little more intricate.
Example code:
```rust
use tfhe::shortint::prelude::*;
use tfhe::shortint::parameters::v1_0::{
V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
use tfhe::shortint::parameters::v1_1::{
V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{ConfigBuilder, generate_keys, FheUint64};
use tfhe::prelude::*;
@@ -148,17 +148,17 @@ use tfhe_trivium::TriviumStreamShortint;
fn test_shortint() {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();

View File

@@ -1,9 +1,9 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::parameters::v1_0::{
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
use tfhe::shortint::parameters::v1_1::{
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::shortint::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
@@ -11,19 +11,19 @@ use tfhe_trivium::{KreyviumStreamShortint, TransCiphering};
pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
@@ -64,19 +64,19 @@ pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
pub fn kreyvium_shortint_gen(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
@@ -112,19 +112,19 @@ pub fn kreyvium_shortint_gen(c: &mut Criterion) {
pub fn kreyvium_shortint_trans(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();

View File

@@ -1,9 +1,9 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::parameters::v1_0::{
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
use tfhe::shortint::parameters::v1_1::{
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::shortint::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
@@ -11,19 +11,19 @@ use tfhe_trivium::{TransCiphering, TriviumStreamShortint};
pub fn trivium_shortint_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();
@@ -64,19 +64,19 @@ pub fn trivium_shortint_warmup(c: &mut Criterion) {
pub fn trivium_shortint_gen(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();
@@ -112,19 +112,19 @@ pub fn trivium_shortint_gen(c: &mut Criterion) {
pub fn trivium_shortint_trans(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();

View File

@@ -1,9 +1,9 @@
use crate::{KreyviumStream, KreyviumStreamByte, KreyviumStreamShortint, TransCiphering};
use tfhe::prelude::*;
use tfhe::shortint::parameters::v1_0::{
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
use tfhe::shortint::parameters::v1_1::{
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
// Values for these tests come from the github repo renaud1239/Kreyvium,
@@ -221,19 +221,19 @@ use tfhe::shortint::prelude::*;
#[test]
fn kreyvium_test_shortint_long() {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();

View File

@@ -48,6 +48,8 @@ fn transcipher_from_1_1_stream(
) -> FheUint64 {
assert_eq!(stream.len(), 64);
let id_lut = internal_server_key.generate_lookup_table(|x| x);
let pairs = (0..32)
.into_par_iter()
.map(|i| {
@@ -57,10 +59,11 @@ fn transcipher_from_1_1_stream(
let b0 = &stream[8 * byte_idx + 2 * pair_idx];
let b1 = &stream[8 * byte_idx + 2 * pair_idx + 1];
casting_key.cast(
&internal_server_key
.unchecked_add(b0, &internal_server_key.unchecked_scalar_mul(b1, 2)),
)
let mut combined = internal_server_key
.unchecked_add(b0, &internal_server_key.unchecked_scalar_mul(b1, 2));
internal_server_key.apply_lookup_table_assign(&mut combined, &id_lut);
casting_key.cast(&combined)
})
.collect::<Vec<_>>();

View File

@@ -1,9 +1,9 @@
use crate::{TransCiphering, TriviumStream, TriviumStreamByte, TriviumStreamShortint};
use tfhe::prelude::*;
use tfhe::shortint::parameters::v1_0::{
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
use tfhe::shortint::parameters::v1_1::{
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
// Values for these tests come from the github repo cantora/avr-crypto-lib, commit 2a5b018,
@@ -357,19 +357,19 @@ use tfhe::shortint::prelude::*;
#[test]
fn trivium_test_shortint_long() {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_0_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_1_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_0_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_1_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_0_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_1_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();

View File

@@ -1,6 +1,6 @@
[package]
name = "tfhe-cuda-backend"
version = "0.8.0"
version = "0.9.0-alpha.0"
edition = "2021"
authors = ["Zama team"]
license = "BSD-3-Clause-Clear"

View File

@@ -13,24 +13,26 @@ and forth between the CPU and the GPU, to create and destroy Cuda streams, etc.:
- `cuda_get_number_of_gpus`
- `cuda_synchronize_device`
The cryptographic operations it provides are:
- an amortized implementation of the TFHE programmable bootstrap: `cuda_bootstrap_amortized_lwe_ciphertext_vector_32` and `cuda_bootstrap_amortized_lwe_ciphertext_vector_64`
- a low latency implementation of the TFHE programmable bootstrap: `cuda_bootstrap_low_latency_lwe_ciphertext_vector_32` and `cuda_bootstrap_low_latency_lwe_ciphertext_vector_64`
- the keyswitch: `cuda_keyswitch_lwe_ciphertext_vector_32` and `cuda_keyswitch_lwe_ciphertext_vector_64`
- the larger precision programmable bootstrap (wop PBS, which supports up to 16 bits of message while the classical PBS only supports up to 8 bits of message) and its sub-components: `cuda_wop_pbs_64`, `cuda_extract_bits_64`, `cuda_circuit_bootstrap_64`, `cuda_cmux_tree_64`, `cuda_blind_rotation_sample_extraction_64`
- acceleration for leveled operations: `cuda_negate_lwe_ciphertext_vector_64`, `cuda_add_lwe_ciphertext_vector_64`, `cuda_add_lwe_ciphertext_vector_plaintext_vector_64`, `cuda_mult_lwe_ciphertext_vector_cleartext_vector`.
- an implementation of the classical TFHE programmable bootstrap,
- an implementation of the multi-bit TFHE programmable bootstrap,
- the keyswitch,
- acceleration for leveled operations,
- acceleration for arithmetics over encrypted integers of arbitrary size,
- acceleration for integer compression/decompression.
## Dependencies
**Disclaimer**: Compilation on Windows/Mac is not supported yet. Only Nvidia GPUs are supported.
- nvidia driver - for example, if you're running Ubuntu 20.04 check this [page](https://linuxconfig.org/how-to-install-the-nvidia-drivers-on-ubuntu-20-04-focal-fossa-linux) for installation
- nvidia driver - for example, if you're running Ubuntu 20.04 check this [page](https://linuxconfig.org/how-to-install-the-nvidia-drivers-on-ubuntu-20-04-focal-fossa-linux) for installation. You need an Nvidia GPU with Compute Capability >= 3.0
- [nvcc](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html) >= 10.0
- [gcc](https://gcc.gnu.org/) >= 8.0 - check this [page](https://gist.github.com/ax3l/9489132) for more details about nvcc/gcc compatible versions
- [cmake](https://cmake.org/) >= 3.24
- libclang, to match Rust bindgen [requirements](https://rust-lang.github.io/rust-bindgen/requirements.html) >= 9.0
## Build
The Cuda project held in `tfhe-cuda-backend` can be compiled independently from TFHE-rs in the following way:
The Cuda project held in `tfhe-cuda-backend` can be compiled independently of TFHE-rs in the following way:
```
git clone git@github.com:zama-ai/tfhe-rs
cd backends/tfhe-cuda-backend/cuda

View File

@@ -62,6 +62,7 @@ fn main() {
"cuda/include/integer/integer.h",
"cuda/include/keyswitch.h",
"cuda/include/linear_algebra.h",
"cuda/include/fft/fft128.h",
"cuda/include/pbs/programmable_bootstrap.h",
"cuda/include/pbs/programmable_bootstrap_multibit.h",
];

View File

@@ -18,7 +18,7 @@ void cuda_convert_lwe_ciphertext_vector_to_cpu_64(void *stream,
void cuda_glwe_sample_extract_64(void *stream, uint32_t gpu_index,
void *lwe_array_out, void const *glwe_array_in,
uint32_t const *nth_array, uint32_t num_nths,
uint32_t glwe_dimension,
uint32_t lwe_per_glwe, uint32_t glwe_dimension,
uint32_t polynomial_size);
}
#endif

View File

@@ -52,7 +52,7 @@ void *cuda_malloc_async(uint64_t size, cudaStream_t stream, uint32_t gpu_index);
void cuda_check_valid_malloc(uint64_t size, uint32_t gpu_index);
void cuda_memcpy_async_to_gpu(void *dest, void *src, uint64_t size,
void cuda_memcpy_async_to_gpu(void *dest, const void *src, uint64_t size,
cudaStream_t stream, uint32_t gpu_index);
void cuda_memcpy_async_gpu_to_gpu(void *dest, void const *src, uint64_t size,
@@ -76,7 +76,7 @@ void cuda_drop(void *ptr, uint32_t gpu_index);
void cuda_drop_async(void *ptr, cudaStream_t stream, uint32_t gpu_index);
}
int cuda_get_max_shared_memory(uint32_t gpu_index);
uint32_t cuda_get_max_shared_memory(uint32_t gpu_index);
bool cuda_check_support_cooperative_groups();

View File

@@ -0,0 +1,17 @@
#include <stdint.h>
extern "C" {
void cuda_fourier_transform_forward_as_torus_f128_async(
void *stream, uint32_t gpu_index, void *re0, void *re1, void *im0,
void *im1, void const *standard, uint32_t const N,
const uint32_t number_of_samples);
void cuda_fourier_transform_forward_as_integer_f128_async(
void *stream, uint32_t gpu_index, void *re0, void *re1, void *im0,
void *im1, void const *standard, uint32_t const N,
const uint32_t number_of_samples);
void cuda_fourier_transform_backward_as_torus_f128_async(
void *stream, uint32_t gpu_index, void *standard, void const *re0,
void const *re1, void const *im0, void const *im1, uint32_t const N,
const uint32_t number_of_samples);
}

View File

@@ -102,9 +102,7 @@ template <typename Torus> struct int_decompression {
// Example: in the 2_2 case we are mapping a 2 bits message onto a 4 bits
// space, we want to keep the original 2 bits value in the 4 bits space,
// so we apply the identity and the encoding will rescale it for us.
auto decompression_rescale_f = [encryption_params](Torus x) -> Torus {
return x;
};
auto decompression_rescale_f = [](Torus x) -> Torus { return x; };
auto effective_compression_message_modulus =
encryption_params.carry_modulus;

View File

@@ -44,6 +44,7 @@ typedef struct {
uint64_t *degrees;
uint64_t *noise_levels;
uint32_t num_radix_blocks;
uint32_t max_num_radix_blocks;
uint32_t lwe_dimension;
} CudaRadixCiphertextFFI;
@@ -113,7 +114,8 @@ void scratch_cuda_full_propagation_64(
void cuda_full_propagation_64_inplace(void *const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count, void *input_blocks,
uint32_t gpu_count,
CudaRadixCiphertextFFI *input_blocks,
int8_t *mem_ptr, void *const *ksks,
void *const *bsks, uint32_t num_blocks);
@@ -132,10 +134,11 @@ void scratch_cuda_integer_mult_radix_ciphertext_kb_64(
void cuda_integer_mult_radix_ciphertext_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *radix_lwe_out, void const *radix_lwe_left, bool const is_bool_left,
void const *radix_lwe_right, bool const is_bool_right, void *const *bsks,
void *const *ksks, int8_t *mem_ptr, uint32_t polynomial_size,
uint32_t num_blocks);
CudaRadixCiphertextFFI *radix_lwe_out,
CudaRadixCiphertextFFI const *radix_lwe_left, bool const is_bool_left,
CudaRadixCiphertextFFI const *radix_lwe_right, bool const is_bool_right,
void *const *bsks, void *const *ksks, int8_t *mem_ptr,
uint32_t polynomial_size, uint32_t num_blocks);
void cleanup_cuda_integer_mult(void *const *streams,
uint32_t const *gpu_indexes, uint32_t gpu_count,
@@ -145,12 +148,13 @@ void cuda_negate_integer_radix_ciphertext_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in, uint32_t message_modulus,
uint32_t carry_modulus);
uint32_t carry_modulus, uint32_t num_radix_blocks);
void cuda_scalar_addition_integer_radix_ciphertext_64_inplace(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *lwe_array, void const *scalar_input,
uint32_t num_scalars, uint32_t message_modulus, uint32_t carry_modulus);
void const *h_scalar_input, uint32_t num_scalars, uint32_t message_modulus,
uint32_t carry_modulus);
void scratch_cuda_integer_radix_logical_scalar_shift_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
@@ -218,15 +222,17 @@ void scratch_cuda_integer_radix_comparison_kb_64(
void cuda_comparison_integer_radix_ciphertext_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *lwe_array_out, void const *lwe_array_1, void const *lwe_array_2,
int8_t *mem_ptr, void *const *bsks, void *const *ksks,
uint32_t lwe_ciphertext_count);
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_1,
CudaRadixCiphertextFFI const *lwe_array_2, int8_t *mem_ptr,
void *const *bsks, void *const *ksks);
void cuda_scalar_comparison_integer_radix_ciphertext_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *lwe_array_out, void const *lwe_array_in, void const *scalar_blocks,
int8_t *mem_ptr, void *const *bsks, void *const *ksks,
uint32_t lwe_ciphertext_count, uint32_t num_scalar_blocks);
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in, void const *scalar_blocks,
void const *h_scalar_blocks, int8_t *mem_ptr, void *const *bsks,
void *const *ksks, uint32_t num_scalar_blocks);
void cleanup_cuda_integer_comparison(void *const *streams,
uint32_t const *gpu_indexes,
@@ -252,8 +258,8 @@ void cuda_scalar_bitop_integer_radix_ciphertext_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_input, void const *clear_blocks,
uint32_t num_clear_blocks, int8_t *mem_ptr, void *const *bsks,
void *const *ksks);
void const *h_clear_blocks, uint32_t num_clear_blocks, int8_t *mem_ptr,
void *const *bsks, void *const *ksks);
void cleanup_cuda_integer_bitop(void *const *streams,
uint32_t const *gpu_indexes, uint32_t gpu_count,
@@ -373,9 +379,9 @@ void scratch_cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
void cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *radix_lwe_out, void *radix_lwe_vec, uint32_t num_radix_in_vec,
int8_t *mem_ptr, void *const *bsks, void *const *ksks,
uint32_t num_blocks_in_radix);
CudaRadixCiphertextFFI *radix_lwe_out,
CudaRadixCiphertextFFI *radix_lwe_vec, int8_t *mem_ptr, void *const *bsks,
void *const *ksks);
void cleanup_cuda_integer_radix_partial_sum_ciphertexts_vec(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
@@ -391,10 +397,10 @@ void scratch_cuda_integer_scalar_mul_kb_64(
void cuda_scalar_multiplication_integer_radix_ciphertext_64_inplace(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *lwe_array, uint64_t const *decomposed_scalar,
CudaRadixCiphertextFFI *lwe_array, uint64_t const *decomposed_scalar,
uint64_t const *has_at_least_one_set, int8_t *mem_ptr, void *const *bsks,
void *const *ksks, uint32_t lwe_dimension, uint32_t polynomial_size,
uint32_t message_modulus, uint32_t num_blocks, uint32_t num_scalars);
void *const *ksks, uint32_t polynomial_size, uint32_t message_modulus,
uint32_t num_scalars);
void cleanup_cuda_integer_radix_scalar_mul(void *const *streams,
uint32_t const *gpu_indexes,
@@ -412,9 +418,10 @@ void scratch_cuda_integer_div_rem_radix_ciphertext_kb_64(
void cuda_integer_div_rem_radix_ciphertext_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *quotient, void *remainder, void const *numerator, void const *divisor,
bool is_signed, int8_t *mem_ptr, void *const *bsks, void *const *ksks,
uint32_t num_blocks_in_radix);
CudaRadixCiphertextFFI *quotient, CudaRadixCiphertextFFI *remainder,
CudaRadixCiphertextFFI const *numerator,
CudaRadixCiphertextFFI const *divisor, bool is_signed, int8_t *mem_ptr,
void *const *bsks, void *const *ksks);
void cleanup_cuda_integer_div_rem(void *const *streams,
uint32_t const *gpu_indexes,
@@ -474,7 +481,8 @@ void scratch_cuda_integer_are_all_comparisons_block_true_kb_64(
void cuda_integer_are_all_comparisons_block_true_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *lwe_array_out, void const *lwe_array_in, int8_t *mem_ptr,
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in, int8_t *mem_ptr,
void *const *bsks, void *const *ksks, uint32_t num_radix_blocks);
void cleanup_cuda_integer_are_all_comparisons_block_true(
@@ -492,7 +500,8 @@ void scratch_cuda_integer_is_at_least_one_comparisons_block_true_kb_64(
void cuda_integer_is_at_least_one_comparisons_block_true_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *lwe_array_out, void const *lwe_array_in, int8_t *mem_ptr,
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in, int8_t *mem_ptr,
void *const *bsks, void *const *ksks, uint32_t num_radix_blocks);
void cleanup_cuda_integer_is_at_least_one_comparisons_block_true(

View File

@@ -1,8 +1,11 @@
#ifndef CUDA_RADIX_CIPHERTEXT_H
#define CUDA_RADIX_CIPHERTEXT_H
void release_radix_ciphertext(cudaStream_t const stream,
uint32_t const gpu_index,
CudaRadixCiphertextFFI *data);
void release_radix_ciphertext_async(cudaStream_t const stream,
uint32_t const gpu_index,
CudaRadixCiphertextFFI *data);
void reset_radix_ciphertext_blocks(CudaRadixCiphertextFFI *data,
uint32_t new_num_blocks);
#endif

View File

@@ -5,12 +5,12 @@
template <typename Torus>
bool supports_distributed_shared_memory_on_multibit_programmable_bootstrap(
uint32_t polynomial_size, int max_shared_memory);
uint32_t polynomial_size, uint32_t max_shared_memory);
template <typename Torus>
bool has_support_to_cuda_programmable_bootstrap_tbc_multi_bit(
uint32_t num_samples, uint32_t glwe_dimension, uint32_t polynomial_size,
uint32_t level_count, int max_shared_memory);
uint32_t level_count, uint32_t max_shared_memory);
#if CUDA_ARCH >= 900
template <typename Torus>

View File

@@ -9,20 +9,26 @@
template <typename Torus>
uint64_t get_buffer_size_full_sm_programmable_bootstrap_step_one(
uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size + // accumulator_rotated
sizeof(double2) * polynomial_size / 2; // accumulator fft
size_t double_count = (sizeof(Torus) == 16) ? 2 : 1;
return sizeof(Torus) * polynomial_size + // accumulator_rotated
sizeof(double) * 2 * double_count * polynomial_size /
2; // accumulator fft
}
template <typename Torus>
uint64_t get_buffer_size_full_sm_programmable_bootstrap_step_two(
uint32_t polynomial_size) {
return sizeof(Torus) * polynomial_size + // accumulator
sizeof(double2) * polynomial_size / 2; // accumulator fft
size_t double_count = (sizeof(Torus) == 16) ? 2 : 1;
return sizeof(Torus) * polynomial_size + // accumulator
sizeof(double) * 2 * double_count * polynomial_size /
2; // accumulator fft
}
template <typename Torus>
uint64_t
get_buffer_size_partial_sm_programmable_bootstrap(uint32_t polynomial_size) {
return sizeof(double2) * polynomial_size / 2; // accumulator fft
size_t double_count = (sizeof(Torus) == 16) ? 2 : 1;
return sizeof(double) * 2 * double_count * polynomial_size /
2; // accumulator fft
}
template <typename Torus>
@@ -61,7 +67,7 @@ get_buffer_size_partial_sm_programmable_bootstrap_cg(uint32_t polynomial_size) {
template <typename Torus>
bool supports_distributed_shared_memory_on_classic_programmable_bootstrap(
uint32_t polynomial_size, int max_shared_memory);
uint32_t polynomial_size, uint32_t max_shared_memory);
template <typename Torus, PBS_TYPE pbs_type> struct pbs_buffer;
@@ -215,6 +221,155 @@ template <typename Torus> struct pbs_buffer<Torus, PBS_TYPE::CLASSICAL> {
}
};
// Only the CLASSICAL specialization is defined for the 128-bit PBS.
template <PBS_TYPE pbs_type> struct pbs_buffer_128;

// Scratch device memory for the 128-bit classical programmable bootstrap.
// All allocations and frees are stream-ordered (cuda_malloc_async /
// cuda_drop_async), so the caller must keep `stream` alive and order any
// reuse of the memory after these operations.
// NOTE(review): when allocate_gpu_memory is false the pointer members are
// left uninitialized, yet release() unconditionally frees d_mem and
// global_join_buffer — presumably release() is only called when memory was
// allocated; confirm at call sites.
template <> struct pbs_buffer_128<PBS_TYPE::CLASSICAL> {
  // Device scratch for the parts of the accumulators that do not fit in
  // shared memory; sized 0 when everything fits (see the branches below).
  int8_t *d_mem;
  // Global accumulator; allocated (and released) for the DEFAULT variant only.
  __uint128_t *global_accumulator;
  // FFT-domain join buffer shared across blocks.
  double *global_join_buffer;
  // Variant the buffer sizes were computed for; drives release().
  PBS_VARIANT pbs_variant;

  // Sizes the scratch buffers for `pbs_variant` given the device's shared
  // memory capacity, and allocates them on `stream` when
  // `allocate_gpu_memory` is true.
  pbs_buffer_128(cudaStream_t stream, uint32_t gpu_index,
                 uint32_t glwe_dimension, uint32_t polynomial_size,
                 uint32_t level_count, uint32_t input_lwe_ciphertext_count,
                 PBS_VARIANT pbs_variant, bool allocate_gpu_memory) {
    cuda_set_device(gpu_index);
    this->pbs_variant = pbs_variant;
    auto max_shared_memory = cuda_get_max_shared_memory(gpu_index);

    // One complex coefficient in the f128 Fourier domain takes 4 doubles
    // (re.hi, re.lo, im.hi, im.lo) — presumably the reason for the trailing
    // factor 4; confirm against the f128 FFT layout.
    size_t global_join_buffer_size = (glwe_dimension + 1) * level_count *
                                     input_lwe_ciphertext_count *
                                     polynomial_size / 2 * sizeof(double) * 4;

    if (allocate_gpu_memory) {
      switch (pbs_variant) {
      case PBS_VARIANT::DEFAULT: {
        // Shared-memory footprints of the two kernel steps; they decide how
        // much per-sample state must spill to global memory (device_mem).
        uint64_t full_sm_step_one =
            get_buffer_size_full_sm_programmable_bootstrap_step_one<
                __uint128_t>(polynomial_size);
        uint64_t full_sm_step_two =
            get_buffer_size_full_sm_programmable_bootstrap_step_two<
                __uint128_t>(polynomial_size);
        uint64_t partial_sm =
            get_buffer_size_partial_sm_programmable_bootstrap<__uint128_t>(
                polynomial_size);

        uint64_t partial_dm_step_one = full_sm_step_one - partial_sm;
        uint64_t partial_dm_step_two = full_sm_step_two - partial_sm;
        uint64_t full_dm = full_sm_step_one;
        uint64_t device_mem = 0;

        // Pick the spill size matching the kernel mode that will run:
        // everything in device memory, partially in shared memory, or
        // (final else-case fallthrough) fully in shared memory.
        if (max_shared_memory < partial_sm) {
          device_mem = full_dm * input_lwe_ciphertext_count * level_count *
                       (glwe_dimension + 1);
        } else if (max_shared_memory < full_sm_step_two) {
          device_mem =
              (partial_dm_step_two + partial_dm_step_one * level_count) *
              input_lwe_ciphertext_count * (glwe_dimension + 1);
        } else if (max_shared_memory < full_sm_step_one) {
          device_mem = partial_dm_step_one * input_lwe_ciphertext_count *
                       level_count * (glwe_dimension + 1);
        }
        // Otherwise, both kernels run all in shared memory
        d_mem = (int8_t *)cuda_malloc_async(device_mem, stream, gpu_index);
        global_join_buffer = (double *)cuda_malloc_async(
            global_join_buffer_size, stream, gpu_index);
        global_accumulator = (__uint128_t *)cuda_malloc_async(
            (glwe_dimension + 1) * input_lwe_ciphertext_count *
                polynomial_size * sizeof(__uint128_t),
            stream, gpu_index);
      } break;
      case PBS_VARIANT::CG: {
        // Cooperative-groups variant: a single kernel, so only one pair of
        // full/partial shared-memory footprints.
        uint64_t full_sm =
            get_buffer_size_full_sm_programmable_bootstrap_cg<__uint128_t>(
                polynomial_size);
        uint64_t partial_sm =
            get_buffer_size_partial_sm_programmable_bootstrap_cg<__uint128_t>(
                polynomial_size);

        uint64_t partial_dm = full_sm - partial_sm;
        uint64_t full_dm = full_sm;
        uint64_t device_mem = 0;

        if (max_shared_memory < partial_sm) {
          device_mem = full_dm * input_lwe_ciphertext_count * level_count *
                       (glwe_dimension + 1);
        } else if (max_shared_memory < full_sm) {
          device_mem = partial_dm * input_lwe_ciphertext_count * level_count *
                       (glwe_dimension + 1);
        }
        // Otherwise, both kernels run all in shared memory
        d_mem = (int8_t *)cuda_malloc_async(device_mem, stream, gpu_index);
        global_join_buffer = (double *)cuda_malloc_async(
            global_join_buffer_size, stream, gpu_index);
      } break;
#if CUDA_ARCH >= 900
      case PBS_VARIANT::TBC: {
        // Thread-block-cluster variant (SM90+). Distributed shared memory
        // adds a fixed minimum footprint on top of the usual buffers.
        bool supports_dsm =
            supports_distributed_shared_memory_on_classic_programmable_bootstrap<
                __uint128_t>(polynomial_size, max_shared_memory);

        uint64_t full_sm =
            get_buffer_size_full_sm_programmable_bootstrap_tbc<__uint128_t>(
                polynomial_size);
        uint64_t partial_sm =
            get_buffer_size_partial_sm_programmable_bootstrap_tbc<__uint128_t>(
                polynomial_size);
        uint64_t minimum_sm_tbc = 0;
        if (supports_dsm)
          minimum_sm_tbc =
              get_buffer_size_sm_dsm_plus_tbc_classic_programmable_bootstrap<
                  __uint128_t>(polynomial_size);

        uint64_t partial_dm = full_sm - partial_sm;
        uint64_t full_dm = full_sm;
        uint64_t device_mem = 0;

        // There is a minimum amount of memory we need to run the TBC PBS, which
        // is minimum_sm_tbc. We know that minimum_sm_tbc bytes are available
        // because otherwise the previous check would have redirected
        // computation to some other variant. If over that we don't have more
        // partial_sm bytes, TBC PBS will run on NOSM. If we have partial_sm but
        // not full_sm bytes, it will run on PARTIALSM. Otherwise, FULLSM.
        //
        // NOSM mode actually requires minimum_sm_tbc shared memory bytes.
        if (max_shared_memory < partial_sm + minimum_sm_tbc) {
          device_mem = full_dm * input_lwe_ciphertext_count * level_count *
                       (glwe_dimension + 1);
        } else if (max_shared_memory < full_sm + minimum_sm_tbc) {
          device_mem = partial_dm * input_lwe_ciphertext_count * level_count *
                       (glwe_dimension + 1);
        }

        // Otherwise, both kernels run all in shared memory
        d_mem = (int8_t *)cuda_malloc_async(device_mem, stream, gpu_index);
        global_join_buffer = (double *)cuda_malloc_async(
            global_join_buffer_size, stream, gpu_index);
      } break;
#endif
      default:
        PANIC("Cuda error (PBS): unsupported implementation variant.")
      }
    }
  }

  // Stream-ordered release of the buffers; must run on the same GPU. Only
  // DEFAULT allocated global_accumulator, so only DEFAULT frees it.
  void release(cudaStream_t stream, uint32_t gpu_index) {
    cuda_drop_async(d_mem, stream, gpu_index);
    cuda_drop_async(global_join_buffer, stream, gpu_index);

    if (pbs_variant == DEFAULT)
      cuda_drop_async(global_accumulator, stream, gpu_index);
  }
};
template <typename Torus>
uint64_t get_buffer_size_programmable_bootstrap_cg(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
@@ -245,7 +400,7 @@ bool has_support_to_cuda_programmable_bootstrap_cg(uint32_t glwe_dimension,
uint32_t polynomial_size,
uint32_t level_count,
uint32_t num_samples,
int max_shared_memory);
uint32_t max_shared_memory);
template <typename Torus>
void cuda_programmable_bootstrap_cg_lwe_ciphertext_vector(

View File

@@ -20,6 +20,11 @@ void cuda_convert_lwe_programmable_bootstrap_key_64(
uint32_t input_lwe_dim, uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size);
void cuda_convert_lwe_programmable_bootstrap_key_128(
void *stream, uint32_t gpu_index, void *dest, void const *src,
uint32_t input_lwe_dim, uint32_t glwe_dim, uint32_t level_count,
uint32_t polynomial_size);
void scratch_cuda_programmable_bootstrap_amortized_32(
void *stream, uint32_t gpu_index, int8_t **pbs_buffer,
uint32_t glwe_dimension, uint32_t polynomial_size,
@@ -62,6 +67,11 @@ void scratch_cuda_programmable_bootstrap_64(
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, bool allocate_gpu_memory);
void scratch_cuda_programmable_bootstrap_128(
void *stream, uint32_t gpu_index, int8_t **buffer, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t level_count,
uint32_t input_lwe_ciphertext_count, bool allocate_gpu_memory);
void cuda_programmable_bootstrap_lwe_ciphertext_vector_32(
void *stream, uint32_t gpu_index, void *lwe_array_out,
void const *lwe_output_indexes, void const *lut_vector,
@@ -80,7 +90,17 @@ void cuda_programmable_bootstrap_lwe_ciphertext_vector_64(
uint32_t polynomial_size, uint32_t base_log, uint32_t level_count,
uint32_t num_samples, uint32_t num_many_lut, uint32_t lut_stride);
void cuda_programmable_bootstrap_lwe_ciphertext_vector_128(
void *stream, uint32_t gpu_index, void *lwe_array_out,
void const *lut_vector, void const *lwe_array_in,
void const *bootstrapping_key, int8_t *buffer, uint32_t lwe_dimension,
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t base_log,
uint32_t level_count, uint32_t num_samples);
void cleanup_cuda_programmable_bootstrap(void *stream, uint32_t gpu_index,
int8_t **pbs_buffer);
void cleanup_cuda_programmable_bootstrap_128(void *stream, uint32_t gpu_index,
int8_t **pbs_buffer);
}
#endif // CUDA_BOOTSTRAP_H

View File

@@ -8,7 +8,7 @@ extern "C" {
bool has_support_to_cuda_programmable_bootstrap_cg_multi_bit(
uint32_t glwe_dimension, uint32_t polynomial_size, uint32_t level_count,
uint32_t num_samples, int max_shared_memory);
uint32_t num_samples, uint32_t max_shared_memory);
void cuda_convert_lwe_multi_bit_programmable_bootstrap_key_64(
void *stream, uint32_t gpu_index, void *dest, void const *src,

View File

@@ -24,7 +24,7 @@ void cuda_convert_lwe_ciphertext_vector_to_cpu_64(void *stream,
void cuda_glwe_sample_extract_64(void *stream, uint32_t gpu_index,
void *lwe_array_out, void const *glwe_array_in,
uint32_t const *nth_array, uint32_t num_nths,
uint32_t glwe_dimension,
uint32_t lwe_per_glwe, uint32_t glwe_dimension,
uint32_t polynomial_size) {
switch (polynomial_size) {
@@ -32,43 +32,43 @@ void cuda_glwe_sample_extract_64(void *stream, uint32_t gpu_index,
host_sample_extract<uint64_t, AmortizedDegree<256>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t const *)glwe_array_in, (uint32_t const *)nth_array, num_nths,
glwe_dimension);
lwe_per_glwe, glwe_dimension);
break;
case 512:
host_sample_extract<uint64_t, AmortizedDegree<512>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t const *)glwe_array_in, (uint32_t const *)nth_array, num_nths,
glwe_dimension);
lwe_per_glwe, glwe_dimension);
break;
case 1024:
host_sample_extract<uint64_t, AmortizedDegree<1024>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t const *)glwe_array_in, (uint32_t const *)nth_array, num_nths,
glwe_dimension);
lwe_per_glwe, glwe_dimension);
break;
case 2048:
host_sample_extract<uint64_t, AmortizedDegree<2048>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t const *)glwe_array_in, (uint32_t const *)nth_array, num_nths,
glwe_dimension);
lwe_per_glwe, glwe_dimension);
break;
case 4096:
host_sample_extract<uint64_t, AmortizedDegree<4096>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t const *)glwe_array_in, (uint32_t const *)nth_array, num_nths,
glwe_dimension);
lwe_per_glwe, glwe_dimension);
break;
case 8192:
host_sample_extract<uint64_t, AmortizedDegree<8192>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t const *)glwe_array_in, (uint32_t const *)nth_array, num_nths,
glwe_dimension);
lwe_per_glwe, glwe_dimension);
break;
case 16384:
host_sample_extract<uint64_t, AmortizedDegree<16384>>(
static_cast<cudaStream_t>(stream), gpu_index, (uint64_t *)lwe_array_out,
(uint64_t const *)glwe_array_in, (uint32_t const *)nth_array, num_nths,
glwe_dimension);
lwe_per_glwe, glwe_dimension);
break;
default:
PANIC("Cuda error: unsupported polynomial size. Supported "

View File

@@ -28,7 +28,7 @@ void cuda_convert_lwe_ciphertext_vector_to_cpu(cudaStream_t stream,
template <typename Torus, class params>
__global__ void sample_extract(Torus *lwe_array_out, Torus const *glwe_array_in,
uint32_t const *nth_array,
uint32_t const *nth_array, uint32_t lwe_per_glwe,
uint32_t glwe_dimension) {
const int input_id = blockIdx.x;
@@ -39,28 +39,28 @@ __global__ void sample_extract(Torus *lwe_array_out, Torus const *glwe_array_in,
auto lwe_out = lwe_array_out + input_id * lwe_output_size;
// We assume each GLWE will store the first polynomial_size inputs
uint32_t lwe_per_glwe = params::degree;
auto glwe_in = glwe_array_in + (input_id / lwe_per_glwe) * glwe_input_size;
// nth is ensured to be in [0, lwe_per_glwe)
auto nth = nth_array[input_id] % lwe_per_glwe;
// nth is ensured to be in [0, params::degree)
auto nth = nth_array[input_id] % params::degree;
sample_extract_mask<Torus, params>(lwe_out, glwe_in, glwe_dimension, nth);
sample_extract_body<Torus, params>(lwe_out, glwe_in, glwe_dimension, nth);
}
// lwe_per_glwe LWEs will be extracted per GLWE ciphertext, thus we need to have
// enough indexes
template <typename Torus, class params>
__host__ void host_sample_extract(cudaStream_t stream, uint32_t gpu_index,
Torus *lwe_array_out,
Torus const *glwe_array_in,
uint32_t const *nth_array, uint32_t num_nths,
uint32_t glwe_dimension) {
__host__ void
host_sample_extract(cudaStream_t stream, uint32_t gpu_index,
Torus *lwe_array_out, Torus const *glwe_array_in,
uint32_t const *nth_array, uint32_t num_nths,
uint32_t lwe_per_glwe, uint32_t glwe_dimension) {
cuda_set_device(gpu_index);
dim3 grid(num_nths);
dim3 thds(params::degree / params::opt);
sample_extract<Torus, params><<<grid, thds, 0, stream>>>(
lwe_array_out, glwe_array_in, nth_array, glwe_dimension);
lwe_array_out, glwe_array_in, nth_array, lwe_per_glwe, glwe_dimension);
check_cuda_error(cudaGetLastError());
}

View File

@@ -2,7 +2,6 @@
#define CNCRT_FAST_KS_CUH
#undef NDEBUG
#include <assert.h>
#include "device.h"
#include "gadget.cuh"

View File

@@ -3,6 +3,7 @@
#include "crypto/torus.cuh"
#include "device.h"
#include "fft128/f128.cuh"
#include <cstdint>
/**
@@ -42,6 +43,13 @@ public:
}
}
// Runs one decomposition level for every polynomial handled by this
// decomposer, compressing each into its own slice of `result`
// (params::degree / 2 * 4 doubles per polynomial).
__device__ void decompose_and_compress_next_128(double *result) {
  for (int poly_idx = 0; poly_idx < num_poly; poly_idx++) {
    double *poly_slice = result + poly_idx * params::degree / 2 * 4;
    decompose_and_compress_next_polynomial_128(poly_slice, poly_idx);
  }
}
// Decomposes a single polynomial
__device__ void decompose_and_compress_next_polynomial(double2 *result,
int j) {
@@ -75,10 +83,58 @@ public:
synchronize_threads_in_block();
}
// Decomposes a single polynomial
// Peels the next base-2^base_log digit off polynomial `j` of the internal
// decomposition state and writes it, converted to double-double (f128),
// into `result`.
//
// Output layout: four planes of params::degree / 2 doubles each,
//   [re.hi | re.lo | im.hi | im.lo]
// where "re" digits come from the first half of the polynomial and "im"
// digits from the second half — presumably paired as complex values for the
// f128 FFT; confirm against the FFT input layout.
//
// Mutates `state` in place (digit shift-out plus carry), so successive
// calls produce successive decomposition levels.
__device__ void decompose_and_compress_next_polynomial_128(double *result,
                                                           int j) {
  uint32_t tid = threadIdx.x;
  auto state_slice = &state[j * params::degree];
  // Each thread processes params::opt / 2 coefficient pairs, striding by
  // the block's thread count (params::degree / params::opt).
  for (int i = 0; i < params::opt / 2; i++) {
    auto input1 = &state_slice[tid];
    auto input2 = &state_slice[tid + params::degree / 2];

    // Extract the next digit of each coefficient and shift the state.
    T res_re = *input1 & mask_mod_b;
    T res_im = *input2 & mask_mod_b;

    *input1 >>= base_log; // Update state
    *input2 >>= base_log; // Update state

    // Carry correction: moves each digit into a signed (balanced) range by
    // subtracting the base and propagating a carry into the remaining
    // state. carry == 1 iff the digit must be recentred.
    T carry_re = ((res_re - 1ll) | *input1) & res_re;
    T carry_im = ((res_im - 1ll) | *input2) & res_im;
    carry_re >>= (base_log - 1);
    carry_im >>= (base_log - 1);

    *input1 += carry_re; // Update state
    *input2 += carry_im; // Update state

    res_re -= carry_re << base_log;
    res_im -= carry_im << base_log;

    // Convert the signed digits to double-double and scatter hi/lo parts
    // into the four output planes.
    auto out_re = u128_to_signed_to_f128(res_re);
    auto out_im = u128_to_signed_to_f128(res_im);

    auto out_re_hi = result + 0 * params::degree / 2;
    auto out_re_lo = result + 1 * params::degree / 2;
    auto out_im_hi = result + 2 * params::degree / 2;
    auto out_im_lo = result + 3 * params::degree / 2;

    out_re_hi[tid] = out_re.hi;
    out_re_lo[tid] = out_re.lo;
    out_im_hi[tid] = out_im.hi;
    out_im_lo[tid] = out_im.lo;

    tid += params::degree / params::opt;
  }
  // All lanes must finish writing `result` before it is consumed.
  synchronize_threads_in_block();
}
// Runs the remaining (level_count - level) decomposition steps, each one
// overwriting `result` with the next compressed level.
__device__ void decompose_and_compress_level(double2 *result, int level) {
  int remaining = level_count - level;
  while (remaining-- > 0)
    decompose_and_compress_next(result);
}
// 128-bit counterpart of decompose_and_compress_level: runs the remaining
// (level_count - level) decomposition steps on the f128 planes.
__device__ void decompose_and_compress_level_128(double *result, int level) {
  int remaining = level_count - level;
  while (remaining-- > 0)
    decompose_and_compress_next_128(result);
}
};
template <typename Torus>

View File

@@ -135,7 +135,7 @@ bool cuda_check_support_thread_block_clusters() {
}
/// Copy memory to the GPU asynchronously
void cuda_memcpy_async_to_gpu(void *dest, void *src, uint64_t size,
void cuda_memcpy_async_to_gpu(void *dest, const void *src, uint64_t size,
cudaStream_t stream, uint32_t gpu_index) {
if (size == 0)
return;
@@ -304,8 +304,8 @@ void cuda_drop_async(void *ptr, cudaStream_t stream, uint32_t gpu_index) {
}
/// Get the maximum size for the shared memory
int cuda_get_max_shared_memory(uint32_t gpu_index) {
int max_shared_memory = 0;
uint32_t cuda_get_max_shared_memory(uint32_t gpu_index) {
auto max_shared_memory = 0;
#if CUDA_ARCH == 900
max_shared_memory = 226000;
#elif CUDA_ARCH == 890
@@ -321,5 +321,5 @@ int cuda_get_max_shared_memory(uint32_t gpu_index) {
gpu_index);
check_cuda_error(cudaGetLastError());
#endif
return max_shared_memory;
return (uint32_t)(max_shared_memory);
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,410 @@
#ifndef CUDA_FFT128_F128_CUH
#define CUDA_FFT128_F128_CUH
#include <cstdint>
#include <cstring>
// Double-double ("f128") real number: the unevaluated sum hi + lo of two
// doubles, giving roughly twice the precision of a double. The helpers
// below are the classic error-free transformations (two_sum, two_prod,
// quick_two_sum) used by double-double libraries such as QD. On the device,
// explicitly rounded intrinsics (__dadd_rn, __fma_rn, ...) are used so that
// fast-math compilation cannot optimize away the error terms.
struct alignas(16) f128 {
  double hi; // leading component (closest double to the value)
  double lo; // trailing error term

  // Default and parameterized constructors
  __host__ __device__ f128() : hi(0.0), lo(0.0) {}
  __host__ __device__ f128(double high, double low) : hi(high), lo(low) {}

  // Quick two-sum: exact a + b as (sum, error); assumes |a| >= |b|.
  __host__ __device__ __forceinline__ static f128 quick_two_sum(double a,
                                                                double b) {
#ifdef __CUDA_ARCH__
    double s = __dadd_rn(a, b);
    return f128(s, __dsub_rn(b, __dsub_rn(s, a)));
#else
    double s = a + b;
    return f128(s, b - (s - a));
#endif
  }

  // Two-sum: exact a + b as (sum, error); no ordering precondition.
  __host__ __device__ __forceinline__ static f128 two_sum(double a, double b) {
#ifdef __CUDA_ARCH__
    double s = __dadd_rn(a, b);
    double bb = __dsub_rn(s, a);
    return f128(s, __dadd_rn(__dsub_rn(a, __dsub_rn(s, bb)), __dsub_rn(b, bb)));
#else
    double s = a + b;
    double bb = s - a;
    return f128(s, (a - (s - bb)) + (b - bb));
#endif
  }

  // Two-product: exact a * b as (product, error), via fused multiply-add.
  __host__ __device__ __forceinline__ static f128 two_prod(double a, double b) {
#ifdef __CUDA_ARCH__
    double p = __dmul_rn(a, b);
    double p2 = __fma_rn(a, b, -p);
#else
    double p = a * b;
    double p2 = fma(a, b, -p);
#endif
    return f128(p, p2);
  }

  // Two-difference: exact a - b as (difference, error).
  __host__ __device__ __forceinline__ static f128 two_diff(double a, double b) {
#ifdef __CUDA_ARCH__
    double s = __dsub_rn(a, b);
    double bb = __dsub_rn(s, a);
    return f128(s, __dsub_rn(__dsub_rn(a, __dsub_rn(s, bb)), __dadd_rn(b, bb)));
#else
    double s = a - b;
    double bb = s - a;
    return f128(s, (a - (s - bb)) - (b + bb));
#endif
  }

  // Addition (full-accuracy variant: both error terms are combined before
  // renormalization).
  __host__ __device__ static f128 add(const f128 &a, const f128 &b) {
    auto s = two_sum(a.hi, b.hi);
    auto t = two_sum(a.lo, b.lo);

    double hi = s.hi;
    double lo = s.lo + t.hi;
    hi = hi + lo;
    lo = lo - (hi - s.hi);

    return f128(hi, lo + t.lo);
  }

  // Addition with estimate (faster, slightly less accurate: low parts are
  // added in plain double precision before one renormalization).
  __host__ __device__ static f128 add_estimate(const f128 &a, const f128 &b) {
    auto se = two_sum(a.hi, b.hi);
#ifdef __CUDA_ARCH__
    se.lo = __dadd_rn(se.lo, __dadd_rn(a.lo, b.lo));
#else
    se.lo += (a.lo + b.lo);
#endif
    return quick_two_sum(se.hi, se.lo);
  }

  // Subtraction with estimate (counterpart of add_estimate).
  __host__ __device__ static f128 sub_estimate(const f128 &a, const f128 &b) {
    f128 se = two_diff(a.hi, b.hi);
#ifdef __CUDA_ARCH__
    se.lo = __dadd_rn(se.lo, a.lo);
    se.lo = __dsub_rn(se.lo, b.lo);
#else
    se.lo += a.lo;
    se.lo -= b.lo;
#endif
    return quick_two_sum(se.hi, se.lo);
  }

  // Subtraction (full-accuracy variant, two renormalization passes).
  __host__ __device__ static f128 sub(const f128 &a, const f128 &b) {
    auto s = two_diff(a.hi, b.hi);
    auto t = two_diff(a.lo, b.lo);
    s = quick_two_sum(s.hi, s.lo + t.hi);
    return quick_two_sum(s.hi, s.lo + t.lo);
  }

  // Multiplication; the lo*lo cross term is dropped (standard double-double
  // multiply).
  __host__ __device__ static f128 mul(const f128 &a, const f128 &b) {
    auto p = two_prod(a.hi, b.hi);
#ifdef __CUDA_ARCH__
    double a_0_x_b_1 = __dmul_rn(a.hi, b.lo);
    double a_1_x_b_0 = __dmul_rn(a.lo, b.hi);
    p.lo = __dadd_rn(p.lo, __dadd_rn(a_0_x_b_1, a_1_x_b_0));
#else
    p.lo += (a.hi * b.lo + a.lo * b.hi);
#endif

    p = quick_two_sum(p.hi, p.lo);
    return p;
  }

  // Exact sum of two plain doubles as an f128.
  __host__ __device__ static f128 add_f64_f64(const double a, const double b) {
    return two_sum(a, b);
  }

  // floor() for a double-double: if hi is already integral, the floor acts
  // on lo; otherwise floor(hi) alone is exact and lo is cleared.
  __host__ __device__ static f128 f128_floor(const f128 &x) {
    double x0_floor = floor(x.hi);

    if (x0_floor == x.hi) {
      return add_f64_f64(x0_floor, floor(x.lo));
    }

    return f128(x0_floor, 0.0);
  }

  // Complex multiply on (re, im) pairs of f128; writes into c_re/c_im.
  // All four partial products are formed before the outputs are written.
  __host__ __device__ static void
  cplx_f128_mul_assign(f128 &c_re, f128 &c_im, const f128 &a_re,
                       const f128 &a_im, const f128 &b_re, const f128 &b_im) {
    auto a_re_x_b_re = mul(a_re, b_re);
    auto a_re_x_b_im = mul(a_re, b_im);
    auto a_im_x_b_re = mul(a_im, b_re);
    auto a_im_x_b_im = mul(a_im, b_im);

    c_re = sub_estimate(a_re_x_b_re, a_im_x_b_im);
    c_im = add_estimate(a_im_x_b_re, a_re_x_b_im);
  }

  // Complex subtraction on (re, im) pairs (estimate variant).
  __host__ __device__ static void
  cplx_f128_sub_assign(f128 &c_re, f128 &c_im, const f128 &a_re,
                       const f128 &a_im, const f128 &b_re, const f128 &b_im) {
    c_re = sub_estimate(a_re, b_re);
    c_im = sub_estimate(a_im, b_im);
  }

  // Complex addition on (re, im) pairs (estimate variant).
  __host__ __device__ static void
  cplx_f128_add_assign(f128 &c_re, f128 &c_im, const f128 &a_re,
                       const f128 &a_im, const f128 &b_re, const f128 &b_im) {
    c_re = add_estimate(a_re, b_re);
    c_im = add_estimate(a_im, b_im);
  }
};
// Complex number whose real and imaginary parts are double-double (f128)
// values. Arithmetic delegates to the f128 error-free building blocks;
// subtraction is implemented as addition of the negated operand to match
// the estimate-based rounding of the rest of the file.
struct f128x2 {
  f128 re; // real part
  f128 im; // imaginary part

  __host__ __device__ f128x2() : re(), im() {}
  __host__ __device__ f128x2(const f128 &real, const f128 &imag)
      : re(real), im(imag) {}
  __host__ __device__ f128x2(double real, double imag)
      : re(real, 0.0), im(imag, 0.0) {}
  __host__ __device__ explicit f128x2(double real)
      : re(real, 0.0), im(0.0, 0.0) {}

  __host__ __device__ f128x2(const f128x2 &other)
      : re(other.re), im(other.im) {}

  __host__ __device__ f128x2(f128x2 &&other) noexcept
      : re(std::move(other.re)), im(std::move(other.im)) {}

  __host__ __device__ f128x2 &operator=(const f128x2 &other) {
    if (this != &other) {
      re = other.re;
      im = other.im;
    }
    return *this;
  }

  __host__ __device__ f128x2 &operator=(f128x2 &&other) noexcept {
    if (this != &other) {
      re = std::move(other.re);
      im = std::move(other.im);
    }
    return *this;
  }

  // Complex conjugate: (re, -im).
  __host__ __device__ f128x2 conjugate() const {
    return f128x2(re, f128(-im.hi, -im.lo));
  }

  // |z|^2 = re*re + im*im.
  __host__ __device__ f128 norm_squared() const {
    return f128::add(f128::mul(re, re), f128::mul(im, im));
  }

  __host__ __device__ void zero() {
    re = f128(0.0, 0.0);
    im = f128(0.0, 0.0);
  }

  // Addition
  __host__ __device__ friend f128x2 operator+(const f128x2 &a,
                                              const f128x2 &b) {
    return f128x2(f128::add(a.re, b.re), f128::add(a.im, b.im));
  }

  // Subtraction (as addition of the negation, preserving f128::add rounding)
  __host__ __device__ friend f128x2 operator-(const f128x2 &a,
                                              const f128x2 &b) {
    return f128x2(f128::add(a.re, f128(-b.re.hi, -b.re.lo)),
                  f128::add(a.im, f128(-b.im.hi, -b.im.lo)));
  }

  // Multiplication (complex multiplication)
  __host__ __device__ friend f128x2 operator*(const f128x2 &a,
                                              const f128x2 &b) {
    // Hoisted so a.im * b.im is computed once; the previous version
    // evaluated f128::mul(a.im, b.im) twice just to negate it.
    const f128 im_prod = f128::mul(a.im, b.im);
    f128 real_part =
        f128::add(f128::mul(a.re, b.re), f128(-im_prod.hi, -im_prod.lo));
    f128 imag_part = f128::add(f128::mul(a.re, b.im), f128::mul(a.im, b.re));
    return f128x2(real_part, imag_part);
  }

  // Addition-assignment operator
  __host__ __device__ f128x2 &operator+=(const f128x2 &other) {
    re = f128::add(re, other.re);
    im = f128::add(im, other.im);
    return *this;
  }

  // Subtraction-assignment operator
  __host__ __device__ f128x2 &operator-=(const f128x2 &other) {
    re = f128::add(re, f128(-other.re.hi, -other.re.lo));
    im = f128::add(im, f128(-other.im.hi, -other.im.lo));
    return *this;
  }

  // Multiplication-assignment operator
  __host__ __device__ f128x2 &operator*=(const f128x2 &other) {
    // Same hoist as operator*: compute im * other.im exactly once.
    const f128 im_prod = f128::mul(im, other.im);
    f128 new_re =
        f128::add(f128::mul(re, other.re), f128(-im_prod.hi, -im_prod.lo));
    f128 new_im = f128::add(f128::mul(re, other.im), f128::mul(im, other.re));
    re = new_re;
    im = new_im;
    return *this;
  }
};
// Bit-casts a double to its raw IEEE-754 representation.
// The previous implementation dereferenced a reinterpret_cast'ed pointer,
// which violates strict aliasing (undefined behavior in C++). memcpy is the
// sanctioned pre-C++20 bit cast; on the device the dedicated intrinsic
// avoids any memory round-trip.
__host__ __device__ inline uint64_t double_to_bits(double d) {
#ifdef __CUDA_ARCH__
  return static_cast<uint64_t>(__double_as_longlong(d));
#else
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  return bits;
#endif
}
// Bit-casts a raw IEEE-754 representation back to a double.
// Replaces a strict-aliasing-violating reinterpret_cast dereference
// (undefined behavior) with memcpy on the host and the dedicated bit-cast
// intrinsic on the device.
__host__ __device__ inline double bits_to_double(uint64_t bits) {
#ifdef __CUDA_ARCH__
  return __longlong_as_double(static_cast<long long>(bits));
#else
  double d;
  memcpy(&d, &bits, sizeof(d));
  return d;
#endif
}
// Converts an unsigned 128-bit integer to a double.
// The input is split into two chunks; each chunk is materialized exactly by
// OR-ing its bits into the mantissa of a power-of-two anchor constant
// (A = 2^52, B = 2^104, C = 2^76, D ~= 2^128) and subtracting the anchor,
// so the single rounding happens in the final addition l + h.
__host__ __device__ inline double u128_to_f64(__uint128_t x) {
  const __uint128_t ONE = 1;
  const double A = ONE << 52;
  const double B = ONE << 104;
  const double C = ONE << 76;
  const double D = 340282366920938500000000000000000000000.; // ~2^128
  const __uint128_t threshold = (ONE << 104);

  if (x < threshold) {
    // x < 2^104: low 52 bits go through A, bits 52..103 go through B;
    // both chunks are represented exactly.
    uint64_t A_bits = double_to_bits(A);
    __uint128_t shifted = (x << 12);
    uint64_t lower64 = static_cast<uint64_t>(shifted);
    lower64 >>= 12; // low 52 bits of x

    uint64_t bits_l = A_bits | lower64;
    double l_temp = bits_to_double(bits_l);
    double l = l_temp - A;

    uint64_t B_bits = double_to_bits(B);
    uint64_t top64 = static_cast<uint64_t>(x >> 52);

    uint64_t bits_h = B_bits | top64;
    double h_temp = bits_to_double(bits_h);
    double h = h_temp - B;

    return (l + h);
  } else {
    // x >= 2^104: bits 24..75 go through C, with the low 24 bits folded in
    // (presumably as sticky bits for correct rounding — confirm against the
    // reference implementation); bits 76..127 go through D.
    uint64_t C_bits = double_to_bits(C);
    __uint128_t shifted = (x >> 12);
    uint64_t lower64 = static_cast<uint64_t>(shifted);
    lower64 >>= 12; // bits 24..75 of x

    uint64_t x_lo = static_cast<uint64_t>(x);
    uint64_t mask_part = (x_lo & 0xFFFFFFULL);

    uint64_t bits_l = C_bits | lower64 | mask_part;
    double l_temp = bits_to_double(bits_l);
    double l = l_temp - C;

    uint64_t D_bits = double_to_bits(D);
    uint64_t top64 = static_cast<uint64_t>(x >> 76);

    uint64_t bits_h = D_bits | top64;
    double h_temp = bits_to_double(bits_h);
    double h = h_temp - D;

    return (l + h);
  }
}
// Truncating conversion of a double to an unsigned 128-bit integer.
// Values below 1.0 (including all negatives, whose biased exponent field
// makes the shift test fail) and magnitudes of 2^128 or more map to 0.
__host__ __device__ inline __uint128_t f64_to_u128(const double f) {
  const uint64_t raw = double_to_bits(f);

  // Below 1.0 the truncated integer part is zero.
  if (raw < 1023ull << 52)
    return 0;

  // Shift that aligns the implicit-one mantissa (placed at bit 127) with
  // the binary point; underflows (wraps) for out-of-range exponents.
  const uint64_t shift = 1150 - (raw >> 52);
  if (shift >= 128)
    return 0;

  const __uint128_t mantissa =
      (__uint128_t)1 << 127 | (__uint128_t)raw << 75;
  return mantissa >> shift;
}
// Truncating conversion of a double to a signed 128-bit integer, returned
// as its two's-complement bit pattern in a __uint128_t (callers assign it
// back to __int128_t, e.g. u128_from_torus_f128). Values with |f| < 1
// return 0.
__host__ __device__ inline __uint128_t f64_to_i128(const double f) {
  // Get raw bits of the double
  const uint64_t f_bits = double_to_bits(f);
  // Remove sign bit (equivalent to Rust's !0 >> 1 mask)
  const uint64_t a = f_bits & 0x7FFFFFFFFFFFFFFFull;

  // Check if value is in [0, 1) range
  if (a < (1023ull << 52)) {
    return 0;
  }

  // Reconstruct mantissa with implicit leading 1
  const __uint128_t m =
      (__uint128_t{1} << 127) | (static_cast<__uint128_t>(a) << 75);

  // Calculate shift amount based on exponent
  const uint64_t exponent = a >> 52;
  const uint64_t s = 1150 - exponent;

  // Guard added: for |f| >= 2^128 the subtraction above wraps and `m >> s`
  // would shift by >= 128 bits (undefined behavior). Mirror the s >= 128
  // check already present in f64_to_u128 and return 0 for out-of-range
  // magnitudes.
  if (s >= 128) {
    return 0;
  }

  // Perform unsigned right shift
  const __uint128_t u = m >> s;

  // Apply sign (check original sign bit)
  const __int128_t result = static_cast<__int128_t>(u);
  return (f_bits >> 63) ? -result : result;
}
// Converts a signed 128-bit integer to a double: convert the magnitude via
// u128_to_f64, then stamp the original sign bit onto the result.
__host__ __device__ inline double i128_to_f64(__int128_t const x) {
  // Magnitude of x as an unsigned 128-bit value (well-defined even for the
  // most negative value thanks to unsigned wrap-around).
  const __uint128_t magnitude =
      (x < 0) ? static_cast<__uint128_t>(-x) : static_cast<__uint128_t>(x);
  // Sign bit taken from the top of the two's-complement representation.
  const uint64_t sign = static_cast<uint64_t>(x >> 64) & (1ULL << 63);
  return bits_to_double(double_to_bits(u128_to_f64(magnitude)) | sign);
}
// Converts a 128-bit value, reinterpreted as a signed integer, to a
// double-double: hi is the double approximation of x, lo is the residual
// x - roundtrip(hi), itself converted to double. The round trip goes
// through f64_to_u128 on the magnitude and re-applies the sign taken from
// the approximation's sign bit.
__host__ __device__ inline f128 u128_to_signed_to_f128(__uint128_t x) {
  const double first_approx = i128_to_f64(x);
  const uint64_t sign_bit = double_to_bits(first_approx) & (1ull << 63);

  const __uint128_t first_approx_roundtrip =
      f64_to_u128((first_approx < 0) ? -first_approx : first_approx);
  // Negate (two's complement) when the approximation was negative.
  const __uint128_t first_approx_roundtrip_signed =
      (sign_bit == (1ull << 63)) ? -first_approx_roundtrip
                                 : first_approx_roundtrip;

  // Exactly the part of x the first approximation missed.
  double correction = i128_to_f64(x - first_approx_roundtrip_signed);
  return f128(first_approx, correction);
}
// Maps a torus element (double-double, interpreted modulo 1) to an unsigned
// 128-bit integer: frac(a) * 2^128, truncated (see TODO about rounding).
__host__ __device__ inline __uint128_t u128_from_torus_f128(const f128 &a) {
  // Reduce to the fractional part in [0, 1).
  auto x = f128::sub_estimate(a, f128::f128_floor(a));
  const double normalization = 340282366920938500000000000000000000000.; // ~2^128
#ifdef __CUDA_ARCH__
  x.hi = __dmul_rn(x.hi, normalization);
  x.lo = __dmul_rn(x.lo, normalization);
#else
  x.hi *= normalization;
  x.lo *= normalization;
#endif

  // TODO has to be round
  x = f128::f128_floor(x);

  // Convert hi and lo separately (lo may be negative, hence the signed
  // conversion) and recombine with wrapping addition.
  __uint128_t x0 = f64_to_u128(x.hi);
  __int128_t x1 = f64_to_i128(x.lo);

  return x0 + x1;
}
#endif

View File

@@ -0,0 +1,163 @@
#include "fft128.cuh"
// Forward f128 FFT (integer interpretation) dispatcher: selects the kernel
// instantiation matching the runtime polynomial size N. Output is split
// into four double planes (re0/re1 = real hi/lo, im0/im1 = imaginary
// hi/lo). Asynchronous with respect to the host; ordered on `stream`.
void cuda_fourier_transform_forward_as_integer_f128_async(
    void *stream, uint32_t gpu_index, void *re0, void *re1, void *im0,
    void *im1, void const *standard, const uint32_t N,
    const uint32_t number_of_samples) {
  // The degree must be a compile-time template argument, hence the switch;
  // the local macro keeps the seven cases in one place.
#define DISPATCH_FWD_INT_F128(DEGREE)                                          \
  case DEGREE:                                                                 \
    host_fourier_transform_forward_as_integer_f128<AmortizedDegree<DEGREE>>(   \
        static_cast<cudaStream_t>(stream), gpu_index, (double *)re0,           \
        (double *)re1, (double *)im0, (double *)im1,                           \
        (__uint128_t const *)standard, N, number_of_samples);                  \
    break;
  switch (N) {
    DISPATCH_FWD_INT_F128(64)
    DISPATCH_FWD_INT_F128(128)
    DISPATCH_FWD_INT_F128(256)
    DISPATCH_FWD_INT_F128(512)
    DISPATCH_FWD_INT_F128(1024)
    DISPATCH_FWD_INT_F128(2048)
    DISPATCH_FWD_INT_F128(4096)
  default:
    PANIC("Cuda error (f128 fft): unsupported polynomial size. Supported "
          "N's are powers of two"
          " in the interval [64..4096].")
  }
#undef DISPATCH_FWD_INT_F128
}
// Forward f128 FFT (torus interpretation) dispatcher: same dispatch scheme
// as the integer variant, but the inputs are interpreted as torus elements.
// Output planes: re0/re1 = real hi/lo, im0/im1 = imaginary hi/lo.
// Asynchronous with respect to the host; ordered on `stream`.
void cuda_fourier_transform_forward_as_torus_f128_async(
    void *stream, uint32_t gpu_index, void *re0, void *re1, void *im0,
    void *im1, void const *standard, const uint32_t N,
    const uint32_t number_of_samples) {
  // Degree is a compile-time template argument; the macro folds the seven
  // otherwise-identical cases.
#define DISPATCH_FWD_TORUS_F128(DEGREE)                                        \
  case DEGREE:                                                                 \
    host_fourier_transform_forward_as_torus_f128<AmortizedDegree<DEGREE>>(     \
        static_cast<cudaStream_t>(stream), gpu_index, (double *)re0,           \
        (double *)re1, (double *)im0, (double *)im1,                           \
        (__uint128_t const *)standard, N, number_of_samples);                  \
    break;
  switch (N) {
    DISPATCH_FWD_TORUS_F128(64)
    DISPATCH_FWD_TORUS_F128(128)
    DISPATCH_FWD_TORUS_F128(256)
    DISPATCH_FWD_TORUS_F128(512)
    DISPATCH_FWD_TORUS_F128(1024)
    DISPATCH_FWD_TORUS_F128(2048)
    DISPATCH_FWD_TORUS_F128(4096)
  default:
    PANIC("Cuda error (f128 fft): unsupported polynomial size. Supported "
          "N's are powers of two"
          " in the interval [64..4096].")
  }
#undef DISPATCH_FWD_TORUS_F128
}
// Asynchronously runs the 128-bit negacyclic inverse FFT on `number_of_samples`
// polynomials, writing u128 torus coefficients into `standard`. N must be a
// power of two in [64..4096]; it is dispatched to a compile-time degree.
void cuda_fourier_transform_backward_as_torus_f128_async(
    void *stream, uint32_t gpu_index, void *standard, void const *re0,
    void const *re1, void const *im0, void const *im1, const uint32_t N,
    const uint32_t number_of_samples) {
#define DISPATCH_IFFT_F128(DEGREE)                                             \
  case DEGREE:                                                                 \
    host_fourier_transform_backward_as_torus_f128<AmortizedDegree<DEGREE>>(    \
        static_cast<cudaStream_t>(stream), gpu_index,                          \
        (__uint128_t *)standard, (double const *)re0, (double const *)re1,     \
        (double const *)im0, (double const *)im1, N, number_of_samples);       \
    break;
  switch (N) {
    DISPATCH_IFFT_F128(64)
    DISPATCH_IFFT_F128(128)
    DISPATCH_IFFT_F128(256)
    DISPATCH_IFFT_F128(512)
    DISPATCH_IFFT_F128(1024)
    DISPATCH_IFFT_F128(2048)
    DISPATCH_IFFT_F128(4096)
  default:
    PANIC("Cuda error (f128 ifft): unsupported polynomial size. Supported "
          "N's are powers of two"
          " in the interval [64..4096].")
  }
#undef DISPATCH_IFFT_F128
}

View File

@@ -0,0 +1,669 @@
#ifndef CUDA_FFT128_CUH
#define CUDA_FFT128_CUH
#include "f128.cuh"
#include "fft/fft128.h"
#include "polynomial/functions.cuh"
#include "polynomial/parameters.cuh"
#include "twiddles.cuh"
#include "types/complex/operations.cuh"
#include <iostream>
using Index = unsigned;
#define NEG_TWID(i) \
f128x2(f128(neg_twiddles_re_hi[(i)], neg_twiddles_re_lo[(i)]), \
f128(neg_twiddles_im_hi[(i)], neg_twiddles_im_lo[(i)]))
#define F64x4_TO_F128x2(f128x2_reg, ind) \
f128x2_reg.re.hi = dt_re_hi[ind]; \
f128x2_reg.re.lo = dt_re_lo[ind]; \
f128x2_reg.im.hi = dt_im_hi[ind]; \
f128x2_reg.im.lo = dt_im_lo[ind]
#define F128x2_TO_F64x4(f128x2_reg, ind) \
dt_re_hi[ind] = f128x2_reg.re.hi; \
dt_re_lo[ind] = f128x2_reg.re.lo; \
dt_im_hi[ind] = f128x2_reg.im.hi; \
dt_im_lo[ind] = f128x2_reg.im.lo
// In-place negacyclic forward FFT over f128 complex values stored as four
// split double arrays (re/im x hi/lo). Expects blockDim.x == degree / opt;
// each thread carries BUTTERFLY_DEPTH butterflies in registers per level and
// exchanges the other half through the dt_* arrays between levels.
// Fix vs. previous version: removed the unused local `ww` (a dead load of
// NEG_TWID(1) from device memory on every iteration of the level-1 loop).
template <class params>
__device__ void negacyclic_forward_fft_f128(double *dt_re_hi, double *dt_re_lo,
                                            double *dt_im_hi,
                                            double *dt_im_lo) {
  __syncthreads();
  constexpr Index BUTTERFLY_DEPTH = params::opt >> 1;
  constexpr Index LOG2_DEGREE = params::log2_degree;
  constexpr Index HALF_DEGREE = params::degree >> 1;
  constexpr Index STRIDE = params::degree / params::opt;
  f128x2 u[BUTTERFLY_DEPTH], v[BUTTERFLY_DEPTH], w;
  Index tid = threadIdx.x;
  // load into registers
#pragma unroll
  for (Index i = 0; i < BUTTERFLY_DEPTH; ++i) {
    F64x4_TO_F128x2(u[i], tid);
    F64x4_TO_F128x2(v[i], tid + HALF_DEGREE);
    tid += STRIDE;
  }
  // level 1: every butterfly shares the single twiddle NEG_TWID(1), so no
  // per-butterfly twiddle lookup is needed here
#pragma unroll
  for (Index i = 0; i < BUTTERFLY_DEPTH; ++i) {
    f128::cplx_f128_mul_assign(w.re, w.im, v[i].re, v[i].im, NEG_TWID(1).re,
                               NEG_TWID(1).im);
    f128::cplx_f128_sub_assign(v[i].re, v[i].im, u[i].re, u[i].im, w.re, w.im);
    f128::cplx_f128_add_assign(u[i].re, u[i].im, u[i].re, u[i].im, w.re, w.im);
  }
  // remaining levels: spill one half to shared memory, swap halves with the
  // partner thread (tid ^ lane_mask), then apply the butterfly
  Index twiddle_shift = 1;
  for (Index l = LOG2_DEGREE - 1; l >= 1; --l) {
    Index lane_mask = 1 << (l - 1);
    Index thread_mask = (1 << l) - 1;
    twiddle_shift <<= 1;
    tid = threadIdx.x;
    __syncthreads();
#pragma unroll
    for (Index i = 0; i < BUTTERFLY_DEPTH; i++) {
      Index rank = tid & thread_mask;
      bool u_stays_in_register = rank < lane_mask;
      F128x2_TO_F64x4(((u_stays_in_register) ? v[i] : u[i]), tid);
      tid = tid + STRIDE;
    }
    __syncthreads();
    tid = threadIdx.x;
#pragma unroll
    for (Index i = 0; i < BUTTERFLY_DEPTH; i++) {
      Index rank = tid & thread_mask;
      bool u_stays_in_register = rank < lane_mask;
      F64x4_TO_F128x2(w, tid ^ lane_mask);
      u[i] = (u_stays_in_register) ? u[i] : w;
      v[i] = (u_stays_in_register) ? w : v[i];
      w = NEG_TWID(tid / lane_mask + twiddle_shift);
      // NOTE(review): w is both the destination and an operand here; assumes
      // cplx_f128_mul_assign tolerates output/input aliasing — TODO confirm
      f128::cplx_f128_mul_assign(w.re, w.im, v[i].re, v[i].im, w.re, w.im);
      f128::cplx_f128_sub_assign(v[i].re, v[i].im, u[i].re, u[i].im, w.re,
                                 w.im);
      f128::cplx_f128_add_assign(u[i].re, u[i].im, u[i].re, u[i].im, w.re,
                                 w.im);
      tid = tid + STRIDE;
    }
  }
  __syncthreads();
  // store registers in SM, interleaving u/v as even/odd entries
  tid = threadIdx.x;
#pragma unroll
  for (Index i = 0; i < BUTTERFLY_DEPTH; i++) {
    F128x2_TO_F64x4(u[i], tid * 2);
    F128x2_TO_F64x4(v[i], (tid * 2 + 1));
    tid = tid + STRIDE;
  }
  __syncthreads();
}
// In-place negacyclic inverse FFT over f128 complex values stored as four
// split double arrays (re/im x hi/lo). Expects blockDim.x == degree / opt.
// NOTE: the inverse-FFT normalization is NOT applied here; in this file it is
// folded into convert_f128_to_u128_as_torus.
// Fix vs. previous version: removed a redundant duplicate `tid = threadIdx.x;`
// at the top of the level loop and corrected the stale load comment (no
// division happens during the load).
template <class params>
__device__ void negacyclic_backward_fft_f128(double *dt_re_hi, double *dt_re_lo,
                                             double *dt_im_hi,
                                             double *dt_im_lo) {
  __syncthreads();
  constexpr Index BUTTERFLY_DEPTH = params::opt >> 1;
  constexpr Index LOG2_DEGREE = params::log2_degree;
  constexpr Index DEGREE = params::degree;
  constexpr Index HALF_DEGREE = params::degree >> 1;
  constexpr Index STRIDE = params::degree / params::opt;
  size_t tid = threadIdx.x;
  f128x2 u[BUTTERFLY_DEPTH], v[BUTTERFLY_DEPTH], w;
  // load into registers from interleaved even/odd positions
#pragma unroll
  for (Index i = 0; i < BUTTERFLY_DEPTH; ++i) {
    F64x4_TO_F128x2(u[i], 2 * tid);
    F64x4_TO_F128x2(v[i], 2 * tid + 1);
    tid += STRIDE;
  }
  Index twiddle_shift = DEGREE;
  for (Index l = 1; l <= LOG2_DEGREE - 1; ++l) {
    Index lane_mask = 1 << (l - 1);
    Index thread_mask = (1 << l) - 1;
    twiddle_shift >>= 1;
    // at this point registers are ready for the butterfly
    tid = threadIdx.x;
    __syncthreads();
#pragma unroll
    for (Index i = 0; i < BUTTERFLY_DEPTH; ++i) {
      w = (u[i] - v[i]);
      u[i] += v[i];
      // inverse transform multiplies by the conjugated twiddle
      v[i] = w * NEG_TWID(tid / lane_mask + twiddle_shift).conjugate();
      // keep one of the register for next iteration and store another one in sm
      Index rank = tid & thread_mask;
      bool u_stays_in_register = rank < lane_mask;
      F128x2_TO_F64x4(((u_stays_in_register) ? v[i] : u[i]), tid);
      tid = tid + STRIDE;
    }
    __syncthreads();
    // prepare registers for next butterfly iteration: pull the half spilled
    // by the partner thread (tid ^ lane_mask)
    tid = threadIdx.x;
#pragma unroll
    for (Index i = 0; i < BUTTERFLY_DEPTH; ++i) {
      Index rank = tid & thread_mask;
      bool u_stays_in_register = rank < lane_mask;
      F64x4_TO_F128x2(w, tid ^ lane_mask);
      u[i] = (u_stays_in_register) ? u[i] : w;
      v[i] = (u_stays_in_register) ? w : v[i];
      tid = tid + STRIDE;
    }
  }
  // last iteration: single shared twiddle NEG_TWID(1)
  for (Index i = 0; i < BUTTERFLY_DEPTH; ++i) {
    w = (u[i] - v[i]);
    u[i] = u[i] + v[i];
    v[i] = w * NEG_TWID(1).conjugate();
  }
  __syncthreads();
  // store registers in SM
  tid = threadIdx.x;
#pragma unroll
  for (Index i = 0; i < BUTTERFLY_DEPTH; i++) {
    F128x2_TO_F64x4(u[i], tid);
    F128x2_TO_F64x4(v[i], tid + HALF_DEGREE);
    tid = tid + STRIDE;
  }
  __syncthreads();
}
// params is expected to be full degree not half degree.
// Converts u128 coefficients to split-f128 doubles (signed-integer view,
// no scaling). Each thread handles params::opt / 2 strided positions.
template <class params>
__device__ void convert_u128_to_f128_as_integer(
    double *out_re_hi, double *out_re_lo, double *out_im_hi, double *out_im_lo,
    const __uint128_t *in_re, const __uint128_t *in_im) {
  constexpr Index stride = params::degree / params::opt;
  Index pos = threadIdx.x;
  // #pragma unroll
  for (Index iter = 0; iter < params::opt / 2; ++iter) {
    const auto re = u128_to_signed_to_f128(in_re[pos]);
    const auto im = u128_to_signed_to_f128(in_im[pos]);
    out_re_hi[pos] = re.hi;
    out_re_lo[pos] = re.lo;
    out_im_hi[pos] = im.hi;
    out_im_lo[pos] = im.lo;
    pos += stride;
  }
}
// params is expected to be full degree not half degree.
// Converts u128 coefficients to split-f128 doubles, scaled by 2^-128.
// Fix vs. previous version: the normalization constant was computed with a
// runtime pow(2., -128.) call in every thread; 2^-128 is exactly
// representable, so use a compile-time hex-float constant instead.
template <class params>
__device__ void convert_u128_to_f128_as_torus(
    double *out_re_hi, double *out_re_lo, double *out_im_hi, double *out_im_lo,
    const __uint128_t *in_re, const __uint128_t *in_im) {
  constexpr double normalization = 0x1.0p-128; // exact 2^-128
  Index tid = threadIdx.x;
  // #pragma unroll
  for (Index i = 0; i < params::opt / 2; i++) {
    auto out_re = u128_to_signed_to_f128(in_re[tid]);
    auto out_im = u128_to_signed_to_f128(in_im[tid]);
    out_re_hi[tid] = out_re.hi * normalization;
    out_re_lo[tid] = out_re.lo * normalization;
    out_im_hi[tid] = out_im.hi * normalization;
    out_im_lo[tid] = out_im.lo * normalization;
    tid += params::degree / params::opt;
  }
}
// Converts split-f128 doubles back to u128 torus coefficients, dividing by
// degree / 2 on the way (the inverse-FFT normalization for this file's FFT).
template <class params>
__device__ void
convert_f128_to_u128_as_torus(__uint128_t *out_re, __uint128_t *out_im,
                              const double *in_re_hi, const double *in_re_lo,
                              const double *in_im_hi, const double *in_im_lo) {
  const double inv_half_degree = 1. / (params::degree / 2);
  constexpr Index stride = params::degree / params::opt;
  Index pos = threadIdx.x;
  // #pragma unroll
  for (Index i = 0; i < params::opt / 2; ++i) {
    const f128 re(in_re_hi[pos] * inv_half_degree,
                  in_re_lo[pos] * inv_half_degree);
    const f128 im(in_im_hi[pos] * inv_half_degree,
                  in_im_lo[pos] * inv_half_degree);
    out_re[pos] = u128_from_torus_f128(re);
    out_im[pos] = u128_from_torus_f128(im);
    pos += stride;
  }
}
// params is expected to be full degree not half degree.
// One block per sample; each sample's re/im halves sit back to back in `in`.
template <class params>
__global__ void
batch_convert_u128_to_f128_as_integer(double *out_re_hi, double *out_re_lo,
                                      double *out_im_hi, double *out_im_lo,
                                      const __uint128_t *in) {
  constexpr Index half = params::degree / 2;
  const Index out_off = blockIdx.x * half;
  const __uint128_t *sample = &in[blockIdx.x * params::degree];
  convert_u128_to_f128_as_integer<params>(
      &out_re_hi[out_off], &out_re_lo[out_off], &out_im_hi[out_off],
      &out_im_lo[out_off], sample, &sample[half]);
}
// params is expected to be full degree not half degree.
// Converts standard-domain input into complex<f128> represented by 4 double
// arrays laid out array-by-array: [re_hi_0..re_hi_n, re_lo_0..re_lo_n,
// im_hi_0..im_hi_n, im_lo_0..im_lo_n]. One block per sample.
template <class params>
__global__ void
batch_convert_u128_to_f128_as_torus(double *out_re_hi, double *out_re_lo,
                                    double *out_im_hi, double *out_im_lo,
                                    const __uint128_t *in) {
  constexpr Index half = params::degree / 2;
  const Index out_off = blockIdx.x * half;
  const __uint128_t *sample = &in[blockIdx.x * params::degree];
  convert_u128_to_f128_as_torus<params>(
      &out_re_hi[out_off], &out_re_lo[out_off], &out_im_hi[out_off],
      &out_im_lo[out_off], sample, &sample[half]);
}
// params is expected to be full degree not half degree.
// Converts standard-domain input into complex<f128> stored per sample as one
// contiguous chunk of four arrays: re_hi | re_lo | im_hi | im_lo, each of
// degree / 2 doubles. One block per sample.
template <class params>
__global__ void
batch_convert_u128_to_f128_strided_as_torus(double *d_out,
                                            const __uint128_t *d_in) {
  constexpr Index half = params::degree / 2;
  constexpr size_t chunk_size = (size_t)half * 4;
  double *chunk = &d_out[blockIdx.x * chunk_size];
  const __uint128_t *sample = &d_in[blockIdx.x * params::degree];
  convert_u128_to_f128_as_torus<params>(&chunk[0 * half], &chunk[1 * half],
                                        &chunk[2 * half], &chunk[3 * half],
                                        sample, &sample[half]);
}
// params is expected to be full degree not half degree.
// One block per sample; writes the sample's re half then im half into `out`.
template <class params>
__global__ void batch_convert_f128_to_u128_as_torus(__uint128_t *out,
                                                    const double *in_re_hi,
                                                    const double *in_re_lo,
                                                    const double *in_im_hi,
                                                    const double *in_im_lo) {
  constexpr Index half = params::degree / 2;
  const Index in_off = blockIdx.x * half;
  __uint128_t *sample_out = &out[blockIdx.x * params::degree];
  convert_f128_to_u128_as_torus<params>(
      sample_out, &sample_out[half], &in_re_hi[in_off], &in_re_lo[in_off],
      &in_im_hi[in_off], &in_im_lo[in_off]);
}
// Batched f128 negacyclic FFT over split-double arrays; one block per sample.
// SMD selects the working storage: a per-block slice of the global `buffer`
// (NOSM) or dynamic shared memory (otherwise). fft_direction == 1 runs the
// inverse transform, anything else the forward one.
template <class params, sharedMemDegree SMD>
__global__ void
batch_NSMFFT_128(double *in_re_hi, double *in_re_lo, double *in_im_hi,
                 double *in_im_lo, double *out_re_hi, double *out_re_lo,
                 double *out_im_hi, double *out_im_lo, double *buffer) {
  extern __shared__ double sharedMemoryFFT128[];
  constexpr Index half = params::degree / 2;
  constexpr Index stride = params::degree / params::opt;
  double *base = (SMD == NOSM) ? &buffer[(size_t)blockIdx.x * half * 4]
                               : sharedMemoryFFT128;
  double *re_hi = &base[0 * half];
  double *re_lo = &base[1 * half];
  double *im_hi = &base[2 * half];
  double *im_lo = &base[3 * half];
  // stage this sample into the working storage
  const Index sample_off = blockIdx.x * half;
  Index pos = threadIdx.x;
#pragma unroll
  for (Index i = 0; i < params::opt / 2; ++i) {
    re_hi[pos] = in_re_hi[sample_off + pos];
    re_lo[pos] = in_re_lo[sample_off + pos];
    im_hi[pos] = in_im_hi[sample_off + pos];
    im_lo[pos] = in_im_lo[sample_off + pos];
    pos += stride;
  }
  __syncthreads();
  if constexpr (params::fft_direction == 1) {
    negacyclic_backward_fft_f128<HalfDegree<params>>(re_hi, re_lo, im_hi,
                                                     im_lo);
  } else {
    negacyclic_forward_fft_f128<HalfDegree<params>>(re_hi, re_lo, im_hi, im_lo);
  }
  __syncthreads();
  // write the transformed sample back out
  pos = threadIdx.x;
#pragma unroll
  for (Index i = 0; i < params::opt / 2; ++i) {
    out_re_hi[sample_off + pos] = re_hi[pos];
    out_re_lo[sample_off + pos] = re_lo[pos];
    out_im_hi[sample_off + pos] = im_hi[pos];
    out_im_lo[sample_off + pos] = im_lo[pos];
    pos += stride;
  }
}
// Batched f128 negacyclic FFT over the strided (chunk-per-sample) layout:
// each sample is one contiguous chunk re_hi | re_lo | im_hi | im_lo of
// degree / 2 doubles each. One block per sample; SMD selects global-scratch
// (NOSM) or dynamic shared memory as working storage.
template <class params, sharedMemDegree SMD>
__global__ void batch_NSMFFT_strided_128(double *d_in, double *d_out,
                                         double *buffer) {
  extern __shared__ double sharedMemoryFFT128[];
  constexpr Index half = params::degree / 2;
  constexpr Index stride = params::degree / params::opt;
  constexpr size_t chunk_size = (size_t)half * 4;
  double *base = (SMD == NOSM) ? &buffer[blockIdx.x * chunk_size]
                               : sharedMemoryFFT128;
  double *re_hi = &base[0 * half];
  double *re_lo = &base[1 * half];
  double *im_hi = &base[2 * half];
  double *im_lo = &base[3 * half];
  // stage this sample's chunk into the working storage
  const double *src = &d_in[blockIdx.x * chunk_size];
  Index pos = threadIdx.x;
#pragma unroll
  for (Index i = 0; i < params::opt / 2; ++i) {
    re_hi[pos] = src[0 * half + pos];
    re_lo[pos] = src[1 * half + pos];
    im_hi[pos] = src[2 * half + pos];
    im_lo[pos] = src[3 * half + pos];
    pos += stride;
  }
  __syncthreads();
  if constexpr (params::fft_direction == 1) {
    negacyclic_backward_fft_f128<HalfDegree<params>>(re_hi, re_lo, im_hi,
                                                     im_lo);
  } else {
    negacyclic_forward_fft_f128<HalfDegree<params>>(re_hi, re_lo, im_hi, im_lo);
  }
  __syncthreads();
  // write the transformed chunk back with the same layout
  double *dst = &d_out[blockIdx.x * chunk_size];
  pos = threadIdx.x;
#pragma unroll
  for (Index i = 0; i < params::opt / 2; ++i) {
    dst[0 * half + pos] = re_hi[pos];
    dst[1 * half + pos] = re_lo[pos];
    dst[2 * half + pos] = im_hi[pos];
    dst[3 * half + pos] = im_lo[pos];
    pos += stride;
  }
}
// Copies `number_of_samples` polynomials of N u128 coefficients to the device,
// converts them to split-f128 complex form (signed-integer view, no scaling),
// runs the negacyclic forward FFT and copies the four double arrays back.
// `re0`/`re1`/`im0`/`im1` must each hold number_of_samples * N / 2 doubles and
// `standard` number_of_samples * N u128 values.
// Fixes vs. previous version: device buffers and copies are sized for the
// whole batch (they covered one sample while grid_size = number_of_samples);
// the NOSM scratch size is now in bytes (it was an element count, 8x too
// small); the scratch buffer is freed (it leaked) and only allocated when
// actually needed.
template <class params>
__host__ void host_fourier_transform_forward_as_integer_f128(
    cudaStream_t stream, uint32_t gpu_index, double *re0, double *re1,
    double *im0, double *im1, const __uint128_t *standard, const uint32_t N,
    const uint32_t number_of_samples) {
  // element counts for the whole batch (one FFT per sample)
  size_t fourier_elems = (size_t)number_of_samples * (N / 2);
  size_t standard_elems = (size_t)number_of_samples * N;
  // allocate device buffers
  double *d_re0 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_re1 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_im0 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_im1 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  __uint128_t *d_standard = (__uint128_t *)cuda_malloc_async(
      standard_elems * sizeof(__uint128_t), stream, gpu_index);
  // copy input into device
  cuda_memcpy_async_to_gpu(d_standard, standard,
                           standard_elems * sizeof(__uint128_t), stream,
                           gpu_index);
  // setup launch parameters
  size_t required_shared_memory_size = sizeof(double) * N / 2 * 4;
  int grid_size = number_of_samples;
  int block_size = params::degree / params::opt;
  bool full_sm =
      (required_shared_memory_size <= cuda_get_max_shared_memory(gpu_index));
  size_t shared_memory_size = full_sm ? required_shared_memory_size : 0;
  // global scratch is only needed when the FFT cannot run in shared memory;
  // sized in bytes: 4 double arrays of N / 2 elements per sample
  double *buffer = nullptr;
  if (!full_sm)
    buffer = (double *)cuda_malloc_async(fourier_elems * 4 * sizeof(double),
                                         stream, gpu_index);
  // configure shared memory for batch fft kernel
  if (full_sm) {
    check_cuda_error(cudaFuncSetAttribute(
        batch_NSMFFT_128<FFTDegree<params, ForwardFFT>, FULLSM>,
        cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size));
    check_cuda_error(cudaFuncSetCacheConfig(
        batch_NSMFFT_128<FFTDegree<params, ForwardFFT>, FULLSM>,
        cudaFuncCachePreferShared));
  }
  // convert u128 into 4 x double
  batch_convert_u128_to_f128_as_integer<params>
      <<<grid_size, block_size, 0, stream>>>(d_re0, d_re1, d_im0, d_im1,
                                             d_standard);
  // call negacyclic 128 bit forward fft (in place)
  if (full_sm) {
    batch_NSMFFT_128<FFTDegree<params, ForwardFFT>, FULLSM>
        <<<grid_size, block_size, shared_memory_size, stream>>>(
            d_re0, d_re1, d_im0, d_im1, d_re0, d_re1, d_im0, d_im1, buffer);
  } else {
    batch_NSMFFT_128<FFTDegree<params, ForwardFFT>, NOSM>
        <<<grid_size, block_size, shared_memory_size, stream>>>(
            d_re0, d_re1, d_im0, d_im1, d_re0, d_re1, d_im0, d_im1, buffer);
  }
  // copy results back and release all device memory
  cuda_memcpy_async_to_cpu(re0, d_re0, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_cpu(re1, d_re1, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_cpu(im0, d_im0, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_cpu(im1, d_im1, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_drop_async(d_standard, stream, gpu_index);
  cuda_drop_async(d_re0, stream, gpu_index);
  cuda_drop_async(d_re1, stream, gpu_index);
  cuda_drop_async(d_im0, stream, gpu_index);
  cuda_drop_async(d_im1, stream, gpu_index);
  if (buffer != nullptr)
    cuda_drop_async(buffer, stream, gpu_index);
}
// Copies `number_of_samples` polynomials of N u128 torus coefficients to the
// device, converts them to split-f128 complex form scaled by 2^-128, runs the
// negacyclic forward FFT and copies the four double arrays back.
// `re0`/`re1`/`im0`/`im1` must each hold number_of_samples * N / 2 doubles and
// `standard` number_of_samples * N u128 values.
// Fixes vs. previous version: device buffers and copies are sized for the
// whole batch (they covered one sample while grid_size = number_of_samples);
// the NOSM scratch size is now in bytes (it was an element count, 8x too
// small); the scratch buffer is freed (it leaked) and only allocated when
// actually needed.
template <class params>
__host__ void host_fourier_transform_forward_as_torus_f128(
    cudaStream_t stream, uint32_t gpu_index, double *re0, double *re1,
    double *im0, double *im1, const __uint128_t *standard, const uint32_t N,
    const uint32_t number_of_samples) {
  // element counts for the whole batch (one FFT per sample)
  size_t fourier_elems = (size_t)number_of_samples * (N / 2);
  size_t standard_elems = (size_t)number_of_samples * N;
  // allocate device buffers
  double *d_re0 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_re1 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_im0 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_im1 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  __uint128_t *d_standard = (__uint128_t *)cuda_malloc_async(
      standard_elems * sizeof(__uint128_t), stream, gpu_index);
  // copy input into device
  cuda_memcpy_async_to_gpu(d_standard, standard,
                           standard_elems * sizeof(__uint128_t), stream,
                           gpu_index);
  // setup launch parameters
  size_t required_shared_memory_size = sizeof(double) * N / 2 * 4;
  int grid_size = number_of_samples;
  int block_size = params::degree / params::opt;
  bool full_sm =
      (required_shared_memory_size <= cuda_get_max_shared_memory(gpu_index));
  size_t shared_memory_size = full_sm ? required_shared_memory_size : 0;
  // global scratch is only needed when the FFT cannot run in shared memory;
  // sized in bytes: 4 double arrays of N / 2 elements per sample
  double *buffer = nullptr;
  if (!full_sm)
    buffer = (double *)cuda_malloc_async(fourier_elems * 4 * sizeof(double),
                                         stream, gpu_index);
  // configure shared memory for batch fft kernel
  if (full_sm) {
    check_cuda_error(cudaFuncSetAttribute(
        batch_NSMFFT_128<FFTDegree<params, ForwardFFT>, FULLSM>,
        cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size));
    check_cuda_error(cudaFuncSetCacheConfig(
        batch_NSMFFT_128<FFTDegree<params, ForwardFFT>, FULLSM>,
        cudaFuncCachePreferShared));
  }
  // convert u128 into 4 x double
  batch_convert_u128_to_f128_as_torus<params>
      <<<grid_size, block_size, 0, stream>>>(d_re0, d_re1, d_im0, d_im1,
                                             d_standard);
  // call negacyclic 128 bit forward fft (in place)
  if (full_sm) {
    batch_NSMFFT_128<FFTDegree<params, ForwardFFT>, FULLSM>
        <<<grid_size, block_size, shared_memory_size, stream>>>(
            d_re0, d_re1, d_im0, d_im1, d_re0, d_re1, d_im0, d_im1, buffer);
  } else {
    batch_NSMFFT_128<FFTDegree<params, ForwardFFT>, NOSM>
        <<<grid_size, block_size, shared_memory_size, stream>>>(
            d_re0, d_re1, d_im0, d_im1, d_re0, d_re1, d_im0, d_im1, buffer);
  }
  // copy results back and release all device memory
  cuda_memcpy_async_to_cpu(re0, d_re0, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_cpu(re1, d_re1, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_cpu(im0, d_im0, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_cpu(im1, d_im1, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_drop_async(d_standard, stream, gpu_index);
  cuda_drop_async(d_re0, stream, gpu_index);
  cuda_drop_async(d_re1, stream, gpu_index);
  cuda_drop_async(d_im0, stream, gpu_index);
  cuda_drop_async(d_im1, stream, gpu_index);
  if (buffer != nullptr)
    cuda_drop_async(buffer, stream, gpu_index);
}
// Copies the split-f128 fourier representation of `number_of_samples`
// polynomials to the device, runs the negacyclic inverse FFT, converts the
// result to u128 torus coefficients and copies them into `standard`.
// `re0`/`re1`/`im0`/`im1` must each hold number_of_samples * N / 2 doubles and
// `standard` number_of_samples * N u128 values.
// Fixes vs. previous version: device buffers and copies are sized for the
// whole batch (they covered one sample while grid_size = number_of_samples);
// the NOSM scratch size is now in bytes (it was an element count, 8x too
// small); the scratch buffer is freed (it leaked) and only allocated when
// actually needed.
template <class params>
__host__ void host_fourier_transform_backward_as_torus_f128(
    cudaStream_t stream, uint32_t gpu_index, __uint128_t *standard,
    double const *re0, double const *re1, double const *im0, double const *im1,
    const uint32_t N, const uint32_t number_of_samples) {
  // element counts for the whole batch (one inverse FFT per sample)
  size_t fourier_elems = (size_t)number_of_samples * (N / 2);
  size_t standard_elems = (size_t)number_of_samples * N;
  // allocate device buffers
  double *d_re0 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_re1 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_im0 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  double *d_im1 = (double *)cuda_malloc_async(fourier_elems * sizeof(double),
                                              stream, gpu_index);
  __uint128_t *d_standard = (__uint128_t *)cuda_malloc_async(
      standard_elems * sizeof(__uint128_t), stream, gpu_index);
  // copy input into device
  cuda_memcpy_async_to_gpu(d_re0, re0, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_gpu(d_re1, re1, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_gpu(d_im0, im0, fourier_elems * sizeof(double), stream,
                           gpu_index);
  cuda_memcpy_async_to_gpu(d_im1, im1, fourier_elems * sizeof(double), stream,
                           gpu_index);
  // setup launch parameters
  size_t required_shared_memory_size = sizeof(double) * N / 2 * 4;
  int grid_size = number_of_samples;
  int block_size = params::degree / params::opt;
  bool full_sm =
      (required_shared_memory_size <= cuda_get_max_shared_memory(gpu_index));
  size_t shared_memory_size = full_sm ? required_shared_memory_size : 0;
  // global scratch is only needed when the FFT cannot run in shared memory;
  // sized in bytes: 4 double arrays of N / 2 elements per sample
  double *buffer = nullptr;
  if (!full_sm)
    buffer = (double *)cuda_malloc_async(fourier_elems * 4 * sizeof(double),
                                         stream, gpu_index);
  // configure shared memory and call the negacyclic 128 bit inverse fft
  if (full_sm) {
    check_cuda_error(cudaFuncSetAttribute(
        batch_NSMFFT_128<FFTDegree<params, BackwardFFT>, FULLSM>,
        cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size));
    check_cuda_error(cudaFuncSetCacheConfig(
        batch_NSMFFT_128<FFTDegree<params, BackwardFFT>, FULLSM>,
        cudaFuncCachePreferShared));
    batch_NSMFFT_128<FFTDegree<params, BackwardFFT>, FULLSM>
        <<<grid_size, block_size, shared_memory_size, stream>>>(
            d_re0, d_re1, d_im0, d_im1, d_re0, d_re1, d_im0, d_im1, buffer);
  } else {
    batch_NSMFFT_128<FFTDegree<params, BackwardFFT>, NOSM>
        <<<grid_size, block_size, shared_memory_size, stream>>>(
            d_re0, d_re1, d_im0, d_im1, d_re0, d_re1, d_im0, d_im1, buffer);
  }
  // convert the inverse-FFT output back to u128 torus values
  batch_convert_f128_to_u128_as_torus<params>
      <<<grid_size, block_size, 0, stream>>>(d_standard, d_re0, d_re1, d_im0,
                                             d_im1);
  cuda_memcpy_async_to_cpu(standard, d_standard,
                           standard_elems * sizeof(__uint128_t), stream,
                           gpu_index);
  cuda_drop_async(d_standard, stream, gpu_index);
  cuda_drop_async(d_re0, stream, gpu_index);
  cuda_drop_async(d_re1, stream, gpu_index);
  cuda_drop_async(d_im0, stream, gpu_index);
  cuda_drop_async(d_im1, stream, gpu_index);
  if (buffer != nullptr)
    cuda_drop_async(buffer, stream, gpu_index);
}
#undef NEG_TWID
#undef F64x4_TO_F128x2
#undef F128x2_TO_F64x4
#endif // CUDA_FFT128_CUH

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,11 @@
#ifndef CUDA_FFT128_TWIDDLES_CUH
#define CUDA_FFT128_TWIDDLES_CUH
/*
 * 'negtwiddles' are stored in device memory to benefit from caching
*/
extern __device__ double neg_twiddles_re_hi[4096];
extern __device__ double neg_twiddles_re_lo[4096];
extern __device__ double neg_twiddles_im_hi[4096];
extern __device__ double neg_twiddles_im_lo[4096];
#endif

View File

@@ -28,44 +28,6 @@ __host__ void scratch_cuda_integer_abs_kb(
num_blocks, allocate_gpu_memory);
}
template <typename Torus>
__host__ void legacy_host_integer_abs_kb_async(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *ct, void *const *bsks, uint64_t *const *ksks,
int_abs_buffer<uint64_t> *mem_ptr, bool is_signed, uint32_t num_blocks) {
if (!is_signed)
return;
auto radix_params = mem_ptr->params;
auto mask = (Torus *)(mem_ptr->mask->ptr);
auto big_lwe_dimension = radix_params.big_lwe_dimension;
auto big_lwe_size = big_lwe_dimension + 1;
auto big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
uint32_t num_bits_in_ciphertext =
(31 - __builtin_clz(radix_params.message_modulus)) * num_blocks;
cuda_memcpy_async_gpu_to_gpu(mask, ct, num_blocks * big_lwe_size_bytes,
streams[0], gpu_indexes[0]);
legacy_host_integer_radix_arithmetic_scalar_shift_kb_inplace<Torus>(
streams, gpu_indexes, gpu_count, mask, num_bits_in_ciphertext - 1,
mem_ptr->arithmetic_scalar_shift_mem, bsks, ksks, num_blocks);
legacy_host_addition<Torus>(streams[0], gpu_indexes[0], ct, mask, ct,
radix_params.big_lwe_dimension, num_blocks);
uint32_t requested_flag = outputFlag::FLAG_NONE;
uint32_t uses_carry = 0;
legacy_host_propagate_single_carry<Torus>(
streams, gpu_indexes, gpu_count, ct, nullptr, nullptr, mem_ptr->scp_mem,
bsks, ksks, num_blocks, requested_flag, uses_carry);
// legacy bitop
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, ct, mask, ct, bsks, ksks, num_blocks,
mem_ptr->bitxor_mem->lut, mem_ptr->bitxor_mem->params.message_modulus);
}
template <typename Torus>
__host__ void
host_integer_abs_kb(cudaStream_t const *streams, uint32_t const *gpu_indexes,

View File

@@ -19,6 +19,12 @@ __host__ void host_integer_radix_bitop_kb(
CudaRadixCiphertextFFI const *lwe_array_2, int_bitop_buffer<Torus> *mem_ptr,
void *const *bsks, Torus *const *ksks) {
if (lwe_array_out->num_radix_blocks != lwe_array_1->num_radix_blocks ||
lwe_array_out->num_radix_blocks != lwe_array_2->num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks must be equal")
if (lwe_array_out->lwe_dimension != lwe_array_1->lwe_dimension ||
lwe_array_out->lwe_dimension != lwe_array_2->lwe_dimension)
PANIC("Cuda error: input and output lwe dimension must be equal")
auto lut = mem_ptr->lut;
uint64_t degrees[lwe_array_1->num_radix_blocks];
if (mem_ptr->op == BITOP_TYPE::BITAND) {

View File

@@ -7,11 +7,20 @@
template <typename Torus>
__host__ void zero_out_if(cudaStream_t const *streams,
uint32_t const *gpu_indexes, uint32_t gpu_count,
Torus *lwe_array_out, Torus const *lwe_array_input,
Torus const *lwe_condition,
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_input,
CudaRadixCiphertextFFI const *lwe_condition,
int_zero_out_if_buffer<Torus> *mem_ptr,
int_radix_lut<Torus> *predicate, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
if (lwe_array_out->num_radix_blocks < num_radix_blocks ||
lwe_array_input->num_radix_blocks < num_radix_blocks)
PANIC("Cuda error: input or output radix ciphertexts does not have enough "
"blocks")
if (lwe_array_out->lwe_dimension != lwe_array_input->lwe_dimension ||
lwe_array_input->lwe_dimension != lwe_condition->lwe_dimension)
PANIC("Cuda error: input and output radix ciphertexts must have the same "
"lwe dimension")
cuda_set_device(gpu_indexes[0]);
auto params = mem_ptr->params;
@@ -21,56 +30,11 @@ __host__ void zero_out_if(cudaStream_t const *streams,
host_pack_bivariate_blocks_with_single_block<Torus>(
streams, gpu_indexes, gpu_count, tmp_lwe_array_input,
predicate->lwe_indexes_in, lwe_array_input, lwe_condition,
predicate->lwe_indexes_in, params.big_lwe_dimension,
params.message_modulus, num_radix_blocks);
predicate->lwe_indexes_in, params.message_modulus, num_radix_blocks);
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, tmp_lwe_array_input, bsks,
ksks, num_radix_blocks, predicate);
}
template <typename Torus>
__host__ void legacy_host_integer_radix_cmux_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_condition,
Torus const *lwe_array_true, Torus const *lwe_array_false,
int_cmux_buffer<Torus> *mem_ptr, void *const *bsks, Torus *const *ksks,
uint32_t num_radix_blocks) {
auto params = mem_ptr->params;
Torus lwe_size = params.big_lwe_dimension + 1;
Torus radix_lwe_size = lwe_size * num_radix_blocks;
cuda_memcpy_async_gpu_to_gpu(mem_ptr->buffer_in->ptr, lwe_array_true,
radix_lwe_size * sizeof(Torus), streams[0],
gpu_indexes[0]);
cuda_memcpy_async_gpu_to_gpu(
(Torus *)(mem_ptr->buffer_in->ptr) + radix_lwe_size, lwe_array_false,
radix_lwe_size * sizeof(Torus), streams[0], gpu_indexes[0]);
for (uint i = 0; i < 2 * num_radix_blocks; i++) {
cuda_memcpy_async_gpu_to_gpu(
(Torus *)(mem_ptr->condition_array->ptr) + i * lwe_size, lwe_condition,
lwe_size * sizeof(Torus), streams[0], gpu_indexes[0]);
}
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, (Torus *)(mem_ptr->buffer_out->ptr),
(Torus *)(mem_ptr->buffer_in->ptr),
(Torus *)(mem_ptr->condition_array->ptr), bsks, ksks,
2 * num_radix_blocks, mem_ptr->predicate_lut, params.message_modulus);
// If the condition was true, true_ct will have kept its value and false_ct
// will be 0 If the condition was false, true_ct will be 0 and false_ct will
// have kept its value
auto mem_true = (Torus *)(mem_ptr->buffer_out->ptr);
auto ptr = (Torus *)mem_ptr->buffer_out->ptr;
auto mem_false = &ptr[radix_lwe_size];
auto added_cts = mem_true;
legacy_host_addition<Torus>(streams[0], gpu_indexes[0], added_cts, mem_true,
mem_false, params.big_lwe_dimension,
num_radix_blocks);
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, added_cts, bsks, ksks,
num_radix_blocks, mem_ptr->message_extract_lut);
ksks, predicate, num_radix_blocks);
}
template <typename Torus>

View File

@@ -38,21 +38,26 @@ void scratch_cuda_integer_radix_comparison_kb_64(
void cuda_comparison_integer_radix_ciphertext_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *lwe_array_out, void const *lwe_array_1, void const *lwe_array_2,
int8_t *mem_ptr, void *const *bsks, void *const *ksks,
uint32_t num_radix_blocks) {
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_1,
CudaRadixCiphertextFFI const *lwe_array_2, int8_t *mem_ptr,
void *const *bsks, void *const *ksks) {
if (lwe_array_1->num_radix_blocks != lwe_array_1->num_radix_blocks)
PANIC("Cuda error: input num radix blocks must be the same")
// The output ciphertext might be a boolean block or a radix ciphertext
// depending on the case (eq/gt vs max/min) so the amount of blocks to
// consider for calculation is the one of the input
auto num_radix_blocks = lwe_array_1->num_radix_blocks;
int_comparison_buffer<uint64_t> *buffer =
(int_comparison_buffer<uint64_t> *)mem_ptr;
switch (buffer->op) {
case EQ:
case NE:
host_integer_radix_equality_check_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(lwe_array_out),
static_cast<const uint64_t *>(lwe_array_1),
static_cast<const uint64_t *>(lwe_array_2), buffer, bsks,
(uint64_t **)(ksks), num_radix_blocks);
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array_out,
lwe_array_1, lwe_array_2, buffer, bsks, (uint64_t **)(ksks),
num_radix_blocks);
break;
case GT:
case GE:
@@ -62,23 +67,18 @@ void cuda_comparison_integer_radix_ciphertext_kb_64(
PANIC("Cuda error (comparisons): the number of radix blocks has to be "
"even.")
host_integer_radix_difference_check_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(lwe_array_out),
static_cast<const uint64_t *>(lwe_array_1),
static_cast<const uint64_t *>(lwe_array_2), buffer,
buffer->diff_buffer->operator_f, bsks, (uint64_t **)(ksks),
num_radix_blocks);
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array_out,
lwe_array_1, lwe_array_2, buffer, buffer->diff_buffer->operator_f, bsks,
(uint64_t **)(ksks), num_radix_blocks);
break;
case MAX:
case MIN:
if (num_radix_blocks % 2 != 0)
PANIC("Cuda error (max/min): the number of radix blocks has to be even.")
host_integer_radix_maxmin_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(lwe_array_out),
static_cast<const uint64_t *>(lwe_array_1),
static_cast<const uint64_t *>(lwe_array_2), buffer, bsks,
(uint64_t **)(ksks), num_radix_blocks);
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array_out,
lwe_array_1, lwe_array_2, buffer, bsks, (uint64_t **)(ksks),
num_radix_blocks);
break;
default:
PANIC("Cuda error: integer operation not supported")
@@ -117,17 +117,16 @@ void scratch_cuda_integer_are_all_comparisons_block_true_kb_64(
void cuda_integer_are_all_comparisons_block_true_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *lwe_array_out, void const *lwe_array_in, int8_t *mem_ptr,
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in, int8_t *mem_ptr,
void *const *bsks, void *const *ksks, uint32_t num_radix_blocks) {
int_comparison_buffer<uint64_t> *buffer =
(int_comparison_buffer<uint64_t> *)mem_ptr;
host_integer_are_all_comparisons_block_true_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(lwe_array_out),
static_cast<const uint64_t *>(lwe_array_in), buffer, bsks,
(uint64_t **)(ksks), num_radix_blocks);
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array_out,
lwe_array_in, buffer, bsks, (uint64_t **)(ksks), num_radix_blocks);
}
void cleanup_cuda_integer_are_all_comparisons_block_true(
@@ -161,17 +160,16 @@ void scratch_cuda_integer_is_at_least_one_comparisons_block_true_kb_64(
void cuda_integer_is_at_least_one_comparisons_block_true_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *lwe_array_out, void const *lwe_array_in, int8_t *mem_ptr,
CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in, int8_t *mem_ptr,
void *const *bsks, void *const *ksks, uint32_t num_radix_blocks) {
int_comparison_buffer<uint64_t> *buffer =
(int_comparison_buffer<uint64_t> *)mem_ptr;
host_integer_is_at_least_one_comparisons_block_true_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(lwe_array_out),
static_cast<const uint64_t *>(lwe_array_in), buffer, bsks,
(uint64_t **)(ksks), num_radix_blocks);
(cudaStream_t *)(streams), gpu_indexes, gpu_count, lwe_array_out,
lwe_array_in, buffer, bsks, (uint64_t **)(ksks), num_radix_blocks);
}
void cleanup_cuda_integer_is_at_least_one_comparisons_block_true(

View File

@@ -8,6 +8,7 @@
#include "integer/integer_utilities.h"
#include "integer/negation.cuh"
#include "integer/scalar_addition.cuh"
#include "integer/subtraction.cuh"
#include "pbs/programmable_bootstrap_classic.cuh"
#include "pbs/programmable_bootstrap_multibit.cuh"
#include "types/complex/operations.cuh"
@@ -58,10 +59,17 @@ __host__ void accumulate_all_blocks(cudaStream_t stream, uint32_t gpu_index,
template <typename Torus>
__host__ void are_all_comparisons_block_true(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_in,
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in,
int_comparison_buffer<Torus> *mem_ptr, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
if (lwe_array_out->lwe_dimension != lwe_array_in->lwe_dimension)
PANIC("Cuda error: input and output lwe dimensions must be the same")
if (lwe_array_in->num_radix_blocks < num_radix_blocks)
PANIC("Cuda error: input num radix blocks should not be lower "
"than the number of blocks to operate on")
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
auto glwe_dimension = params.glwe_dimension;
@@ -76,10 +84,9 @@ __host__ void are_all_comparisons_block_true(
uint32_t total_modulus = message_modulus * carry_modulus;
uint32_t max_value = (total_modulus - 1) / (message_modulus - 1);
cuda_memcpy_async_gpu_to_gpu(tmp_out, lwe_array_in,
num_radix_blocks * (big_lwe_dimension + 1) *
sizeof(Torus),
streams[0], gpu_indexes[0]);
copy_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0], tmp_out,
0, num_radix_blocks, lwe_array_in, 0,
num_radix_blocks);
uint32_t remaining_blocks = num_radix_blocks;
@@ -89,8 +96,9 @@ __host__ void are_all_comparisons_block_true(
// Since all blocks encrypt either 0 or 1, we can sum max_value of them
// as in the worst case we will be adding `max_value` ones
auto input_blocks = tmp_out;
auto accumulator = are_all_block_true_buffer->tmp_block_accumulated;
auto input_blocks = (Torus *)tmp_out->ptr;
auto accumulator_ptr =
(Torus *)are_all_block_true_buffer->tmp_block_accumulated->ptr;
auto is_max_value_lut = are_all_block_true_buffer->is_max_value;
uint32_t chunk_lengths[num_chunks];
auto begin_remaining_blocks = remaining_blocks;
@@ -98,15 +106,15 @@ __host__ void are_all_comparisons_block_true(
uint32_t chunk_length =
std::min(max_value, begin_remaining_blocks - i * max_value);
chunk_lengths[i] = chunk_length;
accumulate_all_blocks<Torus>(streams[0], gpu_indexes[0], accumulator,
accumulate_all_blocks<Torus>(streams[0], gpu_indexes[0], accumulator_ptr,
input_blocks, big_lwe_dimension,
chunk_length);
accumulator += (big_lwe_dimension + 1);
accumulator_ptr += (big_lwe_dimension + 1);
remaining_blocks -= (chunk_length - 1);
input_blocks += (big_lwe_dimension + 1) * chunk_length;
}
accumulator = are_all_block_true_buffer->tmp_block_accumulated;
auto accumulator = are_all_block_true_buffer->tmp_block_accumulated;
// Selects a LUT
int_radix_lut<Torus> *lut;
@@ -127,7 +135,7 @@ __host__ void are_all_comparisons_block_true(
polynomial_size, message_modulus, carry_modulus,
is_equal_to_num_blocks_lut_f);
Torus *h_lut_indexes = (Torus *)malloc(num_chunks * sizeof(Torus));
Torus *h_lut_indexes = is_max_value_lut->h_lut_indexes;
for (int index = 0; index < num_chunks; index++) {
if (index == num_chunks - 1) {
h_lut_indexes[index] = 1;
@@ -139,8 +147,6 @@ __host__ void are_all_comparisons_block_true(
h_lut_indexes, num_chunks * sizeof(Torus),
streams[0], gpu_indexes[0]);
is_max_value_lut->broadcast_lut(streams, gpu_indexes, 0);
cuda_synchronize_stream(streams[0], gpu_indexes[0]);
free(h_lut_indexes);
}
lut = is_max_value_lut;
}
@@ -148,14 +154,24 @@ __host__ void are_all_comparisons_block_true(
// Applies the LUT
if (remaining_blocks == 1) {
// In the last iteration we copy the output to the final address
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, accumulator, bsks,
ksks, 1, lut);
ksks, lut, 1);
// Reset max_value_lut_indexes before returning, otherwise if the lut is
// reused the lut indexes will be wrong
memset(is_max_value_lut->h_lut_indexes, 0,
is_max_value_lut->num_blocks * sizeof(Torus));
cuda_memcpy_async_to_gpu(is_max_value_lut->get_lut_indexes(0, 0),
is_max_value_lut->h_lut_indexes,
is_max_value_lut->num_blocks * sizeof(Torus),
streams[0], gpu_indexes[0]);
is_max_value_lut->broadcast_lut(streams, gpu_indexes, 0);
reset_radix_ciphertext_blocks(lwe_array_out, 1);
return;
} else {
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, tmp_out, accumulator, bsks, ksks,
num_chunks, lut);
lut, num_chunks);
}
}
}
@@ -169,10 +185,17 @@ __host__ void are_all_comparisons_block_true(
template <typename Torus>
__host__ void is_at_least_one_comparisons_block_true(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_in,
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in,
int_comparison_buffer<Torus> *mem_ptr, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
if (lwe_array_out->lwe_dimension != lwe_array_in->lwe_dimension)
PANIC("Cuda error: input lwe dimensions must be the same")
if (lwe_array_in->num_radix_blocks < num_radix_blocks)
PANIC("Cuda error: input num radix blocks should not be lower "
"than the number of blocks to operate on")
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
auto message_modulus = params.message_modulus;
@@ -183,10 +206,9 @@ __host__ void is_at_least_one_comparisons_block_true(
uint32_t total_modulus = message_modulus * carry_modulus;
uint32_t max_value = (total_modulus - 1) / (message_modulus - 1);
cuda_memcpy_async_gpu_to_gpu(mem_ptr->tmp_lwe_array_out, lwe_array_in,
num_radix_blocks * (big_lwe_dimension + 1) *
sizeof(Torus),
streams[0], gpu_indexes[0]);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], mem_ptr->tmp_lwe_array_out, 0,
num_radix_blocks, lwe_array_in, 0, num_radix_blocks);
uint32_t remaining_blocks = num_radix_blocks;
while (remaining_blocks > 0) {
@@ -195,8 +217,8 @@ __host__ void is_at_least_one_comparisons_block_true(
// Since all blocks encrypt either 0 or 1, we can sum max_value of them
// as in the worst case we will be adding `max_value` ones
auto input_blocks = mem_ptr->tmp_lwe_array_out;
auto accumulator = buffer->tmp_block_accumulated;
auto input_blocks = (Torus *)mem_ptr->tmp_lwe_array_out->ptr;
auto accumulator = (Torus *)buffer->tmp_block_accumulated->ptr;
uint32_t chunk_lengths[num_chunks];
auto begin_remaining_blocks = remaining_blocks;
for (int i = 0; i < num_chunks; i++) {
@@ -211,7 +233,6 @@ __host__ void is_at_least_one_comparisons_block_true(
remaining_blocks -= (chunk_length - 1);
input_blocks += (big_lwe_dimension + 1) * chunk_length;
}
accumulator = buffer->tmp_block_accumulated;
// Selects a LUT
int_radix_lut<Torus> *lut = mem_ptr->eq_buffer->is_non_zero_lut;
@@ -219,45 +240,35 @@ __host__ void is_at_least_one_comparisons_block_true(
// Applies the LUT
if (remaining_blocks == 1) {
// In the last iteration we copy the output to the final address
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, accumulator, bsks,
ksks, 1, lut);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out,
buffer->tmp_block_accumulated, bsks, ksks, lut, 1);
return;
} else {
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, mem_ptr->tmp_lwe_array_out,
accumulator, bsks, ksks, num_chunks, lut);
buffer->tmp_block_accumulated, bsks, ksks, lut, num_chunks);
}
}
}
// This takes an input slice of blocks.
//
// Each block can encrypt any value as long as its < message_modulus.
//
// It will compare blocks with 0, for either equality or difference.
//
// This returns a Vec of block, where each block encrypts 1 or 0
// depending of if all blocks matched with the comparison type with 0.
//
// E.g. For ZeroComparisonType::Equality, if all input blocks are zero
// than all returned block will encrypt 1
//
// The returned Vec will have less block than the number of input blocks.
// The returned blocks potentially needs to be 'reduced' to one block
// with eg are_all_comparisons_block_true.
//
// This function exists because sometimes it is faster to concatenate
// multiple vec of 'boolean' shortint block before reducing them with
// are_all_comparisons_block_true
template <typename Torus>
__host__ void host_compare_with_zero_equality(
__host__ void host_compare_blocks_with_zero(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_in,
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in,
int_comparison_buffer<Torus> *mem_ptr, void *const *bsks,
Torus *const *ksks, int32_t num_radix_blocks,
int_radix_lut<Torus> *zero_comparison) {
if (num_radix_blocks == 0)
return;
if (lwe_array_out->lwe_dimension != lwe_array_in->lwe_dimension)
PANIC("Cuda error: input lwe dimensions must be the same")
if (lwe_array_in->num_radix_blocks < num_radix_blocks)
PANIC("Cuda error: input num radix blocks should not be lower "
"than the number of blocks to operate on")
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
auto message_modulus = params.message_modulus;
@@ -274,21 +285,19 @@ __host__ void host_compare_with_zero_equality(
uint32_t num_elements_to_fill_carry = (total_modulus - 1) / message_max;
size_t big_lwe_size = big_lwe_dimension + 1;
size_t big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
int num_sum_blocks = 0;
// Accumulator
auto sum = lwe_array_out;
if (num_radix_blocks == 1) {
// Just copy
cuda_memcpy_async_gpu_to_gpu(sum, lwe_array_in, big_lwe_size_bytes,
streams[0], gpu_indexes[0]);
copy_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0], sum, 0,
1, lwe_array_in, 0, 1);
num_sum_blocks = 1;
} else {
uint32_t remainder_blocks = num_radix_blocks;
auto sum_i = sum;
auto chunk = lwe_array_in;
auto sum_i = (Torus *)sum->ptr;
auto chunk = (Torus *)lwe_array_in->ptr;
while (remainder_blocks > 1) {
uint32_t chunk_size =
std::min(remainder_blocks, num_elements_to_fill_carry);
@@ -305,28 +314,31 @@ __host__ void host_compare_with_zero_equality(
}
}
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, sum, sum, bsks, ksks, num_sum_blocks,
zero_comparison);
are_all_comparisons_block_true<Torus>(streams, gpu_indexes, gpu_count,
lwe_array_out, sum, mem_ptr, bsks, ksks,
num_sum_blocks);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, sum, bsks, ksks,
zero_comparison, num_sum_blocks);
reset_radix_ciphertext_blocks(lwe_array_out, num_sum_blocks);
}
template <typename Torus>
__host__ void host_integer_radix_equality_check_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_1,
Torus const *lwe_array_2, int_comparison_buffer<Torus> *mem_ptr,
void *const *bsks, Torus *const *ksks, uint32_t num_radix_blocks) {
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_1,
CudaRadixCiphertextFFI const *lwe_array_2,
int_comparison_buffer<Torus> *mem_ptr, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
if (lwe_array_out->lwe_dimension != lwe_array_1->lwe_dimension ||
lwe_array_out->lwe_dimension != lwe_array_2->lwe_dimension)
PANIC("Cuda error: input lwe dimensions must be the same")
auto eq_buffer = mem_ptr->eq_buffer;
// Applies the LUT for the comparison operation
auto comparisons = mem_ptr->tmp_block_comparisons;
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, comparisons, lwe_array_1, lwe_array_2,
bsks, ksks, num_radix_blocks, eq_buffer->operator_lut,
bsks, ksks, eq_buffer->operator_lut, num_radix_blocks,
eq_buffer->operator_lut->params.message_modulus);
// This takes a Vec of blocks, where each block is either 0 or 1.
@@ -341,10 +353,16 @@ __host__ void host_integer_radix_equality_check_kb(
template <typename Torus>
__host__ void compare_radix_blocks_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_left,
Torus const *lwe_array_right, int_comparison_buffer<Torus> *mem_ptr,
void *const *bsks, Torus *const *ksks, uint32_t num_radix_blocks) {
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_left,
CudaRadixCiphertextFFI const *lwe_array_right,
int_comparison_buffer<Torus> *mem_ptr, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
if (lwe_array_out->lwe_dimension != lwe_array_left->lwe_dimension ||
lwe_array_out->lwe_dimension != lwe_array_right->lwe_dimension)
PANIC("Cuda error: input and output radix ciphertexts should have the same "
"lwe dimension")
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
auto message_modulus = params.message_modulus;
@@ -364,35 +382,43 @@ __host__ void compare_radix_blocks_kb(
// space, so (-1) % (4 * 4) = 15 = 1|1111 We then add one and get 0 = 0|0000
// Subtract
// Here we need the true lwe sub, not the one that comes from shortint.
host_subtraction<Torus>(streams[0], gpu_indexes[0], lwe_array_out,
lwe_array_left, lwe_array_right, big_lwe_dimension,
num_radix_blocks);
host_subtraction<Torus>(
streams[0], gpu_indexes[0], (Torus *)lwe_array_out->ptr,
(Torus *)lwe_array_left->ptr, (Torus *)lwe_array_right->ptr,
big_lwe_dimension, num_radix_blocks);
// Apply LUT to compare to 0
auto is_non_zero_lut = mem_ptr->eq_buffer->is_non_zero_lut;
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, lwe_array_out, bsks, ksks,
num_radix_blocks, is_non_zero_lut);
is_non_zero_lut, num_radix_blocks);
// Add one
// Here Lhs can have the following values: (-1) % (message modulus * carry
// modulus), 0, 1 So the output values after the addition will be: 0, 1, 2
legacy_host_integer_radix_add_scalar_one_inplace<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, big_lwe_dimension,
num_radix_blocks, message_modulus, carry_modulus);
host_integer_radix_add_scalar_one_inplace<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, message_modulus,
carry_modulus);
}
// Reduces a vec containing shortint blocks that encrypts a sign
// (inferior, equal, superior) to one single shortint block containing the
// final sign
template <typename Torus>
__host__ void tree_sign_reduction(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus *lwe_block_comparisons,
int_tree_sign_reduction_buffer<Torus> *tree_buffer,
std::function<Torus(Torus)> sign_handler_f, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
__host__ void
tree_sign_reduction(cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI *lwe_block_comparisons,
int_tree_sign_reduction_buffer<Torus> *tree_buffer,
std::function<Torus(Torus)> sign_handler_f,
void *const *bsks, Torus *const *ksks,
uint32_t num_radix_blocks) {
if (lwe_array_out->lwe_dimension != lwe_block_comparisons->lwe_dimension)
PANIC("Cuda error: input lwe dimensions must be the same")
if (lwe_block_comparisons->num_radix_blocks < num_radix_blocks)
PANIC("Cuda error: block comparisons num radix blocks should not be lower "
"than the number of blocks to operate on")
auto params = tree_buffer->params;
auto big_lwe_dimension = params.big_lwe_dimension;
@@ -405,37 +431,31 @@ __host__ void tree_sign_reduction(
// Reduces a vec containing shortint blocks that encrypts a sign
// (inferior, equal, superior) to one single shortint block containing the
// final sign
size_t big_lwe_size = big_lwe_dimension + 1;
size_t big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
auto x = tree_buffer->tmp_x;
auto y = tree_buffer->tmp_y;
if (x != lwe_block_comparisons)
cuda_memcpy_async_gpu_to_gpu(x, lwe_block_comparisons,
big_lwe_size_bytes * num_radix_blocks,
streams[0], gpu_indexes[0]);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], x, 0, num_radix_blocks,
lwe_block_comparisons, 0, num_radix_blocks);
uint32_t partial_block_count = num_radix_blocks;
auto inner_tree_leaf = tree_buffer->tree_inner_leaf_lut;
while (partial_block_count > 2) {
pack_blocks<Torus>(streams[0], gpu_indexes[0], y, x, big_lwe_dimension,
partial_block_count, 4);
pack_blocks<Torus>(streams[0], gpu_indexes[0], y, x, partial_block_count,
4);
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, x, y, bsks, ksks,
partial_block_count >> 1, inner_tree_leaf);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, x, y, bsks, ksks, inner_tree_leaf,
partial_block_count >> 1);
if ((partial_block_count % 2) != 0) {
partial_block_count >>= 1;
partial_block_count++;
auto last_y_block = y + (partial_block_count - 1) * big_lwe_size;
auto last_x_block = x + (partial_block_count - 1) * big_lwe_size;
cuda_memcpy_async_gpu_to_gpu(last_x_block, last_y_block,
big_lwe_size_bytes, streams[0],
gpu_indexes[0]);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], x, partial_block_count - 1,
partial_block_count, y, partial_block_count - 1, partial_block_count);
} else {
partial_block_count >>= 1;
}
@@ -446,8 +466,8 @@ __host__ void tree_sign_reduction(
std::function<Torus(Torus)> f;
if (partial_block_count == 2) {
pack_blocks<Torus>(streams[0], gpu_indexes[0], y, x, big_lwe_dimension,
partial_block_count, 4);
pack_blocks<Torus>(streams[0], gpu_indexes[0], y, x, partial_block_count,
4);
f = [block_selector_f, sign_handler_f](Torus x) -> Torus {
int msb = (x >> 2) & 3;
@@ -468,58 +488,64 @@ __host__ void tree_sign_reduction(
last_lut->broadcast_lut(streams, gpu_indexes, 0);
// Last leaf
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, y, bsks, ksks, 1,
last_lut);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out, y, bsks, ksks, last_lut,
1);
}
template <typename Torus>
__host__ void host_integer_radix_difference_check_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_left,
Torus const *lwe_array_right, int_comparison_buffer<Torus> *mem_ptr,
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_left,
CudaRadixCiphertextFFI const *lwe_array_right,
int_comparison_buffer<Torus> *mem_ptr,
std::function<Torus(Torus)> reduction_lut_f, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
if (lwe_array_out->lwe_dimension != lwe_array_left->lwe_dimension ||
lwe_array_out->lwe_dimension != lwe_array_right->lwe_dimension)
PANIC("Cuda error: input lwe dimensions must be the same")
auto diff_buffer = mem_ptr->diff_buffer;
auto params = mem_ptr->params;
auto big_lwe_dimension = params.big_lwe_dimension;
auto big_lwe_size = big_lwe_dimension + 1;
auto message_modulus = params.message_modulus;
auto carry_modulus = params.carry_modulus;
uint32_t packed_num_radix_blocks = num_radix_blocks;
Torus *lhs = (Torus *)lwe_array_left;
Torus *rhs = (Torus *)lwe_array_right;
CudaRadixCiphertextFFI lhs;
as_radix_ciphertext_slice<Torus>(&lhs, diff_buffer->tmp_packed, 0,
num_radix_blocks / 2);
CudaRadixCiphertextFFI rhs;
as_radix_ciphertext_slice<Torus>(&rhs, diff_buffer->tmp_packed,
num_radix_blocks / 2, num_radix_blocks);
if (carry_modulus >= message_modulus) {
// Packing is possible
// Pack inputs
Torus *packed_left = diff_buffer->tmp_packed;
Torus *packed_right =
diff_buffer->tmp_packed + num_radix_blocks / 2 * big_lwe_size;
// In case the ciphertext is signed, the sign block and the one before it
// are handled separately
if (mem_ptr->is_signed) {
packed_num_radix_blocks -= 2;
}
pack_blocks<Torus>(streams[0], gpu_indexes[0], packed_left, lwe_array_left,
big_lwe_dimension, packed_num_radix_blocks,
message_modulus);
pack_blocks<Torus>(streams[0], gpu_indexes[0], packed_right,
lwe_array_right, big_lwe_dimension,
pack_blocks<Torus>(streams[0], gpu_indexes[0], &lhs, lwe_array_left,
packed_num_radix_blocks, message_modulus);
pack_blocks<Torus>(streams[0], gpu_indexes[0], &rhs, lwe_array_right,
packed_num_radix_blocks, message_modulus);
// From this point we have half number of blocks
packed_num_radix_blocks /= 2;
// Clean noise
auto identity_lut = mem_ptr->identity_lut;
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, packed_left, packed_left, bsks, ksks,
2 * packed_num_radix_blocks, identity_lut);
lhs = packed_left;
rhs = packed_right;
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, diff_buffer->tmp_packed,
diff_buffer->tmp_packed, bsks, ksks, identity_lut,
2 * packed_num_radix_blocks);
} else {
as_radix_ciphertext_slice<Torus>(&lhs, lwe_array_left, 0,
lwe_array_left->num_radix_blocks);
as_radix_ciphertext_slice<Torus>(&rhs, lwe_array_right, 0,
lwe_array_right->num_radix_blocks);
}
// comparisons will be assigned
@@ -532,7 +558,7 @@ __host__ void host_integer_radix_difference_check_kb(
// Compare packed blocks, or simply the total number of radix blocks in the
// inputs
compare_radix_blocks_kb<Torus>(streams, gpu_indexes, gpu_count, comparisons,
lhs, rhs, mem_ptr, bsks, ksks,
&lhs, &rhs, mem_ptr, bsks, ksks,
packed_num_radix_blocks);
num_comparisons = packed_num_radix_blocks;
} else {
@@ -540,38 +566,59 @@ __host__ void host_integer_radix_difference_check_kb(
if (carry_modulus >= message_modulus) {
// Compare (num_radix_blocks - 2) / 2 packed blocks
compare_radix_blocks_kb<Torus>(streams, gpu_indexes, gpu_count,
comparisons, lhs, rhs, mem_ptr, bsks, ksks,
packed_num_radix_blocks);
comparisons, &lhs, &rhs, mem_ptr, bsks,
ksks, packed_num_radix_blocks);
// Compare the last block before the sign block separately
auto identity_lut = mem_ptr->identity_lut;
Torus *packed_left = diff_buffer->tmp_packed;
Torus *packed_right =
diff_buffer->tmp_packed + num_radix_blocks / 2 * big_lwe_size;
Torus *last_left_block_before_sign_block =
packed_left + packed_num_radix_blocks * big_lwe_size;
Torus *last_right_block_before_sign_block =
packed_right + packed_num_radix_blocks * big_lwe_size;
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, last_left_block_before_sign_block,
lwe_array_left + (num_radix_blocks - 2) * big_lwe_size, bsks, ksks, 1,
identity_lut);
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, last_right_block_before_sign_block,
lwe_array_right + (num_radix_blocks - 2) * big_lwe_size, bsks, ksks,
1, identity_lut);
CudaRadixCiphertextFFI last_left_block_before_sign_block;
as_radix_ciphertext_slice<Torus>(
&last_left_block_before_sign_block, diff_buffer->tmp_packed,
packed_num_radix_blocks, packed_num_radix_blocks + 1);
CudaRadixCiphertextFFI shifted_lwe_array_left;
as_radix_ciphertext_slice<Torus>(&shifted_lwe_array_left, lwe_array_left,
num_radix_blocks - 2,
num_radix_blocks - 1);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, &last_left_block_before_sign_block,
&shifted_lwe_array_left, bsks, ksks, identity_lut, 1);
CudaRadixCiphertextFFI last_right_block_before_sign_block;
as_radix_ciphertext_slice<Torus>(
&last_right_block_before_sign_block, diff_buffer->tmp_packed,
num_radix_blocks / 2 + packed_num_radix_blocks,
num_radix_blocks / 2 + packed_num_radix_blocks + 1);
CudaRadixCiphertextFFI shifted_lwe_array_right;
as_radix_ciphertext_slice<Torus>(&shifted_lwe_array_right,
lwe_array_right, num_radix_blocks - 2,
num_radix_blocks - 1);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, &last_right_block_before_sign_block,
&shifted_lwe_array_right, bsks, ksks, identity_lut, 1);
CudaRadixCiphertextFFI shifted_comparisons;
as_radix_ciphertext_slice<Torus>(&shifted_comparisons, comparisons,
packed_num_radix_blocks,
packed_num_radix_blocks + 1);
compare_radix_blocks_kb<Torus>(
streams, gpu_indexes, gpu_count,
comparisons + packed_num_radix_blocks * big_lwe_size,
last_left_block_before_sign_block, last_right_block_before_sign_block,
mem_ptr, bsks, ksks, 1);
streams, gpu_indexes, gpu_count, &shifted_comparisons,
&last_left_block_before_sign_block,
&last_right_block_before_sign_block, mem_ptr, bsks, ksks, 1);
// Compare the sign block separately
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count,
comparisons + (packed_num_radix_blocks + 1) * big_lwe_size,
lwe_array_left + (num_radix_blocks - 1) * big_lwe_size,
lwe_array_right + (num_radix_blocks - 1) * big_lwe_size, bsks, ksks,
1, mem_ptr->signed_lut, mem_ptr->signed_lut->params.message_modulus);
as_radix_ciphertext_slice<Torus>(&shifted_comparisons, comparisons,
packed_num_radix_blocks + 1,
packed_num_radix_blocks + 2);
CudaRadixCiphertextFFI last_left_block;
as_radix_ciphertext_slice<Torus>(&last_left_block, lwe_array_left,
num_radix_blocks - 1, num_radix_blocks);
CudaRadixCiphertextFFI last_right_block;
as_radix_ciphertext_slice<Torus>(&last_right_block, lwe_array_right,
num_radix_blocks - 1, num_radix_blocks);
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, &shifted_comparisons,
&last_left_block, &last_right_block, bsks, ksks, mem_ptr->signed_lut,
1, mem_ptr->signed_lut->params.message_modulus);
num_comparisons = packed_num_radix_blocks + 2;
} else {
@@ -579,12 +626,19 @@ __host__ void host_integer_radix_difference_check_kb(
streams, gpu_indexes, gpu_count, comparisons, lwe_array_left,
lwe_array_right, mem_ptr, bsks, ksks, num_radix_blocks - 1);
// Compare the sign block separately
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count,
comparisons + (num_radix_blocks - 1) * big_lwe_size,
lwe_array_left + (num_radix_blocks - 1) * big_lwe_size,
lwe_array_right + (num_radix_blocks - 1) * big_lwe_size, bsks, ksks,
1, mem_ptr->signed_lut, mem_ptr->signed_lut->params.message_modulus);
CudaRadixCiphertextFFI shifted_comparisons;
as_radix_ciphertext_slice<Torus>(&shifted_comparisons, comparisons,
num_radix_blocks - 1, num_radix_blocks);
CudaRadixCiphertextFFI last_left_block;
as_radix_ciphertext_slice<Torus>(&last_left_block, lwe_array_left,
num_radix_blocks - 1, num_radix_blocks);
CudaRadixCiphertextFFI last_right_block;
as_radix_ciphertext_slice<Torus>(&last_right_block, lwe_array_right,
num_radix_blocks - 1, num_radix_blocks);
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, &shifted_comparisons,
&last_left_block, &last_right_block, bsks, ksks, mem_ptr->signed_lut,
1, mem_ptr->signed_lut->params.message_modulus);
num_comparisons = num_radix_blocks;
}
}
@@ -612,32 +666,42 @@ __host__ void scratch_cuda_integer_radix_comparison_check_kb(
template <typename Torus>
__host__ void host_integer_radix_maxmin_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_left,
Torus const *lwe_array_right, int_comparison_buffer<Torus> *mem_ptr,
void *const *bsks, Torus *const *ksks, uint32_t total_num_radix_blocks) {
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_left,
CudaRadixCiphertextFFI const *lwe_array_right,
int_comparison_buffer<Torus> *mem_ptr, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
if (lwe_array_out->lwe_dimension != lwe_array_left->lwe_dimension ||
lwe_array_out->lwe_dimension != lwe_array_right->lwe_dimension)
PANIC("Cuda error: input and output lwe dimensions must be the same")
if (lwe_array_out->num_radix_blocks < num_radix_blocks ||
lwe_array_left->num_radix_blocks < num_radix_blocks ||
lwe_array_right->num_radix_blocks < num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks should not be lower "
"than the number of blocks to operate on")
// Compute the sign
host_integer_radix_difference_check_kb<Torus>(
streams, gpu_indexes, gpu_count, mem_ptr->tmp_lwe_array_out,
lwe_array_left, lwe_array_right, mem_ptr, mem_ptr->identity_lut_f, bsks,
ksks, total_num_radix_blocks);
ksks, num_radix_blocks);
// Selector
legacy_host_integer_radix_cmux_kb<Torus>(
streams, gpu_indexes, gpu_count, lwe_array_out,
mem_ptr->tmp_lwe_array_out, lwe_array_left, lwe_array_right,
mem_ptr->cmux_buffer, bsks, ksks, total_num_radix_blocks);
host_integer_radix_cmux_kb<Torus>(streams, gpu_indexes, gpu_count,
lwe_array_out, mem_ptr->tmp_lwe_array_out,
lwe_array_left, lwe_array_right,
mem_ptr->cmux_buffer, bsks, ksks);
}
template <typename Torus>
__host__ void host_integer_are_all_comparisons_block_true_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_in,
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in,
int_comparison_buffer<Torus> *mem_ptr, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
auto eq_buffer = mem_ptr->eq_buffer;
// It returns a block encrypting 1 if all input blocks are 1
// otherwise the block encrypts 0
are_all_comparisons_block_true<Torus>(streams, gpu_indexes, gpu_count,
@@ -648,12 +712,11 @@ __host__ void host_integer_are_all_comparisons_block_true_kb(
template <typename Torus>
__host__ void host_integer_is_at_least_one_comparisons_block_true_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *lwe_array_out, Torus const *lwe_array_in,
uint32_t gpu_count, CudaRadixCiphertextFFI *lwe_array_out,
CudaRadixCiphertextFFI const *lwe_array_in,
int_comparison_buffer<Torus> *mem_ptr, void *const *bsks,
Torus *const *ksks, uint32_t num_radix_blocks) {
auto eq_buffer = mem_ptr->eq_buffer;
// It returns a block encrypting 1 if all input blocks are 1
// otherwise the block encrypts 0
is_at_least_one_comparisons_block_true<Torus>(

View File

@@ -43,6 +43,9 @@ __global__ void pack(Torus *array_out, Torus *array_in, uint32_t log_modulus,
}
}
/// Packs `num_lwes` LWE-ciphertext contained in `num_glwes` GLWE-ciphertext in
/// a compressed array This function follows the naming used in the CPU
/// implementation
template <typename Torus>
__host__ void host_pack(cudaStream_t stream, uint32_t gpu_index,
Torus *array_out, Torus *array_in, uint32_t num_glwes,
@@ -55,26 +58,23 @@ __host__ void host_pack(cudaStream_t stream, uint32_t gpu_index,
auto log_modulus = mem_ptr->storage_log_modulus;
// [0..num_glwes-1) GLWEs
auto in_len = (compression_params.glwe_dimension + 1) *
compression_params.polynomial_size;
auto in_len = num_glwes * compression_params.glwe_dimension *
compression_params.polynomial_size +
num_lwes;
auto number_bits_to_pack = in_len * log_modulus;
auto nbits = sizeof(Torus) * 8;
// number_bits_to_pack.div_ceil(Scalar::BITS)
auto nbits = sizeof(Torus) * 8;
auto out_len = (number_bits_to_pack + nbits - 1) / nbits;
// Last GLWE
number_bits_to_pack = in_len * log_modulus;
auto last_out_len = (number_bits_to_pack + nbits - 1) / nbits;
auto num_coeffs = (num_glwes - 1) * out_len + last_out_len;
int num_blocks = 0, num_threads = 0;
getNumBlocksAndThreads(num_coeffs, 1024, num_blocks, num_threads);
getNumBlocksAndThreads(out_len, 1024, num_blocks, num_threads);
dim3 grid(num_blocks);
dim3 threads(num_threads);
pack<Torus><<<grid, threads, 0, stream>>>(array_out, array_in, log_modulus,
num_coeffs, in_len, out_len);
out_len, in_len, out_len);
check_cuda_error(cudaGetLastError());
}
@@ -99,14 +99,13 @@ host_integer_compress(cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t lwe_in_size = input_lwe_dimension + 1;
uint32_t glwe_out_size = (compression_params.glwe_dimension + 1) *
compression_params.polynomial_size;
uint32_t num_glwes_for_compression =
num_radix_blocks / mem_ptr->lwe_per_glwe + 1;
uint32_t num_glwes =
(num_radix_blocks + mem_ptr->lwe_per_glwe - 1) / mem_ptr->lwe_per_glwe;
// Keyswitch LWEs to GLWE
auto tmp_glwe_array_out = mem_ptr->tmp_glwe_array_out;
cuda_memset_async(tmp_glwe_array_out, 0,
num_glwes_for_compression *
(compression_params.glwe_dimension + 1) *
num_glwes * (compression_params.glwe_dimension + 1) *
compression_params.polynomial_size * sizeof(Torus),
streams[0], gpu_indexes[0]);
auto fp_ks_buffer = mem_ptr->fp_ks_buffer;
@@ -131,23 +130,21 @@ host_integer_compress(cudaStream_t const *streams, uint32_t const *gpu_indexes,
// Modulus switch
host_modulus_switch_inplace<Torus>(
streams[0], gpu_indexes[0], tmp_glwe_array_out,
num_glwes_for_compression * (compression_params.glwe_dimension + 1) *
compression_params.polynomial_size,
num_glwes * compression_params.glwe_dimension *
compression_params.polynomial_size +
num_radix_blocks,
mem_ptr->storage_log_modulus);
host_pack<Torus>(streams[0], gpu_indexes[0], glwe_array_out,
tmp_glwe_array_out, num_glwes_for_compression,
num_radix_blocks, mem_ptr);
tmp_glwe_array_out, num_glwes, num_radix_blocks, mem_ptr);
}
template <typename Torus>
__global__ void extract(Torus *glwe_array_out, Torus const *array_in,
uint32_t index, uint32_t log_modulus,
uint32_t input_len, uint32_t initial_out_len) {
uint32_t log_modulus, uint32_t initial_out_len) {
auto nbits = sizeof(Torus) * 8;
auto i = threadIdx.x + blockIdx.x * blockDim.x;
auto chunk_array_in = array_in + index * input_len;
if (i < initial_out_len) {
// Unpack
Torus mask = ((Torus)1 << log_modulus) - 1;
@@ -161,12 +158,11 @@ __global__ void extract(Torus *glwe_array_out, Torus const *array_in,
Torus unpacked_i;
if (start_block == end_block_inclusive) {
auto single_part = chunk_array_in[start_block] >> start_remainder;
auto single_part = array_in[start_block] >> start_remainder;
unpacked_i = single_part & mask;
} else {
auto first_part = chunk_array_in[start_block] >> start_remainder;
auto second_part = chunk_array_in[start_block + 1]
<< (nbits - start_remainder);
auto first_part = array_in[start_block] >> start_remainder;
auto second_part = array_in[start_block + 1] << (nbits - start_remainder);
unpacked_i = (first_part | second_part) & mask;
}
@@ -177,6 +173,7 @@ __global__ void extract(Torus *glwe_array_out, Torus const *array_in,
}
/// Extracts the glwe_index-nth GLWE ciphertext
/// This function follows the naming used in the CPU implementation
template <typename Torus>
__host__ void host_extract(cudaStream_t stream, uint32_t gpu_index,
Torus *glwe_array_out, Torus const *array_in,
@@ -188,36 +185,51 @@ __host__ void host_extract(cudaStream_t stream, uint32_t gpu_index,
cuda_set_device(gpu_index);
auto compression_params = mem_ptr->compression_params;
auto log_modulus = mem_ptr->storage_log_modulus;
auto glwe_ciphertext_size = (compression_params.glwe_dimension + 1) *
compression_params.polynomial_size;
uint32_t body_count = mem_ptr->body_count;
auto num_glwes = (body_count + compression_params.polynomial_size - 1) /
compression_params.polynomial_size;
// Compressed length of the compressed GLWE we want to extract
if (mem_ptr->body_count % compression_params.polynomial_size == 0)
body_count = compression_params.polynomial_size;
else if (glwe_index == num_glwes - 1)
body_count = mem_ptr->body_count % compression_params.polynomial_size;
else
body_count = compression_params.polynomial_size;
uint32_t body_count =
std::min(mem_ptr->body_count, compression_params.polynomial_size);
auto initial_out_len =
compression_params.glwe_dimension * compression_params.polynomial_size +
body_count;
auto compressed_glwe_accumulator_size =
(compression_params.glwe_dimension + 1) *
compression_params.polynomial_size;
auto number_bits_to_unpack = compressed_glwe_accumulator_size * log_modulus;
// Calculates how many bits this particular GLWE shall use
auto number_bits_to_unpack = initial_out_len * log_modulus;
auto nbits = sizeof(Torus) * 8;
// number_bits_to_unpack.div_ceil(Scalar::BITS)
auto input_len = (number_bits_to_unpack + nbits - 1) / nbits;
// We assure the tail of the glwe is zeroed
auto zeroed_slice = glwe_array_out + initial_out_len;
cuda_memset_async(zeroed_slice, 0,
(compression_params.polynomial_size - body_count) *
sizeof(Torus),
stream, gpu_index);
// Calculates how many bits a full-packed GLWE shall use
number_bits_to_unpack = glwe_ciphertext_size * log_modulus;
auto len = (number_bits_to_unpack + nbits - 1) / nbits;
// Uses that length to set the input pointer
auto chunk_array_in = array_in + glwe_index * len;
// Ensure the tail of the GLWE is zeroed
if (initial_out_len < glwe_ciphertext_size) {
auto zeroed_slice = glwe_array_out + initial_out_len;
cuda_memset_async(glwe_array_out, 0,
(glwe_ciphertext_size - initial_out_len) * sizeof(Torus),
stream, gpu_index);
}
int num_blocks = 0, num_threads = 0;
getNumBlocksAndThreads(initial_out_len, 128, num_blocks, num_threads);
dim3 grid(num_blocks);
dim3 threads(num_threads);
extract<Torus><<<grid, threads, 0, stream>>>(glwe_array_out, array_in,
glwe_index, log_modulus,
input_len, initial_out_len);
extract<Torus><<<grid, threads, 0, stream>>>(glwe_array_out, chunk_array_in,
log_modulus, initial_out_len);
check_cuda_error(cudaGetLastError());
}
@@ -235,18 +247,13 @@ __host__ void host_integer_decompress(
auto compression_params = h_mem_ptr->compression_params;
auto lwe_per_glwe = compression_params.polynomial_size;
if (indexes_array_size > lwe_per_glwe)
PANIC("Cuda error: too many LWEs to decompress. The number of LWEs should "
"be smaller than "
"polynomial_size.")
auto num_radix_blocks = h_mem_ptr->num_radix_blocks;
if (num_radix_blocks != indexes_array_size)
PANIC("Cuda error: wrong number of LWEs in decompress: the number of LWEs "
"should be the same as indexes_array_size.")
// the first element is the last index in h_indexes_array that lies in the
// related GLWE
// the first element is the number of LWEs that lies in the related GLWE
std::vector<std::pair<int, Torus *>> glwe_vec;
// Extract all GLWEs
@@ -257,7 +264,7 @@ __host__ void host_integer_decompress(
auto extracted_glwe = h_mem_ptr->tmp_extracted_glwe;
host_extract<Torus>(streams[0], gpu_indexes[0], extracted_glwe,
d_packed_glwe_in, current_glwe_index, h_mem_ptr);
glwe_vec.push_back(std::make_pair(0, extracted_glwe));
glwe_vec.push_back(std::make_pair(1, extracted_glwe));
for (int i = 1; i < indexes_array_size; i++) {
auto glwe_index = h_indexes_array[i] / lwe_per_glwe;
if (glwe_index != current_glwe_index) {
@@ -266,10 +273,10 @@ __host__ void host_integer_decompress(
// Extracts a new GLWE
host_extract<Torus>(streams[0], gpu_indexes[0], extracted_glwe,
d_packed_glwe_in, glwe_index, h_mem_ptr);
glwe_vec.push_back(std::make_pair(i, extracted_glwe));
glwe_vec.push_back(std::make_pair(1, extracted_glwe));
} else {
// Updates the index
glwe_vec.back().first++;
// Updates the quantity
++glwe_vec.back().first;
}
}
// Sample extract all LWEs
@@ -279,17 +286,16 @@ __host__ void host_integer_decompress(
uint32_t current_idx = 0;
auto d_indexes_array_chunk = d_indexes_array;
for (const auto &max_idx_and_glwe : glwe_vec) {
uint32_t last_idx = max_idx_and_glwe.first;
const auto num_lwes = max_idx_and_glwe.first;
extracted_glwe = max_idx_and_glwe.second;
auto num_lwes = last_idx + 1 - current_idx;
cuda_glwe_sample_extract_64(streams[0], gpu_indexes[0], extracted_lwe,
extracted_glwe, d_indexes_array_chunk, num_lwes,
compression_params.glwe_dimension,
compression_params.polynomial_size);
cuda_glwe_sample_extract_64(
streams[0], gpu_indexes[0], extracted_lwe, extracted_glwe,
d_indexes_array_chunk, num_lwes, compression_params.polynomial_size,
compression_params.glwe_dimension, compression_params.polynomial_size);
d_indexes_array_chunk += num_lwes;
extracted_lwe += num_lwes * lwe_accumulator_size;
current_idx = last_idx;
current_idx += num_lwes;
}
// Reset

View File

@@ -22,18 +22,16 @@ void scratch_cuda_integer_div_rem_radix_ciphertext_kb_64(
void cuda_integer_div_rem_radix_ciphertext_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *quotient, void *remainder, void const *numerator, void const *divisor,
bool is_signed, int8_t *mem_ptr, void *const *bsks, void *const *ksks,
uint32_t num_blocks) {
CudaRadixCiphertextFFI *quotient, CudaRadixCiphertextFFI *remainder,
CudaRadixCiphertextFFI const *numerator,
CudaRadixCiphertextFFI const *divisor, bool is_signed, int8_t *mem_ptr,
void *const *bsks, void *const *ksks) {
auto mem = (int_div_rem_memory<uint64_t> *)mem_ptr;
host_integer_div_rem_kb<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(quotient), static_cast<uint64_t *>(remainder),
static_cast<const uint64_t *>(numerator),
static_cast<const uint64_t *>(divisor), is_signed, bsks,
(uint64_t **)(ksks), mem, num_blocks);
(cudaStream_t *)(streams), gpu_indexes, gpu_count, quotient, remainder,
numerator, divisor, is_signed, bsks, (uint64_t **)(ksks), mem);
}
void cleanup_cuda_integer_div_rem(void *const *streams,

View File

@@ -19,146 +19,6 @@
#include <string>
#include <vector>
int ceil_div(int a, int b) { return (a + b - 1) / b; }
// struct makes it easier to use list of ciphertexts and move data between them
// struct does not allocate or drop any memory,
// keeps track on number of ciphertexts inside list.
template <typename Torus> struct lwe_ciphertext_list {
Torus *data;
size_t max_blocks;
size_t len;
int_radix_params params;
size_t big_lwe_size;
size_t big_lwe_size_bytes;
size_t big_lwe_dimension;
lwe_ciphertext_list(Torus *src, int_radix_params params, size_t max_blocks)
: data(src), params(params), max_blocks(max_blocks) {
big_lwe_size = params.big_lwe_dimension + 1;
big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
big_lwe_dimension = params.big_lwe_dimension;
len = max_blocks;
}
// copies ciphertexts from Torus*, starting from `starting_block` including
// `finish_block`, does not change the value of self len
void copy_from(Torus *src, size_t start_block, size_t finish_block,
cudaStream_t stream, uint32_t gpu_index) {
size_t tmp_len = finish_block - start_block + 1;
cuda_memcpy_async_gpu_to_gpu(data, &src[start_block * big_lwe_size],
tmp_len * big_lwe_size_bytes, stream,
gpu_index);
}
// copies ciphertexts from lwe_ciphertext_list, starting from `starting_block`
// including `finish_block`, does not change the value of self len
void copy_from(const lwe_ciphertext_list &src, size_t start_block,
size_t finish_block, cudaStream_t stream, uint32_t gpu_index) {
copy_from(src.data, start_block, finish_block, stream, gpu_index);
}
// copies ciphertexts from Torus*, starting from `starting_block`
// including `finish_block`, updating the value of self len
void clone_from(Torus *src, size_t start_block, size_t finish_block,
cudaStream_t stream, uint32_t gpu_index) {
len = finish_block - start_block + 1;
cuda_memcpy_async_gpu_to_gpu(data, &src[start_block * big_lwe_size],
len * big_lwe_size_bytes, stream, gpu_index);
}
// copies ciphertexts from ciphertexts_list, starting from `starting_block`
// including `finish_block`, updating the value of self len
void clone_from(const lwe_ciphertext_list &src, size_t start_block,
size_t finish_block, cudaStream_t stream,
uint32_t gpu_index) {
clone_from(src.data, start_block, finish_block, stream, gpu_index);
}
// assign zero to blocks starting from `start_block` including `finish_block`
void assign_zero(size_t start_block, size_t finish_block, cudaStream_t stream,
uint32_t gpu_index) {
auto size = finish_block - start_block + 1;
cuda_memset_async(&data[start_block * big_lwe_size], 0,
size * big_lwe_size_bytes, stream, gpu_index);
}
// return pointer to last block
Torus *last_block() { return &data[(len - 1) * big_lwe_size]; }
// return pointer to first_block
Torus *first_block() { return data; }
// return block with `index`
Torus *get_block(size_t index) {
assert(index < len);
return &data[index * big_lwe_size];
}
bool is_empty() { return len == 0; }
// does not dop actual memory from `data`, only reduces value of `len` by one
void pop() {
if (len > 0)
len--;
else
assert(len > 0);
}
// insert ciphertext at index `ind`
void insert(size_t ind, Torus *ciphertext_block, cudaStream_t stream,
uint32_t gpu_index) {
assert(ind <= len);
assert(len < max_blocks);
size_t insert_offset = ind * big_lwe_size;
for (size_t i = len; i > ind; i--) {
Torus *src = &data[(i - 1) * big_lwe_size];
Torus *dst = &data[i * big_lwe_size];
cuda_memcpy_async_gpu_to_gpu(dst, src, big_lwe_size_bytes, stream,
gpu_index);
}
cuda_memcpy_async_gpu_to_gpu(&data[insert_offset], ciphertext_block,
big_lwe_size_bytes, stream, gpu_index);
len++;
}
// push ciphertext at the end of `data`
void push(Torus *ciphertext_block, cudaStream_t stream, uint32_t gpu_index) {
assert(len < max_blocks);
size_t offset = len * big_lwe_size;
cuda_memcpy_async_gpu_to_gpu(&data[offset], ciphertext_block,
big_lwe_size_bytes, stream, gpu_index);
len++;
}
// duplicate ciphertext into `number_of_blocks` ciphertexts
void fill_with_same_ciphertext(Torus *ciphertext, size_t number_of_blocks,
cudaStream_t stream, uint32_t gpu_index) {
assert(number_of_blocks <= max_blocks);
for (size_t i = 0; i < number_of_blocks; i++) {
Torus *dest = &data[i * big_lwe_size];
cuda_memcpy_async_gpu_to_gpu(dest, ciphertext, big_lwe_size_bytes, stream,
gpu_index);
}
len = number_of_blocks;
}
// used for debugging, prints body of each ciphertext.
void print_blocks_body(const char *name) {
for (int i = 0; i < len; i++) {
print_debug(name, &data[i * big_lwe_size + big_lwe_dimension], 1);
}
}
};
template <typename Torus>
__host__ void scratch_cuda_integer_div_rem_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
@@ -173,67 +33,50 @@ __host__ void scratch_cuda_integer_div_rem_kb(
template <typename Torus>
__host__ void host_unsigned_integer_div_rem_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *quotient, Torus *remainder,
Torus const *numerator, Torus const *divisor, void *const *bsks,
uint64_t *const *ksks, unsigned_int_div_rem_memory<uint64_t> *mem_ptr,
uint32_t num_blocks) {
uint32_t gpu_count, CudaRadixCiphertextFFI *quotient,
CudaRadixCiphertextFFI *remainder, CudaRadixCiphertextFFI const *numerator,
CudaRadixCiphertextFFI const *divisor, void *const *bsks,
uint64_t *const *ksks, unsigned_int_div_rem_memory<uint64_t> *mem_ptr) {
if (remainder->num_radix_blocks != numerator->num_radix_blocks ||
remainder->num_radix_blocks != divisor->num_radix_blocks ||
remainder->num_radix_blocks != quotient->num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks must be equal")
if (remainder->lwe_dimension != numerator->lwe_dimension ||
remainder->lwe_dimension != divisor->lwe_dimension ||
remainder->lwe_dimension != quotient->lwe_dimension)
PANIC("Cuda error: input and output lwe dimension must be equal")
auto radix_params = mem_ptr->params;
auto big_lwe_dimension = radix_params.big_lwe_dimension;
auto big_lwe_size = big_lwe_dimension + 1;
auto big_lwe_size_bytes = big_lwe_size * sizeof(Torus);
auto num_blocks = quotient->num_radix_blocks;
uint32_t message_modulus = radix_params.message_modulus;
uint32_t carry_modulus = radix_params.carry_modulus;
uint32_t num_bits_in_message = 31 - __builtin_clz(message_modulus);
uint32_t total_bits = num_bits_in_message * num_blocks;
// put temporary buffers in lwe_ciphertext_list for easy use
lwe_ciphertext_list<Torus> remainder1(mem_ptr->remainder1, radix_params,
num_blocks);
lwe_ciphertext_list<Torus> remainder2(mem_ptr->remainder2, radix_params,
num_blocks);
lwe_ciphertext_list<Torus> numerator_block_stack(
mem_ptr->numerator_block_stack, radix_params, num_blocks);
lwe_ciphertext_list<Torus> numerator_block_1(mem_ptr->numerator_block_1,
radix_params, 1);
lwe_ciphertext_list<Torus> tmp_radix(mem_ptr->tmp_radix, radix_params,
num_blocks + 1);
lwe_ciphertext_list<Torus> interesting_remainder1(
mem_ptr->interesting_remainder1, radix_params, num_blocks + 1);
lwe_ciphertext_list<Torus> interesting_remainder2(
mem_ptr->interesting_remainder2, radix_params, num_blocks);
lwe_ciphertext_list<Torus> interesting_divisor(mem_ptr->interesting_divisor,
radix_params, num_blocks);
lwe_ciphertext_list<Torus> divisor_ms_blocks(mem_ptr->divisor_ms_blocks,
radix_params, num_blocks);
lwe_ciphertext_list<Torus> new_remainder(mem_ptr->new_remainder, radix_params,
num_blocks);
lwe_ciphertext_list<Torus> subtraction_overflowed(
mem_ptr->subtraction_overflowed, radix_params, 1);
lwe_ciphertext_list<Torus> did_not_overflow(mem_ptr->did_not_overflow,
radix_params, 1);
lwe_ciphertext_list<Torus> overflow_sum(mem_ptr->overflow_sum, radix_params,
1);
lwe_ciphertext_list<Torus> overflow_sum_radix(mem_ptr->overflow_sum_radix,
radix_params, num_blocks);
lwe_ciphertext_list<Torus> tmp_1(mem_ptr->tmp_1, radix_params, num_blocks);
lwe_ciphertext_list<Torus> at_least_one_upper_block_is_non_zero(
mem_ptr->at_least_one_upper_block_is_non_zero, radix_params, 1);
lwe_ciphertext_list<Torus> cleaned_merged_interesting_remainder(
mem_ptr->cleaned_merged_interesting_remainder, radix_params, num_blocks);
auto remainder1 = mem_ptr->remainder1;
auto remainder2 = mem_ptr->remainder2;
auto numerator_block_stack = mem_ptr->numerator_block_stack;
auto interesting_remainder1 = mem_ptr->interesting_remainder1;
auto interesting_remainder2 = mem_ptr->interesting_remainder2;
auto interesting_divisor = mem_ptr->interesting_divisor;
auto divisor_ms_blocks = mem_ptr->divisor_ms_blocks;
auto new_remainder = mem_ptr->new_remainder;
auto subtraction_overflowed = mem_ptr->subtraction_overflowed;
auto overflow_sum = mem_ptr->overflow_sum;
auto overflow_sum_radix = mem_ptr->overflow_sum_radix;
auto at_least_one_upper_block_is_non_zero =
mem_ptr->at_least_one_upper_block_is_non_zero;
auto cleaned_merged_interesting_remainder =
mem_ptr->cleaned_merged_interesting_remainder;
numerator_block_stack.clone_from((Torus *)numerator, 0, num_blocks - 1,
streams[0], gpu_indexes[0]);
remainder1.assign_zero(0, num_blocks - 1, streams[0], gpu_indexes[0]);
remainder2.assign_zero(0, num_blocks - 1, streams[0], gpu_indexes[0]);
cuda_memset_async(quotient, 0, big_lwe_size_bytes * num_blocks, streams[0],
gpu_indexes[0]);
copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
numerator_block_stack, numerator);
set_zero_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0],
quotient, 0, num_blocks);
for (int i = total_bits - 1; i >= 0; i--) {
uint32_t block_of_bit = i / num_bits_in_message;
uint32_t pos_in_block = i % num_bits_in_message;
uint32_t msb_bit_set = total_bits - 1 - i;
uint32_t last_non_trivial_block = msb_bit_set / num_bits_in_message;
@@ -242,16 +85,27 @@ __host__ void host_unsigned_integer_div_rem_kb(
// and all blocks after it are also trivial zeros
// This number is in range 1..=num_bocks -1
uint32_t first_trivial_block = last_non_trivial_block + 1;
reset_radix_ciphertext_blocks(interesting_remainder1, first_trivial_block);
reset_radix_ciphertext_blocks(interesting_remainder2, first_trivial_block);
reset_radix_ciphertext_blocks(interesting_divisor, first_trivial_block);
reset_radix_ciphertext_blocks(divisor_ms_blocks,
num_blocks -
(msb_bit_set + 1) / num_bits_in_message);
interesting_remainder1.clone_from(remainder1, 0, last_non_trivial_block,
streams[0], gpu_indexes[0]);
interesting_remainder2.clone_from(remainder2, 0, last_non_trivial_block,
streams[0], gpu_indexes[0]);
interesting_divisor.clone_from((Torus *)divisor, 0, last_non_trivial_block,
streams[0], gpu_indexes[0]);
divisor_ms_blocks.clone_from((Torus *)divisor,
(msb_bit_set + 1) / num_bits_in_message,
num_blocks - 1, streams[0], gpu_indexes[0]);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], interesting_remainder1, 0,
first_trivial_block, remainder1, 0, first_trivial_block);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], interesting_remainder2, 0,
first_trivial_block, remainder2, 0, first_trivial_block);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], interesting_divisor, 0, first_trivial_block,
divisor, 0, first_trivial_block);
if ((msb_bit_set + 1) / num_bits_in_message < num_blocks)
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], divisor_ms_blocks, 0,
num_blocks - (msb_bit_set + 1) / num_bits_in_message, divisor,
(msb_bit_set + 1) / num_bits_in_message, num_blocks);
// We split the divisor at a block position, when in reality the split
// should be at a bit position meaning that potentially (depending on
@@ -285,16 +139,21 @@ __host__ void host_unsigned_integer_div_rem_kb(
// Shift the mask so that we will only keep bits we should
uint32_t shifted_mask = full_message_mask >> shift_amount;
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, interesting_divisor.last_block(),
interesting_divisor.last_block(), bsks, ksks, 1,
mem_ptr->masking_luts_1[shifted_mask]);
CudaRadixCiphertextFFI last_interesting_divisor_block;
as_radix_ciphertext_slice<Torus>(
&last_interesting_divisor_block, interesting_divisor,
interesting_divisor->num_radix_blocks - 1,
interesting_divisor->num_radix_blocks);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, &last_interesting_divisor_block,
&last_interesting_divisor_block, bsks, ksks,
mem_ptr->masking_luts_1[shifted_mask], 1);
}; // trim_last_interesting_divisor_bits
auto trim_first_divisor_ms_bits = [&](cudaStream_t const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count) {
if (divisor_ms_blocks.is_empty() ||
if (divisor_ms_blocks->num_radix_blocks == 0 ||
((msb_bit_set + 1) % num_bits_in_message) == 0) {
return;
}
@@ -314,10 +173,9 @@ __host__ void host_unsigned_integer_div_rem_kb(
// the estimated degree of the output is < msg_modulus
shifted_mask = shifted_mask & full_message_mask;
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, divisor_ms_blocks.first_block(),
divisor_ms_blocks.first_block(), bsks, ksks, 1,
mem_ptr->masking_luts_2[shifted_mask]);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, divisor_ms_blocks, divisor_ms_blocks,
bsks, ksks, mem_ptr->masking_luts_2[shifted_mask], 1);
}; // trim_first_divisor_ms_bits
// This does
@@ -332,46 +190,49 @@ __host__ void host_unsigned_integer_div_rem_kb(
auto left_shift_interesting_remainder1 = [&](cudaStream_t const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count) {
numerator_block_1.clone_from(
numerator_block_stack, numerator_block_stack.len - 1,
numerator_block_stack.len - 1, streams[0], gpu_indexes[0]);
numerator_block_stack.pop();
interesting_remainder1.insert(0, numerator_block_1.first_block(),
streams[0], gpu_indexes[0]);
pop_radix_ciphertext_block_async<Torus>(streams[0], gpu_indexes[0],
mem_ptr->numerator_block_1,
numerator_block_stack);
insert_block_in_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
mem_ptr->numerator_block_1,
interesting_remainder1, 0);
legacy_host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
streams, gpu_indexes, gpu_count, interesting_remainder1.data, 1,
mem_ptr->shift_mem_1, bsks, ksks, interesting_remainder1.len);
host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
streams, gpu_indexes, gpu_count, interesting_remainder1, 1,
mem_ptr->shift_mem_1, bsks, ksks,
interesting_remainder1->num_radix_blocks);
tmp_radix.clone_from(interesting_remainder1, 0,
interesting_remainder1.len - 1, streams[0],
gpu_indexes[0]);
reset_radix_ciphertext_blocks(mem_ptr->tmp_radix,
interesting_remainder1->num_radix_blocks);
copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
mem_ptr->tmp_radix,
interesting_remainder1);
legacy_host_radix_blocks_rotate_left<Torus>(
streams, gpu_indexes, gpu_count, interesting_remainder1.data,
tmp_radix.data, 1, interesting_remainder1.len, big_lwe_size);
host_radix_blocks_rotate_left<Torus>(
streams, gpu_indexes, gpu_count, interesting_remainder1,
mem_ptr->tmp_radix, 1, interesting_remainder1->num_radix_blocks);
numerator_block_1.clone_from(
interesting_remainder1, interesting_remainder1.len - 1,
interesting_remainder1.len - 1, streams[0], gpu_indexes[0]);
interesting_remainder1.pop();
pop_radix_ciphertext_block_async<Torus>(streams[0], gpu_indexes[0],
mem_ptr->numerator_block_1,
interesting_remainder1);
if (pos_in_block != 0) {
// We have not yet extracted all the bits from this numerator
// so, we put it back on the front so that it gets taken next
// iteration
numerator_block_stack.push(numerator_block_1.first_block(), streams[0],
gpu_indexes[0]);
push_block_to_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
mem_ptr->numerator_block_1,
numerator_block_stack);
}
}; // left_shift_interesting_remainder1
auto left_shift_interesting_remainder2 = [&](cudaStream_t const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count) {
legacy_host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
streams, gpu_indexes, gpu_count, interesting_remainder2.data, 1,
mem_ptr->shift_mem_2, bsks, ksks, interesting_remainder2.len);
host_integer_radix_logical_scalar_shift_kb_inplace<Torus>(
streams, gpu_indexes, gpu_count, interesting_remainder2, 1,
mem_ptr->shift_mem_2, bsks, ksks,
interesting_remainder2->num_radix_blocks);
}; // left_shift_interesting_remainder2
for (uint j = 0; j < gpu_count; j++) {
@@ -400,24 +261,31 @@ __host__ void host_unsigned_integer_div_rem_kb(
// if interesting_remainder1 == 0 -> interesting_remainder2 != 0
// In practice interesting_remainder1 contains the numerator bit,
// but in that position, interesting_remainder2 always has a 0
auto &merged_interesting_remainder = interesting_remainder1;
auto merged_interesting_remainder = interesting_remainder1;
legacy_host_addition<Torus>(
streams[0], gpu_indexes[0], merged_interesting_remainder.data,
merged_interesting_remainder.data, interesting_remainder2.data,
radix_params.big_lwe_dimension, merged_interesting_remainder.len);
host_addition<Torus>(streams[0], gpu_indexes[0],
merged_interesting_remainder,
merged_interesting_remainder, interesting_remainder2,
merged_interesting_remainder->num_radix_blocks);
// after create_clean_version_of_merged_remainder
// `merged_interesting_remainder` will be reused as
// `cleaned_merged_interesting_remainder`
cleaned_merged_interesting_remainder.clone_from(
merged_interesting_remainder, 0, merged_interesting_remainder.len - 1,
streams[0], gpu_indexes[0]);
reset_radix_ciphertext_blocks(
cleaned_merged_interesting_remainder,
merged_interesting_remainder->num_radix_blocks);
copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
cleaned_merged_interesting_remainder,
merged_interesting_remainder);
assert(merged_interesting_remainder.len == interesting_divisor.len);
if (merged_interesting_remainder->num_radix_blocks !=
interesting_divisor->num_radix_blocks)
PANIC("Cuda error: merged interesting remainder and interesting divisor "
"should have the same number of blocks")
// `new_remainder` is not initialized yet, so need to set length
new_remainder.len = merged_interesting_remainder.len;
reset_radix_ciphertext_blocks(
new_remainder, merged_interesting_remainder->num_radix_blocks);
// fills:
// `new_remainder` - radix ciphertext
@@ -427,22 +295,24 @@ __host__ void host_unsigned_integer_div_rem_kb(
uint32_t gpu_count) {
uint32_t compute_borrow = 1;
uint32_t uses_input_borrow = 0;
auto first_indexes = mem_ptr->first_indexes_for_overflow_sub
[merged_interesting_remainder.len - 1];
auto second_indexes = mem_ptr->second_indexes_for_overflow_sub
[merged_interesting_remainder.len - 1];
auto first_indexes =
mem_ptr->first_indexes_for_overflow_sub
[merged_interesting_remainder->num_radix_blocks - 1];
auto second_indexes =
mem_ptr->second_indexes_for_overflow_sub
[merged_interesting_remainder->num_radix_blocks - 1];
auto scalar_indexes =
mem_ptr
->scalars_for_overflow_sub[merged_interesting_remainder.len - 1];
mem_ptr->scalars_for_overflow_sub
[merged_interesting_remainder->num_radix_blocks - 1];
mem_ptr->overflow_sub_mem->update_lut_indexes(
streams, gpu_indexes, first_indexes, second_indexes, scalar_indexes,
merged_interesting_remainder.len);
legacy_host_integer_overflowing_sub<uint64_t>(
streams, gpu_indexes, gpu_count, new_remainder.data,
(uint64_t *)merged_interesting_remainder.data,
interesting_divisor.data, subtraction_overflowed.data,
(const Torus *)nullptr, mem_ptr->overflow_sub_mem, bsks, ksks,
merged_interesting_remainder.len, compute_borrow, uses_input_borrow);
merged_interesting_remainder->num_radix_blocks);
host_integer_overflowing_sub<uint64_t>(
streams, gpu_indexes, gpu_count, new_remainder,
merged_interesting_remainder, interesting_divisor,
subtraction_overflowed, (const CudaRadixCiphertextFFI *)nullptr,
mem_ptr->overflow_sub_mem, bsks, ksks, compute_borrow,
uses_input_borrow);
};
// fills:
@@ -450,27 +320,27 @@ __host__ void host_unsigned_integer_div_rem_kb(
auto check_divisor_upper_blocks = [&](cudaStream_t const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count) {
auto &trivial_blocks = divisor_ms_blocks;
if (trivial_blocks.is_empty()) {
cuda_memset_async(at_least_one_upper_block_is_non_zero.first_block(), 0,
big_lwe_size_bytes, streams[0], gpu_indexes[0]);
auto trivial_blocks = divisor_ms_blocks;
if (trivial_blocks->num_radix_blocks == 0) {
set_zero_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], at_least_one_upper_block_is_non_zero, 0,
1);
} else {
// We could call unchecked_scalar_ne
// But we are in the special case where scalar == 0
// So we can skip some stuff
host_compare_with_zero_equality<Torus>(
streams, gpu_indexes, gpu_count, tmp_1.data, trivial_blocks.data,
mem_ptr->comparison_buffer, bsks, ksks, trivial_blocks.len,
host_compare_blocks_with_zero<Torus>(
streams, gpu_indexes, gpu_count, mem_ptr->tmp_1, trivial_blocks,
mem_ptr->comparison_buffer, bsks, ksks,
trivial_blocks->num_radix_blocks,
mem_ptr->comparison_buffer->eq_buffer->is_non_zero_lut);
tmp_1.len =
ceil_div(trivial_blocks.len, message_modulus * carry_modulus - 1);
is_at_least_one_comparisons_block_true<Torus>(
streams, gpu_indexes, gpu_count,
at_least_one_upper_block_is_non_zero.data, tmp_1.data,
mem_ptr->comparison_buffer, bsks, ksks, tmp_1.len);
at_least_one_upper_block_is_non_zero, mem_ptr->tmp_1,
mem_ptr->comparison_buffer, bsks, ksks,
mem_ptr->tmp_1->num_radix_blocks);
}
};
@@ -481,12 +351,12 @@ __host__ void host_unsigned_integer_div_rem_kb(
auto create_clean_version_of_merged_remainder =
[&](cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count) {
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
integer_radix_apply_univariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count,
cleaned_merged_interesting_remainder.data,
cleaned_merged_interesting_remainder.data, bsks, ksks,
cleaned_merged_interesting_remainder.len,
mem_ptr->message_extract_lut_1);
cleaned_merged_interesting_remainder,
cleaned_merged_interesting_remainder, bsks, ksks,
mem_ptr->message_extract_lut_1,
cleaned_merged_interesting_remainder->num_radix_blocks);
};
// phase 2
@@ -507,55 +377,56 @@ __host__ void host_unsigned_integer_div_rem_kb(
cuda_synchronize_stream(mem_ptr->sub_streams_3[j], gpu_indexes[j]);
}
legacy_host_addition<Torus>(streams[0], gpu_indexes[0], overflow_sum.data,
subtraction_overflowed.data,
at_least_one_upper_block_is_non_zero.data,
radix_params.big_lwe_dimension, 1);
host_addition<Torus>(streams[0], gpu_indexes[0], overflow_sum,
subtraction_overflowed,
at_least_one_upper_block_is_non_zero, 1);
int factor = (i) ? 3 : 2;
int factor_lut_id = factor - 2;
overflow_sum_radix.fill_with_same_ciphertext(
overflow_sum.first_block(), cleaned_merged_interesting_remainder.len,
streams[0], gpu_indexes[0]);
for (size_t i = 0;
i < cleaned_merged_interesting_remainder->num_radix_blocks; i++) {
copy_radix_ciphertext_slice_async<Torus>(streams[0], gpu_indexes[0],
overflow_sum_radix, i, i + 1,
overflow_sum, 0, 1);
}
auto conditionally_zero_out_merged_interesting_remainder =
[&](cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count) {
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count,
cleaned_merged_interesting_remainder.data,
cleaned_merged_interesting_remainder.data,
overflow_sum_radix.data, bsks, ksks,
cleaned_merged_interesting_remainder.len,
mem_ptr->zero_out_if_overflow_did_not_happen[factor_lut_id],
factor);
cleaned_merged_interesting_remainder,
cleaned_merged_interesting_remainder, overflow_sum_radix, bsks,
ksks, mem_ptr->zero_out_if_overflow_did_not_happen[factor_lut_id],
cleaned_merged_interesting_remainder->num_radix_blocks, factor);
};
auto conditionally_zero_out_merged_new_remainder =
[&](cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count) {
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, new_remainder.data,
new_remainder.data, overflow_sum_radix.data, bsks, ksks,
new_remainder.len,
mem_ptr->zero_out_if_overflow_happened[factor_lut_id], factor);
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, new_remainder, new_remainder,
overflow_sum_radix, bsks, ksks,
mem_ptr->zero_out_if_overflow_happened[factor_lut_id],
new_remainder->num_radix_blocks, factor);
};
auto set_quotient_bit = [&](cudaStream_t const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count) {
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, did_not_overflow.data,
subtraction_overflowed.data,
at_least_one_upper_block_is_non_zero.data, bsks, ksks, 1,
mem_ptr->merge_overflow_flags_luts[pos_in_block],
uint32_t block_of_bit = i / num_bits_in_message;
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
streams, gpu_indexes, gpu_count, mem_ptr->did_not_overflow,
subtraction_overflowed, at_least_one_upper_block_is_non_zero, bsks,
ksks, mem_ptr->merge_overflow_flags_luts[pos_in_block], 1,
mem_ptr->merge_overflow_flags_luts[pos_in_block]
->params.message_modulus);
legacy_host_addition<Torus>(
streams[0], gpu_indexes[0], &quotient[block_of_bit * big_lwe_size],
&quotient[block_of_bit * big_lwe_size], did_not_overflow.data,
radix_params.big_lwe_dimension, 1);
CudaRadixCiphertextFFI quotient_block;
as_radix_ciphertext_slice<Torus>(&quotient_block, quotient, block_of_bit,
block_of_bit + 1);
host_addition<Torus>(streams[0], gpu_indexes[0], &quotient_block,
&quotient_block, mem_ptr->did_not_overflow, 1);
};
for (uint j = 0; j < gpu_count; j++) {
@@ -575,32 +446,40 @@ __host__ void host_unsigned_integer_div_rem_kb(
cuda_synchronize_stream(mem_ptr->sub_streams_3[j], gpu_indexes[j]);
}
assert(first_trivial_block - 1 == cleaned_merged_interesting_remainder.len);
assert(first_trivial_block - 1 == new_remainder.len);
if (first_trivial_block !=
cleaned_merged_interesting_remainder->num_radix_blocks)
PANIC("Cuda error: first_trivial_block should be equal to "
"clean_merged_interesting_remainder num blocks")
if (first_trivial_block != new_remainder->num_radix_blocks)
PANIC("Cuda error: first_trivial_block should be equal to new_remainder "
"num blocks")
remainder1.copy_from(cleaned_merged_interesting_remainder, 0,
first_trivial_block - 1, streams[0], gpu_indexes[0]);
remainder2.copy_from(new_remainder, 0, first_trivial_block - 1, streams[0],
gpu_indexes[0]);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], remainder1, 0, first_trivial_block,
cleaned_merged_interesting_remainder, 0, first_trivial_block);
copy_radix_ciphertext_slice_async<Torus>(
streams[0], gpu_indexes[0], remainder2, 0, first_trivial_block,
new_remainder, 0, first_trivial_block);
}
assert(remainder1.len == remainder2.len);
if (remainder1->num_radix_blocks != remainder2->num_radix_blocks)
PANIC("Cuda error: remainder1 and remainder2 should have the same number "
"of blocks")
// Clean the quotient and remainder
// as even though they have no carries, they are not at nominal noise level
legacy_host_addition<Torus>(streams[0], gpu_indexes[0], remainder,
remainder1.data, remainder2.data,
radix_params.big_lwe_dimension, remainder1.len);
host_addition<Torus>(streams[0], gpu_indexes[0], remainder, remainder1,
remainder2, remainder1->num_radix_blocks);
for (uint j = 0; j < gpu_count; j++) {
cuda_synchronize_stream(streams[j], gpu_indexes[j]);
}
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
integer_radix_apply_univariate_lookup_table_kb<Torus>(
mem_ptr->sub_streams_1, gpu_indexes, gpu_count, remainder, remainder,
bsks, ksks, num_blocks, mem_ptr->message_extract_lut_1);
legacy_integer_radix_apply_univariate_lookup_table_kb<Torus>(
bsks, ksks, mem_ptr->message_extract_lut_1, num_blocks);
integer_radix_apply_univariate_lookup_table_kb<Torus>(
mem_ptr->sub_streams_2, gpu_indexes, gpu_count, quotient, quotient, bsks,
ksks, num_blocks, mem_ptr->message_extract_lut_2);
ksks, mem_ptr->message_extract_lut_2, num_blocks);
for (uint j = 0; j < mem_ptr->active_gpu_count; j++) {
cuda_synchronize_stream(mem_ptr->sub_streams_1[j], gpu_indexes[j]);
cuda_synchronize_stream(mem_ptr->sub_streams_2[j], gpu_indexes[j]);
@@ -608,42 +487,43 @@ __host__ void host_unsigned_integer_div_rem_kb(
}
template <typename Torus>
__host__ void host_integer_div_rem_kb(cudaStream_t const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count, Torus *quotient,
Torus *remainder, Torus const *numerator,
Torus const *divisor, bool is_signed,
void *const *bsks, uint64_t *const *ksks,
int_div_rem_memory<uint64_t> *int_mem_ptr,
uint32_t num_blocks) {
__host__ void host_integer_div_rem_kb(
cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count, CudaRadixCiphertextFFI *quotient,
CudaRadixCiphertextFFI *remainder, CudaRadixCiphertextFFI const *numerator,
CudaRadixCiphertextFFI const *divisor, bool is_signed, void *const *bsks,
uint64_t *const *ksks, int_div_rem_memory<uint64_t> *int_mem_ptr) {
if (remainder->num_radix_blocks != numerator->num_radix_blocks ||
remainder->num_radix_blocks != divisor->num_radix_blocks ||
remainder->num_radix_blocks != quotient->num_radix_blocks)
PANIC("Cuda error: input and output num radix blocks must be equal")
if (remainder->lwe_dimension != numerator->lwe_dimension ||
remainder->lwe_dimension != divisor->lwe_dimension ||
remainder->lwe_dimension != quotient->lwe_dimension)
PANIC("Cuda error: input and output lwe dimension must be equal")
auto num_blocks = quotient->num_radix_blocks;
if (is_signed) {
auto radix_params = int_mem_ptr->params;
uint32_t big_lwe_size = radix_params.big_lwe_dimension + 1;
// temporary memory
lwe_ciphertext_list<Torus> positive_numerator(
int_mem_ptr->positive_numerator, radix_params, num_blocks);
lwe_ciphertext_list<Torus> positive_divisor(int_mem_ptr->positive_divisor,
radix_params, num_blocks);
positive_numerator.clone_from((Torus *)numerator, 0, num_blocks - 1,
streams[0], gpu_indexes[0]);
positive_divisor.clone_from((Torus *)divisor, 0, num_blocks - 1, streams[0],
gpu_indexes[0]);
auto positive_numerator = int_mem_ptr->positive_numerator;
auto positive_divisor = int_mem_ptr->positive_divisor;
copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
positive_numerator, numerator);
copy_radix_ciphertext_async<Torus>(streams[0], gpu_indexes[0],
positive_divisor, divisor);
for (uint j = 0; j < gpu_count; j++) {
cuda_synchronize_stream(streams[j], gpu_indexes[j]);
}
legacy_host_integer_abs_kb_async<Torus>(
int_mem_ptr->sub_streams_1, gpu_indexes, gpu_count,
positive_numerator.data, bsks, ksks, int_mem_ptr->abs_mem_1, true,
num_blocks);
legacy_host_integer_abs_kb_async<Torus>(
int_mem_ptr->sub_streams_2, gpu_indexes, gpu_count,
positive_divisor.data, bsks, ksks, int_mem_ptr->abs_mem_2, true,
num_blocks);
host_integer_abs_kb<Torus>(int_mem_ptr->sub_streams_1, gpu_indexes,
gpu_count, positive_numerator, bsks, ksks,
int_mem_ptr->abs_mem_1, true);
host_integer_abs_kb<Torus>(int_mem_ptr->sub_streams_2, gpu_indexes,
gpu_count, positive_divisor, bsks, ksks,
int_mem_ptr->abs_mem_2, true);
for (uint j = 0; j < int_mem_ptr->active_gpu_count; j++) {
cuda_synchronize_stream(int_mem_ptr->sub_streams_1[j], gpu_indexes[j]);
cuda_synchronize_stream(int_mem_ptr->sub_streams_2[j], gpu_indexes[j]);
@@ -651,15 +531,19 @@ __host__ void host_integer_div_rem_kb(cudaStream_t const *streams,
host_unsigned_integer_div_rem_kb<Torus>(
int_mem_ptr->sub_streams_1, gpu_indexes, gpu_count, quotient, remainder,
positive_numerator.data, positive_divisor.data, bsks, ksks,
int_mem_ptr->unsigned_mem, num_blocks);
positive_numerator, positive_divisor, bsks, ksks,
int_mem_ptr->unsigned_mem);
legacy_integer_radix_apply_bivariate_lookup_table_kb<Torus>(
CudaRadixCiphertextFFI numerator_sign;
as_radix_ciphertext_slice<Torus>(&numerator_sign, numerator, num_blocks - 1,
num_blocks);
CudaRadixCiphertextFFI divisor_sign;
as_radix_ciphertext_slice<Torus>(&divisor_sign, divisor, num_blocks - 1,
num_blocks);
integer_radix_apply_bivariate_lookup_table_kb<Torus>(
int_mem_ptr->sub_streams_2, gpu_indexes, gpu_count,
int_mem_ptr->sign_bits_are_different,
&numerator[big_lwe_size * (num_blocks - 1)],
&divisor[big_lwe_size * (num_blocks - 1)], bsks, ksks, 1,
int_mem_ptr->compare_signed_bits_lut,
int_mem_ptr->sign_bits_are_different, &numerator_sign, &divisor_sign,
bsks, ksks, int_mem_ptr->compare_signed_bits_lut, 1,
int_mem_ptr->compare_signed_bits_lut->params.message_modulus);
for (uint j = 0; j < int_mem_ptr->active_gpu_count; j++) {
@@ -667,40 +551,37 @@ __host__ void host_integer_div_rem_kb(cudaStream_t const *streams,
cuda_synchronize_stream(int_mem_ptr->sub_streams_2[j], gpu_indexes[j]);
}
legacy_host_integer_radix_negation(
int_mem_ptr->sub_streams_1, gpu_indexes, gpu_count,
int_mem_ptr->negated_quotient, quotient, radix_params.big_lwe_dimension,
num_blocks, radix_params.message_modulus, radix_params.carry_modulus);
host_integer_radix_negation<Torus>(int_mem_ptr->sub_streams_1, gpu_indexes,
gpu_count, int_mem_ptr->negated_quotient,
quotient, radix_params.message_modulus,
radix_params.carry_modulus, num_blocks);
uint32_t requested_flag = outputFlag::FLAG_NONE;
uint32_t uses_carry = 0;
legacy_host_propagate_single_carry<Torus>(
int_mem_ptr->sub_streams_1, gpu_indexes, gpu_count,
int_mem_ptr->negated_quotient, nullptr, nullptr, int_mem_ptr->scp_mem_1,
bsks, ksks, num_blocks, requested_flag, uses_carry);
host_propagate_single_carry<Torus>(int_mem_ptr->sub_streams_1, gpu_indexes,
gpu_count, int_mem_ptr->negated_quotient,
nullptr, nullptr, int_mem_ptr->scp_mem_1,
bsks, ksks, requested_flag, uses_carry);
legacy_host_integer_radix_negation(
host_integer_radix_negation<Torus>(
int_mem_ptr->sub_streams_2, gpu_indexes, gpu_count,
int_mem_ptr->negated_remainder, remainder,
radix_params.big_lwe_dimension, num_blocks,
radix_params.message_modulus, radix_params.carry_modulus);
int_mem_ptr->negated_remainder, remainder, radix_params.message_modulus,
radix_params.carry_modulus, num_blocks);
legacy_host_propagate_single_carry<Torus>(
host_propagate_single_carry<Torus>(
int_mem_ptr->sub_streams_2, gpu_indexes, gpu_count,
int_mem_ptr->negated_remainder, nullptr, nullptr,
int_mem_ptr->scp_mem_2, bsks, ksks, num_blocks, requested_flag,
uses_carry);
int_mem_ptr->scp_mem_2, bsks, ksks, requested_flag, uses_carry);
legacy_host_integer_radix_cmux_kb<Torus>(
host_integer_radix_cmux_kb<Torus>(
int_mem_ptr->sub_streams_1, gpu_indexes, gpu_count, quotient,
int_mem_ptr->sign_bits_are_different, int_mem_ptr->negated_quotient,
quotient, int_mem_ptr->cmux_quotient_mem, bsks, ksks, num_blocks);
quotient, int_mem_ptr->cmux_quotient_mem, bsks, ksks);
legacy_host_integer_radix_cmux_kb<Torus>(
host_integer_radix_cmux_kb<Torus>(
int_mem_ptr->sub_streams_2, gpu_indexes, gpu_count, remainder,
&numerator[big_lwe_size * (num_blocks - 1)],
int_mem_ptr->negated_remainder, remainder,
int_mem_ptr->cmux_remainder_mem, bsks, ksks, num_blocks);
&numerator_sign, int_mem_ptr->negated_remainder, remainder,
int_mem_ptr->cmux_remainder_mem, bsks, ksks);
for (uint j = 0; j < int_mem_ptr->active_gpu_count; j++) {
cuda_synchronize_stream(int_mem_ptr->sub_streams_1[j], gpu_indexes[j]);
@@ -709,7 +590,7 @@ __host__ void host_integer_div_rem_kb(cudaStream_t const *streams,
} else {
host_unsigned_integer_div_rem_kb<Torus>(
streams, gpu_indexes, gpu_count, quotient, remainder, numerator,
divisor, bsks, ksks, int_mem_ptr->unsigned_mem, num_blocks);
divisor, bsks, ksks, int_mem_ptr->unsigned_mem);
}
}

View File

@@ -4,17 +4,17 @@
void cuda_full_propagation_64_inplace(void *const *streams,
uint32_t const *gpu_indexes,
uint32_t gpu_count, void *input_blocks,
uint32_t gpu_count,
CudaRadixCiphertextFFI *input_blocks,
int8_t *mem_ptr, void *const *ksks,
void *const *bsks, uint32_t num_blocks) {
int_fullprop_buffer<uint64_t> *buffer =
(int_fullprop_buffer<uint64_t> *)mem_ptr;
host_full_propagate_inplace<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(input_blocks), buffer, (uint64_t **)(ksks), bsks,
num_blocks);
host_full_propagate_inplace<uint64_t>((cudaStream_t *)(streams), gpu_indexes,
gpu_count, input_blocks, buffer,
(uint64_t **)(ksks), bsks, num_blocks);
}
void scratch_cuda_full_propagation_64(
@@ -315,11 +315,9 @@ void cuda_integer_compute_prefix_sum_hillis_steele_64(
CudaRadixCiphertextFFI *generates_or_propagates, int8_t *mem_ptr,
void *const *ksks, void *const *bsks, uint32_t num_radix_blocks) {
int_radix_params params = ((int_radix_lut<uint64_t> *)mem_ptr)->params;
host_compute_prefix_sum_hillis_steele<uint64_t>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count, output_radix_lwe,
generates_or_propagates, params, (int_radix_lut<uint64_t> *)mem_ptr, bsks,
generates_or_propagates, (int_radix_lut<uint64_t> *)mem_ptr, bsks,
(uint64_t **)(ksks), num_radix_blocks);
}
@@ -338,3 +336,20 @@ void cuda_integer_reverse_blocks_64_inplace(void *const *streams,
host_radix_blocks_reverse_inplace<uint64_t>((cudaStream_t *)(streams),
gpu_indexes, lwe_array);
}
void reverseArray(uint64_t arr[], size_t n) {
size_t start = 0;
size_t end = n - 1;
// Swap elements from the start with elements from the end
while (start < end) {
// Swap arr[start] and arr[end]
uint64_t temp = arr[start];
arr[start] = arr[end];
arr[end] = temp;
// Move towards the middle
start++;
end--;
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -13,7 +13,7 @@
* as output ids, -1 value as an input id means that zero ciphertext will be
* copied on output index.
*/
void generate_ids_update_degrees(int *terms_degree, size_t *h_lwe_idx_in,
void generate_ids_update_degrees(uint64_t *terms_degree, size_t *h_lwe_idx_in,
size_t *h_lwe_idx_out,
int32_t *h_smart_copy_in,
int32_t *h_smart_copy_out, size_t ch_amount,
@@ -127,66 +127,53 @@ void scratch_cuda_integer_mult_radix_ciphertext_kb_64(
*/
void cuda_integer_mult_radix_ciphertext_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *radix_lwe_out, void const *radix_lwe_left, bool const is_bool_left,
void const *radix_lwe_right, bool const is_bool_right, void *const *bsks,
void *const *ksks, int8_t *mem_ptr, uint32_t polynomial_size,
uint32_t num_blocks) {
CudaRadixCiphertextFFI *radix_lwe_out,
CudaRadixCiphertextFFI const *radix_lwe_left, bool const is_bool_left,
CudaRadixCiphertextFFI const *radix_lwe_right, bool const is_bool_right,
void *const *bsks, void *const *ksks, int8_t *mem_ptr,
uint32_t polynomial_size, uint32_t num_blocks) {
switch (polynomial_size) {
case 256:
host_integer_mult_radix_kb<uint64_t, AmortizedDegree<256>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<const uint64_t *>(radix_lwe_left), is_bool_left,
static_cast<const uint64_t *>(radix_lwe_right), is_bool_right, bsks,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_left, is_bool_left, radix_lwe_right, is_bool_right, bsks,
(uint64_t **)(ksks), (int_mul_memory<uint64_t> *)mem_ptr, num_blocks);
break;
case 512:
host_integer_mult_radix_kb<uint64_t, AmortizedDegree<512>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<const uint64_t *>(radix_lwe_left), is_bool_left,
static_cast<const uint64_t *>(radix_lwe_right), is_bool_right, bsks,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_left, is_bool_left, radix_lwe_right, is_bool_right, bsks,
(uint64_t **)(ksks), (int_mul_memory<uint64_t> *)mem_ptr, num_blocks);
break;
case 1024:
host_integer_mult_radix_kb<uint64_t, AmortizedDegree<1024>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<const uint64_t *>(radix_lwe_left), is_bool_left,
static_cast<const uint64_t *>(radix_lwe_right), is_bool_right, bsks,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_left, is_bool_left, radix_lwe_right, is_bool_right, bsks,
(uint64_t **)(ksks), (int_mul_memory<uint64_t> *)mem_ptr, num_blocks);
break;
case 2048:
host_integer_mult_radix_kb<uint64_t, AmortizedDegree<2048>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<const uint64_t *>(radix_lwe_left), is_bool_left,
static_cast<const uint64_t *>(radix_lwe_right), is_bool_right, bsks,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_left, is_bool_left, radix_lwe_right, is_bool_right, bsks,
(uint64_t **)(ksks), (int_mul_memory<uint64_t> *)mem_ptr, num_blocks);
break;
case 4096:
host_integer_mult_radix_kb<uint64_t, AmortizedDegree<4096>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<const uint64_t *>(radix_lwe_left), is_bool_left,
static_cast<const uint64_t *>(radix_lwe_right), is_bool_right, bsks,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_left, is_bool_left, radix_lwe_right, is_bool_right, bsks,
(uint64_t **)(ksks), (int_mul_memory<uint64_t> *)mem_ptr, num_blocks);
break;
case 8192:
host_integer_mult_radix_kb<uint64_t, AmortizedDegree<8192>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<const uint64_t *>(radix_lwe_left), is_bool_left,
static_cast<const uint64_t *>(radix_lwe_right), is_bool_right, bsks,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_left, is_bool_left, radix_lwe_right, is_bool_right, bsks,
(uint64_t **)(ksks), (int_mul_memory<uint64_t> *)mem_ptr, num_blocks);
break;
case 16384:
host_integer_mult_radix_kb<uint64_t, AmortizedDegree<16384>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<const uint64_t *>(radix_lwe_left), is_bool_left,
static_cast<const uint64_t *>(radix_lwe_right), is_bool_right, bsks,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_left, is_bool_left, radix_lwe_right, is_bool_right, bsks,
(uint64_t **)(ksks), (int_mul_memory<uint64_t> *)mem_ptr, num_blocks);
break;
default:
@@ -226,79 +213,77 @@ void scratch_cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
void cuda_integer_radix_partial_sum_ciphertexts_vec_kb_64(
void *const *streams, uint32_t const *gpu_indexes, uint32_t gpu_count,
void *radix_lwe_out, void *radix_lwe_vec, uint32_t num_radix_in_vec,
int8_t *mem_ptr, void *const *bsks, void *const *ksks,
uint32_t num_blocks_in_radix) {
CudaRadixCiphertextFFI *radix_lwe_out,
CudaRadixCiphertextFFI *radix_lwe_vec, int8_t *mem_ptr, void *const *bsks,
void *const *ksks) {
auto mem = (int_sum_ciphertexts_vec_memory<uint64_t> *)mem_ptr;
int *terms_degree =
(int *)malloc(num_blocks_in_radix * num_radix_in_vec * sizeof(int));
for (int i = 0; i < num_radix_in_vec * num_blocks_in_radix; i++) {
terms_degree[i] = mem->params.message_modulus - 1;
if (radix_lwe_vec->num_radix_blocks % radix_lwe_out->num_radix_blocks != 0)
PANIC("Cuda error: input vector length should be a multiple of the "
"output's number of radix blocks")
// FIXME: this should not be necessary, we should make sure sum_ctxt works in
// the general case
for (int i = 0; i < radix_lwe_vec->num_radix_blocks; i++) {
radix_lwe_vec->degrees[i] = mem->params.message_modulus - 1;
}
switch (mem->params.polynomial_size) {
case 512:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t, AmortizedDegree<512>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsks,
(uint64_t **)(ksks), mem, num_blocks_in_radix, num_radix_in_vec,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, bsks, (uint64_t **)(ksks), mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks,
nullptr);
break;
case 1024:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<1024>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsks,
(uint64_t **)(ksks), mem, num_blocks_in_radix, num_radix_in_vec,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, bsks, (uint64_t **)(ksks), mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks,
nullptr);
break;
case 2048:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<2048>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsks,
(uint64_t **)(ksks), mem, num_blocks_in_radix, num_radix_in_vec,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, bsks, (uint64_t **)(ksks), mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks,
nullptr);
break;
case 4096:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<4096>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsks,
(uint64_t **)(ksks), mem, num_blocks_in_radix, num_radix_in_vec,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, bsks, (uint64_t **)(ksks), mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks,
nullptr);
break;
case 8192:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<8192>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsks,
(uint64_t **)(ksks), mem, num_blocks_in_radix, num_radix_in_vec,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, bsks, (uint64_t **)(ksks), mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks,
nullptr);
break;
case 16384:
host_integer_partial_sum_ciphertexts_vec_kb<uint64_t,
AmortizedDegree<16384>>(
(cudaStream_t *)(streams), gpu_indexes, gpu_count,
static_cast<uint64_t *>(radix_lwe_out),
static_cast<uint64_t *>(radix_lwe_vec), terms_degree, bsks,
(uint64_t **)(ksks), mem, num_blocks_in_radix, num_radix_in_vec,
(cudaStream_t *)(streams), gpu_indexes, gpu_count, radix_lwe_out,
radix_lwe_vec, bsks, (uint64_t **)(ksks), mem,
radix_lwe_out->num_radix_blocks,
radix_lwe_vec->num_radix_blocks / radix_lwe_out->num_radix_blocks,
nullptr);
break;
default:
PANIC("Cuda error (integer multiplication): unsupported polynomial size. "
"Supported N's are powers of two in the interval [256..16384].")
}
free(terms_degree);
}
void cleanup_cuda_integer_radix_partial_sum_ciphertexts_vec(

Some files were not shown because too many files have changed in this diff Show More