Compare commits

...

249 Commits

Author SHA1 Message Date
Enzo Di Maria
cfb1d1340e refactor(gpu): AES 64 2025-10-28 10:42:13 +01:00
Nicolas Sarlin
95593b1ea9 fix(zk): missing compressed proof version 2025-10-28 09:50:00 +01:00
Agnes Leroy
231d0c5e50 chore(gpu): disable lto in gpu bench compilation 2025-10-28 09:37:14 +01:00
David Testé
1d0a5c96a4 chore(ci): add bench type selection to core_crypto bench workflow 2025-10-27 18:09:54 +01:00
David Testé
b0b49ae533 chore(bench): new parameters set to run core_crypto bench for docs
This creates an extended parameters set to reflect what's displayed
in the documentation.
2025-10-27 17:25:41 +01:00
Pedro Alves
70773e442c fix(gpu): fix 128-bit compression benchmark 2025-10-27 17:06:45 +01:00
dependabot[bot]
7b797b8af9 chore(deps): bump actions/upload-artifact from 4.6.2 to 5.0.0
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.2 to 5.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](ea165f8d65...330a01c490)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-27 16:08:28 +01:00
dependabot[bot]
b6efb109aa chore(deps): bump actions/download-artifact from 5.0.0 to 6.0.0
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 5.0.0 to 6.0.0.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](634f93cb29...018cc2cf5b)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: 6.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-27 16:08:19 +01:00
David Testé
fd6323b311 chore(ci): add throughput and hpu support to data extractor
Now throughput results can be fetched.
The HPU backend is now supported for integer formatting.
2025-10-27 14:39:46 +01:00
Arthur Meyre
b02a3b16ff test: add rerand atomic pattern for noise checks
- make sure it works with KS32 parameters
2025-10-27 13:21:50 +01:00
Arthur Meyre
a95ee140f5 refactor: remove noise check function with PBS for sanity check
- it's a lot of code to "just" compute an additional PBS for the shortint
sanity checks, so run the function which gives the ms result and complete
the AP by running the PBS as shortint would; this gets rid of a big function
that was doing the same thing
2025-10-27 13:21:50 +01:00
Guillermo Oyarzun
62780ac500 fix(gpu): fix decompression mem leak 2025-10-24 13:02:41 +02:00
Thomas Montaigu
c10f1def70 fix: Tag propagation in XofKeySet 2025-10-24 10:40:22 +02:00
Mayeul@Zama
31a0136655 test(all): test multi bit decompression 2025-10-24 09:28:17 +02:00
Mayeul@Zama
859d5e4e1f chore: add backward multi bit decompression keys 2025-10-24 09:28:17 +02:00
Mayeul@Zama
92dcd38e30 chore: add decompression_grouping_factor to TestCompressionParameterSet 2025-10-24 09:28:17 +02:00
Mayeul@Zama
777bbe437a feat(shortint): add multi bit decompression 2025-10-24 09:28:17 +02:00
Mayeul@Zama
3842032f08 chore(shortint): fix unused function 2025-10-24 09:28:17 +02:00
Arthur Meyre
23246f63f7 chore: update fast_dedup opset to match the latency benchmarks in the docs
- signed bench update
2025-10-23 10:42:19 +02:00
Arthur Meyre
11c79b5237 chore: update fast_dedup opset to match the latency benchmarks in the docs 2025-10-23 10:42:19 +02:00
Nicolas Sarlin
a694e08ddc fix(core): par_encrypt_and_prove was using sequential encryption 2025-10-23 10:08:06 +02:00
Guillermo Oyarzun
e12638dabe feat(gpu): extend specialized version to classical pbs 2025-10-22 09:20:40 +02:00
pgardratzama
79f1d22573 fix(hpu): scalar rot & shift were not doing anything and not tested in test/hpu.rs 2025-10-21 13:29:59 +02:00
pgardratzama
f9c89212ea fix(hpu): display name on shift looked wrong 2025-10-21 13:29:59 +02:00
pgardratzama
b918f77859 chore(hpu): add force_reload option in v80 config, remove added line in sim config 2025-10-21 13:29:59 +02:00
Helder Campos
054c5028a1 feat(hpu): Added the option to forcefully reload the HPU 2025-10-21 13:29:59 +02:00
Helder Campos
7b621e57b0 feat(hpu): LLT ROT/SHIFT IOPs 2025-10-21 13:29:59 +02:00
Agnes Leroy
b4b6275ca5 chore(gpu): remove device synchronize in drop for cudavec 2025-10-21 11:33:46 +02:00
Agnes Leroy
42644349ef chore(gpu): remove remaining async functions from the integer gpu api 2025-10-20 16:19:19 +02:00
Thomas Montaigu
20b7b06ffb chore: add check_fmt_js to pcc_batch 2025-10-20 14:37:36 +02:00
Thomas Montaigu
39fbc20360 fix(js): catch undefined variant using Option<>
In the JS ShortintParametersName, users could
make a typo in the variant used, e.g.:
`ShortintParametersName.PARAM_MESSAGE_2_CARRY128`

In JS this returns `undefined`, which is then later cast to an
int and becomes 0, leading it to match the first variant

We modify the input to receive an `Option<ShortintParametersName>`
as it seems to allow us to catch the `undefined` and return a proper
error (sketched below)
2025-10-20 14:37:36 +02:00
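A minimal wasm-bindgen sketch of this pattern (editor's illustration; the enum variants and function name are placeholders, not the actual tfhe-rs bindings): taking `Option<Enum>` lets a JS `undefined` arrive as `None` instead of being cast to 0.

```rust
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
#[derive(Clone, Copy)]
pub enum ShortintParametersName {
    ParamMessage2Carry2 = 0,
    ParamMessage4Carry4 = 1,
}

#[wasm_bindgen]
pub fn parameters_from_name(
    name: Option<ShortintParametersName>,
) -> Result<u32, JsError> {
    // A JS typo such as `ShortintParametersName.PARAM_MESSAGE_2_CARRY128`
    // reaches Rust as `None` instead of silently matching the first variant.
    let name = name.ok_or_else(|| JsError::new("unknown ShortintParametersName variant"))?;
    Ok(name as u32)
}
```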
dependabot[bot]
c3ae852aa2 chore(deps): bump peter-evans/create-or-update-comment
Bumps [peter-evans/create-or-update-comment](https://github.com/peter-evans/create-or-update-comment) from 4.0.0 to 5.0.0.
- [Release notes](https://github.com/peter-evans/create-or-update-comment/releases)
- [Commits](71345be026...e8674b0752)

---
updated-dependencies:
- dependency-name: peter-evans/create-or-update-comment
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-20 14:18:08 +02:00
Arthur Meyre
4a89792579 chore: use pull request permissions to be able to post comments
- the "print URL" comment failed, while the "bench failed" comment worked
difference is the former has an issues write permissions, and the latter
a pull request write permissions
2025-10-20 13:45:39 +02:00
Arthur Meyre
205b767fc1 chore: fix various target issues for benchmarks following renames
- renames were done to uniformize names and make it easier to set up perf
regression measurements; some names were not updated and this PR fixes that
2025-10-20 13:45:27 +02:00
Beka Barbakadze
39862c2861 fix(gpu): fix bug in are_all_comparison_blocks_true when number of blocks is 0 2025-10-20 13:26:50 +02:00
Thomas Montaigu
eed5a6c5ba chore(bench): add grep check for trivial in benches 2025-10-20 12:26:44 +02:00
Thomas Montaigu
0dd0ead4e2 chore(bench): remove trivial encryptions
It makes the benches inaccurate
2025-10-20 12:26:44 +02:00
dependabot[bot]
5d5e9d47e9 chore(deps): bump actions/setup-node from 5.0.0 to 6.0.0
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 5.0.0 to 6.0.0.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](https://github.com/actions/setup-node/compare/v5...2028fbc5c25fe9cf00d9f06a71cc4710d4507903)

---
updated-dependencies:
- dependency-name: actions/setup-node
  dependency-version: 6.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-20 12:14:50 +02:00
dependabot[bot]
45b7491726 chore(deps): bump rust-lang/crates-io-auth-action from 1.0.1 to 1.0.2
Bumps [rust-lang/crates-io-auth-action](https://github.com/rust-lang/crates-io-auth-action) from 1.0.1 to 1.0.2.
- [Release notes](https://github.com/rust-lang/crates-io-auth-action/releases)
- [Commits](e919bc7605...041cce5b4b)

---
updated-dependencies:
- dependency-name: rust-lang/crates-io-auth-action
  dependency-version: 1.0.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-20 12:14:42 +02:00
dependabot[bot]
f84a4275ef chore(deps): bump actions/checkout from 4.2.2 to 5.0.0
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4.2.2...08c6903cd8c0fde910a37f88322edcfb5dd907a8)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-20 12:12:50 +02:00
Agnes Leroy
34ffbadc72 chore(gpu): remove async from div, even odd, ilog2 2025-10-20 11:34:37 +02:00
Agnes Leroy
4322214d8f chore(gpu): remove async bitop cmux comparisons neg 2025-10-20 11:34:37 +02:00
David Testé
cf20d73a5f chore(ci): show action run url on custom regression benchmark
When triggered via issue comment, a performance regression
benchmark doesn't appear in the check suite. To ease tracking,
a comment is created which displays the URL of the workflow run.
2025-10-20 09:18:17 +02:00
Agnes Leroy
c30835fc30 chore(gpu): remove async entry points for abs, add, sub, aes 2025-10-17 15:42:06 +02:00
David Testé
70b0c0ff19 chore(ci): echo post-commit checks sub-recipe names
This is done to improve readability in case of recipe failure.
2025-10-17 15:30:19 +02:00
David Testé
206553e9ee chore(ci): check for performance regression and create report
After the performance regression benchmarks run, a check for
performance changes is executed. It fetches the results data with an
external tool, then looks for anomalies in the changes.
Finally it produces a report as an issue comment, with any
anomalies displayed in a Markdown table. A folded section of the
report message contains all the results from the benchmark.

Note that a fully custom benchmark triggered from an issue comment
would not generate a report. In addition, HPU performance
regression benchmarks are not supported yet.
2025-10-17 15:05:24 +02:00
Agnes Leroy
f78bea23be chore(gpu): remove async functions in radix mod.rs 2025-10-17 13:22:05 +02:00
Thomas Montaigu
106b46be7c chore(docs): add KVStore docs 2025-10-17 13:05:52 +02:00
Nicolas Sarlin
2cdc804670 chore(backward): backward compat data targeted generation 2025-10-17 12:43:13 +02:00
David Testé
23d7e0d844 chore(ci): use trusted publishing for npm packages 2025-10-17 12:06:34 +02:00
David Testé
0e1082f465 chore(docs): update benchmark results for all backends
This also removes tables in PBS benchmarks for failure probability
of 2**-40.
2025-10-17 09:49:47 +02:00
Guillermo Oyarzun
c22e63895e fix(gpu): fix multi-gpu throughput benches with classical pbs 2025-10-16 17:55:10 +02:00
Arthur Meyre
375a4f80ae docs: add ReRand documentation 2025-10-16 16:50:19 +02:00
Arthur Meyre
21b6863c5d chore: update dedup tool to be smarter when finding already aliased params
- keep the alias if found
- update the imports if all items are found to be aliases
2025-10-16 15:23:36 +02:00
Arthur Meyre
20a91337c1 chore: prepare v1.5 2025-10-16 15:23:36 +02:00
Arthur Meyre
a8520a2e22 chore: make main compile with 2024 edition dependencies
- resolver 3 makes sure that incompatible dependencies (rust version wise)
are not selected
- fix a new lint
2025-10-16 15:04:37 +02:00
Agnes Leroy
c8db338376 chore(gpu): remove use of duplicate async in hl api and non async integer ops 2025-10-16 14:30:57 +02:00
Nicolas Sarlin
e849394ea7 chore(tfhe): remove tuniform example 2025-10-16 09:48:24 +02:00
Enzo Di Maria
126e779533 refactor(gpu): oprf_unsigned_custom_range + tests 2025-10-16 09:31:01 +02:00
Enzo Di Maria
353237c0d6 refactor(gpu): oprf_unsigned_custom_range 2025-10-16 09:31:01 +02:00
Agnes Leroy
7bad509f9a fix(gpu): fix perf regression introduced in cf3f25efdd 2025-10-16 09:21:05 +02:00
yuxizama
c99bc6d97f chore(docs): update doc designs 2025-10-15 17:03:20 +02:00
Andrei Stoian
a84cf4ed21 fix(gpu): coprocessor install workflow 2025-10-15 15:38:30 +02:00
Agnes Leroy
ab40df4b7f chore(gpu): change coprocessor gpu bench name to match other names 2025-10-15 14:38:53 +02:00
Thomas Montaigu
3b9eb360c1 chore(backward): regenerate KVStore backward data
This is because, now that the KVStore uses a BTreeMap,
which is a sorted collection, the serialization of the data
is deterministic
2025-10-14 17:04:13 +02:00
Thomas Montaigu
498b0e6e5c refactor: use BTreeMap as internals of KVStore
This is to make the order of the key and value lists
deterministic when compressing (illustrated below)
2025-10-14 17:04:13 +02:00
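A quick std-only illustration of why this works (editor's sketch, independent of tfhe-rs): a BTreeMap always iterates in sorted key order, so any key/value lists derived from it are identical across runs, unlike with a HashMap.

```rust
use std::collections::BTreeMap;

fn main() {
    let mut store = BTreeMap::new();
    store.insert(42u64, "b");
    store.insert(7u64, "a");

    // Iteration order over a BTreeMap is the sorted key order, so any
    // serialization derived from it is deterministic.
    let keys: Vec<u64> = store.keys().copied().collect();
    assert_eq!(keys, vec![7, 42]);
}
```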
Arthur Meyre
a9d0b9a3fb chore: fix cmux docstrings
- mismatched text/rust blocks broke the docs on docs.rs
- should we remove all ```rust given it is implied by ``` ?
2025-10-14 10:25:42 +02:00
Agnes Leroy
cf3f25efdd chore(gpu): add missing syncs in linearalgebra functions and aes 2025-10-14 09:23:11 +02:00
Agnes Leroy
c3ed1a7558 chore(gpu): internal renaming 2025-10-14 09:23:11 +02:00
Agnes Leroy
6347f25668 chore(gpu): synchronize after every release 2025-10-14 09:23:11 +02:00
Arthur Meyre
58ae2f5359 chore: don't import deprecated GenericArray use the aes crate Block instead
- allow deprecated methods for now since aes 0.9 is not out yet
2025-10-13 13:20:34 +02:00
dependabot[bot]
9ea5c04be6 chore(deps): bump foundry-rs/foundry-toolchain from 1.4.0 to 1.5.0
Bumps [foundry-rs/foundry-toolchain](https://github.com/foundry-rs/foundry-toolchain) from 1.4.0 to 1.5.0.
- [Release notes](https://github.com/foundry-rs/foundry-toolchain/releases)
- [Changelog](https://github.com/foundry-rs/foundry-toolchain/blob/master/RELEASE.md)
- [Commits](82dee4ba65...50d5a8956f)

---
updated-dependencies:
- dependency-name: foundry-rs/foundry-toolchain
  dependency-version: 1.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-13 12:29:29 +02:00
Enzo Di Maria
79fdb33632 refactor(gpu): tests and long run tests for oprf 2025-10-10 17:32:34 +02:00
Agnes Leroy
91b263d480 chore(gpu): split integer utilities file 2025-10-10 14:49:02 +02:00
Thomas Montaigu
41a41278e6 chore(docs): fix docs for docs.rs
doc_auto_cfg is no longer available in nightly >= 1.92

This prevents the docs from being built on docs.rs, as docs.rs
uses the latest nightly

This commit also makes the `make doc` target use the latest
nightly so that we can catch these errors
2025-10-10 13:07:30 +02:00
Andrei Stoian
30938eec74 chore(gpu): use active streams in int_radix_lut 2025-10-09 21:59:15 +02:00
Nicolas Sarlin
516789bd5d chore(backward): add data for ks32 noise squashing server key 2025-10-09 14:03:21 +02:00
dependabot[bot]
027792d659 chore(deps): bump docker/login-action from 3.5.0 to 3.6.0
Bumps [docker/login-action](https://github.com/docker/login-action) from 3.5.0 to 3.6.0.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](184bdaa072...5e57cd1181)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-version: 3.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-08 13:24:30 +02:00
dependabot[bot]
1ed9d6a85e chore(deps): bump actions/stale from 10.0.0 to 10.1.0
Bumps [actions/stale](https://github.com/actions/stale) from 10.0.0 to 10.1.0.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](3a9db7e6a4...5f858e3efb)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-version: 10.1.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-10-08 13:24:21 +02:00
Thomas Montaigu
126138a59d chore: only run KVStore benches on CPU
As it's the only backend that supports it
2025-10-08 11:52:14 +02:00
Nicolas Sarlin
241685fccc chore(backward): add data for ks32 client key, server key and ct 2025-10-08 10:27:06 +02:00
Thomas Montaigu
e739f43ec5 chore: add CompressedKVStore backward compat tests 2025-10-07 16:36:36 +02:00
pgardratzama
3073d60f11 fix(hpu): work-around a criterion assert by reducing number of elements on division & modulus throughput bench 2025-10-07 14:23:07 +02:00
Himess
a05d228899 docs(wasm): remove obsolete TODO in CompactPkeCrs::deserialize 2025-10-07 11:24:37 +02:00
Arthur Meyre
63055d5ca8 test: add KS32 compatibility for the dp_ks_pbs128_packingks AP 2025-10-07 10:22:38 +02:00
Arthur Meyre
46a3008739 test: add KS32 compatibility for the dp_ks_ms AP 2025-10-07 10:22:38 +02:00
Arthur Meyre
f2674da031 test: add KS32 compatibility for the br_dp_ks_ms AP 2025-10-07 10:22:38 +02:00
Arthur Meyre
12c2a2a8b7 feat: make FheUint/FheInt/FheBool compatible with AP params for conformance
- update From impl for conformance parameters to manage the AP params
2025-10-07 10:22:11 +02:00
pgardratzama
b61dd21ef7 fix(hpu): HPU HLAPI ERC20 bench was missing pbs-stats feature 2025-10-07 10:14:43 +02:00
pgardratzama
ca4159f123 fix(hpu): fix overflow flag of OVF_MUL & OVF_MULS, also update simulation HPU config 2025-10-07 10:14:43 +02:00
pgardratzama
ab25919187 fix(hpu): throughput benchmarks were done 1 IOp per 1 IOp... 2025-10-07 10:14:43 +02:00
pgardratzama
1b38f8ccfc fix(hpu): fix expected value of ilog2 & modulus operation 2025-10-07 10:14:43 +02:00
Nicolas Sarlin
6a676551d8 chore(shortint): add metaparams for ks32 2025-10-07 09:51:09 +02:00
Thomas Montaigu
afb79a0b1c chore(hlapi): export CompressedKVStore
Without this, users cannot use the CompressedKVStore type
that is required for KVStore deserialization
2025-10-07 09:07:28 +02:00
Thomas Montaigu
0277403c45 feat: add From<MetaParameters> for Config
This conversion is important to make the use
of meta parameters practical for high-level API
users (see the sketch below)
2025-10-06 16:47:13 +02:00
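A minimal sketch of the resulting ergonomics (editor's illustration with placeholder types; the real types live in tfhe-rs):

```rust
struct MetaParameters; // stand-in for the real shortint meta parameters
struct Config;         // stand-in for tfhe's high-level Config

impl From<MetaParameters> for Config {
    fn from(_meta: MetaParameters) -> Self {
        // The real impl would pick up the compute/compression/... parameters
        // bundled inside the meta parameters.
        Config
    }
}

fn main() {
    // High-level API users can now convert directly:
    let _config: Config = MetaParameters.into();
}
```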
Thomas Montaigu
18159d6458 chore(MetaParameters)!: move re-rand ksk params in
Re-Randomization is something that requires a
dedicated public key.

Thus we move the parameters of the KSK
into the struct for dedicated PKE parameters

BREAKING CHANGE: This is a breaking change relative to the latest alpha
release. But MetaParameters did not seem to be used directly in
fhevm/kms
2025-10-06 16:47:13 +02:00
Nicolas Sarlin
728409aef8 chore(hl): add cpk tests for ks32 2025-10-06 13:59:15 +02:00
Thomas Montaigu
034f3b3c25 feat(xofkeyset): add ks32 support 2025-10-06 13:59:15 +02:00
Nicolas Sarlin
c30e9c39f6 feat(shortint): add compact pke for the ks32 atomic pattern 2025-10-06 13:59:15 +02:00
Arthur Meyre
1513c3bc8c chore: bump TFHE-rs to 1.4.0 2025-10-06 13:26:54 +02:00
Arthur Meyre
e07f07c4c8 chore: bump tfhe-cuda-backend to 0.12.0 2025-10-06 13:26:54 +02:00
Arthur Meyre
81cc0c31b4 chore: constrain bytemuck < 1.24.0 as we don't have avx512 updated code 2025-10-06 13:24:16 +02:00
tmontaigu
c95e38e26f feat(hlapi): add flip operation 2025-10-06 11:07:12 +02:00
Enzo Di Maria
f0f3dd76eb feat(gpu): aes 128 2025-10-06 09:31:36 +02:00
Andrei Stoian
0604d237eb chore(gpu): multi-gpu debug target 2025-10-03 16:48:42 +02:00
Thomas Montaigu
e523fd2cb6 feat: add KVStore to the high level api
* Added the Value type name to the crate::integer::KVStore impl of the
  Named trait, as well as a bool to check that we deserialize the correct
  value type (Radix vs SignedRadix)
* Add KVStore to high_level_api
* Add KVStore hlapi benches
* Remove specialized `[add,mul,sub]_to_slot` as `map` is now the
  intended API (see the clear-data sketch below).
    - mul_to_slot was way slower than using `map`
    - add/mul_to_slot were a bit faster (~5% latency-wise), but returned
      less information (no old_value, no new_value, no boolean to check
      if the key matched)
    - Some known improvements can be made to map, which should result in
      it being better than add/sub_to_slot
* Add FheIntegerType trait to make the KVStore generic over
  FheUint/FheInt, which should make GPU integration "easy"
2025-10-03 15:01:23 +02:00
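A clear-data analogue of the `map`-based update (editor's sketch; the real KVStore operates on encrypted keys and Radix values, but per the commit message it returns the same kind of information):

```rust
use std::collections::BTreeMap;

// old_value, new_value, and whether any key matched — the information the
// commit says `map` returns and `add/mul/sub_to_slot` did not.
fn map_slot(
    store: &mut BTreeMap<u64, u64>,
    key: u64,
    f: impl Fn(u64) -> u64,
) -> (Option<u64>, Option<u64>, bool) {
    match store.get_mut(&key) {
        Some(v) => {
            let old = *v;
            *v = f(old);
            (Some(old), Some(*v), true)
        }
        None => (None, None, false),
    }
}

fn main() {
    let mut store = BTreeMap::from([(1u64, 10u64)]);
    let (old, new, matched) = map_slot(&mut store, 1, |v| v + 5);
    assert_eq!((old, new, matched), (Some(10), Some(15), true));
}
```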
David Testé
33dee7673c chore(ci): enable multi-bit parameters for core_crypto benchmarks
Classical, multi-bit, or both parameter types can now be run in
CPU core_crypto benchmarks at the user's discretion.
2025-10-03 12:17:42 +02:00
Mayeul@Zama
9b5596ca66 feat(integer): add oprf over any range 2025-10-03 10:00:18 +02:00
Nicolas Sarlin
aefec1fe64 feat(shortint): add ct compression for the ks32 atomic pattern 2025-10-03 09:59:50 +02:00
Agnes Leroy
f9e876730a chore(gpu): remove support for drift noise reduction 2025-10-03 09:45:20 +02:00
pgardratzama
f3cddb5635 chore(hpu): force benches to run on specific board 2025-10-02 13:20:36 +02:00
pgardratzama
a395cfe9bf chore(hpu): new runners missing in actionlint whitelist 2025-10-02 13:20:36 +02:00
pgardratzama
602c6faf8a chore(hpu): update hpu-backend dependencies, fix pcc 2025-10-02 13:20:36 +02:00
pgardratzama
563502a6a6 chore(hpu): update tfhe-hpu-backend version, readme and run-on-hpu doc 2025-10-02 13:20:36 +02:00
pgardratzama
5f30569452 fix(hpu): update AMC firmware in bitstream to lower polling period 2025-10-02 13:20:36 +02:00
pgardratzama
39b81a8ded feat(hpu): move to new bitstream at 400Mhz with GRAM_NB 3
- update SIMD_N and min_batch_size to 12 which seems to give better
  latency and ERC20 throughput
- support IOp on several lines in ami /proc file
- reduce amount of ERC_20_SIMD per batch in HLAPI bench
2025-10-02 13:20:36 +02:00
pgardratzama
da223b36b6 fix(hpu): reduce polling period of backend on iop ack file from 10 to 2us 2025-10-02 13:20:36 +02:00
JJ-hw
db16276715 chore(hpu): Remove all references to U55C, which is not supported anymore. 2025-10-02 13:20:36 +02:00
pgardratzama
a59742f518 fix(hpu): uuid comparison is now done in lower case from both value (metadata, ami read) 2025-10-02 13:20:36 +02:00
pgardratzama
2bf595d0e2 fix(hpu): missing bench numbers for less_than & less_or_equal because lower != less 2025-10-02 13:20:36 +02:00
Nicolas Sarlin
fb2b1a13e7 chore(core): fix encryption of single lwe to use the noise generator
This is aligned with what is done for the list encryption
2025-10-02 10:36:12 +02:00
Arthur Meyre
9fdaa983e3 chore: fix october typos 2025-10-01 14:32:41 +02:00
Andrei Stoian
73de886c07 chore(gpu): make deterministic long run GPU test 2025-10-01 13:37:05 +02:00
Nicolas Sarlin
45a849ad36 feat(shortint): add noise squashing for ks32 2025-10-01 10:36:11 +02:00
Nicolas Sarlin
ef5b984448 docs(core): fix fft128 blind rotation doc 2025-10-01 10:36:11 +02:00
Thomas Montaigu
6abed1f228 chore: complete gpu meta params
Add noise-squashing params, noise-squashing compression and re-rand
2025-10-01 10:30:22 +02:00
Agnes Leroy
71b45c14da chore(gpu): refactor subset_first and subset 2025-09-30 12:21:39 +02:00
David Testé
4f5d711c4e chore(bench): add crs size in wasm zero-knowledge benchmark
Done to improve result display in Grafana.
2025-09-30 10:42:19 +02:00
Arthur Meyre
2602c9e1b3 fix(hlapi): clear rerand metadata once rerand is done 2025-09-29 18:17:35 +02:00
Arthur Meyre
06dffc60bd chore: bump version to 1.4.0-alpha.3 2025-09-29 18:17:35 +02:00
Arthur Meyre
2a82076121 fix(shortint): accept trivial ciphertexts for rerand
- make sure to set their noise to NOMINAL once rerand is done
2025-09-29 17:51:44 +02:00
Beka Barbakadze
7549474aac feat(gpu): Implements optimized division algorithm for message_2_carry_2 when 4 or more GPUs are used 2025-09-29 15:16:34 +02:00
dependabot[bot]
4fcff55745 chore(deps): bump zgosalvez/github-actions-ensure-sha-pinned-actions
Bumps [zgosalvez/github-actions-ensure-sha-pinned-actions](https://github.com/zgosalvez/github-actions-ensure-sha-pinned-actions) from 3.0.25 to 4.0.0.
- [Release notes](https://github.com/zgosalvez/github-actions-ensure-sha-pinned-actions/releases)
- [Commits](fc87bb5b5a...9e9574ef04)

---
updated-dependencies:
- dependency-name: zgosalvez/github-actions-ensure-sha-pinned-actions
  dependency-version: 4.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-29 14:21:26 +02:00
dependabot[bot]
3d345a648b chore(deps): bump actions/cache from 4.2.4 to 4.3.0
Bumps [actions/cache](https://github.com/actions/cache) from 4.2.4 to 4.3.0.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](0400d5f644...0057852bfa)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-version: 4.3.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-29 14:21:17 +02:00
dependabot[bot]
3975e0115b chore(deps): bump JS-DevTools/npm-publish from 4.1.0 to 4.1.1
Bumps [JS-DevTools/npm-publish](https://github.com/js-devtools/npm-publish) from 4.1.0 to 4.1.1.
- [Release notes](https://github.com/js-devtools/npm-publish/releases)
- [Changelog](https://github.com/JS-DevTools/npm-publish/blob/main/CHANGELOG.md)
- [Commits](1fe17a0931...7f8fe47b3b)

---
updated-dependencies:
- dependency-name: JS-DevTools/npm-publish
  dependency-version: 4.1.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-29 14:21:08 +02:00
David Testé
6494a82fb3 chore(ci): split cargo builds into several jobs
Post-commit checks were the bottleneck regarding running duration.
They're now split into 7 batches to improve parallelism.
Builds that are specific to Ubuntu are run in their own jobs, so
that only the build_tfhe_full recipe call remains in the OS matrix.
A final check is performed to ensure all the checks have passed;
this very job is used as the branch protection rule.
2025-09-29 11:22:56 +02:00
David Testé
8aa60f8882 chore(ci): use large runners for windows and macos builds 2025-09-29 11:22:56 +02:00
Agnes Leroy
15cab8b413 chore(gpu): get decompress size on gpu without calling on_gpu 2025-09-29 11:00:18 +02:00
Agnes Leroy
23d46ba2bc fix(gpu): fix oprf output degree 2025-09-29 08:33:25 +02:00
Agnes Leroy
daf0e79e4a fix(gpu): fix get oprf size on gpu 2025-09-29 08:33:25 +02:00
Arthur Meyre
c5ad73865c chore: prepare alpha.2
- bump tfhe-cuda-backend to 0.12.0-alpha.2
- bump tfhe to 1.4.0-alpha.2
2025-09-27 11:35:27 +02:00
Agnes Leroy
9aab79e23a chore(gpu): fix compilation warning 2025-09-26 17:04:17 +02:00
Arthur Meyre
6ca48132e1 chore: bump TFHE-rs to 1.4.0-alpha.1 2025-09-26 15:08:09 +02:00
Agnes Leroy
f53c75636d chore(gpu): refactor oprf test, remove unused arg and fix multi-GPU for oprf 2025-09-26 13:19:34 +02:00
Arthur Meyre
ce63cabc05 chore: bump tfhe-cuda-backend to 0.12.0-alpha.1 2025-09-26 10:39:24 +02:00
dependabot[bot]
4fec2e17ae chore(deps): bump JS-DevTools/npm-publish from 3.1.1 to 4.0.1
Bumps [JS-DevTools/npm-publish](https://github.com/js-devtools/npm-publish) from 3.1.1 to 4.0.1.
- [Release notes](https://github.com/js-devtools/npm-publish/releases)
- [Changelog](https://github.com/JS-DevTools/npm-publish/blob/main/CHANGELOG.md)
- [Commits](19c28f1ef1...ad693561f8)

---
updated-dependencies:
- dependency-name: JS-DevTools/npm-publish
  dependency-version: 4.0.1
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:23:05 +02:00
dependabot[bot]
e87c36beb4 chore(deps): bump docker/login-action from 3.3.0 to 3.5.0
Bumps [docker/login-action](https://github.com/docker/login-action) from 3.3.0 to 3.5.0.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](https://github.com/docker/login-action/compare/v3.3.0...184bdaa0721073962dff0199f1fb9940f07167d1)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-version: 3.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:22:15 +02:00
dependabot[bot]
24c6ffc24a chore(deps): bump actions/checkout from 4.2.2 to 5.0.0
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4.2.2...08c6903cd8c0fde910a37f88322edcfb5dd907a8)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-25 18:22:07 +02:00
JJ-hw
3680f796af feat(hpu): Now the mockup takes into account the field position from the regmap toml to generate its register read and write answers. 2025-09-25 14:00:07 +02:00
JJ-hw
3ded3fe7c9 fix(hpu): (From hcampos-zama) Missing a field. 2025-09-25 14:00:07 +02:00
Nicolas Sarlin
451cfe3aba fix(core): removed sanity check for scalar size before zk pke encryption 2025-09-25 13:44:06 +02:00
David Testé
1e28cf7f3b chore(ci): use artifact for cuda backend to speed up publishing 2025-09-25 10:27:44 +02:00
Thomas Montaigu
91b62c737f chore(ci): increase timeout to 8hours for unsigned integer tests 2025-09-24 18:22:35 +02:00
Nicolas Sarlin
da12bb29d8 chore(core): fix typo in ms noise test comment 2025-09-24 17:20:05 +02:00
Arthur Meyre
d60028c47c chore: bump tfhe-cuda-backend to 0.12.0-alpha.0 2025-09-24 15:57:30 +02:00
Arthur Meyre
d5b5369a9a chore: bump tfhe-zk-pok to 0.7.3 2025-09-24 15:52:33 +02:00
David Testé
9457ca786c chore(ci): fix release workflows token permissions 2025-09-24 15:01:24 +02:00
Thomas Montaigu
8b5d7321fb chore: split up more xof key gen function 2025-09-24 14:08:13 +02:00
Thomas Montaigu
736185bb31 feat: make XofKeySet serializable 2025-09-24 14:08:13 +02:00
Thomas Montaigu
e4b230aaf1 chore(XofKetSet): generate mod switch key after BSK 2025-09-24 14:08:13 +02:00
Thomas Montaigu
7ed827808c feat: add noise squashing compression to xof keyset 2025-09-24 14:08:13 +02:00
Thomas Montaigu
6e7aaac90f feat: add re randomization key to XofKeySet 2025-09-24 14:08:13 +02:00
Thomas Montaigu
d1c190fac6 feat(hlapi): add XofKeySet
This adds a specialized struct that is able to generate keys for the
high_level_api in a way that is compatible with the NIST/MPC protocol

There are still things to be done in later commits:
- Backward compatibility
- NIST compliant ClientKey generation
2025-09-24 14:08:13 +02:00
Arthur Meyre
7e1c8f7db5 chore: make NoiseSimulationLwe/NoiseSimulationGlwe properly private
- this prevents submodules of the noise_simulation module from being able to
partially update an output
- switch the default value for Variance from NEG_INFINITY to NAN; NAN will fail
all comparisons and absorb all computations, which is a nice way to
propagate an undefined noise value in our case (see the demo below)
2025-09-24 10:42:39 +02:00
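A small demonstration of why NAN is the better "undefined" sentinel here (plain f64 semantics, nothing tfhe-specific):

```rust
fn main() {
    let undefined = f64::NAN;

    // NaN fails every comparison and absorbs arithmetic, so an undefined
    // variance keeps propagating instead of looking like a real value.
    assert!(!(undefined < 0.0) && !(undefined >= 0.0));
    assert!((undefined + 1.0).is_nan());

    // With NEG_INFINITY, a max() against any real value would hide the bug:
    assert_eq!(f64::NEG_INFINITY.max(3.0), 3.0);
}
```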
Arthur Meyre
d30c2060bf test: implemented noise simulation traits for shortint keys
- now manages mean reduction and shifted ms
2025-09-24 10:42:39 +02:00
Arthur Meyre
4ccd5ea262 chore: update noise formulas with latest automated code gen 2025-09-24 10:42:39 +02:00
Arthur Meyre
1ab3022df8 chore: update parameters with mean reduction technique
- parameters checked visually against 1.1 for those that differ; all seems ok
2025-09-24 10:42:39 +02:00
Arthur Meyre
a257849c66 chore: various README fixes
co-authored-by: Sumitds074 <sumitds074@gmail.com>
2025-09-23 21:04:27 +02:00
Arthur Meyre
0f4f8dd755 chore(versionable): bump version to 0.6.2 2025-09-23 21:03:30 +02:00
Nicolas Sarlin
aaaa929c2e chore(tfhe): prepare release 1.4.0-alpha.0 2025-09-23 16:35:42 +02:00
David Testé
d397ea3a39 chore(bench): handle ks32 atomic pattern in key size measurements 2025-09-23 12:01:33 +02:00
Arthur Meyre
3e25536021 test: add multi bit blind rotate traits
- given the nature of the mod switch, it seems easier to think in terms of
mod switch + blind rotate; the classic PBS might get updated in a similar
way, to be determined
2025-09-23 10:36:53 +02:00
Arthur Meyre
1c19851491 test: add multi bit modswitch noise simulation traits 2025-09-23 10:36:53 +02:00
Agnes Leroy
4b0623da4a chore(gpu): remove unused variable 2025-09-22 16:36:34 +02:00
Guillermo Oyarzun
d415d47894 chore(gpu): remove unnecessary nvtx lib dependency 2025-09-22 16:34:57 +02:00
Nicolas Sarlin
e22f9c09e3 chore(ci): fix audit workflow name 2025-09-22 15:31:55 +02:00
Nicolas Sarlin
4d02d3abb4 fix(hpu): clippy lint 2025-09-22 14:02:41 +02:00
Nicolas Sarlin
ae6f96e0ec chore(core): use a single rng for cpk encryption 2025-09-22 14:02:41 +02:00
Nicolas Sarlin
70e1828c58 chore(backward): add backward compat tests for rerand 2025-09-22 14:02:41 +02:00
Nicolas Sarlin
1b1e6a7068 chore(shortint): add rerand to the meta parameters 2025-09-22 14:02:41 +02:00
Nicolas Sarlin
fc447fd2d0 fix: backward compatibility tests with cache misses 2025-09-22 14:02:41 +02:00
Arthur Meyre
d5e5902f61 feat: add ciphertexts re-randomization 2025-09-22 14:02:41 +02:00
Thomas Montaigu
9f54777ee1 feat(integer): add KVStore compression and serialization 2025-09-22 09:39:59 +02:00
Nicolas Sarlin
4a73b7bb4b fix(versionable): use full type path in proc macro
This avoids name clashes if user re-defines the type
2025-09-19 16:03:56 +02:00
Guillermo Oyarzun
022cb3b18a fix(gpu): avoid out of memory when benchmarking throughput 2025-09-19 14:44:12 +02:00
David Testé
c4feabbfa3 chore(ci): revert package-lock.json 2025-09-19 09:30:15 +02:00
David Testé
3c6ed37a18 chore(ci): factorize release workflows by using a sub-workflow 2025-09-18 17:52:34 +02:00
Agnes Leroy
fe6e81ff78 chore(gpu): post hackathon cleanup 2025-09-18 16:30:45 +02:00
Andrei Stoian
87c0d646a4 fix(gpu): coprocessor bench 2025-09-18 13:56:55 +02:00
Agnes Leroy
e5b39a6d4d fix(gpu): fix memory leak in multi-gpu calculations 2025-09-18 13:55:03 +02:00
Arthur Meyre
27e2fbd972 chore: add implementation note for the NTT formula 2025-09-18 09:51:53 +02:00
Arthur Meyre
f54fbf52ce chore: bump tfhe-ntt version to 0.6.1 2025-09-18 09:51:53 +02:00
Arthur Meyre
2a0dfa5b17 fix(ntt): same update for 64 bits code 2025-09-18 09:51:53 +02:00
Arthur Meyre
a4841036b7 fix: make sure computations don't overflow for certain primes for 32 bits
- The original code seemed to assume that the Barrett reduction would not
overflow if p <= 2^31; this is incorrect, but overflow is rare
- The correctness constraint has a bound much smaller than 2^31; some
primes bigger than the derived threshold can still use the fast code
provided a certain criterion is respected, which corresponds to a "lucky"
case of the Barrett reduction, and the new code now manages this

The maths are explained in https://blog.zksecurity.xyz/posts/barrett-tighter-bound/
and copiously in comments in the code (a generic sketch follows below)
2025-09-18 09:51:53 +02:00
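A generic sketch of the reduction in question (editor's illustration of textbook Barrett reduction, not the tfhe-ntt code): the quotient estimate needs a 128-bit intermediate, and it can undershoot by a few multiples of p.

```rust
/// Reduce x (< p * p) modulo a prime p < 2^31, with mu = floor(2^64 / p)
/// precomputed once per prime.
fn barrett_reduce(x: u64, p: u64, mu: u64) -> u64 {
    debug_assert!(p < (1 << 31) && x < p * p);
    // Quotient estimate q ~= x / p; the product x * mu needs 128 bits.
    let q = ((x as u128 * mu as u128) >> 64) as u64;
    let mut r = x - q * p;
    // The estimate undershoots by at most a few multiples of p.
    while r >= p {
        r -= p;
    }
    r
}

fn main() {
    let p: u64 = (1 << 31) - 1;
    let mu = ((1u128 << 64) / p as u128) as u64;
    let x = (p - 1) * (p - 1); // largest product of two reduced values
    assert_eq!(barrett_reduce(x, p, mu), x % p);
}
```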
Andrei Stoian
1dcc3c8c89 chore(gpu): structure to encapsulate streams 2025-09-18 09:43:17 +02:00
Nicolas Sarlin
1a2643d1da fix(ci): use precise wasm-bindgen version for the cli 2025-09-17 13:17:57 +02:00
David Testé
bc257904e3 chore(ci): fix issue_comment trigger event for regression bench 2025-09-17 12:15:32 +02:00
Arthur Meyre
8982844a5b chore: adapt naming of traits to better match current scheme
- Standard -> Classic when referring to original PBS implementation
2025-09-17 10:32:40 +02:00
Arthur Meyre
e80d2548af fix: fix noise simulation modulus instantiation 2025-09-17 10:32:40 +02:00
Arthur Meyre
c0ab0a5752 chore: split noise simulation primitives in sub modules
- keep things easier to manage in terms of file size and content density
2025-09-17 10:32:40 +02:00
Arthur Meyre
f7bfe2f10c chore: uniformize noise check tools naming 2025-09-17 10:32:40 +02:00
Arthur Meyre
29c390d92c chore: reorg noise check tools 2025-09-17 10:32:40 +02:00
Pedro Alves
becd08db71 fix(gpu): fix an overflow that may happen when the user tries to allocate a huge amount of blocks 2025-09-16 16:17:32 -03:00
David Testé
ffd7470ef1 chore(ci): check if regression workflow should be run early
Before, any issue comment or label event would trigger the verify-actor job. Then the next job, prepare-benchmarks, would check whether the rest of the workflow should run. Moving this very check into verify-actor ensures the whole workflow runs only if required.
2025-09-16 20:55:54 +02:00
David Testé
a3750504c4 chore(ci): use dedicated token to sync repositories 2025-09-16 18:53:17 +02:00
David Testé
378c5ccb73 chore(ci): perform sync on push without third-party action
This is done to better handle git-lfs related changes when syncing
with another repository.
2025-09-16 16:29:44 +02:00
David Testé
4ba1787e12 chore(bench): add crs size in zk-pke benchmark names
This is done to get more details about the benchmarks when parsing
results.
2025-09-16 16:06:41 +02:00
David Testé
366d359441 chore(bench): measure ciphertext and key sizes at a large scale
Ciphertext sizes are measured at the HLAPI layer with several
parameter sets.
Key sizes are measured at the shortint level.
This benchmark now has its own dedicated GitHub workflow, which
runs, at least, on the 24th of each month.
2025-09-16 15:43:36 +02:00
dependabot[bot]
0ece9e684a chore(deps): bump tj-actions/changed-files from 46.0.5 to 47.0.0
Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 46.0.5 to 47.0.0.
- [Release notes](https://github.com/tj-actions/changed-files/releases)
- [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md)
- [Commits](ed68ef82c0...24d32ffd49)

---
updated-dependencies:
- dependency-name: tj-actions/changed-files
  dependency-version: 47.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-16 14:02:32 +02:00
David Testé
f8684d1f67 chore(ci): add regression benchmark workflow
Regression benchmarks are meant to be run in pull requests. They
can be launched in two flavors:
 * issue comment: using command like "/bench --backend cpu"
 * adding a label: `bench-perfs-cpu` or `bench-perfs-gpu`

Benchmark definitions are written in TOML and located at
ci/regression.toml.
While not exhaustive, it can be easily modified by reading the
embedded documentation.

"/bench" commands are parsed by a Python script located at
ci/perf_regression.py. This script produces output files that
contain cargo commands and a shell script generating custom
environment variables. The Python script and generated files are
meant to be used only by the workflow
benchmark_perf_regression.yml.
2025-09-16 13:33:49 +02:00
Nicolas Sarlin
b4066df77f chore(ci): run cargo audit 2025-09-16 12:03:32 +02:00
Pedro Alves
6b94872a00 fix(gpu): add an assert to be sure the carry part has correct size in expand 2025-09-15 12:57:11 -03:00
Nicolas Sarlin
d88caff6dd fix(ci): fix serde root crate in tfhe-lints 2025-09-15 15:18:58 +02:00
Thomas Montaigu
75a265f93b fix(integer): fix aggregate_one_hot_vector
`aggregate_one_hot_vector` was modified when the KVStore was
added, to support inputs where information in the blocks was not packed.
To detect whether blocks were packed, it relied on the degree value.

However, the inputs may come from LUTs that had a precise degree, which
could lead it to believe the inputs were not packed.

To fix this we split it into 2 functions:
* aggregate_one_hot_vector
* aggregate_and_unpack_one_hot_vector

and use the correct one when we know whether the inputs are packed
2025-09-15 10:27:24 +02:00
Nicolas Sarlin
bfbf638fed fix(zk): add a size check for the public key 2025-09-12 11:10:06 +02:00
David Testé
01651d6fb2 chore(ci): update lattice estimator version 2025-09-12 11:07:25 +02:00
Pedro Alves
b2624d1a76 chore(gpu): refactor the indexing logic for the LWE expand 2025-09-11 13:10:18 -03:00
tmontaigu
9fb7b56629 feat(integer): add KVStore
The KVStore is a hash table with homomorphic capabilities.

The keys are meant to be clear integers; the values are meant to be
Radix/SignedRadix ciphertexts.

The ServerKey now has functions to perform operations that modify
an existing key/value pair using an encrypted key.
2025-09-11 13:55:42 +02:00
Arthur Meyre
24feeb8609 chore(ci): avoid backward compat workflow cancel
- re-use formulas from the integer workflow which also executes on main
2025-09-11 10:43:23 +02:00
pgardratzama
757c2fc828 chore(hpu): make hpu integer bench fast by default 2025-09-10 22:24:31 +02:00
pgardratzama
4ff0d6cac2 feat(hpu): integer bench update (adds mod, div -> div_mod), erc20_simd simd batch size read from iop prototype 2025-09-10 22:24:31 +02:00
pgardratzama
1530f52c79 feat(hpu): adds support of ERC20 SIMD in hpu ERC20 bench 2025-09-10 22:24:31 +02:00
David Testé
9918dacd6a chore(ci): change workflow jobs naming convention
The term "bpr" means Branch Protection Rule. It helps one to
identify any job that must pass before being able to merge to the
base branch.
2025-09-10 15:36:45 +02:00
tmontaigu
2b503acf18 chore(shortint): add consts for MetaParameters 2025-09-10 15:15:06 +02:00
tmontaigu
57cc326a64 feat(shortint): MetaParameters struct
There are a lot of different parameter types in tfhe-rs, related to
different but linked features. Thus, when some PBS parameters are
selected, compatible compression parameters must be selected from the
possible parameters.

To make things easier, the MetaParameters struct is added; it stores
in one place parameters that can be used together (sketched below).
2025-09-10 15:15:06 +02:00
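A sketch of the idea (editor's illustration; the field and type names are placeholders, not the actual struct):

```rust
// Placeholders standing in for the real shortint parameter types.
struct PbsParams;
struct CompressionParams;
struct NoiseSquashingParams;

/// One bundle of parameters that are known to be mutually compatible,
/// so users pick a single MetaParameters instead of matching several
/// parameter sets by hand.
struct MetaParameters {
    compute: PbsParams,
    compression: Option<CompressionParams>,
    noise_squashing: Option<NoiseSquashingParams>,
}
```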
Arthur Meyre
84eb8aeb63 test(shortint): add BR + DP + KS + MS noise checks
- sanity check, noise measurement and pfail are done
2025-09-10 14:50:28 +02:00
Arthur Meyre
f09acfa581 chore: rename test files to remove redundant name fragment 2025-09-10 14:50:28 +02:00
Arthur Meyre
8335a6b6b5 chore(ci): run backward compat tests on merge to main
- this is to prime cache and check backward data on merge to main
2025-09-10 14:49:50 +02:00
tmontaigu
f80fd157ae fix(c-api): add missing safe_deser for ServerKey 2025-09-10 13:40:44 +02:00
Agnes Leroy
0ed97cfba8 chore(gpu): update sxm5 cost 2025-09-10 10:49:25 +02:00
Agnes Leroy
daee3f1850 chore(gpu): fix out of memory error in 4090 doc tests 2025-09-10 10:46:04 +02:00
tmontaigu
e8dc403ebd feat(integer): add flip operation
Add the flip(condition: BooleanBlock, a: T, b: T) -> (T, T)
operation that homomorphically flips/swaps two values if the
given encrypted boolean encrypts true (see the HLAPI-level
illustration below)
2025-09-10 09:44:28 +02:00
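The semantics can be pictured with the existing HLAPI `if_then_else` (editor's illustration; the new flip operation computes both outputs of this conditional swap in one go rather than via two separate selections):

```rust
use tfhe::prelude::*;
use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheBool, FheUint8};

fn main() {
    let (client_key, server_key) = generate_keys(ConfigBuilder::default().build());
    set_server_key(server_key);

    let a = FheUint8::encrypt(3u8, &client_key);
    let b = FheUint8::encrypt(7u8, &client_key);
    let swap = FheBool::encrypt(true, &client_key);

    // flip(swap, a, b) returns (b, a) when `swap` encrypts true and (a, b)
    // otherwise; spelled out here with two selections:
    let x = swap.if_then_else(&b, &a);
    let y = swap.if_then_else(&a, &b);

    let (x, y): (u8, u8) = (x.decrypt(&client_key), y.decrypt(&client_key));
    assert_eq!((x, y), (7, 3)); // swapped, because `swap` encrypts true
}
```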
Pedro Alves
63e5504c80 doc(gpu): add a section about noise squashing 2025-09-09 13:10:23 -03:00
Nicolas Sarlin
d664e4ada6 docs(safe_ser): document panics if max size is too large 2025-09-09 17:03:23 +02:00
Pedro Alves
c78cc2d2e9 chore(gpu): add a benchmark for 128-bit multi-bit noise squashing
- Also, remove the lut indexes concept from the 128-bit multi-bit PBS. The entire backend assumes it doesn't exist (as it doesn't for the classical PBS), so keeping it here would be a bit error-prone.
2025-09-09 07:51:35 -03:00
Pedro Alves
b566d78621 chore(gpu): improve the 128-bit multi-bit PBS core crypto test 2025-09-09 07:51:35 -03:00
Pedro Alves
7da6786d59 feat(gpu): add support to the 128-bit multi-bit PBS on HL's noise squashing 2025-09-09 07:51:35 -03:00
Himess
6edf6b9e26 chore: gate backward_compatibility_tests.rs with shortint feature 2025-09-09 09:35:59 +02:00
Himess
6fde90ad9c chore(clap): Replace use of deprecated attributes
Replace deprecated #[clap(...)] attributes with #[arg]/#[command] and remove redundant uses of value_parser
2025-09-09 09:35:59 +02:00
Agnes Leroy
5d70ae4232 fix(gpu): add missing broadcast lut 2025-09-09 08:47:53 +02:00
David Testé
89b36ebca0 chore(bench): remove 2-bits size for full precision bench on gpu
The GPU backend cannot accept fewer than 2 blocks for integer
benchmarks. Since 2-bit precision benchmarks are run with
*_MESSAGE_2_CARRY_2_* parameters, they create only one block of
ciphertext, making the benchmarks unsuitable for the GPU backend.
2025-09-08 12:24:24 +02:00
dependabot[bot]
bfc97385f4 chore(deps): bump actions/stale from 9.1.0 to 10.0.0
Bumps [actions/stale](https://github.com/actions/stale) from 9.1.0 to 10.0.0.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](5bef64f19d...3a9db7e6a4)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-version: 10.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-08 10:39:56 +02:00
dependabot[bot]
7ab763abba chore(deps): bump codecov/codecov-action from 5.5.0 to 5.5.1
Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 5.5.0 to 5.5.1.
- [Release notes](https://github.com/codecov/codecov-action/releases)
- [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md)
- [Commits](fdcc847654...5a1091511a)

---
updated-dependencies:
- dependency-name: codecov/codecov-action
  dependency-version: 5.5.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-08 10:39:41 +02:00
dependabot[bot]
a05db18ba3 chore(deps): bump actions/setup-node from 4.4.0 to 5.0.0
Bumps [actions/setup-node](https://github.com/actions/setup-node) from 4.4.0 to 5.0.0.
- [Release notes](https://github.com/actions/setup-node/releases)
- [Commits](49933ea528...a0853c2454)

---
updated-dependencies:
- dependency-name: actions/setup-node
  dependency-version: 5.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-08 10:39:27 +02:00
Guillermo Oyarzun
a3168eb1b5 feat(gpu): enable lut generation with preallocated buffers 2025-09-08 10:01:34 +02:00
Arthur Meyre
7fccb851d7 fix(csprng): harmonize behavior for UnixSeeder between small and big endian
- bytes are generated in a given order, and an endianness needs to be given
to the buffer for the generated number to make sense (see the demo below)
- Seed(pub u128) exposes that endianness, so it needs to be consistent for
outside users
2025-09-08 09:40:00 +02:00
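Minimal illustration of the concern (std Rust only): building the u128 behind Seed from generated bytes must pick an explicit byte order, so the same bytes mean the same seed on little- and big-endian targets.

```rust
fn main() {
    let bytes: [u8; 16] = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0];

    // Explicit byte order: the same generated bytes produce the same u128
    // seed regardless of the machine's native endianness.
    assert_eq!(u128::from_le_bytes(bytes), 1);

    // A native-endian interpretation would differ across platforms:
    // u128::from_ne_bytes(bytes) is 1 on little-endian targets only.
}
```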
Arthur Meyre
a78d5cc57b fix(csprng): make Seed interface less confusing wrt endianness
- From a user perspective, giving the same u128 seed, e.g. 1u128, should have
the same behavior no matter the endianness of the system
2025-09-08 09:40:00 +02:00
Nicolas Sarlin
9c0d078e1a chore(zk): bump tfhe-zk-pok to 0.7.2 2025-09-08 09:30:34 +02:00
Nicolas Sarlin
6016755f9d fix(js): bump wasm bindgen version 2025-09-05 17:55:33 +02:00
853 changed files with 72260 additions and 34401 deletions

.cargo/audit.toml (new file, 12 lines)

@@ -0,0 +1,12 @@
[advisories]
ignore = [
# Ignoring unmaintained 'paste' advisory as it is a widely used, low-risk build dependency.
"RUSTSEC-2024-0436",
]
[output]
# Deny advisories that are warnings by default.
# At the moment this works if we allow paste, we might want to disable this in the future if it
# becomes too tedious
deny = ["warnings"]
quiet = false


@@ -7,6 +7,8 @@ self-hosted-runner:
- large_ubuntu_16
- large_ubuntu_16-22.04
- v80-desktop
+- v80-marais
+- v80-couperin
# Configuration variables in array of strings defined in your repository or
# organization. `null` means disabling configuration variables check.
# Empty array means no configuration variable is allowed.


@@ -1,5 +1,5 @@
# Add labels in pull request
-name: PR label manager
+name: approve_label
on:
pull_request:
@@ -11,6 +11,7 @@ permissions: {}
jobs:
trigger-tests:
+name: approve_label/trigger-tests
runs-on: ubuntu-latest
permissions:
pull-requests: write


@@ -1,5 +1,5 @@
# Run backward compatibility tests
-name: Backward compatibility Tests on CPU
+name: aws_tfhe_backward_compat_tests
env:
CARGO_TERM_COLOR: always
@@ -22,13 +22,16 @@ on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
pull_request:
+push:
+branches:
+- main
permissions:
contents: read
jobs:
setup-instance:
-name: Setup instance (backward-compat-tests)
+name: aws_tfhe_backward_compat_tests/setup-instance
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
@@ -53,24 +56,19 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
backward-compat-tests:
-name: Backward compatibility tests
+name: aws_tfhe_backward_compat_tests/backward-compat-tests (bpr)
needs: [ setup-instance ]
concurrency:
-group: ${{ github.workflow_ref }}
-cancel-in-progress: true
+group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
+cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
-persist-credentials: 'false'
+persist-credentials: 'true' # Needed to pull lfs data
+token: ${{ env.CHECKOUT_TOKEN }}
-- name: Install latest stable
-uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
-with:
-toolchain: stable
# Cache key is an aggregated hash of lfs files hashes
- name: Get LFS data sha
id: hash-lfs-data
@@ -80,7 +78,7 @@ jobs:
- name: Retrieve data from cache
id: retrieve-data-cache
-uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
utils/tfhe-backward-compat-data/**/*.cbor
@@ -92,6 +90,16 @@ jobs:
run: |
make pull_backward_compat_data
+# Pull token was stored by action/checkout to be used by lfs, we don't need it anymore
+- name: Remove git credentials
+run: |
+git config --local --unset-all http.https://github.com/.extraheader
+- name: Install latest stable
+uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
+with:
+toolchain: stable
- name: Run backward compatibility tests
run: |
make test_backward_compatibility_ci
@@ -99,7 +107,7 @@ jobs:
- name: Store data in cache
if: steps.retrieve-data-cache.outputs.cache-hit != 'true'
continue-on-error: true
-uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
utils/tfhe-backward-compat-data/**/*.cbor
@@ -123,7 +131,7 @@ jobs:
SLACK_MESSAGE: "Backward compatibility tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
-name: Teardown instance (backward-compat-tests)
+name: aws_tfhe_backward_compat_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, backward-compat-tests ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Run a small subset of tests to ensure quick feedback.
-name: Fast AWS Tests on CPU
+name: aws_tfhe_fast_tests
env:
CARGO_TERM_COLOR: always
@@ -29,6 +29,7 @@ permissions:
jobs:
should-run:
+name: aws_tfhe_fast_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -68,7 +69,7 @@ jobs:
- name: Check for file changes
id: changed-files
-uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
dependencies:
@@ -132,7 +133,7 @@ jobs:
echo "any_changed=true" >> "$GITHUB_OUTPUT"
setup-instance:
-name: Setup instance (fast-tests)
+name: aws_tfhe_fast_tests/setup-instance
if: github.event_name == 'workflow_dispatch' ||
(github.event_name != 'workflow_dispatch' && needs.should-run.outputs.any_file_changed == 'true')
needs: should-run
@@ -216,7 +217,7 @@ jobs:
- name: Node cache restoration
id: node-cache
-uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
~/.nvm
@@ -229,7 +230,7 @@ jobs:
make install_node
- name: Node cache save
-uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
+uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |
@@ -288,7 +289,7 @@ jobs:
SLACK_MESSAGE: "Fast AWS tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
-name: Teardown instance (fast-tests)
+name: aws_tfhe_fast_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, fast-tests ]
runs-on: ubuntu-latest


@@ -1,4 +1,4 @@
-name: AWS Unsigned Integer Tests on CPU
+name: aws_tfhe_integer_tests
env:
CARGO_TERM_COLOR: always
@@ -35,6 +35,7 @@ permissions:
jobs:
should-run:
+name: aws_tfhe_integer_tests/should-run
if:
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'pull_request' && contains(github.event.label.name, 'approved')) ||
@@ -55,7 +56,7 @@ jobs:
- name: Check for file changes
id: changed-files
-uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
integer:
@@ -69,7 +70,7 @@ jobs:
- .github/workflows/aws_tfhe_integer_tests.yml
setup-instance:
-name: Setup instance (unsigned-integer-tests)
+name: aws_tfhe_integer_tests/setup-instance
needs: should-run
if:
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.integer_test == 'true') ||
@@ -100,12 +101,13 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
unsigned-integer-tests:
-name: Unsigned integer tests
+name: aws_tfhe_integer_tests/unsigned-integer-tests
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
+timeout-minutes: 480 # 8 hours
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
@@ -156,7 +158,7 @@ jobs:
SLACK_MESSAGE: "Unsigned Integer tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
-name: Teardown instance (unsigned-integer-tests)
+name: aws_tfhe_integer_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [setup-instance, unsigned-integer-tests]
runs-on: ubuntu-latest


@@ -1,4 +1,4 @@
-name: Run noise checks on CPU
+name: aws_tfhe_noise_checks
env:
CARGO_TERM_COLOR: always
@@ -25,7 +25,7 @@ permissions:
jobs:
setup-instance:
-name: Setup instance (noise-checks)
+name: aws_tfhe_noise_checks/setup-instance
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
@@ -52,7 +52,7 @@ jobs:
exit 1
noise-checks:
-name: CPU noise checks
+name: aws_tfhe_noise_checks/noise-checks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440
@@ -90,7 +90,7 @@ jobs:
SLACK_MESSAGE: "Noise checks tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
-name: Teardown instance (noise-checks)
+name: aws_tfhe_noise_checks/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, noise-checks ]
runs-on: ubuntu-latest


@@ -1,4 +1,4 @@
-name: AWS Signed Integer Tests on CPU
+name: aws_tfhe_signed_integer_tests
env:
CARGO_TERM_COLOR: always
@@ -35,6 +35,7 @@ permissions:
jobs:
should-run:
+name: aws_tfhe_signed_integer_tests/should-run
if:
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
@@ -56,7 +57,7 @@ jobs:
- name: Check for file changes
id: changed-files
-uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
+uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
integer:
@@ -70,7 +71,7 @@ jobs:
- .github/workflows/aws_tfhe_signed_integer_tests.yml
setup-instance:
-name: Setup instance (unsigned-integer-tests)
+name: aws_tfhe_signed_integer_tests/setup-instance
needs: should-run
if:
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.integer_test == 'true') ||
@@ -101,7 +102,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
signed-integer-tests:
-name: Signed integer tests
+name: aws_tfhe_signed_integer_tests/signed-integer-tests
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
@@ -161,7 +162,7 @@ jobs:
SLACK_MESSAGE: "Signed Integer tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
-name: Teardown instance (signed-integer-tests)
+name: aws_tfhe_signed_integer_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [setup-instance, signed-integer-tests]
runs-on: ubuntu-latest

View File

@@ -1,4 +1,4 @@
name: AWS Tests on CPU
name: aws_tfhe_tests
env:
CARGO_TERM_COLOR: always
@@ -32,6 +32,7 @@ permissions:
jobs:
should-run:
name: aws_tfhe_tests/should-run
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -77,7 +78,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
dependencies:
@@ -141,7 +142,7 @@ jobs:
echo "any_changed=true" >> "$GITHUB_OUTPUT"
setup-instance:
name: Setup instance (cpu-tests)
name: aws_tfhe_tests/setup-instance
if: github.event_name != 'pull_request' ||
(github.event.action == 'labeled' && github.event.label.name == 'approved' && needs.should-run.outputs.any_file_changed == 'true')
needs: should-run
@@ -169,7 +170,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cpu-tests:
name: CPU tests
name: aws_tfhe_tests/cpu-tests
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
needs: [ should-run, setup-instance ]
@@ -268,7 +269,7 @@ jobs:
SLACK_MESSAGE: "CPU tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cpu-tests)
name: aws_tfhe_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cpu-tests ]
runs-on: ubuntu-latest

View File

@@ -1,4 +1,4 @@
name: AWS WASM Tests on CPU
name: aws_tfhe_wasm_tests
env:
CARGO_TERM_COLOR: always
@@ -28,7 +28,7 @@ permissions:
jobs:
setup-instance:
name: Setup instance (wasm-tests)
name: aws_tfhe_wasm_tests/setup-instance
if: ${{ github.event_name == 'workflow_dispatch' || contains(github.event.label.name, 'approved') }}
runs-on: ubuntu-latest
outputs:
@@ -54,7 +54,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
wasm-tests:
name: WASM tests
name: aws_tfhe_wasm_tests/wasm-tests
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}
@@ -78,7 +78,7 @@ jobs:
- name: Node cache restoration
id: node-cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
~/.nvm
@@ -91,7 +91,7 @@ jobs:
make install_node
- name: Node cache save
uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |
@@ -137,7 +137,7 @@ jobs:
SLACK_MESSAGE: "WASM tests finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (wasm-tests)
name: aws_tfhe_wasm_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, wasm-tests ]
runs-on: ubuntu-latest

View File

@@ -1,5 +1,5 @@
# Run boolean benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Boolean benchmarks
name: benchmark_boolean
on:
workflow_dispatch:
@@ -23,7 +23,7 @@ permissions: {}
jobs:
setup-instance:
name: Setup instance (boolean-benchmarks)
name: benchmark_boolean/setup-instance
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -42,7 +42,7 @@ jobs:
profile: bench
boolean-benchmarks:
name: Execute boolean benchmarks in EC2
name: benchmark_boolean/boolean-benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -101,7 +101,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_boolean
path: ${{ env.RESULTS_FILENAME }}
@@ -132,7 +132,7 @@ jobs:
SLACK_MESSAGE: "Boolean benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (boolean-benchmarks)
name: benchmark_boolean/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, boolean-benchmarks ]
runs-on: ubuntu-latest

View File

@@ -1,8 +1,29 @@
# Run core crypto benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Core crypto benchmarks
name: benchmark_core_crypto
on:
workflow_dispatch:
inputs:
param_type:
description: "Parameters type"
type: choice
default: classical
options:
- classical
- multi_bit
- classical + multi_bit
- classical_documentation
- multi_bit_documentation
- classical_documentation + multi_bit_documentation
bench_type:
description: "Benchmarks type"
type: choice
default: latency
options:
- latency
- throughput
- both
schedule:
# Weekly benchmarks will be triggered each Saturday at 5 a.m.
- cron: '0 5 * * 6'
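With these inputs, a manual run can also be started from the CLI. A minimal sketch, assuming the file is .github/workflows/benchmark_core_crypto.yml (consistent with the workflow name above) and an authenticated gh CLI:

gh workflow run benchmark_core_crypto.yml \
  --repo zama-ai/tfhe-rs \
  -f param_type="classical + multi_bit" \
  -f bench_type=both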
@@ -22,8 +43,63 @@ env:
permissions: {}
jobs:
prepare-matrix:
name: benchmark_core_crypto/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
outputs:
param_type: ${{ steps.set_param_type.outputs.param_type }}
bench_type: ${{ steps.set_bench_type.outputs.bench_type }}
steps:
- name: Set parameters types
if: github.event_name == 'workflow_dispatch'
run: |
if [[ "${INPUTS_PARAM_TYPE}" == "classical + multi_bit" ]]; then
echo "PARAM_TYPE=[\"classical\", \"multi_bit\"]" >> "${GITHUB_ENV}"
elif [[ "${INPUTS_PARAM_TYPE}" == "classical_documentation + multi_bit_documentation" ]]; then
echo "PARAM_TYPE=[\"classical_documentation\", \"multi_bit_documentation\"]" >> "${GITHUB_ENV}"
else
echo "PARAM_TYPE=[\"${INPUTS_PARAM_TYPE}\"]" >> "${GITHUB_ENV}"
fi
env:
INPUTS_PARAM_TYPE: ${{ inputs.param_type }}
- name: Default parameters type
if: github.event_name != 'workflow_dispatch'
run: |
echo "PARAM_TYPE=[\"classical\"]" >> "${GITHUB_ENV}"
- name: Set benchmark types
if: github.event_name == 'workflow_dispatch'
run: |
echo "OP_FLAVOR=[\"default\"]" >> "${GITHUB_ENV}"
if [[ "${INPUTS_BENCH_TYPE}" == "both" ]]; then
echo "BENCH_TYPE=[\"latency\", \"throughput\"]" >> "${GITHUB_ENV}"
else
echo "BENCH_TYPE=[\"${INPUTS_BENCH_TYPE}\"]" >> "${GITHUB_ENV}"
fi
env:
INPUTS_BENCH_TYPE: ${{ inputs.bench_type }}
- name: Default benchmark type
if: github.event_name != 'workflow_dispatch'
run: |
echo "BENCH_TYPE=[\"latency\"]" >> "${GITHUB_ENV}"
- name: Set parameters types output
id: set_param_type
run: | # zizmor: ignore[template-injection] this env variable is safe
echo "param_type=${{ toJSON(env.PARAM_TYPE) }}" >> "${GITHUB_OUTPUT}"
- name: Set benchmark types output
id: set_bench_type
run: | # zizmor: ignore[template-injection] this env variable is safe
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (core-crypto-benchmarks)
name: benchmark_core_crypto/setup-instance
needs: prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -42,12 +118,18 @@ jobs:
profile: bench
core-crypto-benchmarks:
name: Execute core crypto benchmarks in EC2
needs: setup-instance
name: benchmark_core_crypto/core-crypto-benchmarks
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow_ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
timeout-minutes: 1440 # 24 hours
strategy:
max-parallel: 1
matrix:
param_type: ${{ fromJSON(needs.prepare-matrix.outputs.param_type) }}
bench_type: ${{ fromJSON(needs.prepare-matrix.outputs.bench_type) }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
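The prepare-matrix job serializes the selected choices into JSON arrays, and the matrix above consumes them via fromJSON, yielding one job per (param_type, bench_type) pair, serialized by max-parallel: 1. A minimal sketch of the same mapping in plain bash (no Actions context assumed):

INPUTS_PARAM_TYPE="classical + multi_bit"
if [[ "${INPUTS_PARAM_TYPE}" == "classical + multi_bit" ]]; then
  PARAM_TYPE='["classical", "multi_bit"]'
else
  PARAM_TYPE="[\"${INPUTS_PARAM_TYPE}\"]"
fi
echo "${PARAM_TYPE}"  # -> ["classical", "multi_bit"]; fromJSON turns this into two matrix entries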
@@ -78,6 +160,9 @@ jobs:
make bench_pbs
make bench_pbs128
make bench_ks
env:
BENCH_PARAM_TYPE: ${{ matrix.param_type }}
BENCH_TYPE: ${{ matrix.bench_type }}
- name: Parse results
run: |
@@ -94,9 +179,9 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_core_crypto
name: ${{ github.sha }}_core_crypto_${{ matrix.param_type }}_pbs
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
@@ -125,7 +210,7 @@ jobs:
SLACK_MESSAGE: "PBS benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (core-crypto-benchmarks)
name: benchmark_core_crypto/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, core-crypto-benchmarks ]
runs-on: ubuntu-latest

View File

@@ -0,0 +1,152 @@
# Run sizes benchmarks on an instance and return parsed results to Slab CI bot.
name: Ciphertext and key sizes benchmarks
on:
workflow_dispatch:
schedule:
# Monthly benchmarks will be triggered on the 24th of each month at 1 a.m.
- cron: '0 1 24 * *'
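# (Cron fields are: minute, hour, day-of-month, month, day-of-week. Keeping
# day-of-week as '*' matters here: in standard cron, restricting both day
# fields makes the job fire when either one matches, so '0 1 24 * 6' would
# also run every Saturday.)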
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
permissions: {}
jobs:
setup-instance:
name: Setup instance (sizes-benchmarks)
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: aws
profile: cpu-big
sizes-benchmarks:
name: Execute sizes client benchmarks
needs: setup-instance
if: needs.setup-instance.result != 'skipped'
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Get benchmark details
run: |
COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=${COMMIT_DATE}";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
env:
SHA: ${{ github.sha }}
- name: Install rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: nightly
- name: Measure public key and ciphertext sizes in HL Api
run: |
make measure_hlapi_compact_pk_ct_sizes
- name: Parse key and ciphertext sizes results
run: |
python3 ./ci/benchmark_parser.py tfhe-benchmark/hlapi_ct_key_sizes.csv "${RESULTS_FILENAME}" \
--database tfhe_rs \
--hardware "m6i.32xlarge" \
--project-version "${COMMIT_HASH}" \
--branch "${REF_NAME}" \
--commit-date "${COMMIT_DATE}" \
--bench-date "${BENCH_DATE}" \
--object-sizes
env:
REF_NAME: ${{ github.ref_name }}
- name: Measure key sizes in shortint
run: |
make measure_shortint_key_sizes
- name: Parse key sizes results
run: |
python3 ./ci/benchmark_parser.py tfhe-benchmark/shortint_key_sizes.csv "${RESULTS_FILENAME}" \
--object-sizes \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_ct_key_sizes
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
repository: zama-ai/slab
path: slab
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
--slab-url "${SLAB_URL}"
env:
JOB_SECRET: ${{ secrets.JOB_SECRET }}
SLAB_URL: ${{ secrets.SLAB_URL }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Sizes benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (sizes-benchmarks)
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, sizes-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (sizes-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

View File

@@ -1,5 +1,5 @@
# Run all DEX benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: DEX benchmarks
name: benchmark_dex
on:
workflow_dispatch:
@@ -22,7 +22,7 @@ permissions: {}
jobs:
setup-instance:
name: Setup instance (dex-benchmarks)
name: benchmark_dex/setup-instance
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -41,7 +41,7 @@ jobs:
profile: bench
dex-benchmarks:
name: Execute DEX benchmarks
name: benchmark_dex/dex-benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -123,7 +123,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_dex
path: ${{ env.RESULTS_FILENAME }}
@@ -146,7 +146,7 @@ jobs:
SLACK_MESSAGE: "DEX benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (dex-benchmarks)
name: benchmark_dex/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, dex-benchmarks ]
runs-on: ubuntu-latest

View File

@@ -1,5 +1,5 @@
# Run all ERC20 benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: ERC20 benchmarks
name: benchmark_erc20
on:
workflow_dispatch:
@@ -23,7 +23,7 @@ permissions: {}
jobs:
setup-instance:
name: Setup instance (erc20-benchmarks)
name: benchmark_erc20/setup-instance
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -42,7 +42,7 @@ jobs:
profile: bench
erc20-benchmarks:
name: Execute ERC20 benchmarks
name: benchmark_erc20/erc20-benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -106,7 +106,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_erc20
path: ${{ env.RESULTS_FILENAME }}
@@ -129,7 +129,7 @@ jobs:
SLACK_MESSAGE: "ERC20 benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (erc20-benchmarks)
name: benchmark_erc20/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, erc20-benchmarks ]
runs-on: ubuntu-latest

View File

@@ -1,5 +1,5 @@
# Run CUDA benchmarks on a Hyperstack VM and return parsed results to Slab CI bot.
name: Cuda benchmarks
name: benchmark_gpu
on:
workflow_dispatch:
@@ -59,13 +59,17 @@ on:
options:
- classical
- multi_bit
- both
- classical + multi_bit
- classical_documentation
- multi_bit_documentation
- classical_documentation + multi_bit_documentation
permissions: {}
jobs:
parse-inputs:
name: benchmark_gpu/parse-inputs
runs-on: ubuntu-latest
outputs:
profile: ${{ steps.parse_profile.outputs.profile }}
@@ -90,7 +94,7 @@ jobs:
echo "name=${NAME}" >> "${GITHUB_OUTPUT}"
run-benchmarks:
name: Run benchmarks
name: benchmark_gpu/run-benchmarks
needs: parse-inputs
uses: ./.github/workflows/benchmark_gpu_common.yml
with:

View File

@@ -1,5 +1,5 @@
# Run benchmarks on an RTX 4090 machine and return parsed results to Slab CI bot.
name: TFHE Cuda Backend - 4090 benchmarks
name: benchmark_gpu_4090
env:
CARGO_TERM_COLOR: always
@@ -27,7 +27,7 @@ permissions:
jobs:
cuda-integer-benchmarks:
name: Cuda integer benchmarks (RTX 4090)
name: benchmark_gpu_4090/cuda-integer-benchmarks
if: ${{ github.event_name == 'workflow_dispatch' ||
github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs' ||
contains(github.event.label.name, '4090_bench') }}
@@ -88,7 +88,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_integer_multi_bit_gpu_default
path: ${{ env.RESULTS_FILENAME }}
@@ -111,7 +111,7 @@ jobs:
SLACK_MESSAGE: "Integer RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
cuda-core-crypto-benchmarks:
name: Cuda core crypto benchmarks (RTX 4090)
name: benchmark_gpu_4090/cuda-core-crypto-benchmarks
if: ${{ github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' || contains(github.event.label.name, '4090_bench') }}
needs: cuda-integer-benchmarks
concurrency:
@@ -172,7 +172,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_core_crypto
path: ${{ env.RESULTS_FILENAME }}
@@ -195,7 +195,7 @@ jobs:
SLACK_MESSAGE: "Core crypto RTX 4090 full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
remove_github_label:
name: Remove 4090 bench label
name: benchmark_gpu_4090/remove_github_label
if: ${{ always() && github.event_name == 'pull_request' }}
needs: [cuda-integer-benchmarks, cuda-core-crypto-benchmarks]
runs-on: ubuntu-latest

View File

@@ -1,5 +1,5 @@
# Run benchmarks on CUDA instance and return parsed results to Slab CI bot.
name: Cuda benchmarks - common
name: benchmark_gpu_common
on:
workflow_call:
@@ -63,7 +63,7 @@ permissions: {}
jobs:
prepare-matrix:
name: Prepare operations matrix
name: benchmark_gpu_common/prepare-matrix
runs-on: ubuntu-latest
outputs:
command: ${{ steps.set_command.outputs.command }}
@@ -141,7 +141,7 @@ jobs:
echo "params_type=${{ toJSON(env.PARAMS_TYPE) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (cuda-${{ inputs.profile }}-benchmarks)
name: benchmark_gpu_common/setup-instance
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
@@ -185,7 +185,7 @@ jobs:
# Install dependencies only once since cuda-benchmarks uses a matrix strategy, thus running multiple times.
install-dependencies:
name: Install dependencies
name: benchmark_gpu_common/install-dependencies
needs: [ setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -210,7 +210,7 @@ jobs:
gcc-version: ${{ matrix.gcc }}
cuda-benchmarks:
name: Cuda benchmarks (${{ inputs.profile }})
name: benchmark_gpu_common/cuda-benchmarks
needs: [ prepare-matrix, setup-instance, install-dependencies ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
timeout-minutes: 1440 # 24 hours
@@ -306,7 +306,7 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ inputs.profile }}_${{ matrix.bench_type }}_${{ matrix.params_type }}
path: ${{ env.RESULTS_FILENAME }}
@@ -329,7 +329,7 @@ jobs:
SLAB_URL: ${{ secrets.SLAB_URL }}
slack-notify:
name: Slack Notification
name: benchmark_gpu_common/slack-notify
needs: [ setup-instance, cuda-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-benchmarks.result != 'skipped' && failure() }}
@@ -342,7 +342,7 @@ jobs:
SLACK_MESSAGE: "Cuda benchmarks (${{ inputs.profile }}) finished with status: ${{ needs.cuda-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-${{ inputs.profile }}-benchmarks)
name: benchmark_gpu_common/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-benchmarks, slack-notify ]
runs-on: ubuntu-latest

View File

@@ -1,8 +1,24 @@
# Run all fhevm coprocessor benchmarks on a GPU instance on Hyperstack and return parsed results to Slab CI bot.
name: Cuda Coprocessor benchmarks
name: benchmark_gpu_coprocessor
on:
workflow_dispatch:
inputs:
profile:
description: "Instance type"
required: true
type: choice
options:
- "l40 (n3-L40x1)"
- "4-l40 (n3-L40x4)"
- "single-h100 (n3-H100x1)"
- "2-h100 (n3-H100x2)"
- "4-h100 (n3-H100x4)"
- "multi-h100 (n3-H100x8)"
- "multi-h100-nvlink (n3-H100x8-NVLink)"
- "multi-h100-sxm5 (n3-H100x8-SXM5)"
- "multi-h100-sxm5_fallback (n3-H100x8-SXM5)"
schedule:
# Weekly tests @ 1AM
- cron: "0 1 * * 6"
@@ -17,7 +33,9 @@ env:
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
PROFILE: "multi-h100-sxm5 (n3-H100x8-SXM5)"
PROFILE_SCHEDULED_RUN: "multi-h100-sxm5 (n3-H100x8-SXM5)"
PROFILE_MANUAL_RUN: ${{ inputs.profile }}
IS_MANUAL_RUN: ${{ github.event_name == 'workflow_dispatch' }}
BENCHMARK_TYPE: "ALL"
OPTIMIZATION_TARGET: "throughput"
BATCH_SIZE: "5000"
@@ -29,7 +47,7 @@ env:
jobs:
parse-inputs:
name: coprocessor-benchmark-gpu/parse-inputs
name: benchmark_gpu_coprocessor/parse-inputs
runs-on: ubuntu-latest
permissions:
contents: 'read'
@@ -40,19 +58,29 @@ jobs:
- name: Parse profile
id: parse_profile
run: |
if [[ ${IS_MANUAL_RUN} == true ]]; then
PROFILE_RAW="${PROFILE_MANUAL_RUN}"
else
PROFILE_RAW="${PROFILE_SCHEDULED_RUN}"
fi
# shellcheck disable=SC2001
PROFILE_VAL=$(echo "${PROFILE}" | sed 's|\(.*\)[[:space:]](.*)|\1|')
PROFILE_VAL=$(echo "${PROFILE_RAW}" | sed 's|\(.*\)[[:space:]](.*)|\1|')
echo "profile=$PROFILE_VAL" >> "${GITHUB_OUTPUT}"
- name: Parse hardware name
id: parse_hardware_name
run: |
if [[ ${IS_MANUAL_RUN} == true ]]; then
PROFILE_RAW="${PROFILE_MANUAL_RUN}"
else
PROFILE_RAW="${PROFILE}"
fi
# shellcheck disable=SC2001
PROFILE_VAL=$(echo "${PROFILE}" | sed 's|.*[[:space:]](\(.*\))|\1|')
PROFILE_VAL=$(echo "${PROFILE_RAW}" | sed 's|.*[[:space:]](\(.*\))|\1|')
echo "name=$PROFILE_VAL" >> "${GITHUB_OUTPUT}"
setup-instance:
name: coprocessor-benchmark-gpu/setup-instance
name: benchmark_gpu_coprocessor/setup-instance
needs: parse-inputs
runs-on: ubuntu-latest
permissions:
@@ -71,8 +99,8 @@ jobs:
backend: hyperstack
profile: ${{ needs.parse-inputs.outputs.profile }}
benchmark:
name: coprocessor-benchmark-gpu/benchmark-gpu (bpr)
benchmark-gpu:
name: benchmark_gpu_coprocessor/benchmark-gpu (bpr)
needs: [ parse-inputs, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
continue-on-error: true
@@ -130,6 +158,13 @@ jobs:
} >> "${GITHUB_ENV}"
working-directory: tfhe-rs/
- name: Setup Hyperstack dependencies
uses: ./tfhe-rs/.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
gcc-version: ${{ matrix.gcc }}
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Check fhEVM and TFHE-rs repos
run: |
pwd
@@ -140,13 +175,6 @@ jobs:
run: git lfs checkout
working-directory: fhevm/
- name: Setup Hyperstack dependencies
uses: ./fhevm/.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
gcc-version: ${{ matrix.gcc }}
github-instance: ${{ env.SECRETS_AVAILABLE == 'false' }}
- name: Install rust
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
@@ -154,7 +182,7 @@ jobs:
- name: Install cargo dependencies
run: |
sudo apt-get install -y protobuf-compiler cmake pkg-config libssl-dev \
sudo apt-get install -y protobuf-compiler pkg-config libssl-dev \
libclang-dev docker-compose-v2 docker.io acl
sudo usermod -aG docker "$USER"
newgrp docker
@@ -162,10 +190,10 @@ jobs:
cargo install sqlx-cli
- name: Install foundry
uses: foundry-rs/foundry-toolchain@82dee4ba654bd2146511f85f0d013af94670c4de
uses: foundry-rs/foundry-toolchain@50d5a8956f2e319df19e6b57539d7e2acb9f8c1e
- name: Cache cargo
uses: actions/cache@0400d5f644dc74513175e3cd8d07132dd4860809 # v4.2.4
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: |
~/.cargo/registry
@@ -175,18 +203,25 @@ jobs:
restore-keys: ${{ runner.os }}-cargo-
- name: Login to GitHub Container Registry
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Chainguard Registry
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: cgr.dev
username: ${{ secrets.CGR_USERNAME }}
password: ${{ secrets.CGR_PASSWORD }}
- name: Init database
run: make init_db
working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker
- name: Use Node.js
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with:
node-version: 20.x
@@ -197,14 +232,19 @@ jobs:
ls
pwd
cp ./host-contracts/.env.example ./host-contracts/.env
npm --prefix ./host-contracts ci --include=optional
cd host-contracts && npm install && npm run deploy:emptyProxies && npx hardhat compile
cd ./host-contracts
npm ci --include=optional
npm install && npm run deploy:emptyProxies && npx hardhat compile
working-directory: fhevm/
- name: Profile erc20 no-cmux benchmark on GPU
run: |
BENCHMARK_BATCH_SIZE="${BATCH_SIZE}" FHEVM_DF_SCHEDULE="${SCHEDULING_POLICY}" BENCHMARK_TYPE="LATENCY" OPTIMIZATION_TARGET="${OPTIMIZATION_TARGET}" make -e "profile_erc20_gpu"
working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
BENCHMARK_BATCH_SIZE="${BATCH_SIZE}" \
FHEVM_DF_SCHEDULE="${SCHEDULING_POLICY}" \
BENCHMARK_TYPE="THROUGHPUT_200" \
OPTIMIZATION_TARGET="${OPTIMIZATION_TARGET}" \
make -e "profile_erc20_gpu"
working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker
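# Note on `make -e`: it makes environment variables override assignments made
# inside the Makefile, so e.g. a default `BENCHMARK_TYPE = LATENCY` in the
# Makefile is superseded by the BENCHMARK_TYPE exported on the command line.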
- name: Get nsys profile name
id: nsys_profile_name
@@ -215,25 +255,25 @@ jobs:
REPORT_NAME: ${{ steps.nsys_profile_name.outputs.profile }}
run: |
mv report1.nsys-rep ${{ env.REPORT_NAME }}
working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker
- name: Upload profile artifact
env:
REPORT_NAME: ${{ steps.nsys_profile_name.outputs.profile }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ env.REPORT_NAME }}
path: fhevm/coprocessor/fhevm-engine/coprocessor/${{ env.REPORT_NAME }}
path: fhevm/coprocessor/fhevm-engine/tfhe-worker/${{ env.REPORT_NAME }}
- name: Run latency benchmark on GPU
run: |
BENCHMARK_BATCH_SIZE="${BATCH_SIZE}" FHEVM_DF_SCHEDULE="${SCHEDULING_POLICY}" BENCHMARK_TYPE="LATENCY" OPTIMIZATION_TARGET="${OPTIMIZATION_TARGET}" make -e "benchmark_${BENCHMARKS}_gpu"
working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker
- name: Run throughput benchmarks on GPU
run: |
BENCHMARK_BATCH_SIZE="${BATCH_SIZE}" FHEVM_DF_SCHEDULE="${SCHEDULING_POLICY}" BENCHMARK_TYPE="THROUGHPUT_200" OPTIMIZATION_TARGET="${OPTIMIZATION_TARGET}" make -e "benchmark_${BENCHMARKS}_gpu"
working-directory: fhevm/coprocessor/fhevm-engine/coprocessor
working-directory: fhevm/coprocessor/fhevm-engine/tfhe-worker
- name: Parse results
run: |
@@ -246,12 +286,12 @@ jobs:
--commit-date "${COMMIT_DATE}" \
--bench-date "${BENCH_DATE}" \
--walk-subdirs \
--crate "coprocessor/fhevm-engine/coprocessor" \
--crate "coprocessor/fhevm-engine/tfhe-worker" \
--name-suffix "operation_batch_size_${BATCH_SIZE}-schedule_${SCHEDULING_POLICY}-optimization_target_${OPTIMIZATION_TARGET}"
working-directory: fhevm/
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ env.BENCHMARKS }}_${{ needs.parse-inputs.outputs.profile }}
path: fhevm/${{ env.RESULTS_FILENAME }}
@@ -273,9 +313,9 @@ jobs:
--slab-url "${SLAB_URL}"
teardown-instance:
name: coprocessor-benchmark-gpu/teardown
name: benchmark_gpu_coprocessor/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, benchmark ]
needs: [ setup-instance, benchmark-gpu ]
runs-on: ubuntu-latest
permissions:
contents: 'read'

View File

@@ -1,5 +1,5 @@
# Run CUDA DEX benchmarks on a Hyperstack VM and return parsed results to Slab CI bot.
name: Cuda DEX benchmarks
name: benchmark_gpu_dex
on:
workflow_dispatch:
@@ -23,6 +23,7 @@ permissions: {}
jobs:
parse-inputs:
name: benchmark_gpu_dex/parse-inputs
runs-on: ubuntu-latest
outputs:
profile: ${{ steps.parse_profile.outputs.profile }}
@@ -47,7 +48,7 @@ jobs:
echo "name=${NAME}" >> "${GITHUB_OUTPUT}"
run-benchmarks:
name: Run benchmarks
name: benchmark_gpu_dex/run-benchmarks
needs: parse-inputs
uses: ./.github/workflows/benchmark_gpu_dex_common.yml
with:

View File

@@ -1,5 +1,5 @@
# Run DEX benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Cuda DEX benchmarks - common
name: benchmark_gpu_dex_common
on:
workflow_call:
@@ -47,7 +47,7 @@ permissions: {}
jobs:
setup-instance:
name: Setup instance (cuda-dex-benchmarks)
name: benchmark_gpu_dex_common/setup-instance
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -91,7 +91,7 @@ jobs:
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
cuda-dex-benchmarks:
name: Cuda DEX benchmarks (${{ inputs.profile }})
name: benchmark_gpu_dex_common/cuda-dex-benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -154,7 +154,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_dex_${{ inputs.profile }}
path: ${{ env.RESULTS_FILENAME }}
@@ -177,7 +177,7 @@ jobs:
SLAB_URL: ${{ secrets.SLAB_URL }}
slack-notify:
name: Slack Notification
name: benchmark_gpu_dex_common/slack-notify
needs: [ setup-instance, cuda-dex-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-dex-benchmarks.result != 'skipped' && failure() }}
@@ -190,7 +190,7 @@ jobs:
SLACK_MESSAGE: "Cuda DEX benchmarks (${{ inputs.profile }}) finished with status: ${{ needs.cuda-dex-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-dex-${{ inputs.profile }}-benchmarks)
name: benchmark_gpu_dex_common/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-dex-benchmarks, slack-notify ]
runs-on: ubuntu-latest

View File

@@ -1,5 +1,5 @@
# Run CUDA DEX benchmarks on multiple Hyperstack VMs and return parsed results to Slab CI bot.
name: Cuda DEX weekly benchmarks
name: benchmark_gpu_dex_weekly
on:
schedule:
@@ -10,7 +10,7 @@ permissions: {}
jobs:
run-benchmarks-1-h100:
name: Run benchmarks (1xH100)
name: benchmark_gpu_dex_weekly/run-benchmarks-1-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_dex_common.yml
with:
@@ -27,7 +27,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-2-h100:
name: Run benchmarks (2xH100)
name: benchmark_gpu_dex_weekly/run-benchmarks-2-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_dex_common.yml
with:
@@ -44,7 +44,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-8-h100:
name: Run benchmarks (8xH100)
name: benchmark_gpu_dex_weekly/run-benchmarks-8-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_dex_common.yml
with:

View File

@@ -1,5 +1,5 @@
# Run CUDA ERC20 benchmarks on a Hyperstack VM and return parsed results to Slab CI bot.
name: Cuda ERC20 benchmarks
name: benchmark_gpu_erc20
on:
workflow_dispatch:
@@ -24,6 +24,7 @@ permissions: {}
jobs:
parse-inputs:
name: benchmark_gpu_erc20/parse-inputs
runs-on: ubuntu-latest
outputs:
profile: ${{ steps.parse_profile.outputs.profile }}
@@ -48,7 +49,7 @@ jobs:
echo "name=${NAME}" >> "${GITHUB_OUTPUT}"
run-benchmarks:
name: Run benchmarks
name: benchmark_gpu_erc20/run-benchmarks
needs: parse-inputs
uses: ./.github/workflows/benchmark_gpu_erc20_common.yml
with:

View File

@@ -1,5 +1,5 @@
# Run ERC20 benchmarks on an instance with CUDA and return parsed results to Slab CI bot.
name: Cuda ERC20 benchmarks - common
name: benchmark_gpu_erc20_common
on:
workflow_call:
@@ -48,7 +48,7 @@ permissions: {}
jobs:
setup-instance:
name: Setup instance (cuda-erc20-benchmarks)
name: benchmark_gpu_erc20_common/setup-instance
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -92,7 +92,7 @@ jobs:
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
cuda-erc20-benchmarks:
name: Cuda ERC20 benchmarks (${{ inputs.profile }})
name: benchmark_gpu_erc20_common/cuda-erc20-benchmarks
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
@@ -155,7 +155,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_erc20_${{ inputs.profile }}
path: ${{ env.RESULTS_FILENAME }}
@@ -178,7 +178,7 @@ jobs:
SLAB_URL: ${{ secrets.SLAB_URL }}
slack-notify:
name: Slack Notification
name: benchmark_gpu_erc20_common/slack-notify
needs: [ setup-instance, cuda-erc20-benchmarks ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-erc20-benchmarks.result != 'skipped' && failure() }}
@@ -191,7 +191,7 @@ jobs:
SLACK_MESSAGE: "Cuda ERC20 benchmarks (${{ inputs.profile }}) finished with status: ${{ needs.cuda-erc20-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-erc20-${{ inputs.profile }}-benchmarks)
name: benchmark_gpu_erc20_common/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-erc20-benchmarks, slack-notify ]
runs-on: ubuntu-latest

View File

@@ -1,5 +1,5 @@
# Run CUDA ERC20 benchmarks on multiple Hyperstack VMs and return parsed results to Slab CI bot.
name: Cuda ERC20 weekly benchmarks
name: benchmark_gpu_erc20_weekly
on:
schedule:
@@ -11,7 +11,7 @@ permissions: {}
jobs:
run-benchmarks-1-h100:
name: Run benchmarks (1xH100)
name: benchmark_gpu_erc20_weekly/run-benchmarks-1-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_erc20_common.yml
with:
@@ -28,7 +28,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-2-h100:
name: Run benchmarks (2xH100)
name: benchmark_gpu_erc20_weekly/run-benchmarks-2-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_erc20_common.yml
with:
@@ -45,7 +45,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-8-h100:
name: Run benchmarks (8xH100)
name: benchmark_gpu_erc20_weekly/run-benchmarks-8-h100
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_erc20_common.yml
with:

View File

@@ -1,5 +1,5 @@
# Run CUDA benchmarks on multiple Hyperstack VMs and return parsed results to Slab CI bot.
name: Cuda weekly benchmarks
name: benchmark_gpu_weekly
on:
schedule:
@@ -11,7 +11,7 @@ permissions: {}
jobs:
run-benchmarks-8-h100-sxm5-integer:
name: Run integer benchmarks (8xH100-SXM5)
name: benchmark_gpu_weekly/run-benchmarks-8-h100-sxm5-integer
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
@@ -32,7 +32,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-8-h100-sxm5-integer-compression:
name: Run integer compression benchmarks (8xH100-SXM5)
name: benchmark_gpu_weekly/run-benchmarks-8-h100-sxm5-integer-compression
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
@@ -53,7 +53,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-8-h100-sxm5-integer-zk:
name: Run integer zk benchmarks (8xH100-SXM5)
name: benchmark_gpu_weekly/run-benchmarks-8-h100-sxm5-integer-zk
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
@@ -74,7 +74,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-8-h100-sxm5-noise-squash:
name: Run integer zk benchmarks (8xH100-SXM5)
name: benchmark_gpu_weekly/run-benchmarks-8-h100-sxm5-noise-squash
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:
@@ -95,7 +95,7 @@ jobs:
SLAB_BASE_URL: ${{ secrets.SLAB_BASE_URL }}
run-benchmarks-1-h100-core-crypto:
name: Run core-crypto benchmarks (1xH100)
name: benchmark_gpu_weekly/run-benchmarks-1-h100-core-crypto
if: github.repository == 'zama-ai/tfhe-rs'
uses: ./.github/workflows/benchmark_gpu_common.yml
with:

View File

@@ -16,7 +16,7 @@ permissions: {}
jobs:
hlapi-benchmarks-hpu:
name: Execute HLAPI benchmarks for HPU backend
runs-on: v80-desktop
runs-on: v80-marais
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
@@ -60,11 +60,14 @@ jobs:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Select HPU board
run: |
echo "V80_PCIE_DEV=24" >> "${GITHUB_ENV}"
echo "V80_SERIAL_NUMBER=XFL12NWY3ZKG" >> "${GITHUB_ENV}"
- name: Run benchmarks
run: |
make pull_hpu_files
export V80_SERIAL_NUMBER=XFL12E4XJXWK
source /opt/xilinx/Vivado/2024.2/settings64.sh
make bench_hlapi_erc20_hpu
make bench_hlapi_hpu
@@ -83,7 +86,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_hlapi_benchmarks
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -1,9 +1,12 @@
# Run all integer benchmarks on a permanent HPU instance and return parsed results to Slab CI bot.
name: Hpu Integer Benchmarks
name: benchmark_hpu_integer
on:
workflow_dispatch:
inputs:
all_precisions:
description: "Run all precisions"
type: boolean
bench_type:
description: "Benchmarks type"
type: choice
@@ -19,13 +22,14 @@ env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
FAST_BENCH: TRUE
permissions: {}
jobs:
prepare-matrix:
name: Prepare operations matrix
runs-on: v80-desktop
runs-on: v80-marais
outputs:
bench_type: ${{ steps.set_bench_type.outputs.bench_type }}
steps:
@@ -44,17 +48,17 @@ jobs:
if: github.event_name != 'workflow_dispatch'
run: |
echo "BENCH_TYPE=[\"latency\"]" >> "${GITHUB_ENV}"
- name: Set benchmark types output
id: set_bench_type
run: | # zizmor: ignore[template-injection] this env variable is safe
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
integer-benchmarks-hpu:
name: Execute integer & erc20 benchmarks for HPU backend
name: benchmark_hpu_integer/integer-benchmarks-hpu
needs: prepare-matrix
runs-on: v80-desktop
runs-on: v80-marais
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}
cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
@@ -102,11 +106,20 @@ jobs:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Should run benchmarks with all precisions
if: inputs.all_precisions
run: |
echo "FAST_BENCH=FALSE" >> "${GITHUB_ENV}"
- name: Select HPU board
run: |
echo "V80_PCIE_DEV=24" >> "${GITHUB_ENV}"
echo "V80_SERIAL_NUMBER=XFL12NWY3ZKG" >> "${GITHUB_ENV}"
- name: Run benchmarks
run: |
echo "${V80_PCIE_DEV} ${V80_SERIAL_NUMBER}"
make pull_hpu_files
export V80_SERIAL_NUMBER=XFL12E4XJXWK
source /opt/xilinx/Vivado/2024.2/settings64.sh
make BENCH_TYPE="${BENCH_TYPE}" bench_integer_hpu
env:
BENCH_TYPE: ${{ matrix.bench_type }}
@@ -128,7 +141,7 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.bench_type }}_integer_benchmarks
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -1,5 +1,5 @@
# Run all integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Integer benchmarks
name: benchmark_integer
on:
workflow_dispatch:
@@ -41,7 +41,7 @@ permissions: {}
jobs:
prepare-matrix:
name: Prepare operations matrix
name: benchmark_integer/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -87,7 +87,7 @@ jobs:
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (integer-benchmarks)
name: benchmark_integer/setup-instance
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
@@ -105,7 +105,7 @@ jobs:
profile: bench
integer-benchmarks:
name: Execute integer benchmarks for all operations flavor
name: benchmark_integer/integer-benchmarks
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -188,7 +188,7 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ matrix.bench_type }}
path: ${{ env.RESULTS_FILENAME }}
@@ -211,7 +211,7 @@ jobs:
SLACK_MESSAGE: "Integer full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (integer-benchmarks)
name: benchmark_integer/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, integer-benchmarks ]
runs-on: ubuntu-latest

View File

@@ -0,0 +1,403 @@
# Run performance regression benchmarks and return parsed results to associated pull-request.
name: benchmark_perf_regression
on:
issue_comment:
types: [ created ]
pull_request:
types: [ labeled ]
env:
CARGO_TERM_COLOR: always
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUST_BACKTRACE: "full"
RUST_MIN_STACK: "8388608"
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
permissions: { }
jobs:
verify-triggering-actor:
name: benchmark_perf_regression/verify-actor
if: (github.event_name == 'pull_request' &&
(contains(github.event.label.name, 'bench-perfs-cpu') ||
contains(github.event.label.name, 'bench-perfs-gpu'))) ||
(github.event.issue.pull_request && startsWith(github.event.comment.body, '/bench'))
uses: ./.github/workflows/verify_triggering_actor.yml
secrets:
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
prepare-benchmarks:
name: benchmark_perf_regression/prepare-benchmarks
needs: verify-triggering-actor
runs-on: ubuntu-latest
outputs:
commands: ${{ steps.set_commands.outputs.commands }}
slab-backend: ${{ steps.set_slab_details.outputs.backend }}
slab-profile: ${{ steps.set_slab_details.outputs.profile }}
hardware-name: ${{ steps.get_hardware_name.outputs.name }}
tfhe-backend: ${{ steps.set_regression_details.outputs.tfhe-backend }}
selected-regression-profile: ${{ steps.set_regression_details.outputs.selected-profile }}
custom-env: ${{ steps.get_custom_env.outputs.custom_env }}
permissions:
# Needed to write a comment in a pull-request
pull-requests: write
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Acknowledge issue comment
if: github.event_name == 'issue_comment'
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
with:
comment-id: ${{ github.event.comment.id }}
reactions: '+1'
- name: Display workflow run URL
if: github.event_name == 'issue_comment'
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
with:
issue-number: ${{ github.event.issue.number }}
body: |
User triggered performance regression benchmark.
Workflow run URL: ${{ env.ACTION_RUN_URL }}
- name: Generate CPU benchmarks command from label
if: (github.event_name == 'pull_request' && contains(github.event.label.name, 'bench-perfs-cpu'))
run: |
echo "DEFAULT_BENCH_OPTIONS=--backend cpu" >> "${GITHUB_ENV}"
- name: Generate GPU benchmarks command from label
if: (github.event_name == 'pull_request' && contains(github.event.label.name, 'bench-perfs-gpu'))
run: |
echo "DEFAULT_BENCH_OPTIONS=--backend gpu" >> "${GITHUB_ENV}"
# TODO add support for HPU backend
- name: Install Python requirements
run: |
python3 -m pip install -r ci/perf_regression/requirements.txt
- name: Generate cargo commands and env from label
if: github.event_name == 'pull_request'
run: |
python3 ci/perf_regression/perf_regression.py parse_profile --issue-comment "/bench ${DEFAULT_BENCH_OPTIONS}"
echo "COMMANDS=$(cat ci/perf_regression/perf_regression_generated_commands.json)" >> "${GITHUB_ENV}"
- name: Dump issue comment into file # To avoid possible code-injection
if: github.event_name == 'issue_comment'
run: |
echo "${COMMENT_BODY}" >> dumped_comment.txt
env:
COMMENT_BODY: ${{ github.event.comment.body }}
- name: Generate cargo commands and env
if: github.event_name == 'issue_comment'
run: |
python3 ci/perf_regression/perf_regression.py parse_profile --issue-comment "$(cat dumped_comment.txt)"
echo "COMMANDS=$(cat ci/perf_regression/perf_regression_generated_commands.json)" >> "${GITHUB_ENV}"
- name: Set commands output
id: set_commands
run: | # zizmor: ignore[template-injection] this env variable is safe
echo "commands=${{ toJSON(env.COMMANDS) }}" >> "${GITHUB_OUTPUT}"
- name: Set Slab details outputs
id: set_slab_details
run: |
echo "backend=$(cat ci/perf_regression/perf_regression_slab_backend_config.txt)" >> "${GITHUB_OUTPUT}"
echo "profile=$(cat ci/perf_regression/perf_regression_slab_profile_config.txt)" >> "${GITHUB_OUTPUT}"
- name: Get hardware name
id: get_hardware_name
run: | # zizmor: ignore[template-injection] these interpolations are safe
HARDWARE_NAME=$(python3 ci/hardware_finder.py "${{ steps.set_slab_details.outputs.backend }}" "${{ steps.set_slab_details.outputs.profile }}");
echo "name=${HARDWARE_NAME}" >> "${GITHUB_OUTPUT}"
- name: Set regression details outputs
id: set_regression_details
run: |
echo "tfhe-backend=$(cat ci/perf_regression/perf_regression_tfhe_rs_backend_config.txt)" >> "${GITHUB_OUTPUT}"
echo "selected-profile=$(cat ci/perf_regression/perf_regression_selected_profile_config.txt)" >> "${GITHUB_OUTPUT}"
- name: Get custom env vars
id: get_custom_env
run: |
echo "custom_env=$(cat ci/perf_regression/perf_regression_custom_env.sh)" >> "${GITHUB_OUTPUT}"
setup-instance:
name: benchmark_perf_regression/setup-instance
needs: prepare-benchmarks
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
steps:
- name: Start instance
id: start-instance
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: start
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: ${{ needs.prepare-benchmarks.outputs.slab-backend }}
profile: ${{ needs.prepare-benchmarks.outputs.slab-profile }}
install-cuda-dependencies-if-required:
name: benchmark_perf_regression/install-cuda-dependencies-if-required
needs: [ prepare-benchmarks, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
strategy:
matrix:
# explicit include-based build matrix, of known valid options
include:
- cuda: "12.8"
gcc: 11
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Setup Hyperstack dependencies
if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
uses: ./.github/actions/gpu_setup
with:
cuda-version: ${{ matrix.cuda }}
gcc-version: ${{ matrix.gcc }}
regression-benchmarks:
name: benchmark_perf_regression/regression-benchmarks
needs: [ prepare-benchmarks, setup-instance, install-cuda-dependencies-if-required ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
group: ${{ github.workflow_ref }}_${{ needs.prepare-benchmarks.outputs.slab-backend }}_${{ needs.prepare-benchmarks.outputs.slab-profile }}
cancel-in-progress: true
timeout-minutes: 720 # 12 hours
strategy:
fail-fast: false
max-parallel: 1
matrix:
command: ${{ fromJson(needs.prepare-benchmarks.outputs.commands) }}
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
fetch-depth: 0 # Needed to get commit hash
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Get benchmark details
run: |
COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
{
echo "BENCH_DATE=$(date --iso-8601=seconds)";
echo "COMMIT_DATE=${COMMIT_DATE}";
echo "COMMIT_HASH=$(git describe --tags --dirty)";
} >> "${GITHUB_ENV}"
env:
SHA: ${{ github.sha }}
- name: Export custom env variables
run: | # zizmor: ignore[template-injection] this env variable is safe
{
${{ needs.prepare-benchmarks.outputs.custom-env }}
} >> "$GITHUB_ENV"
# Re-export environment variables as dependencies setup perform this task in the previous job.
# Local env variables are cleaned at the end of each job.
- name: Export CUDA variables
if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
shell: bash
run: |
echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
echo "PATH=$PATH:$CUDA_PATH/bin" >> "${GITHUB_PATH}"
echo "LD_LIBRARY_PATH=$CUDA_PATH/lib64:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
echo "CUDA_MODULE_LOADER=EAGER" >> "${GITHUB_ENV}"
env:
CUDA_PATH: /usr/local/cuda-12.8
- name: Export gcc and g++ variables
if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
shell: bash
run: |
{
echo "CC=/usr/bin/gcc-${GCC_VERSION}";
echo "CXX=/usr/bin/g++-${GCC_VERSION}";
echo "CUDAHOSTCXX=/usr/bin/g++-${GCC_VERSION}";
} >> "${GITHUB_ENV}"
env:
GCC_VERSION: 11
- name: Install rust
uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: nightly
- name: Checkout Slab repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
repository: zama-ai/slab
path: slab
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Run regression benchmarks
run: |
make BENCH_CUSTOM_COMMAND="${BENCH_COMMAND}" bench_custom
env:
BENCH_COMMAND: ${{ matrix.command }}
- name: Parse results
run: |
python3 ./ci/benchmark_parser.py target/criterion "${RESULTS_FILENAME}" \
--database tfhe_rs \
--hardware "${HARDWARE_NAME}" \
--backend "${TFHE_BACKEND}" \
--project-version "${COMMIT_HASH}" \
--branch "${REF_NAME}" \
--commit-date "${COMMIT_DATE}" \
--bench-date "${BENCH_DATE}" \
--walk-subdirs \
--name-suffix regression \
--bench-type "${BENCH_TYPE}"
echo "RESULTS_FILE_SHA=$(sha256sum "${RESULTS_FILENAME}" | cut -d " " -f1)" >> "${GITHUB_ENV}"
env:
HARDWARE_NAME: ${{ needs.prepare-benchmarks.outputs.hardware-name }}
TFHE_BACKEND: ${{ needs.prepare-benchmarks.outputs.tfhe-backend }}
REF_NAME: ${{ github.head_ref || github.ref_name }}
BENCH_TYPE: ${{ env.__TFHE_RS_BENCH_TYPE }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_regression_${{ env.RESULTS_FILE_SHA }} # RESULTS_FILE_SHA is needed to avoid collision between matrix.command runs
path: ${{ env.RESULTS_FILENAME }}
- name: Send data to Slab
shell: bash
run: |
python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
--slab-url "${SLAB_URL}"
env:
JOB_SECRET: ${{ secrets.JOB_SECRET }}
SLAB_URL: ${{ secrets.SLAB_URL }}
check-regressions:
name: benchmark_perf_regression/check-regressions
needs: [ prepare-benchmarks, regression-benchmarks ]
runs-on: ubuntu-latest
permissions:
# Needed to write a comment in a pull-request
pull-requests: write
# Needed to set up Python dependencies
contents: read
env:
REF_NAME: ${{ github.head_ref || github.ref_name }}
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Install recent Python
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: '3.12'
- name: Fetch data
run: |
python3 -m pip install -r ci/data_extractor/requirements.txt
python3 ci/data_extractor/src/data_extractor.py regression_data \
--generate-regression-json \
--regression-profiles ci/regression.toml \
--regression-selected-profile "${REGRESSION_PROFILE}" \
--backend "${TFHE_BACKEND}" \
--hardware "${HARDWARE_NAME}" \
--branch "${REF_NAME}" \
--time-span-days 60
env:
REGRESSION_PROFILE: ${{ needs.prepare-benchmarks.outputs.selected-regression-profile }}
TFHE_BACKEND: ${{ needs.prepare-benchmarks.outputs.tfhe-backend }}
HARDWARE_NAME: ${{ needs.prepare-benchmarks.outputs.hardware-name }}
DATA_EXTRACTOR_DATABASE_HOST: ${{ secrets.DATABASE_HOST }}
DATA_EXTRACTOR_DATABASE_USER: ${{ secrets.DATABASE_USER }}
DATA_EXTRACTOR_DATABASE_PASSWORD: ${{ secrets.DATABASE_PASSWORD }}
- name: Generate regression report
run: |
python3 -m pip install -r ci/perf_regression/requirements.txt
python3 ci/perf_regression/perf_regression.py check_regression \
--results-file regression_data.json \
--generate-report
- name: Write report in pull-request
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
with:
issue-number: ${{ github.event.pull_request.number || github.event.issue.number }}
body-path: ci/perf_regression/regression_report.md
comment-on-failure:
name: benchmark_perf_regression/comment-on-failure
needs: [ prepare-benchmarks, setup-instance, regression-benchmarks, check-regressions ]
runs-on: ubuntu-latest
if: ${{ failure() && github.event_name == 'issue_comment' }}
continue-on-error: true
permissions:
# Needed to write a comment in a pull-request
pull-requests: write
steps:
- name: Write failure message
uses: peter-evans/create-or-update-comment@e8674b075228eee787fea43ef493e45ece1004c9 # v5.0.0
with:
issue-number: ${{ github.event.issue.number }}
body: |
:x: Performance regression benchmark failed ([workflow run](${{ env.ACTION_RUN_URL }}))
slack-notify:
name: benchmark_perf_regression/slack-notify
needs: [ prepare-benchmarks, setup-instance, regression-benchmarks, check-regressions ]
runs-on: ubuntu-latest
if: ${{ failure() }}
continue-on-error: true
steps:
- name: Send message
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "Performance regression benchmarks failed. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: benchmark_perf_regression/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, regression-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
id: stop-instance
uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
with:
mode: stop
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (regression-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,5 +1,5 @@
# Run all shortint benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Shortint full benchmarks
name: benchmark_shortint
on:
workflow_dispatch:
@@ -27,7 +27,7 @@ permissions: {}
jobs:
prepare-matrix:
name: Prepare operations matrix
name: benchmark_shortint/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -51,7 +51,7 @@ jobs:
echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (shortint-benchmarks)
name: benchmark_shortint/setup-instance
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
@@ -69,7 +69,7 @@ jobs:
profile: bench
shortint-benchmarks:
name: Execute shortint benchmarks for all operations flavor
name: benchmark_shortint/shortint-benchmarks
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -131,21 +131,8 @@ jobs:
env:
REF_NAME: ${{ github.ref_name }}
# This small benchmark needs to be executed only once.
- name: Measure key sizes
if: matrix.op_flavor == 'default'
run: |
make measure_shortint_key_sizes
- name: Parse key sizes results
if: matrix.op_flavor == 'default'
run: |
python3 ./ci/benchmark_parser.py tfhe-benchmark/shortint_key_sizes.csv "${RESULTS_FILENAME}" \
--object-sizes \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_shortint_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}
@@ -168,7 +155,7 @@ jobs:
SLACK_MESSAGE: "Shortint full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (shortint-benchmarks)
name: benchmark_shortint/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, shortint-benchmarks ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Run all signed integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: Signed Integer full benchmarks
name: benchmark_signed_integer
on:
workflow_dispatch:
@@ -41,7 +41,7 @@ permissions: {}
jobs:
prepare-matrix:
name: Prepare operations matrix
name: benchmark_signed_integer/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -87,7 +87,7 @@ jobs:
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (signed-integer-benchmarks)
name: benchmark_signed_integer/setup-instance
needs: prepare-matrix
runs-on: ubuntu-latest
outputs:
@@ -105,7 +105,7 @@ jobs:
profile: bench
signed-integer-benchmarks:
name: Execute signed integer benchmarks for all operations flavor
name: benchmark_signed_integer/signed-integer-benchmarks
needs: [ prepare-matrix, setup-instance ]
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
concurrency:
@@ -180,7 +180,7 @@ jobs:
BENCH_TYPE: ${{ matrix.bench_type }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}_${{ matrix.bench_type }}
path: ${{ env.RESULTS_FILENAME }}
@@ -203,7 +203,7 @@ jobs:
SLACK_MESSAGE: "Signed integer full benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (integer-benchmarks)
name: benchmark_signed_integer/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, signed-integer-benchmarks ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Run FFT benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: FFT benchmarks
name: benchmark_tfhe_fft
env:
CARGO_TERM_COLOR: always
@@ -27,8 +27,8 @@ on:
permissions: {}
jobs:
setup-ec2:
name: Setup EC2 instance (fft-benchmarks)
setup-instance:
name: benchmark_tfhe_fft/setup-instance
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
@@ -45,12 +45,12 @@ jobs:
profile: bench
fft-benchmarks:
name: Execute FFT benchmarks in EC2
needs: setup-ec2
name: benchmark_tfhe_fft/fft-benchmarks
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: true
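# github.workflow_ref already embeds the triggering ref, so this group is
# per-branch; appending the commit SHA on main gives every main-branch run its
# own group (pushes to main are never cancelled), while a newer push to any
# other branch cancels that branch's in-flight run via cancel-in-progress.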
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
@@ -94,7 +94,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_fft
path: ${{ env.RESULTS_FILENAME }}
@@ -124,10 +124,10 @@ jobs:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-fft benchmarks failed. (${{ env.ACTION_RUN_URL }})"
teardown-ec2:
name: Teardown EC2 instance (fft-benchmarks)
if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
needs: [ setup-ec2, fft-benchmarks ]
teardown-instance:
name: benchmark_tfhe_fft/teardown-instance
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [ setup-instance, fft-benchmarks ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
@@ -138,7 +138,7 @@ jobs:
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
@@ -146,4 +146,4 @@ jobs:
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (fft-benchmarks) failed. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (fft-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,5 +1,5 @@
# Run NTT benchmarks on an AWS instance and return parsed results to Slab CI bot.
name: NTT benchmarks
name: benchmark_tfhe_ntt
env:
CARGO_TERM_COLOR: always
@@ -27,8 +27,8 @@ on:
permissions: {}
jobs:
setup-ec2:
name: Setup EC2 instance (ntt-benchmarks)
setup-instance:
name: benchmark_tfhe_ntt/setup-instance
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
@@ -45,12 +45,12 @@ jobs:
profile: bench
ntt-benchmarks:
name: Execute NTT benchmarks in EC2
needs: setup-ec2
name: benchmark_tfhe_ntt/ntt-benchmarks
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}${{ github.ref == 'refs/heads/main' && github.sha || '' }}
cancel-in-progress: true
runs-on: ${{ needs.setup-ec2.outputs.runner-name }}
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
steps:
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
@@ -94,7 +94,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_ntt
path: ${{ env.RESULTS_FILENAME }}
@@ -124,10 +124,10 @@ jobs:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-ntt benchmarks failed. (${{ env.ACTION_RUN_URL }})"
teardown-ec2:
name: Teardown EC2 instance (ntt-benchmarks)
if: ${{ always() && needs.setup-ec2.result != 'skipped' }}
needs: [setup-ec2, ntt-benchmarks]
teardown-instance:
name: benchmark_tfhe_ntt/teardown-instance
if: ${{ always() && needs.setup-instance.result != 'skipped' }}
needs: [setup-instance, ntt-benchmarks]
runs-on: ubuntu-latest
steps:
- name: Stop instance
@@ -138,7 +138,7 @@ jobs:
github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
label: ${{ needs.setup-ec2.outputs.runner-name }}
label: ${{ needs.setup-instance.outputs.runner-name }}
- name: Slack Notification
if: ${{ failure() }}
@@ -146,4 +146,4 @@ jobs:
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "EC2 teardown (ntt-benchmarks) failed. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "EC2 teardown (ntt-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,5 +1,5 @@
# Run benchmarks of the tfhe-zk-pok crate on an instance and return parsed results to Slab CI bot.
name: tfhe-zk-pok benchmarks
name: benchmark_tfhe_zk_pok
on:
workflow_dispatch:
@@ -35,6 +35,7 @@ permissions: {}
jobs:
should-run:
name: benchmark_tfhe_zk_pok/should-run
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
((github.event_name == 'push' || github.event_name == 'schedule') && github.repository == 'zama-ai/tfhe-rs')
@@ -50,7 +51,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
zk_pok:
@@ -58,7 +59,7 @@ jobs:
- .github/workflows/benchmark_tfhe_zk_pok.yml
setup-instance:
name: Setup instance (tfhe-zk-pok-benchmarks)
name: benchmark_tfhe_zk_pok/setup-instance
runs-on: ubuntu-latest
needs: should-run
if: github.event_name == 'workflow_dispatch' ||
@@ -81,7 +82,7 @@ jobs:
profile: bench
tfhe-zk-pok-benchmarks:
name: Execute tfhe-zk-pok benchmarks
name: benchmark_tfhe_zk_pok/tfhe-zk-pok-benchmarks
if: needs.setup-instance.result != 'skipped'
needs: setup-instance
concurrency:
@@ -142,7 +143,7 @@ jobs:
REF_NAME: ${{ github.ref_name }}
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_tfhe_zk_pok_${{ env.BENCH_TYPE }}
path: ${{ env.RESULTS_FILENAME }}
@@ -173,7 +174,7 @@ jobs:
SLACK_MESSAGE: "tfhe-zk-pok benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (tfhe-zk-pok-benchmarks)
name: benchmark_tfhe_zk_pok/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, tfhe-zk-pok-benchmarks ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Run WASM client benchmarks on an instance and return parsed results to Slab CI bot.
name: WASM client benchmarks
name: benchmark_wasm_client
on:
workflow_dispatch:
@@ -26,6 +26,7 @@ permissions: {}
jobs:
should-run:
name: benchmark_wasm_client/should-run
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
@@ -44,7 +45,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
wasm_bench:
@@ -57,7 +58,7 @@ jobs:
- .github/workflows/wasm_client_benchmark.yml
setup-instance:
name: Setup instance (wasm-client-benchmarks)
name: benchmark_wasm_client/setup-instance
if: github.event_name == 'workflow_dispatch' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
(github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs' && needs.should-run.outputs.wasm_bench)
@@ -78,7 +79,7 @@ jobs:
profile: cpu-small
wasm-client-benchmarks:
name: Execute WASM client benchmarks
name: benchmark_wasm_client/wasm-client-benchmarks
needs: setup-instance
if: needs.setup-instance.result != 'skipped'
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
@@ -116,7 +117,7 @@ jobs:
- name: Node cache restoration
id: node-cache
uses: actions/cache/restore@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
with:
path: |
~/.nvm
@@ -129,7 +130,7 @@ jobs:
make install_node
- name: Node cache save
uses: actions/cache/save@0400d5f644dc74513175e3cd8d07132dd4860809 #v4.2.4
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 #v4.3.0
if: steps.node-cache.outputs.cache-hit != 'true'
with:
path: |
@@ -165,21 +166,8 @@ jobs:
env:
REF_NAME: ${{ github.ref_name }}
# Run these benchmarks only once
- name: Measure public key and ciphertext sizes in HL Api
if: matrix.browser == 'chrome'
run: |
make measure_hlapi_compact_pk_ct_sizes
- name: Parse key and ciphertext sizes results
if: matrix.browser == 'chrome'
run: |
python3 ./ci/benchmark_parser.py tfhe-benchmark/hlapi_cpk_and_cctl_sizes.csv "${RESULTS_FILENAME}" \
--key-gen \
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_wasm_${{ matrix.browser }}
path: ${{ env.RESULTS_FILENAME }}
@@ -210,7 +198,7 @@ jobs:
SLACK_MESSAGE: "WASM benchmarks (${{ matrix.browser }}) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (wasm-client-benchmarks)
name: benchmark_wasm_client/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, wasm-client-benchmarks ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Run PKE Zero-Knowledge benchmarks on an instance and return parsed results to Slab CI bot.
name: PKE ZK benchmarks
name: benchmark_zk_pke
on:
workflow_dispatch:
@@ -36,6 +36,7 @@ permissions: {}
jobs:
should-run:
name: benchmark_zk_pke/should-run
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch' ||
((github.event_name == 'push' || github.event_name == 'schedule') && github.repository == 'zama-ai/tfhe-rs')
@@ -51,7 +52,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
zk_pok:
@@ -67,7 +68,7 @@ jobs:
- .github/workflows/zk_pke_benchmark.yml
prepare-matrix:
name: Prepare operations matrix
name: benchmark_zk_pke/prepare-matrix
runs-on: ubuntu-latest
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
@@ -96,7 +97,7 @@ jobs:
echo "bench_type=${{ toJSON(env.BENCH_TYPE) }}" >> "${GITHUB_OUTPUT}"
setup-instance:
name: Setup instance (pke-zk-benchmarks)
name: benchmark_zk_pke/setup-instance
runs-on: ubuntu-latest
needs: [ should-run, prepare-matrix ]
if: github.event_name == 'workflow_dispatch' ||
@@ -119,7 +120,7 @@ jobs:
profile: bench
pke-zk-benchmarks:
name: Execute PKE ZK benchmarks
name: benchmark_zk_pke/pke-zk-benchmarks
if: needs.setup-instance.result != 'skipped'
needs: [ prepare-matrix, setup-instance ]
concurrency:
@@ -192,7 +193,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with:
name: ${{ github.sha }}_integer_zk_${{ matrix.bench_type }}
path: ${{ env.RESULTS_FILENAME }}
@@ -223,7 +224,7 @@ jobs:
SLACK_MESSAGE: "PKE ZK benchmarks finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (pke-zk-benchmarks)
name: benchmark_zk_pke/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, pke-zk-benchmarks ]
runs-on: ubuntu-latest

.github/workflows/cargo_audit.yml

@@ -0,0 +1,42 @@
# Run cargo audit
name: cargo_audit
on:
workflow_dispatch:
schedule:
# runs every day at 4am UTC
- cron: '0 4 * * *'
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN || secrets.GITHUB_TOKEN }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACKIFY_MARKDOWN: true
permissions: {}
jobs:
audit:
name: cargo_audit/audit
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
- name: Audit dependencies
run: |
make audit_dependencies
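# audit_dependencies presumably wraps `cargo audit`, which checks Cargo.lock
# against the RustSec advisory database for known-vulnerable dependencies.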
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "cargo-audit finished with status: ${{ job.status }}. ([action run](${{ env.ACTION_RUN_URL }}))"


@@ -1,4 +1,4 @@
name: Cargo Build TFHE-rs
name: cargo_build
on:
pull_request:
@@ -18,16 +18,95 @@ permissions:
contents: read
jobs:
cargo-builds:
runs-on: ${{ matrix.os }}
prepare-parallel-pcc-matrix:
name: cargo_build/prepare-parallel-pcc-matrix
runs-on: ubuntu-latest
outputs:
matrix_command: ${{ steps.set-pcc-commands-matrix.outputs.commands }}
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: "false"
token: ${{ env.CHECKOUT_TOKEN }}
# Fetch all the Make recipes that start with `pcc_batch_`
- name: Set pcc commands matrix
id: set-pcc-commands-matrix
run: |
COMMANDS=$(grep -oE '^pcc_batch_[^:]*:' Makefile | sed 's/:/\"/; s/^/\"/' | paste -sd,)
echo "commands=[${COMMANDS}]" >> "$GITHUB_OUTPUT"
parallel-pcc-cpu:
name: cargo_build/parallel-pcc-cpu
needs: prepare-parallel-pcc-matrix
runs-on: large_ubuntu_16
strategy:
matrix:
command: ${{fromJson(needs.prepare-parallel-pcc-matrix.outputs.matrix_command)}}
fail-fast: false
steps:
- name: Checkout tfhe-rs repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: stable
- name: Run pcc checks batch
run: |
make "${COMMAND}"
env:
COMMAND: ${{ matrix.command }}
pcc-hpu:
name: cargo_build/pcc-hpu
runs-on: large_ubuntu_16
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: stable
- name: Run Hpu pcc checks
run: |
make pcc_hpu
build-tfhe-full:
name: cargo_build/build-tfhe-full
runs-on: ${{ matrix.os }}
strategy:
matrix:
# GitHub's macos-latest runners are now M1 Macs, so use ours; we limit what runs so it stays fast
# even with a few PRs
os: [large_ubuntu_16, macos-latest, windows-latest]
os: [large_ubuntu_16, macos-latest-xlarge, large_windows_16_latest]
fail-fast: false
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: stable
- name: Build Release tfhe full
run: |
make build_tfhe_full
build:
name: cargo_build/build
runs-on: large_ubuntu_16
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
@@ -40,7 +119,6 @@ jobs:
toolchain: stable
- name: Install and run newline linter checks
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
wget https://github.com/fernandrone/linelint/releases/download/0.0.6/linelint-linux-amd64
echo "16b70fb7b471d6f95cbdc0b4e5dc2b0ac9e84ba9ecdc488f7bdf13df823aca4b linelint-linux-amd64" > checksum
@@ -49,60 +127,93 @@ jobs:
mv linelint-linux-amd64 /usr/local/bin/linelint
make check_newline
- name: Run pcc checks
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make pcc
- name: Build tfhe-csprng
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_tfhe_csprng
- name: Build with MSRV
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_tfhe_msrv
- name: Build coverage tests
run: |
make build_tfhe_coverage
build-layers:
name: cargo_build/build-layers
runs-on: large_ubuntu_16
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: stable
- name: Build Release core
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_core AVX512_SUPPORT=ON
make build_core_experimental AVX512_SUPPORT=ON
- name: Build Release boolean
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_boolean
- name: Build Release shortint
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_shortint
- name: Build Release integer
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_integer
- name: Build Release tfhe full
run: |
make build_tfhe_full
build-c-api:
name: cargo_build/build-c-api
runs-on: large_ubuntu_16
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
persist-credentials: 'false'
token: ${{ env.CHECKOUT_TOKEN }}
- name: Install latest stable
uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # zizmor: ignore[stale-action-refs] this action doesn't create releases
with:
toolchain: stable
- name: Build Release c_api
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_c_api
- name: Build coverage tests
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make build_tfhe_coverage
- name: Run Hpu pcc checks
if: ${{ contains(matrix.os, 'ubuntu') }}
run: |
make pcc_hpu
# The wasm build check is a bit annoying to set up here and is done during the tests in
# aws_tfhe_tests.yml
cargo-builds:
name: cargo_build/cargo-builds (bpr)
needs: [ parallel-pcc-cpu, pcc-hpu, build-tfhe-full, build, build-layers, build-c-api ]
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
- name: Check all builds success
if: needs.parallel-pcc-cpu.result == 'success' &&
needs.pcc-hpu.result == 'success' &&
needs.build-tfhe-full.result == 'success' &&
needs.build.result == 'success' &&
needs.build-layers.result == 'success' &&
needs.build-c-api.result == 'success'
run: |
echo "All tfhe-rs build checks passed"
- name: Check builds failure
if: needs.parallel-pcc-cpu.result != 'success' ||
needs.pcc-hpu.result != 'success' ||
needs.build-tfhe-full.result != 'success' ||
needs.build.result != 'success' ||
needs.build-layers.result != 'success' ||
needs.build-c-api.result != 'success'
run: |
echo "Some tfhe-rs build checks failed"
exit 1
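# cargo-builds is a branch-protection gate (bpr): `if: always()` forces it to
# run even when a needed job fails, since a skipped required check can be
# treated as passing by branch protection; its two steps then translate the
# aggregated job results into a single pass/fail status.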


@@ -1,5 +1,5 @@
# Build tfhe-fft
name: Cargo Build tfhe-fft
name: cargo_build_tfhe_fft
on:
pull_request:
@@ -17,6 +17,7 @@ permissions:
jobs:
cargo-builds-fft:
name: cargo_build_tfhe_fft/cargo-builds-fft (bpr)
runs-on: ${{ matrix.runner_type }}
strategy:


@@ -1,5 +1,5 @@
# Build tfhe-ntt
name: Cargo Build tfhe-ntt
name: cargo_build_tfhe_ntt
on:
pull_request:
@@ -17,6 +17,7 @@ permissions:
jobs:
cargo-builds-ntt:
name: cargo_build_tfhe_ntt/cargo-builds-ntt (bpr)
runs-on: ${{ matrix.os }}
strategy:
matrix:


@@ -1,5 +1,5 @@
# Test tfhe-fft
name: Cargo Test tfhe-fft
name: cargo_test_fft
on:
pull_request:
@@ -21,6 +21,7 @@ permissions:
jobs:
should-run:
name: cargo_test_fft/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -36,7 +37,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
fft:
@@ -46,6 +47,7 @@ jobs:
- '.github/workflows/cargo_test_fft.yml'
cargo-tests-fft:
name: cargo_test_fft/cargo-tests-fft
needs: should-run
if: needs.should-run.outputs.fft_test == 'true'
runs-on: ${{ matrix.runner_type }}
@@ -77,6 +79,7 @@ jobs:
make test_fft_no_std
cargo-tests-fft-nightly:
name: cargo_test_fft/cargo-tests-fft-nightly
needs: should-run
if: needs.should-run.outputs.fft_test == 'true'
runs-on: ${{ matrix.runner_type }}
@@ -104,6 +107,7 @@ jobs:
make test_fft_no_std_nightly
cargo-tests-fft-node-js:
name: cargo_test_fft/cargo-tests-fft-node-js
needs: should-run
if: needs.should-run.outputs.fft_test == 'true'
runs-on: ubuntu-latest
@@ -119,6 +123,7 @@ jobs:
make test_fft_node_js_ci
cargo-tests-fft-successful:
name: cargo_test_fft/cargo-tests-fft-successful (bpr)
needs: [ should-run, cargo-tests-fft, cargo-tests-fft-nightly, cargo-tests-fft-node-js ]
if: ${{ always() }}
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Test tfhe-ntt
name: Cargo Test tfhe-ntt
name: cargo_test_ntt
on:
pull_request:
@@ -22,6 +22,7 @@ permissions:
jobs:
should-run:
name: cargo_test_ntt/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -37,7 +38,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
ntt:
@@ -47,6 +48,7 @@ jobs:
- '.github/workflows/cargo_test_ntt.yml'
setup-instance:
name: cargo_test_ntt/setup-instance
needs: should-run
if: needs.should-run.outputs.ntt_test == 'true'
runs-on: ubuntu-latest
@@ -75,6 +77,7 @@ jobs:
echo "matrix_os=[\"${INSTANCE_TO_USE}\", \"macos-latest\", \"windows-latest\"]" >> "$GITHUB_OUTPUT"
cargo-tests-ntt:
name: cargo_test_ntt/cargo-tests-ntt
needs: [should-run, setup-instance]
if: needs.should-run.outputs.ntt_test == 'true'
runs-on: ${{ matrix.os }}
@@ -101,6 +104,7 @@ jobs:
run: make test_ntt_no_std
cargo-tests-ntt-nightly:
name: cargo_test_ntt/cargo-tests-ntt-nightly
needs: [should-run, setup-instance]
if: needs.should-run.outputs.ntt_test == 'true'
runs-on: ${{ matrix.os }}
@@ -126,6 +130,7 @@ jobs:
run: make test_ntt_no_std_nightly
cargo-tests-ntt-successful:
name: cargo_test_ntt/cargo-tests-ntt-successful (bpr)
needs: [should-run, cargo-tests-ntt, cargo-tests-ntt-nightly]
if: ${{ always() }}
runs-on: ubuntu-latest
@@ -151,7 +156,7 @@ jobs:
exit 1
teardown-instance:
name: Teardown instance (cargo-tests-ntt-successful)
name: cargo_test_ntt/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [setup-instance, cargo-tests-ntt-successful]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Check commit and PR compliance
name: Check commit and PR compliance
name: check_commit
on:
pull_request:
@@ -7,7 +7,7 @@ permissions: {}
jobs:
check-commit-pr:
name: Check commit and PR
name: check_commit/check-commit-pr (bpr)
runs-on: ubuntu-latest
permissions:
contents: read


@@ -1,5 +1,5 @@
# Lint and check CI
name: CI Lint and Checks
name: ci_lint
on:
pull_request:
@@ -14,7 +14,7 @@ permissions:
jobs:
lint-check:
name: Lint and checks
name: ci_lint/lint-check (bpr)
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
@@ -42,7 +42,7 @@ jobs:
GH_TOKEN: ${{ env.CHECKOUT_TOKEN }}
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@fc87bb5b5a97953d987372e74478de634726b3e5 # v3.0.25
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@9e9574ef04ea69da568d6249bd69539ccc704e74 # v4.0.0
with:
allowlist: |
slsa-framework/slsa-github-generator
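# SHA pinning references third-party actions by immutable commit instead of a
# movable tag; the allowlist exempts slsa-framework/slsa-github-generator,
# which (per its own docs) must be referenced by version tag.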


@@ -1,4 +1,4 @@
name: Code Coverage
name: code_coverage
env:
CARGO_TERM_COLOR: always
@@ -22,7 +22,7 @@ permissions:
jobs:
setup-instance:
name: Setup instance (code-coverage)
name: code_coverage/setup-instance
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
@@ -38,8 +38,8 @@ jobs:
backend: aws
profile: cpu-small
code-coverage:
name: Code coverage tests
code-coverage-tests:
name: code_coverage/code-coverage-tests
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}_${{ github.event_name }}
@@ -60,7 +60,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
tfhe:
@@ -90,7 +90,7 @@ jobs:
make test_shortint_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
@@ -104,7 +104,7 @@ jobs:
make test_integer_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00
uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
@@ -121,9 +121,9 @@ jobs:
SLACK_MESSAGE: "Code coverage finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (code-coverage)
name: code_coverage/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, code-coverage ]
needs: [ setup-instance, code-coverage-tests ]
runs-on: ubuntu-latest
steps:
- name: Stop instance
@@ -142,4 +142,4 @@ jobs:
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "Instance teardown (code-coverage) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Instance teardown (code-coverage-tests) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"


@@ -1,4 +1,4 @@
name: CSPRNG randomness testing Workflow
name: csprng_randomness_tests
env:
CARGO_TERM_COLOR: always
@@ -26,7 +26,7 @@ permissions:
jobs:
setup-instance:
name: Setup instance (csprng-randomness-tests)
name: csprng_randomness_tests/setup-instance
if: ${{ github.event_name == 'workflow_dispatch' || contains(github.event.label.name, 'approved') }}
runs-on: ubuntu-latest
outputs:
@@ -52,7 +52,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
csprng-randomness-tests:
name: CSPRNG randomness tests
name: csprng_randomness_tests/csprng-randomness-tests
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}
@@ -83,7 +83,7 @@ jobs:
SLACK_MESSAGE: "tfhe-csprng randomness check finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (csprng-randomness-tests)
name: csprng_randomness_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, csprng-randomness-tests ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend on an RTX 4090 machine
name: Cuda - 4090 full tests
name: gpu_4090_tests
env:
CARGO_TERM_COLOR: always
@@ -27,7 +27,7 @@ permissions:
jobs:
cuda-tests-linux:
name: CUDA tests (RTX 4090)
name: gpu_4090_tests/cuda-tests-linux
if: github.event_name == 'workflow_dispatch' ||
contains(github.event.label.name, '4090_test') ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend on an AWS instance
name: Cuda - CPU Memory Checks
name: gpu_code_validation_tests
env:
CARGO_TERM_COLOR: always
@@ -31,7 +31,7 @@ permissions:
jobs:
setup-instance:
name: Setup instance (cuda-tests)
name: gpu_code_validation_tests/setup-instance
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' ||
(github.event.action == 'labeled' && github.event.label.name == 'approved')
@@ -58,7 +58,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA Memory Checks tests
name: gpu_code_validation_tests/cuda-tests-linux
needs: [ setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -105,7 +105,7 @@ jobs:
make test_high_level_api_gpu_valgrind
slack-notify:
name: Slack Notification
name: gpu_code_validation_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -127,7 +127,7 @@ jobs:
SLACK_MESSAGE: "GPU Memory Checks tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
name: gpu_code_validation_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend on an H100 VM on hyperstack
name: Cuda - Fast tests on H100
name: gpu_fast_h100_tests
env:
CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:
jobs:
should-run:
name: gpu_fast_h100_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -45,7 +46,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -66,7 +67,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-h100-tests)
name: gpu_fast_h100_tests/setup-instance
needs: should-run
if: github.event_name != 'pull_request' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -108,7 +109,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA H100 tests
name: gpu_fast_h100_tests/cuda-tests-linux
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -165,7 +166,7 @@ jobs:
BIG_TESTS_INSTANCE=TRUE make test_high_level_api_gpu
slack-notify:
name: Slack Notification
name: gpu_fast_h100_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -187,7 +188,7 @@ jobs:
SLACK_MESSAGE: "Fast H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
name: gpu_fast_h100_tests/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend on an AWS instance
name: Cuda - Fast tests
name: gpu_fast_tests
env:
CARGO_TERM_COLOR: always
@@ -29,6 +29,7 @@ permissions:
jobs:
should-run:
name: gpu_fast_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -44,7 +45,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -65,7 +66,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-tests)
name: gpu_fast_tests/setup-instance
needs: should-run
if: github.event_name == 'workflow_dispatch' ||
needs.should-run.outputs.gpu_test == 'true'
@@ -93,7 +94,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA tests
name: gpu_fast_tests/cuda-tests-linux
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -151,7 +152,7 @@ jobs:
make test_high_level_api_gpu
slack-notify:
name: Slack Notification
name: gpu_fast_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -173,7 +174,7 @@ jobs:
SLACK_MESSAGE: "Base GPU tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
name: gpu_fast_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend on an H100 VM on hyperstack
name: Cuda - Full tests on H100
name: gpu_full_h100_tests
env:
CARGO_TERM_COLOR: always
@@ -20,7 +20,7 @@ permissions: {}
jobs:
setup-instance:
name: Setup instance (cuda-h100-tests)
name: gpu_full_h100_tests/setup-instance
runs-on: ubuntu-latest
outputs:
# Use the permanent remote instance label first, since the on-demand remote instance label output is set before the end of the start-remote-instance step.
@@ -50,7 +50,7 @@ jobs:
echo "runner_group=h100x1" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA H100 tests
name: gpu_full_h100_tests/cuda-tests-linux
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow_ref }}
@@ -102,7 +102,7 @@ jobs:
make test_high_level_api_gpu
slack-notify:
name: Slack Notification
name: gpu_full_h100_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ failure() }}
@@ -115,7 +115,7 @@ jobs:
SLACK_MESSAGE: "Full H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
name: gpu_full_h100_tests/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend on an AWS instance
name: Cuda - Full tests multi-GPU
name: gpu_full_multi_gpu_tests
env:
CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:
jobs:
should-run:
name: gpu_full_multi_gpu_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -45,7 +46,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -66,7 +67,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-tests-multi-gpu)
name: gpu_full_multi_gpu_tests/setup-instance
needs: should-run
if: github.event_name != 'pull_request' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -85,7 +86,7 @@ jobs:
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: multi-gpu-test
profile: 4-l40
# This instance will be spawned especially for pull-request from forked repository
- name: Start GitHub instance
@@ -95,7 +96,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA multi-GPU tests
name: gpu_full_multi_gpu_tests/cuda-tests-linux
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -154,7 +155,7 @@ jobs:
make test_high_level_api_gpu
slack-notify:
name: Slack Notification
name: gpu_full_multi_gpu_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -176,7 +177,7 @@ jobs:
SLACK_MESSAGE: "Multi-GPU tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests-multi-gpu)
name: gpu_full_multi_gpu_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,4 +1,4 @@
name: Cuda - Long Run Tests on GPU
name: gpu_integer_long_run_tests
env:
CARGO_TERM_COLOR: always
@@ -27,7 +27,7 @@ permissions:
jobs:
setup-instance:
name: Setup instance (gpu-tests)
name: gpu_integer_long_run_tests/setup-instance
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
runs-on: ubuntu-latest
@@ -43,10 +43,10 @@ jobs:
slab-url: ${{ secrets.SLAB_BASE_URL }}
job-secret: ${{ secrets.JOB_SECRET }}
backend: hyperstack
profile: multi-gpu-test
profile: 4-l40
cuda-tests:
name: Long run GPU tests
name: gpu_integer_long_run_tests/cuda-tests
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow_ref }}_${{github.event_name}}
@@ -90,7 +90,7 @@ jobs:
fi
slack-notify:
name: Slack Notification
name: gpu_integer_long_run_tests/slack-notify
needs: [ setup-instance, cuda-tests ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests.result != 'skipped' && failure() }}
@@ -103,7 +103,7 @@ jobs:
SLACK_MESSAGE: "Integer GPU long run tests finished with status: ${{ needs.cuda-tests.result }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (gpu-tests)
name: gpu_integer_long_run_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-tests ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend on an AWS instance
name: Cuda - GPU Memory Checks
name: gpu_memory_sanitizer
env:
CARGO_TERM_COLOR: always
@@ -30,7 +30,7 @@ permissions:
jobs:
setup-instance:
name: Setup instance (cuda-tests)
name: gpu_memory_sanitizer/setup-instance
runs-on: ubuntu-latest
if: github.event_name != 'pull_request' ||
(github.event.action == 'labeled' && github.event.label.name == 'approved')
@@ -57,7 +57,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA Memory Checks tests
name: gpu_memory_sanitizer/cuda-tests-linux
needs: [ setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -102,7 +102,7 @@ jobs:
make test_high_level_api_gpu_sanitizer
slack-notify:
name: Slack Notification
name: gpu_memory_sanitizer/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -124,7 +124,7 @@ jobs:
SLACK_MESSAGE: "GPU Memory Checks tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
name: gpu_memory_sanitizer/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Perform tfhe-cuda-backend post-commit checks on an AWS instance
name: Cuda - Post-commit Checks
name: gpu_pcc
env:
CARGO_TERM_COLOR: always
@@ -28,7 +28,7 @@ permissions:
jobs:
setup-instance:
name: Setup instance (cuda-pcc)
name: gpu_pcc/setup-instance
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-remote-instance.outputs.label || steps.start-github-instance.outputs.runner_group }}
@@ -53,7 +53,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-pcc:
name: CUDA post-commit checks
name: gpu_pcc/cuda-pcc (bpr)
needs: setup-instance
concurrency:
group: ${{ github.workflow_ref }}
@@ -149,7 +149,7 @@ jobs:
SLACK_MESSAGE: "CUDA AWS post-commit checks finished with status: ${{ job.status }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-pcc)
name: gpu_pcc/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-pcc ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Signed integer GPU tests on an RTXA6000 VM on hyperstack with classical PBS
name: Cuda - Signed integer tests with classical PBS
name: gpu_signed_integer_classic_tests
env:
CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:
jobs:
should-run:
name: gpu_signed_integer_classic_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -45,7 +46,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -66,7 +67,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-signed-classic-tests)
name: gpu_signed_integer_classic_tests/setup-instance
needs: should-run
if: github.event_name != 'pull_request' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -95,7 +96,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA signed integer tests with classical PBS
name: gpu_signed_integer_classic_tests/cuda-tests-linux
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -137,7 +138,7 @@ jobs:
BIG_TESTS_INSTANCE=TRUE make test_signed_integer_gpu_ci
slack-notify:
name: Slack Notification
name: gpu_signed_integer_classic_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -159,7 +160,7 @@ jobs:
SLACK_MESSAGE: "Integer GPU signed integer tests with classical PBS finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-signed-classic-tests)
name: gpu_signed_integer_classic_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Signed integer GPU tests on an H100 VM on hyperstack
name: Cuda - Signed integer tests on H100
name: gpu_signed_integer_h100_tests
env:
CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:
jobs:
should-run:
name: gpu_signed_integer_h100_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -45,7 +46,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -66,7 +67,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-h100-tests)
name: gpu_signed_integer_h100_tests/setup-instance
needs: should-run
if: github.event_name != 'pull_request' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -108,7 +109,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA H100 signed integer tests
name: gpu_signed_integer_h100_tests/cuda-tests-linux
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -151,7 +152,7 @@ jobs:
BIG_TESTS_INSTANCE=TRUE make test_signed_integer_multi_bit_gpu_ci
slack-notify:
name: Slack Notification
name: gpu_signed_integer_h100_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -173,7 +174,7 @@ jobs:
SLACK_MESSAGE: "Integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
name: gpu_signed_integer_h100_tests/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend signed integer on an AWS instance
name: Cuda - Signed integer tests
name: gpu_signed_integer_tests
env:
CARGO_TERM_COLOR: always
@@ -31,6 +31,7 @@ permissions:
jobs:
should-run:
name: gpu_signed_integer_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -46,7 +47,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -67,7 +68,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-signed-integer-tests)
name: gpu_signed_integer_tests/setup-instance
runs-on: ubuntu-latest
needs: should-run
if: (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
@@ -96,7 +97,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-signed-integer-tests:
name: CUDA signed integer tests
name: gpu_signed_integer_tests/cuda-signed-integer-tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -146,7 +147,7 @@ jobs:
make test_signed_integer_multi_bit_gpu_ci
slack-notify:
name: Slack Notification
name: gpu_signed_integer_tests/slack-notify
needs: [ setup-instance, cuda-signed-integer-tests ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-signed-integer-tests.result != 'skipped' && failure() }}
@@ -168,7 +169,7 @@ jobs:
SLACK_MESSAGE: "Signed GPU tests finished with status: ${{ needs.cuda-signed-integer-tests.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
name: gpu_signed_integer_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-signed-integer-tests ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Test unsigned integers on an RTXA6000 VM on hyperstack with the classical PBS
name: Cuda - Unsigned integer tests with classical PBS
name: gpu_unsigned_integer_classic_tests
env:
CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:
jobs:
should-run:
name: gpu_unsigned_integer_classic_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -45,7 +46,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -66,7 +67,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-unsigned-classic-tests)
name: gpu_unsigned_integer_classic_tests/setup-instance
needs: should-run
if: github.event_name == 'workflow_dispatch' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -95,7 +96,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA unsigned integer tests with classical PBS
name: gpu_unsigned_integer_classic_tests/cuda-tests-linux
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -137,7 +138,7 @@ jobs:
BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_gpu_ci
slack-notify:
name: Slack Notification
name: gpu_unsigned_integer_classic_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -159,7 +160,7 @@ jobs:
SLACK_MESSAGE: "Unsigned integer GPU classic tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-unsigned-classic-tests)
name: gpu_unsigned_integer_classic_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Test unsigned integers on an H100 VM on hyperstack
name: Cuda - Unsigned integer tests on H100
name: gpu_unsigned_integer_h100_tests
env:
CARGO_TERM_COLOR: always
@@ -30,6 +30,7 @@ permissions:
jobs:
should-run:
name: gpu_unsigned_integer_h100_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -45,7 +46,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -66,7 +67,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-h100-tests)
name: gpu_unsigned_integer_h100_tests/setup-instance
needs: should-run
if: github.event_name == 'workflow_dispatch' ||
(github.event.action != 'labeled' && needs.should-run.outputs.gpu_test == 'true') ||
@@ -108,7 +109,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-tests-linux:
name: CUDA H100 unsigned integer tests
name: gpu_unsigned_integer_h100_tests/cuda-tests-linux
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -151,7 +152,7 @@ jobs:
BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_multi_bit_gpu_ci
slack-notify:
name: Slack Notification
name: gpu_unsigned_integer_h100_tests/slack-notify
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-tests-linux.result != 'skipped' && failure() }}
@@ -173,7 +174,7 @@ jobs:
SLACK_MESSAGE: "Unsigned integer GPU H100 tests finished with status: ${{ needs.cuda-tests-linux.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-h100-tests)
name: gpu_unsigned_integer_h100_tests/teardown-instance
if: ${{ always() && needs.setup-instance.outputs.remote-instance-outcome == 'success' }}
needs: [ setup-instance, cuda-tests-linux ]
runs-on: ubuntu-latest


@@ -1,5 +1,5 @@
# Compile and test tfhe-cuda-backend unsigned integer on an AWS instance
name: Cuda - Unsigned integer tests
name: gpu_unsigned_integer_tests
env:
CARGO_TERM_COLOR: always
@@ -31,6 +31,7 @@ permissions:
jobs:
should-run:
name: gpu_unsigned_integer_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -46,7 +47,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
gpu:
@@ -67,7 +68,7 @@ jobs:
- ci/slab.toml
setup-instance:
name: Setup instance (cuda-unsigned-integer-tests)
name: gpu_unsigned_integer_tests/setup-instance
runs-on: ubuntu-latest
needs: should-run
if: (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
@@ -96,7 +97,7 @@ jobs:
echo "runner_group=${EXTERNAL_CONTRIBUTION_RUNNER}" >> "$GITHUB_OUTPUT"
cuda-unsigned-integer-tests:
name: CUDA unsigned integer tests
name: gpu_unsigned_integer_tests/cuda-unsigned-integer-tests
needs: [ should-run, setup-instance ]
if: github.event_name != 'pull_request' ||
(github.event_name == 'pull_request' && needs.setup-instance.result != 'skipped')
@@ -146,7 +147,7 @@ jobs:
make test_unsigned_integer_multi_bit_gpu_ci
slack-notify:
name: Slack Notification
name: gpu_unsigned_integer_tests/slack-notify
needs: [ setup-instance, cuda-unsigned-integer-tests ]
runs-on: ubuntu-latest
if: ${{ always() && needs.cuda-unsigned-integer-tests.result != 'skipped' && failure() }}
@@ -168,7 +169,7 @@ jobs:
SLACK_MESSAGE: "Unsigned integer GPU tests finished with status: ${{ needs.cuda-unsigned-integer-tests.result }}. (${{ env.PULL_REQUEST_MD_LINK }}[action run](${{ env.ACTION_RUN_URL }}))"
teardown-instance:
name: Teardown instance (cuda-tests)
name: gpu_unsigned_integer_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cuda-unsigned-integer-tests ]
runs-on: ubuntu-latest

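The should-run jobs above gate the costly GPU jobs on path filters: tj-actions/changed-files is fed a files_yaml block keyed per backend (gpu, hpu), and downstream jobs only start when the matching output is 'true'. A rough local approximation of that gate, as a sketch (the path list is abbreviated and the output wiring is an assumption, since the workflows are truncated here):

# Sketch: approximate the gpu path filter locally (paths abbreviated)
CHANGED=$(git diff --name-only origin/main...HEAD)
if echo "${CHANGED}" | grep -qE '(^backends/tfhe-cuda-backend/|^ci/slab\.toml$)'; then
  echo "gpu_test=true" >> "${GITHUB_OUTPUT}"   # consumed as needs.should-run.outputs.gpu_test
fi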

@@ -1,5 +1,5 @@
# Test tfhe-fft
name: Cargo Test HLAPI HPU
# Test HPU backend HLAPI layer
name: hpu_hlapi_tests
on:
pull_request:
@@ -21,6 +21,7 @@ permissions: { }
jobs:
should-run:
name: hpu_hlapi_tests/should-run
runs-on: ubuntu-latest
permissions:
pull-requests: read
@@ -36,7 +37,7 @@ jobs:
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0
with:
files_yaml: |
hpu:
@@ -46,6 +47,7 @@ jobs:
- mockups/tfhe-hpu-mockup/**
cargo-tests-hpu:
name: hpu_hlapi_tests/cargo-tests-hpu (bpr)
needs: should-run
if: needs.should-run.outputs.hpu_test == 'true'
runs-on: large_ubuntu_16


@@ -1,4 +1,4 @@
name: AWS Long Run Tests on CPU
name: integer_long_run_tests
env:
CARGO_TERM_COLOR: always
@@ -23,7 +23,7 @@ permissions: {}
jobs:
setup-instance:
name: Setup instance (cpu-tests)
name: integer_long_run_tests/setup-instance
if: github.event_name != 'schedule' ||
(github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs')
runs-on: ubuntu-latest
@@ -42,7 +42,7 @@ jobs:
profile: cpu-big
cpu-tests:
name: Long run CPU tests
name: integer_long_run_tests/cpu-tests
needs: [ setup-instance ]
concurrency:
group: ${{ github.workflow_ref }}_${{github.event_name}}
@@ -74,7 +74,7 @@ jobs:
SLACK_MESSAGE: "CPU long run tests finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (cpu-tests)
name: integer_long_run_tests/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [ setup-instance, cpu-tests ]
runs-on: ubuntu-latest


@@ -1,4 +1,4 @@
name: Tests on M1 CPU
name: m1_tests
on:
workflow_dispatch:
@@ -32,6 +32,7 @@ permissions:
jobs:
cargo-builds-m1:
name: m1_tests/cargo-builds-m1
if: ${{ (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') ||
github.event_name == 'workflow_dispatch' ||
contains(github.event.label.name, 'm1_test') }}
@@ -178,7 +179,7 @@ jobs:
make test_integer_multi_bit_ci
remove_label:
name: Remove m1_test label
name: m1_tests/remove_label
runs-on: ubuntu-latest
needs:
- cargo-builds-m1


@@ -1,172 +0,0 @@
# Publish new release of tfhe-rs on various platforms.
name: Publish release
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
push_to_crates:
description: "Push to crate"
type: boolean
default: true
push_web_package:
description: "Push web js package"
type: boolean
default: true
push_node_package:
description: "Push node js package"
type: boolean
default: true
npm_latest_tag:
description: "Set NPM tag as latest"
type: boolean
default: false
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
NPM_TAG: ""
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
permissions: {}
jobs:
verify_tag:
uses: ./.github/workflows/verify_tagged_commit.yml
secrets:
RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
package:
runs-on: ubuntu-latest
needs: verify_tag
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
run: |
cargo package -p tfhe
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
# Needed to create the provenance via GitHub OIDC
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
publish_release:
name: Publish Release
needs: [package] # for comparing hashes
runs-on: ubuntu-latest
# For provenance of npmjs publish
permissions:
contents: read
id-token: write # also needed for OIDC token exchange on crates.io
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Create NPM version tag
if: ${{ inputs.npm_latest_tag }}
run: |
echo "NPM_TAG=latest" >> "${GITHUB_ENV}"
- name: Download artifact
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
name: crate
path: target/package
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
id: auth
- name: Publish crate.io package
if: ${{ inputs.push_to_crates }}
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
# DRY_RUN expansion cannot be double quoted: when the variable contains an empty string, quoting it
# would make cargo publish fail. This is safe since DRY_RUN is handled in the env section above.
# shellcheck disable=SC2086
cargo publish -p tfhe ${DRY_RUN}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA tfhe crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Build web package
if: ${{ inputs.push_web_package }}
run: |
make build_web_js_api_parallel
- name: Publish web package
if: ${{ inputs.push_web_package }}
uses: JS-DevTools/npm-publish@19c28f1ef146469e409470805ea4279d47c3d35c
with:
token: ${{ secrets.NPM_TOKEN }}
package: tfhe/pkg/package.json
dry-run: ${{ inputs.dry_run }}
tag: ${{ env.NPM_TAG }}
provenance: true
- name: Build Node package
if: ${{ inputs.push_node_package }}
run: |
rm -rf tfhe/pkg
make build_node_js_api
sed -i 's/"tfhe"/"node-tfhe"/g' tfhe/pkg/package.json
- name: Publish Node package
if: ${{ inputs.push_node_package }}
uses: JS-DevTools/npm-publish@19c28f1ef146469e409470805ea4279d47c3d35c
with:
token: ${{ secrets.NPM_TOKEN }}
package: tfhe/pkg/package.json
dry-run: ${{ inputs.dry_run }}
tag: ${{ env.NPM_TAG }}
provenance: true
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe release failed: (${{ env.ACTION_RUN_URL }})"

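The unquoted ${DRY_RUN} in the publish steps above (and in the workflows that follow) is deliberate. A minimal shell sketch of why quoting it would break the no-op case:

DRY_RUN=""                          # dry_run input unchecked
cargo publish -p tfhe ${DRY_RUN}    # word-splits to nothing: cargo publish -p tfhe
cargo publish -p tfhe "${DRY_RUN}"  # would pass a literal empty argument, which cargo rejects

Since DRY_RUN is set in the step's env section to either '--dry-run' or the empty string, the unquoted expansion cannot inject anything else.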

@@ -0,0 +1,143 @@
# Common workflow to make crate release
name: make_release_common
on:
workflow_call:
inputs:
package-name:
type: string
required: true
dry-run:
type: boolean
default: true
secrets:
REPO_CHECKOUT_TOKEN:
required: true
SLACK_CHANNEL:
required: true
BOT_USERNAME:
required: true
SLACK_WEBHOOK:
required: true
ALLOWED_TEAM:
required: true
READ_ORG_TOKEN:
required: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
permissions: {}
jobs:
verify-triggering-actor:
name: make_release_common/verify-triggering-actor
if: startsWith(github.ref, 'refs/tags/')
uses: ./.github/workflows/verify_triggering_actor.yml
secrets:
ALLOWED_TEAM: ${{ secrets.ALLOWED_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
package:
name: make_release_common/package
runs-on: ubuntu-latest
needs: verify-triggering-actor
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
env:
PACKAGE: ${{ inputs.package-name }}
run: |
cargo package -p "${PACKAGE}"
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: crate-${{ inputs.package-name }}
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
name: make_release_common/provenance
if: ${{ !inputs.dry-run }}
needs: package
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
# Needed to create the provenance via GitHub OIDC
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
publish_release:
name: make_release_common/publish-release
needs: package
runs-on: ubuntu-latest
permissions:
# Needed for OIDC token exchange on crates.io
id-token: write
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: crate-${{ inputs.package-name }}
path: target/package
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@041cce5b4b821e6b0ebc9c9c38b58cac4e34dcc2 # v1.0.2
id: auth
- name: Publish crate.io package
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
PACKAGE: ${{ inputs.package-name }}
DRY_RUN: ${{ inputs.dry-run && '--dry-run' || '' }}
run: |
# DRY_RUN expansion cannot be double quoted: when the variable contains an empty string, quoting it
# would make cargo publish fail. This is safe since DRY_RUN is handled in the env section above.
# shellcheck disable=SC2086
cargo publish -p "${PACKAGE}" ${DRY_RUN}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA ${{ inputs.package-name }} - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "${{ inputs.package-name }} release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

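make_release_common packages the crate once, records a base64-encoded SHA-256 of it, and recomputes the hash after publication; a mismatch triggers the Slack alert. The check can be reproduced locally, as a sketch assuming a single .crate file in target/package:

cd target/package
PRE=$(sha256sum ./*.crate | base64 -w0)     # hash recorded by the package job
# ... cargo publish runs here ...
POST=$(sha256sum ./*.crate | base64 -w0)    # hash recomputed by publish_release
[ "${PRE}" = "${POST}" ] || echo "hash mismatch: crate changed between packaging and publish"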

@@ -1,4 +1,4 @@
name: Publish CUDA release
name: make_release_cuda
on:
workflow_dispatch:
@@ -18,15 +18,17 @@ env:
permissions: {}
jobs:
verify_tag:
uses: ./.github/workflows/verify_tagged_commit.yml
verify-triggering-actor:
name: make_release_cuda/verify-triggering-actor
if: startsWith(github.ref, 'refs/tags/')
uses: ./.github/workflows/verify_triggering_actor.yml
secrets:
RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
setup-instance:
name: Setup instance (publish-cuda-release)
needs: verify_tag
name: make_release_cuda/setup-instance
needs: verify-triggering-actor
runs-on: ubuntu-latest
outputs:
runner-name: ${{ steps.start-instance.outputs.label }}
@@ -43,7 +45,7 @@ jobs:
profile: gpu-build
package:
name: Package CUDA Release for provenance
name: make_release_cuda/package
needs: setup-instance
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
outputs:
@@ -99,11 +101,18 @@ jobs:
- name: Prepare package
run: |
cargo package -p tfhe-cuda-backend
- uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
with:
name: crate-tfhe-cuda-backend
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
name: make_release_cuda/provenance
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
@@ -119,7 +128,7 @@ jobs:
base64-subjects: ${{ needs.package.outputs.hash }}
publish-cuda-release:
name: Publish CUDA Release
name: make_release_cuda/publish-cuda-release
needs: [setup-instance, package] # for comparing hashes
runs-on: ${{ needs.setup-instance.outputs.runner-name }}
permissions:
@@ -166,8 +175,14 @@ jobs:
env:
GCC_VERSION: ${{ matrix.gcc }}
- name: Download artifact
uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53 # v6.0.0
with:
name: crate-tfhe-cuda-backend
path: target/package
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
uses: rust-lang/crates-io-auth-action@041cce5b4b821e6b0ebc9c9c38b58cac4e34dcc2 # v1.0.2
id: auth
- name: Publish crate.io package
@@ -201,7 +216,7 @@ jobs:
SLACK_MESSAGE: "tfhe-cuda-backend release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
teardown-instance:
name: Teardown instance (publish-release)
name: make_release_cuda/teardown-instance
if: ${{ always() && needs.setup-instance.result == 'success' }}
needs: [setup-instance, publish-cuda-release]
runs-on: ubuntu-latest


@@ -1,4 +1,4 @@
name: Publish HPU release
name: make_release_hpu
on:
workflow_dispatch:
@@ -18,39 +18,12 @@ env:
permissions: {}
jobs:
verify_tag:
uses: ./.github/workflows/verify_tagged_commit.yml
secrets:
RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
package:
runs-on: ubuntu-latest
needs: verify_tag
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
run: |
cargo package -p tfhe-hpu-backend
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
make-release:
name: make_release_hpu/make-release
uses: ./.github/workflows/make_release_common.yml
with:
package-name: "tfhe-hpu-backend"
dry-run: ${{ inputs.dry_run }}
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -58,55 +31,10 @@ jobs:
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
publish_release:
name: Publish tfhe-hpu-backend Release
runs-on: ubuntu-latest
needs: [verify_tag, package] # for comparing hashes
permissions:
# Needed for OIDC token exchange on crates.io
id-token: write
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
id: auth
- name: Publish crate.io package
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
# DRY_RUN expansion cannot be double quoted: when the variable contains an empty string, quoting it
# would make cargo publish fail. This is safe since DRY_RUN is handled in the env section above.
# shellcheck disable=SC2086
cargo publish -p tfhe-hpu-backend ${DRY_RUN}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA tfhe-hpu-backend crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-hpu-backend release failed: (${{ env.ACTION_RUN_URL }})"
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

.github/workflows/make_release_tfhe.yml

@@ -0,0 +1,124 @@
# Publish new release of tfhe-rs on various platforms.
name: make_release_tfhe
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
push_to_crates:
description: "Push to crate"
type: boolean
default: true
push_web_package:
description: "Push web js package"
type: boolean
default: true
push_node_package:
description: "Push node js package"
type: boolean
default: true
npm_latest_tag:
description: "Set NPM tag as latest"
type: boolean
default: false
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
NPM_TAG: ""
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
permissions: {}
jobs:
make-release:
name: make_release_tfhe/make-release
uses: ./.github/workflows/make_release_common.yml
with:
package-name: "tfhe"
dry-run: ${{ inputs.dry_run }}
permissions:
# Needed to detect the GitHub Actions environment
actions: read
# Needed to create the provenance via GitHub OIDC
id-token: write
# Needed to upload assets/artifacts
contents: write
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
make-release-js:
name: make_release_tfhe/make-release-js
needs: make-release
runs-on: ubuntu-latest
# For provenance of npmjs publish
permissions:
contents: read
id-token: write # also needed for OIDC token exchange on crates.io and npmjs.com
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Create NPM version tag
if: ${{ inputs.npm_latest_tag }}
run: |
echo "NPM_TAG=latest" >> "${GITHUB_ENV}"
- name: Build web package
if: ${{ inputs.push_web_package }}
run: |
make build_web_js_api_parallel
- name: Authenticate on NPM
uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with:
node-version: '22'
registry-url: 'https://registry.npmjs.org'
- name: Publish web package
if: ${{ inputs.push_web_package }}
uses: JS-DevTools/npm-publish@7f8fe47b3bea1be0c3aec2b717c5ec1f3e03410b
with:
package: tfhe/pkg/package.json
dry-run: ${{ inputs.dry_run }}
tag: ${{ env.NPM_TAG }}
provenance: true
- name: Build Node package
if: ${{ inputs.push_node_package }}
run: |
rm -rf tfhe/pkg
make build_node_js_api
sed -i 's/"tfhe"/"node-tfhe"/g' tfhe/pkg/package.json
- name: Publish Node package
if: ${{ inputs.push_node_package }}
uses: JS-DevTools/npm-publish@7f8fe47b3bea1be0c3aec2b717c5ec1f3e03410b
with:
package: tfhe/pkg/package.json
dry-run: ${{ inputs.dry_run }}
tag: ${{ env.NPM_TAG }}
provenance: true
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"

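The Node publication step above rebuilds the wasm output and renames the package with sed before publishing. A sketch of the effect on tfhe/pkg/package.json (the surrounding JSON is illustrative):

sed -i 's/"tfhe"/"node-tfhe"/g' tfhe/pkg/package.json
# before: "name": "tfhe",
# after:  "name": "node-tfhe",

Note the global replace also rewrites any other literal "tfhe" string in the file, which is why the Node build starts from rm -rf tfhe/pkg and a fresh build.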

@@ -1,4 +1,4 @@
name: Publish tfhe-csprng release
name: make_release_tfhe_csprng
on:
workflow_dispatch:
@@ -8,49 +8,15 @@ on:
type: boolean
default: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
permissions: {}
jobs:
verify_tag:
uses: ./.github/workflows/verify_tagged_commit.yml
secrets:
RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
package:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
run: |
cargo package -p tfhe-csprng
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate-tfhe-csprng
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
make-release:
name: make_release_tfhe_csprng/make-release
uses: ./.github/workflows/make_release_common.yml
with:
package-name: "tfhe-csprng"
dry-run: ${{ inputs.dry_run }}
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -58,56 +24,10 @@ jobs:
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
publish_release:
name: Publish tfhe-csprng Release
needs: [verify_tag, package]
runs-on: ubuntu-latest
permissions:
# Needed for OIDC token exchange on crates.io
id-token: write
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
name: crate-tfhe-csprng
path: target/package
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
id: auth
- name: Publish crate.io package
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
# DRY_RUN expansion cannot be double quoted: when the variable contains an empty string, quoting it
# would make cargo publish fail. This is safe since DRY_RUN is handled in the env section above.
# shellcheck disable=SC2086
cargo publish -p tfhe-csprng ${DRY_RUN}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA tfhe-csprng - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-csprng release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}


@@ -1,5 +1,5 @@
# Publish new release of tfhe-fft
name: Publish tfhe-fft release
name: make_release_tfhe_fft
on:
workflow_dispatch:
@@ -19,39 +19,12 @@ env:
permissions: {}
jobs:
verify_tag:
uses: ./.github/workflows/verify_tagged_commit.yml
secrets:
RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
package:
runs-on: ubuntu-latest
needs: verify_tag
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
run: |
cargo package -p tfhe-fft
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
make-release:
name: make_release_tfhe_fft/make-release
uses: ./.github/workflows/make_release_common.yml
with:
package-name: "tfhe-fft"
dry-run: ${{ inputs.dry_run }}
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -59,55 +32,10 @@ jobs:
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
publish_release:
name: Publish tfhe-fft Release
runs-on: ubuntu-latest
needs: [verify_tag, package] # for comparing hashes
permissions:
# Needed for OIDC token exchange on crates.io
id-token: write
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
id: auth
- name: Publish crate.io package
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
# DRY_RUN expansion cannot be double quoted: when the variable contains an empty string, quoting it
# would make cargo publish fail. This is safe since DRY_RUN is handled in the env section above.
# shellcheck disable=SC2086
cargo publish -p tfhe-fft ${DRY_RUN}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA tfhe-fft crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-fft release failed: (${{ env.ACTION_RUN_URL }})"
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}


@@ -1,5 +1,5 @@
# Publish new release of tfhe-ntt
name: Publish tfhe-ntt release
name: make_release_tfhe_ntt
on:
workflow_dispatch:
@@ -19,39 +19,12 @@ env:
permissions: {}
jobs:
verify_tag:
uses: ./.github/workflows/verify_tagged_commit.yml
secrets:
RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
package:
runs-on: ubuntu-latest
needs: verify_tag
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
run: |
cargo package -p tfhe-ntt
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
make-release:
name: make_release_tfhe_ntt/make-release
uses: ./.github/workflows/make_release_common.yml
with:
package-name: "tfhe-ntt"
dry-run: ${{ inputs.dry_run }}
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -59,55 +32,10 @@ jobs:
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
publish_release:
name: Publish tfhe-ntt Release
runs-on: ubuntu-latest
needs: [verify_tag, package] # for comparing hashes
permissions:
# Needed for OIDC token exchange on crates.io
id-token: write
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
id: auth
- name: Publish crate.io package
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
# DRY_RUN expansion cannot be double quoted: when the variable contains an empty string, quoting it
# would make cargo publish fail. This is safe since DRY_RUN is handled in the env section above.
# shellcheck disable=SC2086
cargo publish -p tfhe-ntt ${DRY_RUN}
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA tfhe-ntt crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-ntt release failed: (${{ env.ACTION_RUN_URL }})"
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}


@@ -1,7 +1,12 @@
name: Publish tfhe-versionable release
name: make_release_tfhe_versionable
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
@@ -13,38 +18,34 @@ env:
permissions: {}
jobs:
verify_tag:
uses: ./.github/workflows/verify_tagged_commit.yml
make-release-derive:
name: make_release_tfhe_versionable/make-release-derive
uses: ./.github/workflows/make_release_common.yml
with:
package-name: "tfhe-versionable-derive"
dry-run: ${{ inputs.dry_run }}
permissions:
# Needed to detect the GitHub Actions environment
actions: read
# Needed to create the provenance via GitHub OIDC
id-token: write
# Needed to upload assets/artifacts
contents: write
secrets:
RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
package-derive:
name: Package tfhe-versionable-derive Release
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
run: |
cargo package -p tfhe-versionable-derive
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate-tfhe-versionable-derive
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance-derive:
needs: [package-derive]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
make-release:
name: make_release_tfhe_versionable/make-release
needs: make-release-derive
uses: ./.github/workflows/make_release_common.yml
with:
package-name: "tfhe-versionable"
dry-run: ${{ inputs.dry_run }}
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -52,131 +53,10 @@ jobs:
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package-derive.outputs.hash }}
publish_release-derive:
name: Publish tfhe-versionable-derive Release
needs: [ verify_tag, package-derive ] # for comparing hashes
runs-on: ubuntu-latest
permissions:
# Needed for OIDC token exchange on crates.io
id-token: write
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
name: crate-tfhe-versionable-derive
path: target/package
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
id: auth
- name: Publish crate.io package
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
run: |
cargo publish -p tfhe-versionable-derive
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package-derive.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA tfhe-versionable-derive - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-versionable-derive release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
package:
name: Package tfhe-versionable Release
needs: publish_release-derive
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
run: |
cargo package -p tfhe-versionable
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate-tfhe-versionable
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
needs: package
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
permissions:
# Needed to detect the GitHub Actions environment
actions: read
# Needed to create the provenance via GitHub OIDC
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
publish_release:
name: Publish tfhe-versionable Release
needs: package # for comparing hashes
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
name: crate-tfhe-versionable
path: target/package
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
id: auth
- name: Publish crate.io package
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
run: |
cargo publish -p tfhe-versionable
- name: Generate hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA tfhe-versionable - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-versionable release finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
secrets:
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}


@@ -1,4 +1,4 @@
name: Publish tfhe-zk-pok release
name: make_release_zk_pok
on:
workflow_dispatch:
@@ -15,34 +15,15 @@ env:
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
permissions: {}
permissions: { }
jobs:
package:
runs-on: ubuntu-latest
outputs:
hash: ${{ steps.hash.outputs.hash }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Prepare package
run: |
cargo package -p tfhe-zk-pok
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: crate-zk-pok
path: target/package/*.crate
- name: generate hash
id: hash
run: cd target/package && echo "hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
provenance:
if: ${{ !inputs.dry_run }}
needs: [package]
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
make-release:
name: make_release_zk_pok/make-release
uses: ./.github/workflows/make_release_common.yml
with:
package-name: "tfhe-zk-pok"
dry-run: ${{ inputs.dry_run }}
permissions:
# Needed to detect the GitHub Actions environment
actions: read
@@ -50,61 +31,10 @@ jobs:
id-token: write
# Needed to upload assets/artifacts
contents: write
with:
# SHA-256 hashes of the Crate package.
base64-subjects: ${{ needs.package.outputs.hash }}
verify_tag:
uses: ./.github/workflows/verify_tagged_commit.yml
secrets:
RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
BOT_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
REPO_CHECKOUT_TOKEN: ${{ secrets.REPO_CHECKOUT_TOKEN }}
ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}
publish_release:
name: Publish tfhe-zk-pok Release
needs: [verify_tag, package] # for comparing hashes
runs-on: ubuntu-latest
permissions:
# Needed for OIDC token exchange on crates.io
id-token: write
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: Download artifact
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with:
name: crate-zk-pok
path: target/package
- name: Authenticate on registry
uses: rust-lang/crates-io-auth-action@e919bc7605cde86df457cf5b93c5e103838bd879 # v1.0.1
id: auth
- name: Publish crate.io package
env:
CARGO_REGISTRY_TOKEN: ${{ steps.auth.outputs.token }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
# DRY_RUN expansion cannot be double quoted: when the variable contains an empty string, quoting it
# would make cargo publish fail. This is safe since DRY_RUN is handled in the env section above.
# shellcheck disable=SC2086
cargo publish -p tfhe-zk-pok ${DRY_RUN}
- name: Verify hash
id: published_hash
run: cd target/package && echo "pub_hash=$(sha256sum ./*.crate | base64 -w0)" >> "${GITHUB_OUTPUT}"
- name: Slack notification (hashes comparison)
if: ${{ needs.package.outputs.hash != steps.published_hash.outputs.pub_hash }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: failure
SLACK_MESSAGE: "SLSA tfhe-zk-pok crate - hash comparison failure: (${{ env.ACTION_RUN_URL }})"
- name: Slack Notification
if: ${{ failure() || (cancelled() && github.event_name != 'pull_request') }}
continue-on-error: true
uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661 # v2.3.3
env:
SLACK_COLOR: ${{ job.status }}
SLACK_MESSAGE: "tfhe-zk-pok release failed: (${{ env.ACTION_RUN_URL }})"


@@ -1,5 +1,5 @@
# Perform a security check on all the cryptographic parameter sets
name: Parameters curves security check
name: parameters_check
env:
CARGO_TERM_COLOR: always
@@ -16,6 +16,7 @@ permissions: {}
jobs:
params-curves-security-check:
name: parameters_check/params-curves-security-check
runs-on: large_ubuntu_16-22.04
steps:
- name: Checkout tfhe-rs
@@ -29,7 +30,7 @@ jobs:
with:
repository: malb/lattice-estimator
path: lattice_estimator
ref: '52f4b7a99ae7b5dfd088c5c295070bd38ff0d1e0'
ref: 'e35f45b7976a90a79c3c6625a45bbc344c1abc67'
persist-credentials: 'false'
- name: Install Sage


@@ -1,5 +1,5 @@
# Placeholder workflow file that can be run without having to merge to main first
name: Placeholder Workflow
name: placeholder_workflow
on:
workflow_dispatch:
@@ -8,7 +8,7 @@ permissions: {}
jobs:
placeholder:
name: Placeholder
name: placeholder_workflow/placeholder
runs-on: ubuntu-latest
steps:


@@ -1,5 +1,5 @@
# Sync repos
name: Sync repos
name: sync_on_push
on:
push:
@@ -7,30 +7,62 @@ on:
- 'main'
workflow_dispatch:
permissions: {}
permissions: { }
jobs:
sync-repo:
name: sync_on_push/sync-repo
if: ${{ github.repository == 'zama-ai/tfhe-rs' }}
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
with:
fetch-depth: 0
persist-credentials: 'false'
token: ${{ secrets.REPO_CHECKOUT_TOKEN }}
- name: git-sync
uses: valtech-sd/git-sync@e734cfe9485a92e720eac5af8a4555dde5fecf88
with:
source_repo: "zama-ai/tfhe-rs"
source_branch: "main"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.FHE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_branch: "main"
- name: git-sync tags
uses: wei/git-sync@55c6b63b4f21607da0e9877ca9b4d11a29fc6d83
with:
source_repo: "zama-ai/tfhe-rs"
source_branch: "refs/tags/*"
destination_repo: "https://${{ secrets.BOT_USERNAME }}:${{ secrets.FHE_ACTIONS_TOKEN }}@github.com/${{ secrets.SYNC_DEST_REPO }}"
destination_branch: "refs/tags/*"
env:
SOURCE_REPO: "zama-ai/tfhe-rs"
SOURCE_BRANCH: "main"
DESTINATION_BRANCH: "main"
USERNAME: ${{ secrets.BOT_USERNAME }}
TOKEN: ${{ secrets.SYNC_REPO_TOKEN }}
DEST_REPO: ${{ secrets.SYNC_DEST_REPO }}
run: |
echo ">>> Cloning source repo..."
git lfs install
git clone "https://${USERNAME}:${TOKEN}@github.com/${SOURCE_REPO}.git" ./tfhe-rs --origin source && cd ./tfhe-rs
git remote add destination "https://${USERNAME}:${TOKEN}@github.com/${DEST_REPO}.git"
echo ">>> Fetching all branches references down locally so subsequent commands can see them..."
git fetch source '+refs/heads/*:refs/heads/*' --update-head-ok
echo ">>> Print out all branches"
git --no-pager branch -a -vv
echo ">>> Fetching all LFS items from source..."
git lfs fetch --all source "${SOURCE_BRANCH}"
echo ">>> Pushing git changes..."
git push destination "${SOURCE_BRANCH}:${DESTINATION_BRANCH}" -f
echo ">>> Pushing all LFS items..."
git lfs push --all destination "${DESTINATION_BRANCH}"
- name: git-sync-tags
env:
SOURCE_REPO: "zama-ai/tfhe-rs"
SOURCE_BRANCH: "refs/tags/*"
DESTINATION_BRANCH: "refs/tags/*"
USERNAME: ${{ secrets.BOT_USERNAME }}
TOKEN: ${{ secrets.SYNC_REPO_TOKEN }}
DEST_REPO: ${{ secrets.SYNC_DEST_REPO }}
run: |
echo ">>> Cloning source repo..."
git lfs install
git clone "https://${USERNAME}:${TOKEN}@github.com/${SOURCE_REPO}.git" ./tfhe-rs-tag --origin source && cd ./tfhe-rs-tag
git remote add destination "https://${USERNAME}:${TOKEN}@github.com/${DEST_REPO}.git"
echo ">>> Fetching all branches references down locally so subsequent commands can see them..."
git fetch source '+refs/heads/*:refs/heads/*' --update-head-ok
echo ">>> Print out all branches"
git --no-pager branch -a -vv
echo ">>> Pushing git changes..."
git push destination "${SOURCE_BRANCH}:${DESTINATION_BRANCH}" -f

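The replacement git-sync steps mirror main, all tags, and LFS objects with plain git instead of third-party actions. One way to sanity-check the mirror afterwards, as a sketch (SOURCE_REPO and DEST_REPO as in the workflow; assumes public read access or an authenticated remote):

SRC=$(git ls-remote "https://github.com/${SOURCE_REPO}.git" refs/heads/main | cut -f1)
DST=$(git ls-remote "https://github.com/${DEST_REPO}.git" refs/heads/main | cut -f1)
[ "${SRC}" = "${DST}" ] && echo "mirror up to date" || echo "mirror out of sync"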

@@ -1,4 +1,5 @@
name: 'Close unverified PRs'
# Close unverified PRs
name: unverified_prs
on:
schedule:
- cron: '30 1 * * *'
@@ -7,12 +8,13 @@ permissions: {}
jobs:
stale:
name: unverified_prs/stale
runs-on: ubuntu-latest
permissions:
issues: read
pull-requests: write
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0
with:
stale-pr-message: 'This PR is unverified and has been open for 2 days; it will now be closed. If you want to contribute, please sign the CLA as indicated by the bot.'
days-before-stale: 2


@@ -1,10 +1,10 @@
# Verify a tagged commit
name: Verify tagged commit
# Verify a triggering actor
name: verify_triggering_actor
on:
workflow_call:
secrets:
RELEASE_TEAM:
ALLOWED_TEAM:
required: true
READ_ORG_TOKEN:
required: true
@@ -12,9 +12,9 @@ on:
permissions: {}
jobs:
checks:
check-actor:
name: verify_triggering_actor/check-actor
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
steps:
# Check triggering actor membership
- name: Actor verification
@@ -23,7 +23,7 @@ jobs:
with:
username: ${{ github.triggering_actor }}
org: ${{ github.repository_owner }}
team: ${{ secrets.RELEASE_TEAM }}
team: ${{ secrets.ALLOWED_TEAM }}
github_token: ${{ secrets.READ_ORG_TOKEN }}
- name: Actor authorized


@@ -1,5 +1,5 @@
[workspace]
resolver = "2"
resolver = "3"
members = [
"tfhe",
"tfhe-benchmark",
@@ -24,7 +24,7 @@ exclude = [
]
[workspace.dependencies]
aligned-vec = { version = "0.6", default-features = false }
bytemuck = "1.14.3"
bytemuck = "<1.24"
dyn-stack = { version = "0.11", default-features = false }
itertools = "0.14"
num-complex = "0.4"
@@ -32,7 +32,7 @@ pulp = { version = "0.21", default-features = false }
rand = "0.8"
rayon = "1.11"
serde = { version = "1.0", default-features = false }
wasm-bindgen = "0.2.100"
wasm-bindgen = "0.2.101"
getrandom = "0.2.8"
[profile.bench]
@@ -54,3 +54,10 @@ debug-assertions = false
[workspace.metadata.dylint]
libraries = [{ path = "utils/tfhe-lints" }]
[profile.debug_lto_off]
inherits = "dev"
debug = true
lto = "off"
debug-assertions = false
overflow-checks = false
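Two dependency changes above are worth spelling out: resolver = "3" opts the workspace into the newer Cargo resolver, and bytemuck moves from the caret requirement "1.14.3" (i.e. >=1.14.3, <2.0.0, which would also accept 1.24) to the explicit cap "<1.24". To check what actually resolves under the new bound, a sketch:

cargo update -p bytemuck          # re-resolve within the "<1.24" requirement
cargo tree -i bytemuck            # print the resolved version, e.g. bytemuck v1.23.x, and its dependents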

Makefile

@@ -21,11 +21,13 @@ BENCH_OP_FLAVOR?=DEFAULT
BENCH_TYPE?=latency
BENCH_PARAM_TYPE?=classical
BENCH_PARAMS_SET?=default
BENCH_CUSTOM_COMMAND:=
NODE_VERSION=22.6
BACKWARD_COMPAT_DATA_DIR=utils/tfhe-backward-compat-data
BACKWARD_COMPAT_DATA_GEN_VERSION:=$(TFHE_VERSION)
CURRENT_TFHE_VERSION:=$(shell grep '^version[[:space:]]*=' tfhe/Cargo.toml | cut -d '=' -f 2 | xargs)
WASM_PACK_VERSION="0.13.1"
# We are kind of hacking the cut here, the version cannot contain a quote '"'
WASM_BINDGEN_VERSION:=$(shell grep '^wasm-bindgen[[:space:]]*=' Cargo.toml | cut -d '"' -f 2 | xargs)
WASM_BINDGEN_VERSION:=$(shell cargo tree --target wasm32-unknown-unknown -e all --prefix none | grep "wasm-bindgen v" | head -n 1 | cut -d 'v' -f2)
WEB_RUNNER_DIR=web-test-runner
WEB_SERVER_DIR=tfhe/web_wasm_parallel_tests
# This is done to avoid forgetting it, we still specify the RUSTFLAGS in the commands to be able to
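WASM_BINDGEN_VERSION is now read out of the resolved dependency graph rather than parsed from the requirement string in Cargo.toml, so the installed CLI always matches the library version Cargo actually picked. A sketch of what the pipeline sees (output line illustrative):

cargo tree --target wasm32-unknown-unknown -e all --prefix none | grep "wasm-bindgen v" | head -n 1
# e.g.: wasm-bindgen v0.2.101
# cut -d 'v' -f2 then yields: 0.2.101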
@@ -54,6 +56,7 @@ TFHECUDA_BUILD=$(TFHECUDA_SRC)/build
# tfhe-hpu-backend
HPU_CONFIG=v80
V80_PCIE_DEV?=01
# Exclude these files from coverage reports
define COVERAGE_EXCLUDED_FILES
@@ -71,6 +74,12 @@ define COVERAGE_EXCLUDED_FILES
--exclude-files tfhe/examples/utilities/*
endef
# Prints out the recipe name at the beginning of the execution and prints it again at the end if a failure occurs.
define run_recipe_with_details
@echo "Running recipe: $1"
@$(MAKE) $1 --no-print-directory || { echo "Recipe '$@' failed"; exit 1; }
endef
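run_recipe_with_details wraps a sub-make so CI logs name the recipe at the start and flag the enclosing target on failure (the failure message uses $@, i.e. the caller). A hypothetical transcript (target names invented for illustration):

$ make ci_meta_target              # defined as: $(call run_recipe_with_details,test_foo)
Running recipe: test_foo
...
Recipe 'ci_meta_target' failed     # printed only when the sub-make exits non-zero
make: *** [ci_meta_target] Error 1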
.PHONY: rs_check_toolchain # Echo the rust toolchain used for checks
rs_check_toolchain:
@echo $(RS_CHECK_TOOLCHAIN)
@@ -86,6 +95,14 @@ install_rs_check_toolchain:
( echo "Unable to install $(RS_CHECK_TOOLCHAIN) toolchain, check your rustup installation. \
Rustup can be downloaded at https://rustup.rs/" && exit 1 )
.PHONY: install_rs_latest_nightly_toolchain # Install the nightly toolchain used to build docs using same version as docs.rs
# We don't check that it exists, because we always want the latest
# and the command below will install/update
install_rs_latest_nightly_toolchain:
rustup toolchain install --profile default nightly || \
( echo "Unable to install nightly toolchain, check your rustup installation. \
Rustup can be downloaded at https://rustup.rs/" && exit 1 )
.PHONY: install_rs_build_toolchain # Install the toolchain used for builds
install_rs_build_toolchain:
@( rustup toolchain list | grep -q "$(RS_BUILD_TOOLCHAIN)" && \
@@ -114,10 +131,6 @@ install_cargo_nextest: install_rs_build_toolchain
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install cargo-nextest --locked || \
( echo "Unable to install cargo nextest, unknown error." && exit 1 )
# The installation should use the ^ symbol if the specified version in the root Cargo.toml is of the
# form "0.2.96" then we get ^0.2.96 e.g., as we don't lock those dependencies
# this allows to get the matching CLI
# If a version range is specified no need to add the leading ^
.PHONY: install_wasm_bindgen_cli # Install wasm-bindgen-cli to get access to the test runner
install_wasm_bindgen_cli: install_rs_build_toolchain
cargo +$(RS_BUILD_TOOLCHAIN) install --locked wasm-bindgen-cli --version "$(WASM_BINDGEN_VERSION)"
@@ -160,9 +173,13 @@ install_tarpaulin: install_rs_build_toolchain
( echo "Unable to install cargo tarpaulin, unknown error." && exit 1 )
.PHONY: install_cargo_dylint # Install custom tfhe-rs lints
install_cargo_dylint:
install_cargo_dylint: install_rs_build_toolchain
cargo install --locked cargo-dylint dylint-link
.PHONY: install_cargo_audit # Check dependencies
install_cargo_audit: install_rs_build_toolchain
cargo install --locked cargo-audit
.PHONY: install_typos_checker # Install typos checker
install_typos_checker: install_rs_build_toolchain
@typos --version > /dev/null 2>&1 || \
@@ -498,7 +515,7 @@ clippy_backward_compat_data: install_rs_check_toolchain # the toolchain is selec
@# Some old crates are x86 specific, only run in that case
@if uname -a | grep -q x86; then \
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" -Z unstable-options \
-C $(BACKWARD_COMPAT_DATA_DIR) clippy --all-targets \
-C $(BACKWARD_COMPAT_DATA_DIR) clippy --all --all-targets \
-- --no-deps -D warnings; \
else \
echo "Cannot run clippy for backward compat crate on non x86 platform for now."; \
@@ -545,6 +562,10 @@ tfhe_lints: install_cargo_dylint
RUSTFLAGS="$(RUSTFLAGS)" cargo dylint --all -p tfhe-zk-pok --no-deps -- \
--features=experimental
.PHONY: audit_dependencies # Run cargo audit to check vulnerable dependencies
audit_dependencies: install_rs_build_toolchain install_cargo_audit
cargo audit
.PHONY: build_core # Build core_crypto without experimental features
build_core: install_rs_build_toolchain install_rs_check_toolchain
@@ -693,7 +714,7 @@ test_integer_gpu: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=integer,gpu -p tfhe -- integer::gpu::server_key:: --test-threads=2
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --doc --profile $(CARGO_PROFILE) \
--features=integer,gpu -p tfhe -- integer::gpu::server_key::
--features=integer,gpu -p tfhe -- integer::gpu::server_key:: --test-threads=4
.PHONY: test_integer_gpu_debug # Run the tests of the integer module with Debug flags for CUDA
test_integer_gpu_debug: install_rs_build_toolchain
@@ -741,6 +762,16 @@ test_integer_short_run_gpu: install_rs_check_toolchain install_cargo_nextest
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=integer,gpu -p tfhe -- integer::gpu::server_key::radix::tests_long_run::test_random_op_sequence integer::gpu::server_key::radix::tests_long_run::test_signed_random_op_sequence --test-threads=1 --nocapture
.PHONY: build_debug_integer_short_run_gpu # Run the long run integer tests on the gpu backend
build_debug_integer_short_run_gpu: install_rs_check_toolchain install_cargo_nextest
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test -vv --no-run --profile debug_lto_off \
--features=integer,gpu-debug-fake-multi-gpu -p tfhe
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile debug_lto_off \
--features=integer,gpu-debug-fake-multi-gpu -p tfhe -- integer::gpu::server_key::radix::tests_long_run::test_random_op_sequence::test_gpu_short_random --list
@echo "To debug fake-multi-gpu short run tests run:"
@echo "TFHE_RS_TEST_LONG_TESTS_MINIMAL=TRUE <executable> integer::gpu::server_key::radix::tests_long_run::test_random_op_sequence::test_gpu_short_random_op_sequence_param_gpu_multi_bit_group_4_message_2_carry_2_ks_pbs_tuniform_2m128 --nocapture"
@echo "Where <executable> = the one printed in the () in the 'Running unittests src/lib.rs ()' line above"
.PHONY: test_integer_compression
test_integer_compression: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
@@ -995,6 +1026,21 @@ test_high_level_api_gpu: install_rs_build_toolchain install_cargo_nextest
--test-threads=4 --features=integer,internal-keycache,gpu,zk-pok -p tfhe \
-E "test(/high_level_api::.*gpu.*/)"
test_list_gpu: install_rs_build_toolchain install_cargo_nextest
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) nextest list --cargo-profile $(CARGO_PROFILE) \
--features=integer,internal-keycache,gpu,zk-pok -p tfhe \
-E "test(/.*gpu.*/)"
.PHONY: build_one_hl_api_test_gpu
build_one_hl_api_test_gpu: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --no-run \
--features=integer,gpu-debug -vv -p tfhe -- "$${TEST}" --test-threads=1 --nocapture
.PHONY: build_one_hl_api_test_fake_multi_gpu
build_one_hl_api_test_fake_multi_gpu: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --no-run \
--features=integer,gpu-debug-fake-multi-gpu -vv -p tfhe -- "$${TEST}" --test-threads=1 --nocapture
test_high_level_api_hpu: install_rs_build_toolchain install_cargo_nextest
ifeq ($(HPU_CONFIG), v80)
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) nextest run --cargo-profile $(CARGO_PROFILE) \
@@ -1108,8 +1154,16 @@ test_tfhe_lints: install_cargo_dylint
rustup toolchain install && \
cargo test
# The backward compat data repo holds historical binary data but also rust code to generate and load them.
# Here we use the "patch" functionality of Cargo to make sure the repo used for the data is the same as the one used for the code.
# The backward compat data folder holds historical binary data but also rust code to generate and load them.
.PHONY: gen_backward_compat_data # Re-generate backward compatibility data
gen_backward_compat_data: install_rs_check_toolchain # the toolchain is selected with toolchain.toml
$(BACKWARD_COMPAT_DATA_DIR)/gen_data.sh $(BACKWARD_COMPAT_DATA_GEN_VERSION)
# Instantiate a new backward data crate for the current TFHE-rs version, if it does not already exist
.PHONY: new_backward_compat_crate
new_backward_compat_crate: install_rs_check_toolchain # the toolchain is selected with toolchain.toml
cd $(BACKWARD_COMPAT_DATA_DIR) && cargo run -p add_new_version -- --tfhe-version $(CURRENT_TFHE_VERSION)
.PHONY: test_backward_compatibility_ci
test_backward_compatibility_ci: install_rs_build_toolchain
TFHE_BACKWARD_COMPAT_DATA_DIR="../$(BACKWARD_COMPAT_DATA_DIR)" RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
@@ -1123,7 +1177,7 @@ doc: install_rs_check_toolchain
@# Even though we are not in docs.rs, this allows us to "just" build the doc
DOCS_RS=1 \
RUSTDOCFLAGS="--html-in-header katex-header.html" \
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" doc \
cargo +nightly doc \
--features=boolean,shortint,integer,strings,gpu,internal-keycache,experimental,zk-pok --no-deps -p tfhe
.PHONY: docs # Build rust doc alias for doc
@@ -1175,6 +1229,8 @@ check_compile_tests: install_rs_build_toolchain
--features=experimental,boolean,shortint,integer,internal-keycache \
-p tfhe
.PHONY: check_compile_tests_c_api # Build C API tests without running them
check_compile_tests_c_api: install_rs_build_toolchain
@if [[ "$(OS)" == "Linux" || "$(OS)" == "Darwin" ]]; then \
"$(MAKE)" build_c_api && \
./scripts/c_api_tests.sh --build-only --cargo-profile "$(CARGO_PROFILE)"; \
@@ -1261,6 +1317,7 @@ dieharder_csprng: install_dieharder build_tfhe_csprng
.PHONY: clippy_bench # Run clippy lints on tfhe-benchmark
clippy_bench: install_rs_check_toolchain
! (grep --recursive "trivial" tfhe-benchmark && echo "trivial found in benches")
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
--features=boolean,shortint,integer,internal-keycache,nightly-avx512,pbs-stats,zk-pok \
-p tfhe-benchmark -- --no-deps -D warnings
@@ -1286,65 +1343,73 @@ print_doc_bench_parameters:
bench_integer: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--bench integer \
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
.PHONY: bench_signed_integer # Run benchmarks for signed integer
bench_signed_integer: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-signed-bench \
--bench integer-signed \
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
.PHONY: bench_integer_gpu # Run benchmarks for integer on GPU backend
bench_integer_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
--bench integer \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_signed_integer_gpu # Run benchmarks for signed integer on GPU backend
bench_signed_integer_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-signed-bench \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
--bench integer-signed \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_integer_hpu # Run benchmarks for integer on HPU backend
bench_integer_hpu: install_rs_check_toolchain
source ./setup_hpu.sh --config $(HPU_CONFIG) -p ; \
source ./setup_hpu.sh --config $(HPU_CONFIG); \
export V80_PCIE_DEV=${V80_PCIE_DEV}; \
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=integer,internal-keycache,pbs-stats,hpu,hpu-v80 -p tfhe-benchmark --
--bench integer \
--features=integer,internal-keycache,pbs-stats,hpu,hpu-v80 -p tfhe-benchmark -- --quick
.PHONY: bench_integer_compression # Run benchmarks for unsigned integer compression
bench_integer_compression: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench glwe_packing_compression-integer-bench \
--bench integer-glwe_packing_compression \
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
.PHONY: bench_integer_compression_gpu
bench_integer_compression_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench glwe_packing_compression-integer-bench \
--bench integer-glwe_packing_compression \
--features=integer,internal-keycache,gpu,pbs-stats -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_integer_compression_128b_gpu
bench_integer_compression_128b_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench glwe_packing_compression_128b-integer-bench \
--features=integer,internal-keycache,gpu,pbs-stats -p tfhe-benchmark --
.PHONY: bench_integer_zk_gpu
bench_integer_zk_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench zk-pke-bench \
--features=integer,internal-keycache,gpu,pbs-stats,zk-pok -p tfhe-benchmark --
--bench integer-zk-pke \
--features=integer,internal-keycache,gpu,pbs-stats,zk-pok -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_integer_multi_bit # Run benchmarks for unsigned integer using multi-bit parameters
bench_integer_multi_bit: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--bench integer \
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
.PHONY: bench_signed_integer_multi_bit # Run benchmarks for signed integer using multi-bit parameters
@@ -1352,7 +1417,7 @@ bench_signed_integer_multi_bit: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-signed-bench \
--bench integer-signed \
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
.PHONY: bench_integer_multi_bit_gpu # Run benchmarks for integer on GPU backend using multi-bit parameters
@@ -1360,22 +1425,22 @@ bench_integer_multi_bit_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT \
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
--bench integer \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_signed_integer_multi_bit_gpu # Run benchmarks for signed integer on GPU backend using multi-bit parameters
bench_signed_integer_multi_bit_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=MULTI_BIT \
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-signed-bench \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
--bench integer-signed \
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_integer_zk # Run benchmarks for integer encryption with ZK proofs
bench_integer_zk: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench zk-pke-bench \
--bench integer-zk-pke \
--features=integer,internal-keycache,zk-pok,nightly-avx512,pbs-stats \
-p tfhe-benchmark --
@@ -1383,77 +1448,77 @@ bench_integer_zk: install_rs_check_toolchain
bench_shortint: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench shortint-bench \
--bench shortint \
--features=shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark
.PHONY: bench_shortint_oprf # Run benchmarks for shortint OPRF
bench_shortint_oprf: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench oprf-shortint-bench \
--bench shortint-oprf \
--features=shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark
.PHONY: bench_boolean # Run benchmarks for boolean
bench_boolean: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench boolean-bench \
--bench boolean \
--features=boolean,internal-keycache,nightly-avx512 -p tfhe-benchmark
.PHONY: bench_ks # Run benchmarks for keyswitch
bench_ks: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ks-bench \
--bench core_crypto-ks \
--features=boolean,shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark
.PHONY: bench_ks_gpu # Run benchmarks for keyswitch on GPU backend
bench_ks_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ks-bench \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark
--bench core_crypto-ks \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --profile release_lto_off
.PHONY: bench_pbs # Run benchmarks for PBS
bench_pbs: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs-bench \
--bench core_crypto-pbs \
--features=boolean,shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark
.PHONY: bench_pbs_gpu # Run benchmarks for PBS on GPU backend
bench_pbs_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs-bench \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark
--bench core_crypto-pbs \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --profile release_lto_off
.PHONY: bench_ks_pbs # Run benchmarks for KS-PBS
bench_ks_pbs: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ks-pbs-bench \
--bench core_crypto-ks-pbs \
--features=boolean,shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark
.PHONY: bench_ks_pbs_gpu # Run benchmarks for KS-PBS on GPU backend
bench_ks_pbs_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_PARAM_TYPE=$(BENCH_PARAM_TYPE) __TFHE_RS_PARAMS_SET=$(BENCH_PARAMS_SET) __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ks-pbs-bench \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark
--bench core_crypto-ks-pbs \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --profile release_lto_off
.PHONY: bench_pbs128 # Run benchmarks for PBS using FFT 128 bits
bench_pbs128: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs128-bench \
--bench core_crypto-pbs128 \
--features=boolean,shortint,internal-keycache,nightly-avx512 -p tfhe-benchmark
.PHONY: bench_pbs128_gpu # Run benchmarks for PBS using FFT 128 bits on GPU
bench_pbs128_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs128-bench \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark
--bench core_crypto-pbs128 \
--features=boolean,shortint,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --profile release_lto_off
bench_web_js_api_parallel_chrome: browser_path = "$(WEB_RUNNER_DIR)/chrome/chrome-linux64/chrome"
bench_web_js_api_parallel_chrome: driver_path = "$(WEB_RUNNER_DIR)/chrome/chromedriver-linux64/chromedriver"
@@ -1489,21 +1554,22 @@ bench_web_js_api_parallel_firefox_ci: setup_venv
bench_hlapi: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi \
--features=integer,internal-keycache,nightly-avx512 -p tfhe-benchmark --
--features=integer,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --
.PHONY: bench_hlapi_gpu # Run benchmarks for integer operations on GPU
bench_hlapi_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi \
--features=integer,gpu,internal-keycache,nightly-avx512 -p tfhe-benchmark --
--features=integer,gpu,internal-keycache,nightly-avx512,pbs-stats -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_hlapi_hpu # Run benchmarks for HLAPI operations on HPU
bench_hlapi_hpu: install_rs_check_toolchain
source ./setup_hpu.sh --config $(HPU_CONFIG) -p ; \
source ./setup_hpu.sh --config $(HPU_CONFIG); \
export V80_PCIE_DEV=${V80_PCIE_DEV}; \
RUSTFLAGS="$(RUSTFLAGS)" \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi \
--features=integer,internal-keycache,hpu,hpu-v80 -p tfhe-benchmark --
--features=integer,internal-keycache,hpu,hpu-v80,pbs-stats -p tfhe-benchmark --
.PHONY: bench_hlapi_erc20 # Run benchmarks for ERC20 operations
bench_hlapi_erc20: install_rs_check_toolchain
@@ -1515,7 +1581,7 @@ bench_hlapi_erc20: install_rs_check_toolchain
bench_hlapi_erc20_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi-erc20 \
--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --
--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_hlapi_dex # Run benchmarks for DEX operations
bench_hlapi_dex: install_rs_check_toolchain
@@ -1527,15 +1593,16 @@ bench_hlapi_dex: install_rs_check_toolchain
bench_hlapi_dex_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi-dex \
--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --
--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_hlapi_erc20_hpu # Run benchmarks for ERC20 operations on HPU
bench_hlapi_erc20_hpu: install_rs_check_toolchain
source ./setup_hpu.sh --config $(HPU_CONFIG) -p ; \
source ./setup_hpu.sh --config $(HPU_CONFIG); \
export V80_PCIE_DEV=${V80_PCIE_DEV}; \
RUSTFLAGS="$(RUSTFLAGS)" \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi-erc20 \
--features=integer,internal-keycache,hpu,hpu-v80 -p tfhe-benchmark --
--features=integer,internal-keycache,hpu,hpu-v80,pbs-stats -p tfhe-benchmark --
.PHONY: bench_tfhe_zk_pok # Run benchmarks for the tfhe_zk_pok crate
bench_tfhe_zk_pok: install_rs_check_toolchain
@@ -1554,7 +1621,12 @@ bench_hlapi_noise_squash_gpu: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=$(BENCH_TYPE) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench hlapi-noise-squash \
--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --
--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --profile release_lto_off --
.PHONY: bench_custom # Run benchmarks with a user-defined command
bench_custom: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench -p tfhe-benchmark $(BENCH_CUSTOM_COMMAND)
#
# Utility tools
@@ -1643,22 +1715,100 @@ sha256_bool: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) run --profile $(CARGO_PROFILE) \
--example sha256_bool --features=boolean
.PHONY: pcc # pcc stands for pre commit checks (except GPU)
pcc: no_tfhe_typo no_dbg_log check_parameter_export_ok check_fmt check_typos lint_doc \
check_md_docs_are_tested check_intra_md_links check_doc_paths_use_dash \
clippy_all check_compile_tests test_tfhe_lints \
tfhe_lints
.PHONY: pcc # pcc stands for pre commit checks for CPU compilation
pcc: pcc_batch_1 pcc_batch_2 pcc_batch_3 pcc_batch_4 pcc_batch_5 pcc_batch_6 pcc_batch_7
#
# PCC is split into several batches to speed up CI feedback.
# Each batch has roughly the same execution time.
# Durations were measured on a GitHub Ubuntu large runner with 16 CPUs.
#
.PHONY: pcc_batch_1 # duration: 6'10''
pcc_batch_1:
$(call run_recipe_with_details,no_tfhe_typo)
$(call run_recipe_with_details,no_dbg_log)
$(call run_recipe_with_details,check_parameter_export_ok)
$(call run_recipe_with_details,check_fmt)
$(call run_recipe_with_details,check_typos)
$(call run_recipe_with_details,lint_doc)
$(call run_recipe_with_details,check_md_docs_are_tested)
$(call run_recipe_with_details,check_intra_md_links)
$(call run_recipe_with_details,check_doc_paths_use_dash)
$(call run_recipe_with_details,test_tfhe_lints)
$(call run_recipe_with_details,tfhe_lints)
$(call run_recipe_with_details,clippy_rustdoc)
.PHONY: pcc_batch_2 # duration: 6'10'' (shortest one, extend it with further checks)
pcc_batch_2:
$(call run_recipe_with_details,clippy)
$(call run_recipe_with_details,clippy_all_targets)
$(call run_recipe_with_details,check_fmt_js)
.PHONY: pcc_batch_3 # duration: 6'50''
pcc_batch_3:
$(call run_recipe_with_details,clippy_shortint)
$(call run_recipe_with_details,clippy_integer)
.PHONY: pcc_batch_4 # duration: 7'40''
pcc_batch_4:
$(call run_recipe_with_details,clippy_core)
$(call run_recipe_with_details,clippy_js_wasm_api)
$(call run_recipe_with_details,clippy_ws_tests)
$(call run_recipe_with_details,clippy_bench)
.PHONY: pcc_batch_5 # duration: 7'20''
pcc_batch_5:
$(call run_recipe_with_details,clippy_tfhe_lints)
$(call run_recipe_with_details,check_compile_tests)
$(call run_recipe_with_details,clippy_backward_compat_data)
.PHONY: pcc_batch_6 # duration: 6'32''
pcc_batch_6:
$(call run_recipe_with_details,clippy_boolean)
$(call run_recipe_with_details,clippy_c_api)
$(call run_recipe_with_details,clippy_tasks)
$(call run_recipe_with_details,clippy_tfhe_csprng)
$(call run_recipe_with_details,clippy_zk_pok)
$(call run_recipe_with_details,clippy_trivium)
$(call run_recipe_with_details,clippy_versionable)
$(call run_recipe_with_details,clippy_param_dedup)
$(call run_recipe_with_details,docs)
.PHONY: pcc_batch_7 # duration: 7'50'' (currently the PCC execution bottleneck)
pcc_batch_7:
$(call run_recipe_with_details,check_compile_tests_c_api)
.PHONY: pcc_gpu # pcc stands for pre commit checks for GPU compilation
pcc_gpu: check_rust_bindings_did_not_change clippy_rustdoc_gpu \
clippy_gpu clippy_cuda_backend clippy_bench_gpu check_compile_tests_benches_gpu test_integer_hl_test_gpu_check_warnings
pcc_gpu:
$(call run_recipe_with_details,check_rust_bindings_did_not_change)
$(call run_recipe_with_details,clippy_rustdoc_gpu)
$(call run_recipe_with_details,clippy_gpu)
$(call run_recipe_with_details,clippy_cuda_backend)
$(call run_recipe_with_details,clippy_bench_gpu)
$(call run_recipe_with_details,check_compile_tests_benches_gpu)
$(call run_recipe_with_details,test_integer_hl_test_gpu_check_warnings)
.PHONY: pcc_hpu # pcc stands for pre commit checks for HPU compilation
pcc_hpu: clippy_hpu clippy_hpu_backend clippy_hpu_mockup test_integer_hpu_mockup_ci_fast
pcc_hpu:
$(call run_recipe_with_details,clippy_hpu)
$(call run_recipe_with_details,clippy_hpu_backend)
$(call run_recipe_with_details,clippy_hpu_mockup)
$(call run_recipe_with_details,test_integer_hpu_mockup_ci_fast)
.PHONY: fpcc # pcc stands for pre commit checks, the f stands for fast
fpcc: no_tfhe_typo no_dbg_log check_parameter_export_ok check_fmt check_typos lint_doc \
check_md_docs_are_tested check_intra_md_links check_doc_paths_use_dash clippy_fast check_compile_tests
fpcc:
$(call run_recipe_with_details,no_tfhe_typo)
$(call run_recipe_with_details,no_dbg_log)
$(call run_recipe_with_details,check_parameter_export_ok)
$(call run_recipe_with_details,check_fmt)
$(call run_recipe_with_details,check_typos)
$(call run_recipe_with_details,lint_doc)
$(call run_recipe_with_details,check_md_docs_are_tested)
$(call run_recipe_with_details,check_intra_md_links)
$(call run_recipe_with_details,check_doc_paths_use_dash)
$(call run_recipe_with_details,clippy_fast)
$(call run_recipe_with_details,check_compile_tests)
.PHONY: conformance # Automatically fix formatting issues (newlines, Rust and JS formatting)
conformance: fix_newline fmt fmt_js

View File

@@ -45,7 +45,7 @@ production-ready library for all the advanced features of TFHE.
- **Short integer API** that enables exact, unbounded FHE integer arithmetic with up to 8 bits of message space
- **Size-efficient public key encryption**
- **Ciphertext and server key compression** for efficient data transfer
- **Full Rust API, C bindings to the Rust High-Level API, and client-side Javascript API using WASM**.
- **Full Rust API, C bindings to the Rust High-Level API, and client-side JavaScript API using WASM**.
*Learn more about TFHE-rs features in the [documentation](https://docs.zama.ai/tfhe-rs/readme).*
<br></br>
@@ -79,7 +79,7 @@ tfhe = { version = "*", features = ["boolean", "shortint", "integer"] }
```
> [!Note]
> Note: You need to use Rust version >= 1.84 to compile TFHE-rs.
> Note: You need Rust version 1.84 or newer to compile TFHE-rs. You can check your version with `rustc --version`.
> [!Note]
> Note: AArch64-based machines are not supported on Windows, as that platform currently lacks an entropy source to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in TFHE-rs.
@@ -147,7 +147,7 @@ To run this code, use the following command:
> [!Note]
> Note that when running code that uses `TFHE-rs`, it is highly recommended
to run in release mode with cargo's `--release` flag to have the best performances possible.
to run in release mode with cargo's `--release` flag to have the best performance possible.
*Find an example with more explanations in [this part of the documentation](https://docs.zama.ai/tfhe-rs/get-started/quick-start)*

View File

@@ -13,6 +13,7 @@ extend-ignore-identifiers-re = [
# Example in trivium
"C9217BA0D762ACA1",
"0x[0-9a-fA-F]+",
"xrt_coreutil",
]
[files]

View File

@@ -129,7 +129,7 @@ Other sizes than 64 bit are expected to be available in the future.
# FHE shortint Trivium implementation
The same implementation is also available for generic Ciphertexts representing bits (meant to be used with parameters `V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128`).
The same implementation is also available for generic Ciphertexts representing bits (meant to be used with parameters `V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128`).
It uses a lower-level API of tfhe-rs, so the syntax is slightly different. It also implements the `TransCiphering` trait. For performance reasons, it does not internally run
on the same cryptographic parameters as the high-level API of tfhe-rs. As such, it requires a casting key to switch from one parameter space to the other, which makes
its setup a little more intricate.
@@ -138,9 +138,9 @@ Example code:
```rust
use tfhe::shortint::prelude::*;
use tfhe::shortint::parameters::current_params::{
V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{ConfigBuilder, generate_keys, FheUint64};
use tfhe::prelude::*;
@@ -148,17 +148,17 @@ use tfhe_trivium::TriviumStreamShortint;
fn test_shortint() {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let (client_key, server_key): (ClientKey, ServerKey) = gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();

View File

@@ -1,9 +1,9 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::parameters::current_params::{
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::shortint::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
@@ -11,19 +11,19 @@ use tfhe_trivium::{KreyviumStreamShortint, TransCiphering};
pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
@@ -64,19 +64,19 @@ pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
pub fn kreyvium_shortint_gen(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
@@ -112,19 +112,19 @@ pub fn kreyvium_shortint_gen(c: &mut Criterion) {
pub fn kreyvium_shortint_trans(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();

View File

@@ -1,9 +1,9 @@
use criterion::Criterion;
use tfhe::prelude::*;
use tfhe::shortint::parameters::current_params::{
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::shortint::prelude::*;
use tfhe::{generate_keys, ConfigBuilder, FheUint64};
@@ -11,19 +11,19 @@ use tfhe_trivium::{TransCiphering, TriviumStreamShortint};
pub fn trivium_shortint_warmup(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();
@@ -64,19 +64,19 @@ pub fn trivium_shortint_warmup(c: &mut Criterion) {
pub fn trivium_shortint_gen(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();
@@ -112,19 +112,19 @@ pub fn trivium_shortint_gen(c: &mut Criterion) {
pub fn trivium_shortint_trans(c: &mut Criterion) {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();

View File

@@ -1,9 +1,9 @@
use crate::{KreyviumStream, KreyviumStreamByte, KreyviumStreamShortint, TransCiphering};
use tfhe::prelude::*;
use tfhe::shortint::parameters::current_params::{
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
// Values for these tests come from the github repo renaud1239/Kreyvium,
@@ -221,19 +221,19 @@ use tfhe::shortint::prelude::*;
#[test]
fn kreyvium_test_shortint_long() {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();

View File

@@ -1,9 +1,9 @@
use crate::{TransCiphering, TriviumStream, TriviumStreamByte, TriviumStreamShortint};
use tfhe::prelude::*;
use tfhe::shortint::parameters::current_params::{
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128,
};
use tfhe::{generate_keys, ConfigBuilder, FheBool, FheUint64, FheUint8};
// Values for these tests come from the github repo cantora/avr-crypto-lib, commit 2a5b018,
@@ -357,19 +357,19 @@ use tfhe::shortint::prelude::*;
#[test]
fn trivium_test_shortint_long() {
let config = ConfigBuilder::default()
.use_custom_parameters(V1_4_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.use_custom_parameters(V1_5_PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128)
.build();
let (hl_client_key, hl_server_key) = generate_keys(config);
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
let (client_key, server_key): (ClientKey, ServerKey) =
gen_keys(V1_4_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
gen_keys(V1_5_PARAM_MESSAGE_1_CARRY_1_KS_PBS_GAUSSIAN_2M128);
let ksk = KeySwitchingKey::new(
(&client_key, Some(&server_key)),
(&underlying_ck, &underlying_sk),
V1_4_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
V1_5_PARAM_KEYSWITCH_1_1_KS_PBS_TO_2_2_KS_PBS_GAUSSIAN_2M128,
);
let key_string = "0053A6F94C9FF24598EB".to_string();

View File

@@ -1,6 +1,6 @@
[package]
name = "tfhe-cuda-backend"
version = "0.11.0"
version = "0.12.0"
edition = "2021"
authors = ["Zama team"]
license = "BSD-3-Clause-Clear"
@@ -20,3 +20,4 @@ bindgen = "0.71"
experimental-multi-arch = []
profile = []
debug = []
debug-fake-multi-gpu = []

View File

@@ -48,13 +48,16 @@ fn main() {
// Conditionally pass the "USE_NVTOOLS" variable to CMake if the feature is enabled
if cfg!(feature = "profile") {
cmake_config.define("USE_NVTOOLS", "ON");
println!("cargo:rustc-link-lib=nvToolsExt");
} else {
cmake_config.define("USE_NVTOOLS", "OFF");
}
if cfg!(feature = "debug") {
cmake_config.define("CMAKE_BUILD_TYPE", "Debug");
} else if cfg!(feature = "debug-fake-multi-gpu") {
cmake_config.define("CMAKE_BUILD_TYPE", "DebugOnlyCpu");
cmake_config.define("CMAKE_VERBOSE_MAKEFILE", "ON");
cmake_config.define("FAKE_MULTI_GPU", "ON");
}
// Build the CMake project
@@ -81,6 +84,7 @@ fn main() {
"cuda/include/ciphertext.h",
"cuda/include/integer/compression/compression.h",
"cuda/include/integer/integer.h",
"cuda/include/aes/aes.h",
"cuda/include/zk/zk.h",
"cuda/include/keyswitch/keyswitch.h",
"cuda/include/keyswitch/ks_enums.h",

View File

@@ -86,6 +86,10 @@ if(CMAKE_BUILD_TYPE_LOWERCASE STREQUAL "debug")
message("Compiling in Debug mode")
add_definitions(-DDEBUG)
set(OPTIMIZATION_FLAGS "${OPTIMIZATION_FLAGS} -O0 -G -g")
set(USE_NVTOOLS 1)
elseif(CMAKE_BUILD_TYPE_LOWERCASE STREQUAL "debugonlycpu")
message("Compiling GPU kernels in Release and CPU code in Debug")
set(OPTIMIZATION_FLAGS "${OPTIMIZATION_FLAGS} -O0 -g")
else()
# Release mode
message("Compiling in Release mode")
@@ -98,6 +102,11 @@ if(${USE_NVTOOLS})
add_definitions(-DUSE_NVTOOLS)
endif()
if(${FAKE_MULTI_GPU})
message(STATUS "Fake multi-gpu debugging is enabled")
add_definitions(-DDEBUG_FAKE_MULTI_GPU)
endif()
# In production, use -arch=sm_70 and --ptxas-options=-v to see register spills, -lineinfo for better
# debugging, and -lnvToolsExt to use nvtx when profiling
set(CMAKE_CUDA_FLAGS

View File

@@ -0,0 +1,44 @@
#ifndef AES_H
#define AES_H
#include "../integer/integer.h"
extern "C" {
uint64_t scratch_cuda_integer_aes_encrypt_64(
CudaStreamsFFI streams, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t lwe_dimension, uint32_t ks_level,
uint32_t ks_base_log, uint32_t pbs_level, uint32_t pbs_base_log,
uint32_t grouping_factor, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, bool allocate_gpu_memory,
PBS_MS_REDUCTION_T noise_reduction_type, uint32_t num_aes_inputs,
uint32_t sbox_parallelism);
void cuda_integer_aes_ctr_encrypt_64(CudaStreamsFFI streams,
CudaRadixCiphertextFFI *output,
CudaRadixCiphertextFFI const *iv,
CudaRadixCiphertextFFI const *round_keys,
const uint64_t *counter_bits_le_all_blocks,
uint32_t num_aes_inputs, int8_t *mem_ptr,
void *const *bsks, void *const *ksks);
void cleanup_cuda_integer_aes_encrypt_64(CudaStreamsFFI streams,
int8_t **mem_ptr_void);
uint64_t scratch_cuda_integer_key_expansion_64(
CudaStreamsFFI streams, int8_t **mem_ptr, uint32_t glwe_dimension,
uint32_t polynomial_size, uint32_t lwe_dimension, uint32_t ks_level,
uint32_t ks_base_log, uint32_t pbs_level, uint32_t pbs_base_log,
uint32_t grouping_factor, uint32_t message_modulus, uint32_t carry_modulus,
PBS_TYPE pbs_type, bool allocate_gpu_memory,
PBS_MS_REDUCTION_T noise_reduction_type);
void cuda_integer_key_expansion_64(CudaStreamsFFI streams,
CudaRadixCiphertextFFI *expanded_keys,
CudaRadixCiphertextFFI const *key,
int8_t *mem_ptr, void *const *bsks,
void *const *ksks);
void cleanup_cuda_integer_key_expansion_64(CudaStreamsFFI streams,
int8_t **mem_ptr_void);
}
#endif

View File

@@ -0,0 +1,445 @@
#ifndef AES_UTILITIES
#define AES_UTILITIES
#include "../integer/integer_utilities.h"
/**
* This structure holds pre-computed LUTs for essential bitwise operations
* required by the homomorphic AES circuit. Pre-computing these tables allows
* for efficient application of non-linear functions like AND during the PBS
* process. It includes LUTs for:
* - AND: for the non-linear part of the S-Box.
* - FLUSH: to clear carry bits and isolate the message bit (x -> x & 1).
* - CARRY: to extract the carry bit for additions (x -> (x >> 1) & 1).
*/
template <typename Torus> struct int_aes_lut_buffers {
int_radix_lut<Torus> *and_lut;
int_radix_lut<Torus> *flush_lut;
int_radix_lut<Torus> *carry_lut;
int_aes_lut_buffers(CudaStreams streams, const int_radix_params &params,
bool allocate_gpu_memory, uint32_t num_aes_inputs,
uint32_t sbox_parallelism, uint64_t &size_tracker) {
constexpr uint32_t AES_STATE_BITS = 64;
constexpr uint32_t SBOX_MAX_AND_GATES = 18;
this->and_lut = new int_radix_lut<Torus>(
streams, params, 1,
SBOX_MAX_AND_GATES * num_aes_inputs * sbox_parallelism,
allocate_gpu_memory, size_tracker);
std::function<Torus(Torus, Torus)> and_lambda =
[](Torus a, Torus b) -> Torus { return a & b; };
generate_device_accumulator_bivariate<Torus>(
streams.stream(0), streams.gpu_index(0), this->and_lut->get_lut(0, 0),
this->and_lut->get_degree(0), this->and_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, and_lambda, allocate_gpu_memory);
auto active_streams_and_lut = streams.active_gpu_subset(
SBOX_MAX_AND_GATES * num_aes_inputs * sbox_parallelism);
this->and_lut->broadcast_lut(active_streams_and_lut);
this->flush_lut = new int_radix_lut<Torus>(
streams, params, 1, AES_STATE_BITS * num_aes_inputs,
allocate_gpu_memory, size_tracker);
std::function<Torus(Torus)> flush_lambda = [](Torus x) -> Torus {
return x & 1;
};
generate_device_accumulator(
streams.stream(0), streams.gpu_index(0), this->flush_lut->get_lut(0, 0),
this->flush_lut->get_degree(0), this->flush_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, flush_lambda, allocate_gpu_memory);
auto active_streams_flush_lut =
streams.active_gpu_subset(AES_STATE_BITS * num_aes_inputs);
this->flush_lut->broadcast_lut(active_streams_flush_lut);
this->carry_lut = new int_radix_lut<Torus>(
streams, params, 1, num_aes_inputs, allocate_gpu_memory, size_tracker);
std::function<Torus(Torus)> carry_lambda = [](Torus x) -> Torus {
return (x >> 1) & 1;
};
generate_device_accumulator(
streams.stream(0), streams.gpu_index(0), this->carry_lut->get_lut(0, 0),
this->carry_lut->get_degree(0), this->carry_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, carry_lambda, allocate_gpu_memory);
auto active_streams_carry_lut = streams.active_gpu_subset(num_aes_inputs);
this->carry_lut->broadcast_lut(active_streams_carry_lut);
}
void release(CudaStreams streams) {
this->and_lut->release(streams);
delete this->and_lut;
this->and_lut = nullptr;
this->flush_lut->release(streams);
delete this->flush_lut;
this->flush_lut = nullptr;
this->carry_lut->release(streams);
delete this->carry_lut;
this->carry_lut = nullptr;
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
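// --- Editor's illustration (plaintext sketch, not part of the backend) -----
// What the three LUTs above compute, in the clear, on a block that stores
// (carry << 1) | message after adding two 1-bit messages; uint64_t stands in
// for the Torus value.
constexpr uint64_t plain_and(uint64_t a, uint64_t b) { return a & b; }
constexpr uint64_t plain_flush(uint64_t x) { return x & 1; }
constexpr uint64_t plain_carry(uint64_t x) { return (x >> 1) & 1; }
// Adding two set bits yields the value 2: FLUSH recovers the sum bit 0 and
// CARRY recovers the carry bit 1, exactly as in a plaintext full adder.
static_assert(plain_flush(1 + 1) == 0 && plain_carry(1 + 1) == 1 &&
                  plain_and(1, 1) == 1,
              "LUT sketch sanity check");
// ---------------------------------------------------------------------------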
/**
* The operations within an AES round, particularly MixColumns, require
* intermediate storage for calculations. These buffers are designed to hold
* temporary values like copies of columns or the results of multiplications,
* avoiding overwriting data that is still needed in the same round.
*/
template <typename Torus> struct int_aes_round_workspaces {
CudaRadixCiphertextFFI *mix_columns_col_copy_buffer;
CudaRadixCiphertextFFI *mix_columns_mul_workspace_buffer;
CudaRadixCiphertextFFI *vec_tmp_bit_buffer;
int_aes_round_workspaces(CudaStreams streams, const int_radix_params &params,
bool allocate_gpu_memory, uint32_t num_aes_inputs,
uint64_t &size_tracker) {
constexpr uint32_t BITS_PER_BYTE = 8;
constexpr uint32_t BYTES_PER_COLUMN = 4;
constexpr uint32_t BITS_PER_COLUMN = BITS_PER_BYTE * BYTES_PER_COLUMN;
constexpr uint32_t MIX_COLUMNS_MUL_WORKSPACE_BYTES = BYTES_PER_COLUMN + 1;
this->mix_columns_col_copy_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0),
this->mix_columns_col_copy_buffer, BITS_PER_COLUMN * num_aes_inputs,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
this->mix_columns_mul_workspace_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0),
this->mix_columns_mul_workspace_buffer,
MIX_COLUMNS_MUL_WORKSPACE_BYTES * BITS_PER_BYTE * num_aes_inputs,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
this->vec_tmp_bit_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->vec_tmp_bit_buffer,
num_aes_inputs, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
}
void release(CudaStreams streams, bool allocate_gpu_memory) {
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->mix_columns_col_copy_buffer,
allocate_gpu_memory);
delete this->mix_columns_col_copy_buffer;
this->mix_columns_col_copy_buffer = nullptr;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->mix_columns_mul_workspace_buffer,
allocate_gpu_memory);
delete this->mix_columns_mul_workspace_buffer;
this->mix_columns_mul_workspace_buffer = nullptr;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->vec_tmp_bit_buffer,
allocate_gpu_memory);
delete this->vec_tmp_bit_buffer;
this->vec_tmp_bit_buffer = nullptr;
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
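// --- Editor's illustration (plaintext sketch, not part of the backend) -----
// The MixColumns step these workspaces stage, in the clear: each 4-byte
// column is multiplied by the circulant matrix [2 3 1 1] over GF(2^8).
// Homomorphically, every XOR below becomes an addition of encrypted bits
// followed by the FLUSH/CARRY cleanup, hence the column-copy and
// multiplication workspaces above.
inline uint8_t plain_xtime(uint8_t b) { // multiply by {02} in GF(2^8)
  return static_cast<uint8_t>((b << 1) ^ ((b & 0x80) ? 0x1B : 0x00));
}
inline void plain_mix_column(uint8_t c[4]) {
  const uint8_t a0 = c[0], a1 = c[1], a2 = c[2], a3 = c[3];
  const uint8_t t = a0 ^ a1 ^ a2 ^ a3; // XOR of the whole column
  c[0] ^= t ^ plain_xtime(static_cast<uint8_t>(a0 ^ a1)); // 2*a0^3*a1^a2^a3
  c[1] ^= t ^ plain_xtime(static_cast<uint8_t>(a1 ^ a2));
  c[2] ^= t ^ plain_xtime(static_cast<uint8_t>(a2 ^ a3));
  c[3] ^= t ^ plain_xtime(static_cast<uint8_t>(a3 ^ a0));
}
// ---------------------------------------------------------------------------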
/**
* In CTR mode, a counter is homomorphically added to the encrypted IV. This
* structure holds the necessary buffers for this 128-bit ripple-carry
* addition, such as the buffer (`vec_tmp_carry_buffer`) that propagates the
* carry bit across the addition chain.
*/
template <typename Torus> struct int_aes_counter_workspaces {
CudaRadixCiphertextFFI *vec_tmp_carry_buffer;
CudaRadixCiphertextFFI *vec_tmp_sum_buffer;
CudaRadixCiphertextFFI *vec_trivial_b_bits_buffer;
Torus *h_counter_bits_buffer;
Torus *d_counter_bits_buffer;
int_aes_counter_workspaces(CudaStreams streams,
const int_radix_params &params,
bool allocate_gpu_memory, uint32_t num_aes_inputs,
uint64_t &size_tracker) {
this->vec_tmp_carry_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->vec_tmp_carry_buffer,
num_aes_inputs, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
this->vec_tmp_sum_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->vec_tmp_sum_buffer,
num_aes_inputs, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
this->vec_trivial_b_bits_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0),
this->vec_trivial_b_bits_buffer, num_aes_inputs,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
this->h_counter_bits_buffer =
(Torus *)malloc(num_aes_inputs * sizeof(Torus));
size_tracker += num_aes_inputs * sizeof(Torus);
this->d_counter_bits_buffer = (Torus *)cuda_malloc_with_size_tracking_async(
num_aes_inputs * sizeof(Torus), streams.stream(0), streams.gpu_index(0),
size_tracker, allocate_gpu_memory);
}
void release(CudaStreams streams, bool allocate_gpu_memory) {
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->vec_tmp_carry_buffer,
allocate_gpu_memory);
delete this->vec_tmp_carry_buffer;
this->vec_tmp_carry_buffer = nullptr;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->vec_tmp_sum_buffer,
allocate_gpu_memory);
delete this->vec_tmp_sum_buffer;
this->vec_tmp_sum_buffer = nullptr;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->vec_trivial_b_bits_buffer,
allocate_gpu_memory);
delete this->vec_trivial_b_bits_buffer;
this->vec_trivial_b_bits_buffer = nullptr;
if (allocate_gpu_memory) {
cuda_drop_async(this->d_counter_bits_buffer, streams.stream(0),
streams.gpu_index(0));
}
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
free(this->h_counter_bits_buffer);
}
};
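// --- Editor's illustration (plaintext sketch, not part of the backend) -----
// The 128-bit ripple-carry addition these workspaces support, in the clear:
// add known counter bits into the IV bits, least-significant bit first, with
// a single propagated carry (the role vec_tmp_carry_buffer plays
// homomorphically; the FLUSH and CARRY LUTs perform the two extractions).
inline void plain_ripple_add(uint8_t iv_bits[128],
                             const uint8_t counter_bits[128]) {
  uint8_t carry = 0;
  for (int i = 0; i < 128; ++i) {
    const uint8_t sum =
        static_cast<uint8_t>(iv_bits[i] + counter_bits[i] + carry); // 0..3
    iv_bits[i] = sum & 1;   // FLUSH: keep the sum bit
    carry = (sum >> 1) & 1; // CARRY: propagate to the next bit
  }
}
// ---------------------------------------------------------------------------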
/**
* This structure allocates the most significant memory blocks:
* - `sbox_internal_workspace`: A large workspace for the complex, parallel
* evaluation of the S-Box circuit.
* - `main_bitsliced_states_buffer`: Holds the entire set of AES states in a
* bitsliced layout, which is optimal for parallel bitwise operations on the
* GPU.
* - Other buffers are used for data layout transformations (transposition) and
* for batching small operations into larger, more efficient launches.
*/
template <typename Torus> struct int_aes_main_workspaces {
CudaRadixCiphertextFFI *sbox_internal_workspace;
CudaRadixCiphertextFFI *initial_states_and_jit_key_workspace;
CudaRadixCiphertextFFI *main_bitsliced_states_buffer;
CudaRadixCiphertextFFI *tmp_tiled_key_buffer;
CudaRadixCiphertextFFI *batch_processing_buffer;
int_aes_main_workspaces(CudaStreams streams, const int_radix_params &params,
bool allocate_gpu_memory, uint32_t num_aes_inputs,
uint32_t sbox_parallelism, uint64_t &size_tracker) {
constexpr uint32_t AES_STATE_BITS = 64;
constexpr uint32_t SBOX_MAX_AND_GATES = 18;
constexpr uint32_t BATCH_BUFFER_OPERANDS = 3;
this->sbox_internal_workspace = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->sbox_internal_workspace,
num_aes_inputs * AES_STATE_BITS * sbox_parallelism,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
this->initial_states_and_jit_key_workspace = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0),
this->initial_states_and_jit_key_workspace,
num_aes_inputs * AES_STATE_BITS, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
this->main_bitsliced_states_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0),
this->main_bitsliced_states_buffer, num_aes_inputs * AES_STATE_BITS,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
this->tmp_tiled_key_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->tmp_tiled_key_buffer,
num_aes_inputs * AES_STATE_BITS, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
this->batch_processing_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->batch_processing_buffer,
num_aes_inputs * SBOX_MAX_AND_GATES * BATCH_BUFFER_OPERANDS *
sbox_parallelism,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
}
void release(CudaStreams streams, bool allocate_gpu_memory) {
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->sbox_internal_workspace,
allocate_gpu_memory);
delete this->sbox_internal_workspace;
this->sbox_internal_workspace = nullptr;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->initial_states_and_jit_key_workspace,
allocate_gpu_memory);
delete this->initial_states_and_jit_key_workspace;
this->initial_states_and_jit_key_workspace = nullptr;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->main_bitsliced_states_buffer,
allocate_gpu_memory);
delete this->main_bitsliced_states_buffer;
this->main_bitsliced_states_buffer = nullptr;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->tmp_tiled_key_buffer,
allocate_gpu_memory);
delete this->tmp_tiled_key_buffer;
this->tmp_tiled_key_buffer = nullptr;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->batch_processing_buffer,
allocate_gpu_memory);
delete this->batch_processing_buffer;
this->batch_processing_buffer = nullptr;
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
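// --- Editor's illustration (plaintext sketch, not part of the backend) -----
// What the bitsliced layout of main_bitsliced_states_buffer means: instead of
// keeping all the bits of one state together (state-major), slice j gathers
// bit j of every state (bit-major), so one batched gate applies to that bit
// position across all AES inputs in a single launch.
inline void plain_bitslice(const uint8_t *state_major, uint8_t *bit_major,
                           size_t num_states, size_t bits_per_state) {
  for (size_t s = 0; s < num_states; ++s)
    for (size_t j = 0; j < bits_per_state; ++j)
      bit_major[j * num_states + s] = state_major[s * bits_per_state + j];
}
// ---------------------------------------------------------------------------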
/**
* This structure acts as a container, holding instances of all the other buffer
* management structs. It provides a
* single object to manage the entire lifecycle of memory needed for a complete
* AES-CTR encryption operation.
*/
template <typename Torus> struct int_aes_encrypt_buffer {
int_radix_params params;
bool allocate_gpu_memory;
uint32_t num_aes_inputs;
uint32_t sbox_parallel_instances;
int_aes_lut_buffers<Torus> *luts;
int_aes_round_workspaces<Torus> *round_workspaces;
int_aes_counter_workspaces<Torus> *counter_workspaces;
int_aes_main_workspaces<Torus> *main_workspaces;
int_aes_encrypt_buffer(CudaStreams streams, const int_radix_params &params,
bool allocate_gpu_memory, uint32_t num_aes_inputs,
uint32_t sbox_parallelism, uint64_t &size_tracker) {
PANIC_IF_FALSE(num_aes_inputs >= 1,
"num_aes_inputs should be greater than or equal to 1");
this->params = params;
this->allocate_gpu_memory = allocate_gpu_memory;
this->num_aes_inputs = num_aes_inputs;
this->sbox_parallel_instances = sbox_parallelism;
this->luts = new int_aes_lut_buffers<Torus>(
streams, params, allocate_gpu_memory, num_aes_inputs, sbox_parallelism,
size_tracker);
this->round_workspaces = new int_aes_round_workspaces<Torus>(
streams, params, allocate_gpu_memory, num_aes_inputs, size_tracker);
this->counter_workspaces = new int_aes_counter_workspaces<Torus>(
streams, params, allocate_gpu_memory, num_aes_inputs, size_tracker);
this->main_workspaces = new int_aes_main_workspaces<Torus>(
streams, params, allocate_gpu_memory, num_aes_inputs, sbox_parallelism,
size_tracker);
}
void release(CudaStreams streams) {
luts->release(streams);
delete luts;
luts = nullptr;
round_workspaces->release(streams, allocate_gpu_memory);
delete round_workspaces;
round_workspaces = nullptr;
counter_workspaces->release(streams, allocate_gpu_memory);
delete counter_workspaces;
counter_workspaces = nullptr;
main_workspaces->release(streams, allocate_gpu_memory);
delete main_workspaces;
main_workspaces = nullptr;
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
/**
* This structure holds the buffer for the 44 words of the expanded key
* and temporary storage for word manipulations.
* It contains its own instance of `int_aes_encrypt_buffer` because the
* key expansion algorithm itself requires using the S-Box.
* This separation ensures that memory for key expansion can be allocated and
* freed independently of the main encryption process.
*/
template <typename Torus> struct int_key_expansion_buffer {
int_radix_params params;
bool allocate_gpu_memory;
CudaRadixCiphertextFFI *words_buffer;
CudaRadixCiphertextFFI *tmp_word_buffer;
CudaRadixCiphertextFFI *tmp_rotated_word_buffer;
int_aes_encrypt_buffer<Torus> *aes_encrypt_buffer;
int_key_expansion_buffer(CudaStreams streams, const int_radix_params &params,
bool allocate_gpu_memory, uint64_t &size_tracker) {
this->params = params;
this->allocate_gpu_memory = allocate_gpu_memory;
constexpr uint32_t TOTAL_WORDS = 22;
constexpr uint32_t BITS_PER_WORD = 32;
constexpr uint32_t TOTAL_BITS = TOTAL_WORDS * BITS_PER_WORD;
this->words_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->words_buffer, TOTAL_BITS,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
this->tmp_word_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->tmp_word_buffer,
BITS_PER_WORD, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
this->tmp_rotated_word_buffer = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), this->tmp_rotated_word_buffer,
BITS_PER_WORD, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
this->aes_encrypt_buffer = new int_aes_encrypt_buffer<Torus>(
streams, params, allocate_gpu_memory, 1, 4, size_tracker);
}
void release(CudaStreams streams) {
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->words_buffer, allocate_gpu_memory);
delete this->words_buffer;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->tmp_word_buffer, allocate_gpu_memory);
delete this->tmp_word_buffer;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
this->tmp_rotated_word_buffer,
allocate_gpu_memory);
delete this->tmp_rotated_word_buffer;
this->aes_encrypt_buffer->release(streams);
delete this->aes_encrypt_buffer;
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
#endif
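For reference, the FIPS-197 key schedule for AES-128 expands the 4-word key into Nb x (Nr + 1) = 4 x 11 = 44 32-bit words (one round key per round plus the initial whitening key); the buffer above sizes its storage with its own `TOTAL_WORDS` constant. A standalone cleartext sketch of that arithmetic, not taken from the diff:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Nb = 4;  // state columns (32-bit words) for AES
  const uint32_t Nr = 10; // rounds for AES-128
  // One round key per round, plus the initial whitening key.
  const uint32_t expanded_words = Nb * (Nr + 1);
  assert(expanded_words == 44);
  const uint32_t expanded_bits = expanded_words * 32;
  assert(expanded_bits == 1408);
  return 0;
}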

View File

@@ -4,9 +4,7 @@
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cuda_runtime.h>
#include <vector>
extern "C" {
@@ -141,4 +139,5 @@ bool cuda_check_support_thread_block_clusters();
template <typename Torus>
void cuda_set_value_async(cudaStream_t stream, uint32_t gpu_index,
Torus *d_array, Torus value, Torus n);
#endif

View File

@@ -4,6 +4,8 @@
#include <variant>
#include <vector>
#include "integer/integer.h"
extern std::mutex m;
extern bool p2p_enabled;
extern const int THRESHOLD_MULTI_GPU;
@@ -37,10 +39,237 @@ get_variant_element(const std::variant<std::vector<Torus>, Torus> &variant,
}
}
int get_active_gpu_count(int num_inputs, int gpu_count);
uint32_t get_active_gpu_count(uint32_t num_inputs, uint32_t gpu_count);
int get_num_inputs_on_gpu(int total_num_inputs, int gpu_index, int gpu_count);
int get_gpu_offset(int total_num_inputs, int gpu_index, int gpu_count);
// A set of GPU streams and their associated GPUs.
// Can be constructed from the FFI struct CudaStreamsFFI, which is only
// used to pass the streams/GPUs across the Rust/C interface.
// This class should only be constructed from the FFI struct, through
// class methods, or through the copy constructor. The class can also be
// constructed as an empty set.
struct CudaStreams {
private:
cudaStream_t const *_streams;
uint32_t const *_gpu_indexes;
uint32_t _gpu_count;
bool _owns_streams;
// Prevent the construction of a CudaStreams class from user-code
CudaStreams(cudaStream_t const *streams, uint32_t const *gpu_indexes,
uint32_t gpu_count)
: _streams(streams), _gpu_indexes(gpu_indexes), _gpu_count(gpu_count),
_owns_streams(false) {}
public:
// Construct an empty set. Invalid use of an empty set should raise an error
// right away through asserts or because of a nullptr dereference
CudaStreams()
: _streams(nullptr), _gpu_indexes(nullptr), _gpu_count((uint32_t)-1),
_owns_streams(false) {}
// Returns a subset of this set as an active subset. An active subset is one
// that is temporarily used to perform some computation
CudaStreams active_gpu_subset(int num_radix_blocks) {
return CudaStreams(_streams, _gpu_indexes,
get_active_gpu_count(num_radix_blocks, _gpu_count));
}
// Returns a CudaStreams struct containing only the ith stream
CudaStreams get_ith(int i) const {
return CudaStreams(&_streams[i], &_gpu_indexes[i], 1);
}
// Synchronize all the streams in the set
void synchronize() const {
for (uint32_t i = 0; i < _gpu_count; i++) {
cuda_synchronize_stream(_streams[i], _gpu_indexes[i]);
}
}
cudaStream_t stream(uint32_t idx) const {
PANIC_IF_FALSE(idx < _gpu_count, "Invalid GPU index");
return _streams[idx];
}
uint32_t gpu_index(uint32_t idx) const {
PANIC_IF_FALSE(idx < _gpu_count, "Invalid GPU index");
return _gpu_indexes[idx];
}
uint32_t count() const { return _gpu_count; }
// Construct from the rust FFI stream set. Streams are created in rust
// using the bindings.
CudaStreams(CudaStreamsFFI &ffi)
: _streams((cudaStream_t *)ffi.streams), _gpu_indexes(ffi.gpu_indexes),
_gpu_count(ffi.gpu_count), _owns_streams(false) {}
// Create a new set of streams on the same GPUs as those of the given
// stream set. Can be used to parallelize computation by issuing kernels
// on multiple streams on the same GPU.
void create_on_same_gpus(const CudaStreams &other) {
PANIC_IF_FALSE(_streams == nullptr,
"create_on_same_gpus called on a non-empty CudaStreams");
cudaStream_t *new_streams = new cudaStream_t[other._gpu_count];
// Size the clone from `other`: this instance is still empty here, so
// its own _gpu_count is not meaningful yet.
uint32_t *gpu_indexes_clone = new uint32_t[other._gpu_count];
for (uint32_t i = 0; i < other._gpu_count; ++i) {
new_streams[i] = cuda_create_stream(other._gpu_indexes[i]);
gpu_indexes_clone[i] = other._gpu_indexes[i];
}
this->_streams = new_streams;
this->_gpu_indexes = gpu_indexes_clone;
this->_gpu_count = other._gpu_count;
// Flag this instance as owning streams so that we can destroy
// the streams when they aren't needed anymore
this->_owns_streams = true;
}
// Copy constructor, setting the ownership flag to false.
// Only the initial instance of CudaStreams created with
// create_on_same_gpus owns streams; copies of it never do.
CudaStreams(const CudaStreams &src)
: _streams(src._streams), _gpu_indexes(src._gpu_indexes),
_gpu_count(src._gpu_count), _owns_streams(false) {}
CudaStreams &operator=(CudaStreams const &other) {
PANIC_IF_FALSE(this->_streams == nullptr ||
this->_streams == other._streams,
"Assigning an already initialized CudaStreams");
this->_streams = other._streams;
this->_gpu_indexes = other._gpu_indexes;
this->_gpu_count = other._gpu_count;
// Only the initial instance of CudaStreams created with
// create_on_same_gpus owns streams; copies of it never do.
this->_owns_streams = false;
return *this;
}
// Destroy the streams if they were created by create_on_same_gpus.
// We require the developer to call `release` on all instances
// of cloned streams.
void release() {
// If this instance doesn't own streams, there's nothing to do
// as the streams were created on the Rust side.
if (_owns_streams) {
for (uint32_t i = 0; i < _gpu_count; ++i) {
cuda_destroy_stream(_streams[i], _gpu_indexes[i]);
}
delete[] _streams;
_streams = nullptr;
delete[] _gpu_indexes;
_gpu_indexes = nullptr;
}
}
// The destructor checks that streams created with create_on_same_gpus
// were destroyed manually with `release`.
~CudaStreams() {
// Ensure owned streams were released
PANIC_IF_FALSE(!_owns_streams || _streams == nullptr,
"release() was not called on a CudaStreams object (this=%p) "
"that owns streams cloned from another set (streams=%p)",
this, this->_streams);
}
};
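A hedged usage sketch of the ownership rules spelled out in the comments above (it assumes the surrounding backend headers and an FFI stream set handed over from Rust): only the instance populated through `create_on_same_gpus` owns its streams and must be released explicitly.

// Hedged sketch against the header above; `ffi` is assumed to be
// provided by the Rust bindings.
void overlap_work(CudaStreamsFFI &ffi) {
  CudaStreams streams(ffi); // borrowed: does not own the streams
  CudaStreams local;        // empty set
  local.create_on_same_gpus(streams); // owning clone on the same GPUs
  // ... issue kernels on local.stream(i) to overlap with `streams` ...
  local.synchronize();
  local.release(); // mandatory: the destructor asserts otherwise
}                  // `streams` needs no release, it is only borrowed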
struct CudaStreamsBarrier {
private:
std::vector<cudaEvent_t> _events;
CudaStreams _streams;
CudaStreamsBarrier(const CudaStreamsBarrier &) = delete; // Prevent copy-construction
CudaStreamsBarrier &operator=(const CudaStreamsBarrier &) = delete; // Prevent assignment
public:
void create_on(const CudaStreams &streams) {
_streams = streams;
GPU_ASSERT(streams.count() > 1, "CudaStreamsFirstWaitsWorkersBarrier: "
"Attempted to create on single GPU");
_events.resize(streams.count());
for (int i = 0; i < streams.count(); i++) {
_events[i] = cuda_create_event(streams.gpu_index(i));
}
}
CudaStreamsBarrier() {}
void local_streams_wait_for_stream_0(const CudaStreams &user_streams) {
GPU_ASSERT(!_events.empty(),
"CudaStreamsBarrier: must call create_on before use");
GPU_ASSERT(user_streams.gpu_index(0) == _streams.gpu_index(0),
"CudaStreamsBarrier: synchronization can only be performed on "
"the GPUs the barrier was initially created on.");
cuda_event_record(_events[0], user_streams.stream(0),
user_streams.gpu_index(0));
for (uint32_t j = 1; j < user_streams.count(); j++) {
GPU_ASSERT(user_streams.gpu_index(j) == _streams.gpu_index(j),
"CudaStreamsBarrier: synchronization can only be performed on "
"the GPUs the barrier was initially created on.");
cuda_stream_wait_event(user_streams.stream(j), _events[0],
user_streams.gpu_index(j));
}
}
void stream_0_wait_for_local_streams(const CudaStreams &user_streams) {
GPU_ASSERT(!_events.empty(),
"CudaStreamsBarrier: must call create_on before use");
GPU_ASSERT(
user_streams.count() <= _events.size(),
"CudaStreamsBarrier: trying to synchronize too many streams. "
"The barrier was created on a stream set with %lu streams, while "
"the user stream set has %u streams",
_events.size(), user_streams.count());
if (user_streams.count() > 1) {
// Worker GPUs record their events
for (uint32_t j = 1; j < user_streams.count(); j++) {
GPU_ASSERT(_streams.gpu_index(j) == user_streams.gpu_index(j),
"CudaStreamsBarrier: the user stream set has GPU[%u]=%u "
"while the barrier's stream set has GPU[%u]=%u",
j, user_streams.gpu_index(j), j, _streams.gpu_index(j));
cuda_event_record(_events[j], user_streams.stream(j),
user_streams.gpu_index(j));
}
// GPU 0 waits for all workers
for (uint32_t j = 1; j < user_streams.count(); j++) {
cuda_stream_wait_event(user_streams.stream(0), _events[j],
user_streams.gpu_index(0));
}
}
}
void release() {
for (uint32_t j = 0; j < _streams.count(); j++) {
cuda_event_destroy(_events[j], _streams.gpu_index(j));
}
_events.clear();
}
~CudaStreamsBarrier() {
GPU_ASSERT(_events.empty(),
"CudaStreamsBarrier: must "
"call release before destruction: events size = %lu",
_events.size());
}
};
#endif
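The two barrier methods together implement a fork/join pattern across the stream set; a hedged sketch of the intended call sequence (assuming the surrounding headers, with the kernel work elided):

// Hedged fork/join sketch. The barrier must have been initialized once
// with barrier.create_on(streams) on a set with streams.count() > 1.
void fork_join(const CudaStreams &streams, CudaStreamsBarrier &barrier) {
  // Fork: worker streams wait until stream 0 has produced the inputs.
  barrier.local_streams_wait_for_stream_0(streams);
  // ... each worker j issues kernels on streams.stream(j) ...
  // Join: stream 0 waits for every worker before consuming the outputs.
  barrier.stream_0_wait_for_local_streams(streams);
}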

View File

@@ -0,0 +1,52 @@
#pragma once
#include "bitwise_ops.h"
#include "integer_utilities.h"
#include "scalar_shifts.h"
template <typename Torus> struct int_abs_buffer {
int_radix_params params;
int_arithmetic_scalar_shift_buffer<Torus> *arithmetic_scalar_shift_mem;
int_sc_prop_memory<Torus> *scp_mem;
int_bitop_buffer<Torus> *bitxor_mem;
CudaRadixCiphertextFFI *mask;
bool allocate_gpu_memory;
int_abs_buffer(CudaStreams streams, int_radix_params params,
uint32_t num_radix_blocks, bool allocate_gpu_memory,
uint64_t &size_tracker) {
this->params = params;
this->allocate_gpu_memory = allocate_gpu_memory;
arithmetic_scalar_shift_mem = new int_arithmetic_scalar_shift_buffer<Torus>(
streams, SHIFT_OR_ROTATE_TYPE::RIGHT_SHIFT, params, num_radix_blocks,
allocate_gpu_memory, size_tracker);
uint32_t requested_flag = outputFlag::FLAG_NONE;
scp_mem = new int_sc_prop_memory<Torus>(streams, params, num_radix_blocks,
requested_flag, allocate_gpu_memory,
size_tracker);
bitxor_mem = new int_bitop_buffer<Torus>(streams, BITOP_TYPE::BITXOR,
params, num_radix_blocks,
allocate_gpu_memory, size_tracker);
mask = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), mask, num_radix_blocks,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
}
void release(CudaStreams streams) {
arithmetic_scalar_shift_mem->release(streams);
scp_mem->release(streams);
bitxor_mem->release(streams);
delete arithmetic_scalar_shift_mem;
delete scp_mem;
delete bitxor_mem;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
mask, this->allocate_gpu_memory);
delete mask;
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
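The three sub-buffers mirror the classic branchless absolute-value recipe on two's-complement integers: an arithmetic right shift builds a sign mask, an XOR conditionally flips the bits, and the carry-propagation step supplies the final +1. A standalone cleartext sketch of that identity (illustrative, not from the diff):

#include <cassert>
#include <cstdint>

// |x| = (x ^ mask) - mask, with mask = x >> (bits - 1) (arithmetic):
// mask is 0 for x >= 0 and all-ones for x < 0, and subtracting -1 is
// the "+1" that the carry-propagation buffer performs homomorphically.
int64_t abs_branchless(int64_t x) {
  const int64_t mask = x >> 63;
  return (x ^ mask) - mask;
}

int main() {
  assert(abs_branchless(-5) == 5);
  assert(abs_branchless(7) == 7);
  assert(abs_branchless(0) == 0);
  return 0;
}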

View File

@@ -0,0 +1,107 @@
#pragma once
#include "integer_utilities.h"
template <typename Torus> struct int_bitop_buffer {
int_radix_params params;
int_radix_lut<Torus> *lut;
BITOP_TYPE op;
bool gpu_memory_allocated;
int_bitop_buffer(CudaStreams streams, BITOP_TYPE op, int_radix_params params,
uint32_t num_radix_blocks, bool allocate_gpu_memory,
uint64_t &size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
this->op = op;
this->params = params;
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
switch (op) {
case BITAND:
case BITOR:
case BITXOR:
lut = new int_radix_lut<Torus>(streams, params, 1, num_radix_blocks,
allocate_gpu_memory, size_tracker);
{
auto lut_bivariate_f = [op](Torus lhs, Torus rhs) -> Torus {
if (op == BITOP_TYPE::BITAND) {
// AND
return lhs & rhs;
} else if (op == BITOP_TYPE::BITOR) {
// OR
return lhs | rhs;
} else {
// XOR
return lhs ^ rhs;
}
};
generate_device_accumulator_bivariate<Torus>(
streams.stream(0), streams.gpu_index(0), lut->get_lut(0, 0),
lut->get_degree(0), lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus,
params.carry_modulus, lut_bivariate_f, gpu_memory_allocated);
lut->broadcast_lut(active_streams);
}
break;
default:
// Scalar OP
lut = new int_radix_lut<Torus>(streams, params, params.message_modulus,
num_radix_blocks, allocate_gpu_memory,
size_tracker);
for (int i = 0; i < params.message_modulus; i++) {
auto rhs = i;
auto lut_univariate_scalar_f = [op, rhs](Torus x) -> Torus {
if (op == BITOP_TYPE::SCALAR_BITAND) {
// AND
return x & rhs;
} else if (op == BITOP_TYPE::SCALAR_BITOR) {
// OR
return x | rhs;
} else {
// XOR
return x ^ rhs;
}
};
generate_device_accumulator<Torus>(
streams.stream(0), streams.gpu_index(0), lut->get_lut(0, i),
lut->get_degree(i), lut->get_max_degree(i), params.glwe_dimension,
params.polynomial_size, params.message_modulus,
params.carry_modulus, lut_univariate_scalar_f,
gpu_memory_allocated);
}
// Broadcast once, after all message_modulus scalar LUTs are generated
lut->broadcast_lut(active_streams);
}
}
void release(CudaStreams streams) {
lut->release(streams);
delete lut;
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
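A bivariate accumulator evaluates f(lhs, rhs) in a single programmable bootstrap by packing both operands into one block. Assuming the usual shortint packing (index = lhs * message_modulus + rhs; the packing layout here is an assumption, only the LUT idea comes from the code above), a standalone cleartext sketch:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const uint64_t msg_mod = 4; // 2-bit message space
  // One table entry per (lhs, rhs) pair, with BITAND as the bivariate f.
  std::vector<uint64_t> table(msg_mod * msg_mod);
  for (uint64_t lhs = 0; lhs < msg_mod; ++lhs)
    for (uint64_t rhs = 0; rhs < msg_mod; ++rhs)
      table[lhs * msg_mod + rhs] = lhs & rhs;
  // Looking up the packed operands recovers the bit operation.
  assert(table[3 * msg_mod + 1] == (3 & 1));
  assert(table[2 * msg_mod + 3] == (2 & 3));
  return 0;
}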
void update_degrees_after_bitand(uint64_t *output_degrees,
uint64_t *lwe_array_1_degrees,
uint64_t *lwe_array_2_degrees,
uint32_t num_radix_blocks);
void update_degrees_after_bitor(uint64_t *output_degrees,
uint64_t *lwe_array_1_degrees,
uint64_t *lwe_array_2_degrees,
uint32_t num_radix_blocks);
void update_degrees_after_bitxor(uint64_t *output_degrees,
uint64_t *lwe_array_1_degrees,
uint64_t *lwe_array_2_degrees,
uint32_t num_radix_blocks);
void update_degrees_after_scalar_bitand(uint64_t *output_degrees,
uint64_t const *clear_degrees,
uint64_t const *input_degrees,
uint32_t num_clear_blocks);
void update_degrees_after_scalar_bitor(uint64_t *output_degrees,
uint64_t const *clear_degrees,
uint64_t const *input_degrees,
uint32_t num_clear_blocks);
void update_degrees_after_scalar_bitxor(uint64_t *output_degrees,
uint64_t const *clear_degrees,
uint64_t const *input_degrees,
uint32_t num_clear_blocks);

View File

@@ -0,0 +1,77 @@
#pragma once
#include "integer_utilities.h"
template <typename Torus> struct int_extend_radix_with_sign_msb_buffer {
int_radix_params params;
bool allocate_gpu_memory;
int_radix_lut<Torus> *lut = nullptr;
CudaRadixCiphertextFFI *last_block = nullptr;
CudaRadixCiphertextFFI *padding_block = nullptr;
int_extend_radix_with_sign_msb_buffer(CudaStreams streams,
const int_radix_params params,
uint32_t num_radix_blocks,
uint32_t num_additional_blocks,
const bool allocate_gpu_memory,
uint64_t &size_tracker) {
this->params = params;
this->allocate_gpu_memory = allocate_gpu_memory;
if (num_additional_blocks != 0) {
this->lut = new int_radix_lut<Torus>(streams, params, 1, num_radix_blocks,
allocate_gpu_memory, size_tracker);
uint32_t bits_per_block = std::log2(params.message_modulus);
uint32_t msg_modulus = params.message_modulus;
generate_device_accumulator<Torus>(
streams.stream(0), streams.gpu_index(0), lut->get_lut(0, 0),
lut->get_degree(0), lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
[msg_modulus, bits_per_block](Torus x) {
const auto xm = x % msg_modulus;
const auto sign_bit = (xm >> (bits_per_block - 1)) & 1;
return (Torus)((msg_modulus - 1) * sign_bit);
},
allocate_gpu_memory);
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
lut->broadcast_lut(active_streams);
this->last_block = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), last_block, 1,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
this->padding_block = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), padding_block, 1,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
}
}
void release(CudaStreams streams) {
if (lut != nullptr) {
lut->release(streams);
delete lut;
}
if (last_block != nullptr) {
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
last_block, allocate_gpu_memory);
delete last_block;
}
if (padding_block != nullptr) {
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
padding_block, allocate_gpu_memory);
delete padding_block;
}
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
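The LUT above maps the most significant block to a padding block that is all zeros for a non-negative value and all ones (`message_modulus - 1`) for a negative one, which is exactly block-wise sign extension. A standalone cleartext sketch of that mapping:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t msg_mod = 4;        // 2-bit blocks
  const uint32_t bits_per_block = 2; // log2(msg_mod)
  auto padding = [&](uint64_t x) {
    const uint64_t xm = x % msg_mod;
    const uint64_t sign_bit = (xm >> (bits_per_block - 1)) & 1;
    return (msg_mod - 1) * sign_bit;
  };
  assert(padding(0b01) == 0);           // sign bit clear: pad with 0b00
  assert(padding(0b10) == msg_mod - 1); // sign bit set: pad with 0b11
  return 0;
}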

View File

@@ -0,0 +1,141 @@
#pragma once
#include "integer_utilities.h"
template <typename Torus> struct int_zero_out_if_buffer {
int_radix_params params;
CudaRadixCiphertextFFI *tmp;
bool gpu_memory_allocated;
int_zero_out_if_buffer(CudaStreams streams, int_radix_params params,
uint32_t num_radix_blocks, bool allocate_gpu_memory,
uint64_t &size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
this->params = params;
auto active_streams = streams.active_gpu_subset(num_radix_blocks);
tmp = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), tmp, num_radix_blocks,
params.big_lwe_dimension, size_tracker, allocate_gpu_memory);
}
void release(CudaStreams streams) {
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0), tmp,
gpu_memory_allocated);
delete tmp;
tmp = nullptr;
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
}
};
template <typename Torus> struct int_cmux_buffer {
int_radix_lut<Torus> *predicate_lut;
int_radix_lut<Torus> *message_extract_lut;
CudaRadixCiphertextFFI *buffer_in;
CudaRadixCiphertextFFI *buffer_out;
CudaRadixCiphertextFFI *condition_array;
int_radix_params params;
bool allocate_gpu_memory;
bool gpu_memory_allocated;
int_cmux_buffer(CudaStreams streams,
std::function<Torus(Torus)> predicate_lut_f,
int_radix_params params, uint32_t num_radix_blocks,
bool allocate_gpu_memory, uint64_t &size_tracker) {
gpu_memory_allocated = allocate_gpu_memory;
this->params = params;
this->allocate_gpu_memory = allocate_gpu_memory;
buffer_in = new CudaRadixCiphertextFFI;
buffer_out = new CudaRadixCiphertextFFI;
condition_array = new CudaRadixCiphertextFFI;
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), buffer_in,
2 * num_radix_blocks, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), buffer_out,
2 * num_radix_blocks, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
create_zero_radix_ciphertext_async<Torus>(
streams.stream(0), streams.gpu_index(0), condition_array,
2 * num_radix_blocks, params.big_lwe_dimension, size_tracker,
allocate_gpu_memory);
auto lut_f = [predicate_lut_f](Torus block, Torus condition) -> Torus {
return predicate_lut_f(condition) ? 0 : block;
};
auto inverted_lut_f = [predicate_lut_f](Torus block,
Torus condition) -> Torus {
return predicate_lut_f(condition) ? block : 0;
};
auto message_extract_lut_f = [params](Torus x) -> Torus {
return x % params.message_modulus;
};
predicate_lut =
new int_radix_lut<Torus>(streams, params, 2, 2 * num_radix_blocks,
allocate_gpu_memory, size_tracker);
message_extract_lut =
new int_radix_lut<Torus>(streams, params, 1, num_radix_blocks,
allocate_gpu_memory, size_tracker);
generate_device_accumulator_bivariate<Torus>(
streams.stream(0), streams.gpu_index(0), predicate_lut->get_lut(0, 0),
predicate_lut->get_degree(0), predicate_lut->get_max_degree(0),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, inverted_lut_f, gpu_memory_allocated);
generate_device_accumulator_bivariate<Torus>(
streams.stream(0), streams.gpu_index(0), predicate_lut->get_lut(0, 1),
predicate_lut->get_degree(1), predicate_lut->get_max_degree(1),
params.glwe_dimension, params.polynomial_size, params.message_modulus,
params.carry_modulus, lut_f, gpu_memory_allocated);
generate_device_accumulator<Torus>(
streams.stream(0), streams.gpu_index(0),
message_extract_lut->get_lut(0, 0), message_extract_lut->get_degree(0),
message_extract_lut->get_max_degree(0), params.glwe_dimension,
params.polynomial_size, params.message_modulus, params.carry_modulus,
message_extract_lut_f, gpu_memory_allocated);
Torus *h_lut_indexes = predicate_lut->h_lut_indexes;
for (int index = 0; index < 2 * num_radix_blocks; index++) {
if (index < num_radix_blocks) {
h_lut_indexes[index] = 0;
} else {
h_lut_indexes[index] = 1;
}
}
cuda_memcpy_with_size_tracking_async_to_gpu(
predicate_lut->get_lut_indexes(0, 0), h_lut_indexes,
2 * num_radix_blocks * sizeof(Torus), streams.stream(0),
streams.gpu_index(0), allocate_gpu_memory);
auto active_streams_pred = streams.active_gpu_subset(2 * num_radix_blocks);
predicate_lut->broadcast_lut(active_streams_pred);
auto active_streams_msg = streams.active_gpu_subset(num_radix_blocks);
message_extract_lut->broadcast_lut(active_streams_msg);
}
void release(CudaStreams streams) {
predicate_lut->release(streams);
delete predicate_lut;
message_extract_lut->release(streams);
delete message_extract_lut;
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
buffer_in, gpu_memory_allocated);
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
buffer_out, gpu_memory_allocated);
release_radix_ciphertext_async(streams.stream(0), streams.gpu_index(0),
condition_array, gpu_memory_allocated);
cuda_synchronize_stream(streams.stream(0), streams.gpu_index(0));
delete buffer_in;
delete buffer_out;
delete condition_array;
}
};
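The two bivariate LUTs decompose the CMUX into disjoint halves: one keeps the "true" branch where the predicate holds, the other keeps the "false" branch where it does not, and the halves are summed (the message-extract LUT then clears the carries). A standalone cleartext sketch of the selection logic:

#include <cassert>
#include <cstdint>

int main() {
  auto predicate = [](uint64_t c) { return c != 0; };
  auto cmux = [&](uint64_t cond, uint64_t t, uint64_t f) {
    const uint64_t keep_true = predicate(cond) ? t : 0;  // inverted_lut_f
    const uint64_t keep_false = predicate(cond) ? 0 : f; // lut_f
    return keep_true + keep_false; // halves are disjoint, so + selects
  };
  assert(cmux(1, 7, 9) == 7);
  assert(cmux(0, 7, 9) == 9);
  return 0;
}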

Some files were not shown because too many files have changed in this diff.