Compare commits

..

1 Commits

Author SHA1 Message Date
Theo Souchon
65045be63f chore(ci): add a workflows documentation 2026-01-08 15:33:41 +01:00
22 changed files with 945 additions and 772 deletions

View File

@@ -2,8 +2,6 @@
ignore = [
# Ignoring unmaintained 'paste' advisory as it is a widely used, low-risk build dependency.
"RUSTSEC-2024-0436",
# Ignoring unmaintained 'bincode' crate. Getting rid of it would be too complex in the short term.
"RUSTSEC-2025-0141",
]
[output]

View File

@@ -23,8 +23,6 @@ runs:
echo "${CMAKE_SCRIPT_SHA} cmake-${CMAKE_VERSION}-linux-x86_64.sh" > checksum
sha256sum -c checksum
sudo bash cmake-"${CMAKE_VERSION}"-linux-x86_64.sh --skip-license --prefix=/usr/ --exclude-subdir
sudo apt-get clean
sudo rm -rf /var/lib/apt/lists/*
sudo apt update
sudo apt remove -y unattended-upgrades
sudo apt install -y cmake-format libclang-dev

1
.gitignore vendored
View File

@@ -10,7 +10,6 @@ target/
**/*.rmeta
**/Cargo.lock
**/*.bin
**/.DS_Store
# Some of our bench outputs
/tfhe/benchmarks_parameters

View File

@@ -11,7 +11,7 @@
/tfhe/src/core_crypto/gpu @agnesLeroy
/tfhe/src/core_crypto/hpu @zama-ai/hardware
/tfhe/src/shortint/ @mayeul-zama @nsarlin-zama
/tfhe/src/shortint/ @mayeul-zama
/tfhe/src/integer/ @tmontaigu
/tfhe/src/integer/gpu @agnesLeroy
@@ -19,12 +19,8 @@
/tfhe/src/high_level_api/ @tmontaigu
/tfhe-zk-pok/ @nsarlin-zama
/tfhe-benchmark/ @soonum
/utils/ @nsarlin-zama
/Makefile @IceTDrinker @soonum
/mockups/tfhe-hpu-mockup @zama-ai/hardware

View File

@@ -36,7 +36,6 @@ rayon = "1.11"
serde = { version = "1.0", default-features = false }
wasm-bindgen = "0.2.101"
getrandom = "0.2.8"
# The project maintainers consider that this is the last version of the 1.3 branch; any newer version should not be trusted
bincode = "=1.3.3"
[profile.bench]

704
WORKFLOWS.md Normal file
View File

@@ -0,0 +1,704 @@
# 🔄 TFHE-rs GitHub Workflows Documentation
This document provides a comprehensive overview of all GitHub Actions workflows in the TFHE-rs project, organized by category with visual diagrams showing their triggers and purposes.
## 📊 Workflow Overview
The project contains **71 workflows** organized into the following categories:
- **Testing & Validation** (31 workflows) - AWS CPU (7), GPU (16), HPU (1), M1 (1), special tests (4), cargo tests (2)
- **Benchmarking** (17 workflows) - CPU, GPU, HPU, WASM, specialized benchmarks
- **Building & Compilation** (4 workflows) - Cargo builds
- **Release Management** (9 workflows) - Publishing to crates.io and npm
- **CI/CD & Maintenance** (10 workflows) - Linting, PR management, security
---
## 🔍 Workflow Trigger Types
```mermaid
graph LR
A[Workflow Triggers] --> B[Pull Request]
A --> C[Push to main]
A --> D[Schedule/Cron]
A --> E[Workflow Dispatch]
A --> F[Label Events]
B --> B1[On PR Open]
B --> B2[On PR Approval]
F --> F1[approved label]
F --> F2[m1_test label]
D --> D1[Daily]
D --> D2[Weekly]
D --> D3[Nightly]
```
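For reference, these trigger types map directly onto the `on:` block of a workflow file. A minimal sketch, with illustrative event types and cron expression (not copied from any specific workflow in the repository):
```yaml
# Illustrative only: how the trigger types above are declared in a workflow file.
on:
  pull_request:
    types: [opened, synchronize, labeled]   # PR events, including label additions
  push:
    branches: [main]                        # push to main
  schedule:
    - cron: "0 22 * * 1-5"                  # nightly schedule, Mon-Fri (example expression)
  workflow_dispatch:                        # manual trigger from the Actions tab
```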
---
## 🧪 Testing & Validation Workflows
### CPU Testing Workflows
```mermaid
flowchart TB
subgraph "CPU Test Workflows"
AWS[aws_tfhe_tests]
FAST[aws_tfhe_fast_tests]
INT[aws_tfhe_integer_tests]
SIGN[aws_tfhe_signed_integer_tests]
BACK[aws_tfhe_backward_compat_tests]
WASM[aws_tfhe_wasm_tests]
NOISE[aws_tfhe_noise_checks]
M1[m1_tests]
end
subgraph "Triggers"
PR[Pull Request<br/>+ approved label]
SCHED[Schedule<br/>Nightly Mon-Fri]
DISP[Workflow Dispatch]
M1LABEL[m1_test label]
end
PR --> AWS
SCHED --> AWS
DISP --> AWS
PR --> FAST
PR --> INT
PR --> SIGN
PR --> BACK
PR --> WASM
DISP --> M1
M1LABEL --> M1
SCHED --> M1
```
| Workflow | Trigger | Purpose | Runner |
|----------|---------|---------|--------|
| **aws_tfhe_tests** | PR (approved) / Nightly / Manual | Comprehensive CPU tests (csprng, zk-pok, core_crypto, boolean, shortint, strings, high-level API, C API, examples, apps) | AWS cpu-big |
| **aws_tfhe_fast_tests** | PR (approved) / Manual | Fast subset of tests for quick validation | AWS cpu-small |
| **aws_tfhe_integer_tests** | PR (approved) / Manual | Integer operations testing | AWS cpu-big |
| **aws_tfhe_signed_integer_tests** | PR (approved) / Manual | Signed integer operations testing | AWS cpu-big |
| **aws_tfhe_backward_compat_tests** | PR (approved) / Manual | Backward compatibility validation | AWS cpu-small |
| **aws_tfhe_wasm_tests** | PR (approved) / Manual | WebAssembly tests | AWS cpu-small |
| **aws_tfhe_noise_checks** | PR (approved) / Manual | Cryptographic noise validation | AWS cpu-small |
| **m1_tests** | Manual / Schedule (10pm daily) / m1_test label | Tests on Apple M1 architecture | Self-hosted M1 Mac |
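Workflows listed with a "PR (approved)" trigger only start their costly jobs once the `approved` label is present on the pull request. A minimal sketch of that gating, with hypothetical job and step names (in the real workflows this job typically starts a remote test instance):
```yaml
# Illustrative gating on the "approved" label; job and step contents are examples.
on:
  pull_request:
    types: [labeled]
jobs:
  setup-instance:
    if: contains(github.event.pull_request.labels.*.name, 'approved')
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: echo "start the test instance and launch the suite here"
```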
---
### GPU Testing Workflows
```mermaid
flowchart TB
subgraph "GPU Test Workflows"
GFAST[gpu_fast_tests]
G4090[gpu_4090_tests]
GH100F[gpu_fast_h100_tests]
GH100[gpu_full_h100_tests]
GMULTI[gpu_full_multi_gpu_tests]
GVAL[gpu_code_validation_tests]
GMEM[gpu_memory_sanitizer]
GMEMH[gpu_memory_sanitizer_h100]
GUINT[gpu_unsigned_integer_tests]
GSINT[gpu_signed_integer_tests]
GUINTC[gpu_unsigned_integer_classic_tests]
GSINTC[gpu_signed_integer_classic_tests]
GUINTH[gpu_unsigned_integer_h100_tests]
GSINTH[gpu_signed_integer_h100_tests]
GLONG[gpu_integer_long_run_tests]
GPCC[gpu_pcc]
end
subgraph "Triggers"
PR[Pull Request]
DISP[Workflow Dispatch]
APPR[PR approved label]
end
PR --> GFAST
DISP --> GFAST
DISP --> G4090
DISP --> GH100F
DISP --> GH100
DISP --> GMULTI
DISP --> GVAL
APPR --> GMEM
APPR --> GMEMH
APPR --> GUINT
APPR --> GSINT
APPR --> GPCC
```
| Workflow | Trigger | Purpose | GPU |
|----------|---------|---------|-----|
| **gpu_fast_tests** | PR / Manual | Quick GPU validation tests | Hyperstack GPU |
| **gpu_4090_tests** | Manual | Tests on RTX 4090 hardware | RTX 4090 |
| **gpu_fast_h100_tests** | Manual | Fast tests on H100 GPU | H100 |
| **gpu_full_h100_tests** | Manual | Comprehensive H100 tests | H100 |
| **gpu_full_multi_gpu_tests** | Manual | Multi-GPU testing | Multiple GPUs |
| **gpu_code_validation_tests** | Manual | GPU code validation | GPU |
| **gpu_memory_sanitizer** | PR (approved) / Manual | Memory leak detection | GPU |
| **gpu_memory_sanitizer_h100** | PR (approved) / Manual | Memory sanitizer on H100 | H100 |
| **gpu_unsigned_integer_tests** | PR (approved) / Manual | Unsigned integer GPU tests | GPU |
| **gpu_signed_integer_tests** | PR (approved) / Manual | Signed integer GPU tests | GPU |
| **gpu_unsigned_integer_classic_tests** | Manual | Classic unsigned integer tests | GPU |
| **gpu_signed_integer_classic_tests** | Manual | Classic signed integer tests | GPU |
| **gpu_unsigned_integer_h100_tests** | Manual | Unsigned integer tests on H100 | H100 |
| **gpu_signed_integer_h100_tests** | Manual | Signed integer tests on H100 | H100 |
| **gpu_integer_long_run_tests** | Manual | Long-running integer tests | GPU |
| **gpu_pcc** | PR (approved) / Manual | GPU PCC checks | GPU |
---
### HPU Testing Workflows
```mermaid
flowchart LR
HPU[hpu_hlapi_tests]
DISP[Workflow Dispatch] --> HPU
HPU --> |Tests on|INTEL[Intel HPU Hardware]
```
| Workflow | Trigger | Purpose | Hardware |
|----------|---------|---------|----------|
| **hpu_hlapi_tests** | Manual | High-level API tests on Intel HPU | Intel HPU |
---
### Special Testing Workflows
```mermaid
flowchart TB
subgraph "Special Tests"
COV[code_coverage]
CSPRNG[csprng_randomness_tests]
LONG[integer_long_run_tests]
PARAMS[parameters_check]
end
subgraph "Cargo Tests"
TESTFFT[cargo_test_fft]
TESTNTT[cargo_test_ntt]
end
DISP[Workflow Dispatch] --> COV
DISP --> CSPRNG
DISP --> LONG
APPR[PR approved label] --> CSPRNG
PUSH[Push to main] --> PARAMS
PR[PR on specific paths] --> PARAMS
DISP --> PARAMS
PR --> TESTFFT
PR --> TESTNTT
```
| Workflow | Trigger | Purpose |
|----------|---------|---------|
| **code_coverage** | Manual | Generate code coverage reports and upload to Codecov |
| **csprng_randomness_tests** | Manual / PR (approved) | Dieharder randomness test suite for CSPRNG |
| **integer_long_run_tests** | Manual | Extended integer testing |
| **parameters_check** | Push to main / PR (specific paths) / Manual | Security check on cryptographic parameters using lattice estimator |
| **cargo_test_fft** | PR | Run tfhe-fft tests |
| **cargo_test_ntt** | PR | Run tfhe-ntt tests |
---
## 🏗️ Building & Compilation Workflows (4 workflows)
```mermaid
flowchart TB
subgraph "Build Workflows"
BUILD[cargo_build]
COMMON[cargo_build_common]
FFT[cargo_build_tfhe_fft]
NTT[cargo_build_tfhe_ntt]
end
subgraph "Build Jobs"
PCC[Parallel PCC CPU]
PCCHPU[PCC HPU]
FULL[Build TFHE Full]
LAYERS[Build Layers]
CAPI[Build C API]
end
PR[Pull Request] --> BUILD
BUILD --> PCC
BUILD --> PCCHPU
BUILD --> FULL
BUILD --> LAYERS
BUILD --> CAPI
PR --> FFT
PR --> NTT
```
| Workflow | Trigger | Purpose |
|----------|---------|---------|
| **cargo_build** | PR | Main build workflow - coordinates all build jobs |
| **cargo_build_common** | Reusable | Shared build logic for different targets |
| **cargo_build_tfhe_fft** | PR | Build and validate tfhe-fft crate |
| **cargo_build_tfhe_ntt** | PR | Build and validate tfhe-ntt crate |
**Build Targets** (an illustrative build matrix follows this list):
- ✅ Parallel PCC (pre-commit checks) for CPU
- ✅ PCC for HPU
- ✅ Full TFHE build (Linux, macOS M1, Windows)
- ✅ Layer-by-layer builds
- ✅ C API builds
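The multi-platform "Full TFHE build" target is the kind of job that is naturally expressed as a runner matrix. A minimal sketch, assuming example runner labels and a plain `cargo build` (the actual invocations live in the Makefile and workflow files):
```yaml
# Illustrative OS matrix for a full build; runner labels and command are examples.
jobs:
  build-tfhe-full:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-14, windows-latest]   # macos-14 runners are Apple Silicon
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - run: cargo build --release
```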
---
## 📊 Benchmarking Workflows (17 workflows)
All benchmark workflows can be **triggered manually** via workflow_dispatch.
```mermaid
flowchart TB
subgraph "CPU Benchmarks - 3 workflows"
BCPU[benchmark_cpu<br/>Main CPU benchmarks]
BCPUW[benchmark_cpu_weekly<br/>Weekly CPU benchmarks]
BCPUC[benchmark_cpu_common<br/>Reusable workflow]
end
subgraph "GPU Benchmarks - 5 workflows"
BGPU[benchmark_gpu<br/>Main GPU benchmarks]
BGPUW[benchmark_gpu_weekly<br/>Weekly GPU benchmarks]
BGPUC[benchmark_gpu_common<br/>Reusable workflow]
BGPU4090[benchmark_gpu_4090<br/>RTX 4090 specific]
BGPUCOP[benchmark_gpu_coprocessor<br/>Coprocessor mode]
end
subgraph "HPU Benchmarks - 2 workflows"
BHPU[benchmark_hpu<br/>Intel HPU benchmarks]
BHPUC[benchmark_hpu_common<br/>Reusable workflow]
end
subgraph "Specialized Benchmarks - 7 workflows"
BWASM[benchmark_wasm_client<br/>WebAssembly client]
BCT[benchmark_ct_key_sizes<br/>Ciphertext & key sizes]
BFFT[benchmark_tfhe_fft<br/>FFT performance]
BNTT[benchmark_tfhe_ntt<br/>NTT performance]
BWHITE[benchmark_whitepaper<br/>Whitepaper params]
BREG[benchmark_perf_regression<br/>Regression detection]
BDOC[benchmark_documentation<br/>Generate docs]
end
DISP[Workflow Dispatch<br/>Manual Trigger] --> BCPU
DISP --> BCPUW
DISP --> BGPU
DISP --> BGPUW
DISP --> BHPU
DISP --> BWASM
DISP --> BCT
DISP --> BFFT
DISP --> BNTT
DISP --> BWHITE
DISP --> BREG
DISP --> BDOC
DISP --> BGPU4090
DISP --> BGPUCOP
```
### CPU Benchmarks (3 workflows)
| Workflow | Purpose | Operations Tested |
|----------|---------|-------------------|
| **benchmark_cpu** | Main CPU performance benchmarks | integer, signed_integer, integer_compression, integer_zk, shortint, shortint_oprf, hlapi, hlapi_erc20, hlapi_dex, hlapi_noise_squash, tfhe_zk_pok, boolean, pbs, pbs128, ks, ks_pbs |
| **benchmark_cpu_weekly** | Weekly scheduled CPU benchmarks | Similar to benchmark_cpu |
| **benchmark_cpu_common** | Reusable workflow for CPU benchmarks | Shared logic |
### GPU Benchmarks (5 workflows)
| Workflow | Purpose | Hardware |
|----------|---------|----------|
| **benchmark_gpu** | Main GPU performance benchmarks | Standard GPU |
| **benchmark_gpu_weekly** | Weekly scheduled GPU benchmarks | Standard GPU |
| **benchmark_gpu_4090** | Benchmarks on RTX 4090 | RTX 4090 |
| **benchmark_gpu_coprocessor** | GPU coprocessor mode benchmarks | GPU |
| **benchmark_gpu_common** | Reusable workflow for GPU benchmarks | Shared logic |
### HPU Benchmarks (2 workflows)
| Workflow | Purpose | Hardware |
|----------|---------|----------|
| **benchmark_hpu** | Intel HPU performance benchmarks | Intel HPU |
| **benchmark_hpu_common** | Reusable workflow for HPU benchmarks | Shared logic |
### Specialized Benchmarks (7 workflows)
| Workflow | Purpose | Focus |
|----------|---------|-------|
| **benchmark_wasm_client** | WebAssembly client performance | WASM execution |
| **benchmark_ct_key_sizes** | Measure ciphertext and key sizes | Memory footprint |
| **benchmark_tfhe_fft** | FFT library performance | tfhe-fft crate |
| **benchmark_tfhe_ntt** | NTT library performance | tfhe-ntt crate |
| **benchmark_whitepaper** | Whitepaper parameter validation | Research params |
| **benchmark_perf_regression** | Detect performance regressions | Regression testing |
| **benchmark_documentation** | Generate benchmark documentation | Documentation |
### Benchmark Configuration Options
**📏 Operation Flavors:**
- `default` - Standard operations
- `fast_default` - Fast variant operations
- `smart` - Smart operations (with automatic PBS)
- `unchecked` - Unchecked operations (no PBS)
- `misc` - Miscellaneous operations
**🎯 Precision Sets:**
- `fast` - Quick validation subset
- `all` - All supported bit precisions
- `documentation` - Precisions for documentation
**⏱️ Benchmark Types:**
- `latency` - Single operation timing
- `throughput` - Operations per second
- `both` - Both latency and throughput
**🔧 Parameter Types:**
- `classical` - Classical parameters
- `multi_bit` - Multi-bit parameters
- `classical + multi_bit` - Both parameter sets
- `classical_documentation` - Classical for docs
- `multi_bit_documentation` - Multi-bit for docs
- `classical_documentation + multi_bit_documentation` - Both for docs
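These options are typically surfaced as `workflow_dispatch` inputs so they can be selected when launching a benchmark run. A minimal sketch with hypothetical input names and defaults (the actual benchmark workflows define their own input blocks):
```yaml
# Illustrative workflow_dispatch inputs mirroring the options listed above.
on:
  workflow_dispatch:
    inputs:
      op_flavor:
        description: "Operation flavor"
        type: choice
        options: [default, fast_default, smart, unchecked, misc]
        default: default
      precision_set:
        description: "Precision set"
        type: choice
        options: [fast, all, documentation]
        default: fast
      bench_type:
        description: "Benchmark type"
        type: choice
        options: [latency, throughput, both]
        default: latency
```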
---
## 📦 Release Management Workflows (9 workflows)
```mermaid
flowchart TB
subgraph "Release Workflows"
RTFHE[make_release_tfhe]
RCUDA[make_release_cuda]
RHPU[make_release_hpu]
RFFT[make_release_tfhe_fft]
RNTT[make_release_tfhe_ntt]
RCSPRNG[make_release_tfhe_csprng]
RZK[make_release_zk_pok]
RVER[make_release_tfhe_versionable]
RCOMMON[make_release_common]
end
DISP[Workflow Dispatch<br/>Manual Only] --> RTFHE
DISP --> RCUDA
DISP --> RHPU
DISP --> RFFT
DISP --> RNTT
DISP --> RCSPRNG
DISP --> RZK
DISP --> RVER
RTFHE --> |Publishes to|CRATES[crates.io]
RTFHE --> |Publishes to|NPM[npm registry]
style RTFHE fill:#ff6b6b
style DISP fill:#ffd93d
```
| Workflow | Purpose | Platforms |
|----------|---------|-----------|
| **make_release_tfhe** | Release main TFHE library | crates.io, npm (web & node packages) |
| **make_release_cuda** | Release CUDA backend | crates.io |
| **make_release_hpu** | Release HPU backend | crates.io |
| **make_release_tfhe_fft** | Release FFT library | crates.io |
| **make_release_tfhe_ntt** | Release NTT library | crates.io |
| **make_release_tfhe_csprng** | Release CSPRNG library | crates.io |
| **make_release_zk_pok** | Release Zero-Knowledge Proof of Knowledge library | crates.io |
| **make_release_tfhe_versionable** | Release versionable trait library | crates.io |
| **make_release_common** | Shared release logic | Reusable workflow |
**Release Options:**
- 🧪 Dry-run mode
- 📦 Push to crates.io
- 🌐 Push web JS package
- 📱 Push Node.js package
- 🏷️ Set NPM latest tag
---
## 🛠️ CI/CD & Maintenance Workflows (10 workflows)
```mermaid
flowchart TB
subgraph "Code Quality"
LINT[ci_lint]
COMMIT[check_commit]
AUDIT[cargo_audit]
end
subgraph "PR Management"
APPROVE[approve_label]
UNVER[unverified_prs]
VERIFY[verify_triggering_actor]
end
subgraph "Repository Sync"
SYNC[sync_on_push]
end
subgraph "SVG Generation"
SVG[generate_svgs]
SVGC[generate_svg_common]
end
PR[Pull Request] --> LINT
PR --> COMMIT
PR --> APPROVE
SCHED1[Daily 4am UTC] --> AUDIT
SCHED2[Daily 1:30am UTC] --> UNVER
PUSH[Push to main] --> SYNC
DISP[Workflow Dispatch] --> SVG
DISP --> AUDIT
DISP --> SYNC
```
| Workflow | Trigger | Purpose |
|----------|---------|---------|
| **ci_lint** | PR | Lint workflows with actionlint & check security with zizmor |
| **check_commit** | PR | Validate commit message format, line length, and signatures |
| **approve_label** | PR / PR Review | Auto-manage "approved" label on PRs |
| **cargo_audit** | Daily 4am UTC / Manual | Check dependencies for security vulnerabilities |
| **unverified_prs** | Daily 1:30am UTC | Close PRs without CLA signature after 2 days |
| **verify_triggering_actor** | Various | Verify actor permissions for sensitive workflows |
| **sync_on_push** | Push to main / Manual | Sync repository to internal mirror |
| **generate_svgs** | Manual | Generate parameter curve SVG visualizations |
| **generate_svg_common** | Reusable | Common SVG generation logic |
| **placeholder_workflow** | N/A | Template workflow |
---
## 🔐 Security & Quality Workflows
```mermaid
flowchart LR
subgraph "Security Checks"
A[commit signatures]
B[dependency audit]
C[zizmor security scan]
D[parameters security]
end
subgraph "Quality Checks"
E[commit format]
F[actionlint]
G[code coverage]
H[randomness tests]
end
COMMIT[check_commit] --> A
COMMIT --> E
AUDIT[cargo_audit] --> B
LINT[ci_lint] --> C
LINT --> F
PARAMS[parameters_check] --> D
COV[code_coverage] --> G
CSPRNG[csprng_randomness_tests] --> H
```
---
## 📈 Workflow Statistics
### By Trigger Type
| Trigger | Count | Examples |
|---------|-------|----------|
| **Workflow Dispatch** (Manual) | 65 | All benchmarks, releases, most tests |
| **Pull Request** | 18 | Build, lint, fast tests, GPU tests |
| **Pull Request (approved label)** | 12 | AWS tests, GPU memory tests |
| **Schedule/Cron** | 5 | Nightly tests, audit, unverified PRs |
| **Push to main** | 2 | Sync, parameters check |
| **Label Events** | 3 | M1 tests, approve workflow |
### By Runner Type
| Runner | Count | Purpose |
|--------|-------|---------|
| **AWS CPU** | 15 | Main testing infrastructure |
| **Hyperstack GPU** | 13 | GPU testing and benchmarks |
| **Self-hosted M1 Mac** | 1 | Apple Silicon testing |
| **Intel HPU** | 2 | HPU testing and benchmarks |
| **Ubuntu Latest** | 25 | CI/CD, builds, coordination |
| **Windows** | 1 | Windows builds |
---
## 🎯 Key Workflow Patterns
### 1. Instance Management Pattern
Many workflows follow this pattern for cost optimization (a job-level sketch follows the list of workflows below):
```mermaid
sequenceDiagram
participant GitHub
participant Setup
participant Runner
participant Tests
participant Teardown
GitHub->>Setup: Trigger workflow
Setup->>Runner: Start AWS/GPU instance
Runner->>Tests: Execute tests
Tests->>Teardown: Complete (success/fail)
Teardown->>Runner: Stop instance
Teardown->>GitHub: Send Slack notification
```
**Workflows using this pattern:**
- All `aws_tfhe_*` workflows
- All `gpu_*` workflows
- `hpu_hlapi_tests`
- `code_coverage`
- `parameters_check`
- `csprng_randomness_tests`
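Expressed as jobs, the pattern chains a setup job, the test job, and a teardown job that always runs. A minimal sketch with hypothetical job names and placeholder commands (the real workflows use dedicated actions to start and stop instances and to notify Slack):
```yaml
# Illustrative setup -> test -> teardown job chain; names and commands are placeholders.
jobs:
  setup-instance:
    runs-on: ubuntu-latest
    steps:
      - run: echo "start the AWS/GPU instance here"
  run-tests:
    needs: setup-instance
    runs-on: [self-hosted, remote-instance]   # example label of the instance started above
    steps:
      - uses: actions/checkout@v4
      - run: make test
  teardown-instance:
    needs: [setup-instance, run-tests]
    if: always()                               # stop the instance even when tests fail
    runs-on: ubuntu-latest
    steps:
      - run: echo "stop the instance and send the Slack notification here"
```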
### 2. Branch Protection Rules (BPR)
Workflows marked with `(bpr)` are required for PRs to be merged:
- `cargo_build/cargo-builds (bpr)`
- `ci_lint/lint-check (bpr)`
- `check_commit/check-commit-pr (bpr)`
### 3. File Change Detection
Many workflows use `tj-actions/changed-files` to conditionally run tests based on changed files, optimizing CI time and resources.
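A minimal sketch of that conditioning, with example path filters and a pinned example version of the action (the real filters are defined per workflow):
```yaml
# Illustrative changed-file gating; the path filters and action version are examples.
jobs:
  changes:
    runs-on: ubuntu-latest
    outputs:
      src_changed: ${{ steps.changed.outputs.any_changed }}
    steps:
      - uses: actions/checkout@v4
      - id: changed
        uses: tj-actions/changed-files@v45
        with:
          files: |
            tfhe/src/**
            Cargo.toml
  tests:
    needs: changes
    if: needs.changes.outputs.src_changed == 'true'
    runs-on: ubuntu-latest
    steps:
      - run: echo "run the expensive tests only when relevant files changed"
```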
---
## 🔄 Workflow Dependencies
```mermaid
graph TD
subgraph "Reusable Workflows"
COMMON[cargo_build_common]
BENCH_CPU_C[benchmark_cpu_common]
BENCH_GPU_C[benchmark_gpu_common]
BENCH_HPU_C[benchmark_hpu_common]
REL_COMMON[make_release_common]
SVG_COMMON[generate_svg_common]
end
subgraph "Parent Workflows"
BUILD[cargo_build]
BENCH_CPU[benchmark_cpu]
BENCH_GPU[benchmark_gpu]
BENCH_HPU[benchmark_hpu]
RELEASES[make_release_*]
SVG[generate_svgs]
end
BUILD --> COMMON
BENCH_CPU --> BENCH_CPU_C
BENCH_GPU --> BENCH_GPU_C
BENCH_HPU --> BENCH_HPU_C
RELEASES --> REL_COMMON
SVG --> SVG_COMMON
```
---
## 📝 Workflow Naming Convention
```
<category>_<component>_<type>
```
Examples:
- `aws_tfhe_tests` - AWS infrastructure, TFHE component, tests type
- `gpu_fast_tests` - GPU infrastructure, fast variant, tests type
- `benchmark_cpu_weekly` - Benchmark category, CPU target, weekly schedule
- `make_release_tfhe` - Make/release action, TFHE component
---
## 🚀 Quick Reference
### Running Tests on PR
1. **Quick validation**: Automatic on PR creation
- `cargo_build` - Build checks
- `ci_lint` - Linting
- `check_commit` - Commit format
- `gpu_fast_tests` - Basic GPU tests
2. **Full test suite**: After PR approval (add "approved" label)
- `aws_tfhe_tests` - Comprehensive CPU tests
- `gpu_memory_sanitizer` - Memory checks
- GPU integer tests
3. **Special hardware**: Manual label addition
- Add `m1_test` label for M1 Mac tests
### Running Benchmarks
All benchmarks are **manual only** via workflow dispatch. Choose:
- Target: CPU, GPU, HPU, or WASM
- Operation flavor: default, smart, unchecked
- Precision set: fast, all, documentation
- Benchmark type: latency, throughput, both
### Creating a Release
1. Run appropriate `make_release_*` workflow
2. Configure options (dry-run, push to crates, npm packages)
3. Workflow handles versioning, building, and publishing
4. Includes provenance and SLSA attestation
---
## 🔔 Notification System
All critical workflows send Slack notifications (see the sketch after these lists) on:
- ❌ Failure
- 🚫 Cancellation (non-PR events)
- ⚠️ Instance teardown failures
Notifications include:
- Job status
- Pull request link (if applicable)
- Action run URL
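The failure and non-PR-cancellation conditions can be expressed directly in the notification step's `if:` clause. A minimal sketch using a plain webhook call (the delivery mechanism and secret name are illustrative; the actual workflows may use a dedicated Slack action):
```yaml
# Illustrative failure/cancellation notification step; webhook and secret name are examples.
steps:
  - name: Slack notification
    if: failure() || (cancelled() && github.event_name != 'pull_request')
    env:
      SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
    run: |
      curl -sS -X POST -H 'Content-type: application/json' \
        --data "{\"text\":\"${{ github.workflow }}: ${{ job.status }} - ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}\"}" \
        "$SLACK_WEBHOOK_URL"
```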
---
## 📚 Additional Resources
- **Workflow Files**: `.github/workflows/`
- **Reusable Actions**: `.github/actions/`
- **Configuration**: `ci/slab.toml`
- **Scripts**: `scripts/` directory
---
## ✅ Verification Summary
**Total Workflows: 71**
Count by category:
- Testing & Validation: **31 workflows** (7 AWS CPU + 16 GPU + 1 HPU + 1 M1 + 4 special + 2 cargo tests)
- Benchmarking: **17 workflows** (3 CPU + 5 GPU + 2 HPU + 7 specialized)
- Building & Compilation: **4 workflows**
- Release Management: **9 workflows**
- CI/CD & Maintenance: **10 workflows**
**Verification:** 31 + 17 + 4 + 9 + 10 = **71**
*Last Updated: 2026-01-08*

View File

@@ -40,7 +40,7 @@ rand = "0.8.5"
regex = "1.10.4"
bitflags = { version = "2.5.0", features = ["serde"] }
itertools = "0.11.0"
lru = "0.16.3"
lru = "0.12.3"
bitfield-struct = "0.10.0"
crossbeam = { version = "0.8.4", features = ["crossbeam-queue"] }
rayon = { workspace = true }

View File

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:934c8131c12010dc837f6a2af5111b83f8f5d42f10485e9b3b971edb24c467f8
size 82201876
oid sha256:35cc06547a23b862ab9829351d74d944e60ea9dad3ecf593d15f0ce8445d145e
size 81710610

View File

@@ -160,9 +160,9 @@ impl ProgramInner {
.filter(|(_, var)| var.is_none())
.map(|(rid, _)| *rid)
.collect::<Vec<_>>();
demote_order.into_iter().for_each(|rid| {
self.regs.demote(&rid);
});
demote_order
.into_iter()
.for_each(|rid| self.regs.demote(&rid));
}
/// Release register entry
@@ -179,7 +179,7 @@ impl ProgramInner {
/// Notify register access to update LRU state
pub(crate) fn reg_access(&mut self, rid: asm::RegId) {
self.regs.promote(&rid);
self.regs.promote(&rid)
}
/// Retrieved least-recent-used heap entry
@@ -220,9 +220,9 @@ impl ProgramInner {
.filter(|(_mid, var)| var.is_none())
.map(|(mid, _)| *mid)
.collect::<Vec<_>>();
demote_order.into_iter().for_each(|mid| {
self.heap.demote(&mid);
});
demote_order
.into_iter()
.for_each(|mid| self.heap.demote(&mid));
}
_ => { /*Only release Heap slot*/ }
}
@@ -231,9 +231,7 @@ impl ProgramInner {
/// Notify heap access to update LRU state
pub(crate) fn heap_access(&mut self, mid: asm::MemId) {
match mid {
asm::MemId::Heap { .. } => {
self.heap.promote(&mid);
}
asm::MemId::Heap { .. } => self.heap.promote(&mid),
_ => { /* Do nothing: slot does not belong to the heap */ }
}
}

View File

@@ -1 +0,0 @@
benchmarks_parameters/*

View File

@@ -2,9 +2,7 @@ use benchmark::utilities::{
hlapi_throughput_num_ops, write_to_json, BenchmarkType, BitSizesSet, EnvConfig, OperatorType,
};
use criterion::{black_box, Criterion, Throughput};
use oprf::oprf_any_range2;
use rand::prelude::*;
use rayon::prelude::*;
use std::marker::PhantomData;
use std::ops::*;
use tfhe::core_crypto::prelude::Numeric;
@@ -13,42 +11,34 @@ use tfhe::keycache::NamedParam;
use tfhe::named::Named;
use tfhe::prelude::*;
use tfhe::{
ClientKey, CompressedServerKey, FheIntegerType, FheUint, FheUint10, FheUint12, FheUint128,
FheUint14, FheUint16, FheUint2, FheUint32, FheUint4, FheUint6, FheUint64, FheUint8, FheUintId,
IntegerId, KVStore,
ClientKey, CompressedServerKey, FheIntegerType, FheUint10, FheUint12, FheUint128, FheUint14,
FheUint16, FheUint2, FheUint32, FheUint4, FheUint6, FheUint64, FheUint8, FheUintId, IntegerId,
KVStore,
};
mod oprf;
use rayon::prelude::*;
trait BenchWait {
fn wait_bench(&self);
}
impl<Id: FheUintId> BenchWait for FheUint<Id> {
fn wait_bench(&self) {
self.wait()
}
}
impl<T1: FheWait, T2> BenchWait for (T1, T2) {
fn wait_bench(&self) {
self.0.wait()
}
}
fn bench_fhe_type_op<FheType, F, R>(
fn bench_fhe_type<FheType>(
c: &mut Criterion,
client_key: &ClientKey,
type_name: &str,
bit_size: usize,
display_name: &str,
func_name: &str,
func: F,
) where
F: Fn(&FheType, &FheType) -> R,
R: BenchWait,
FheType: FheEncrypt<u128, ClientKey>,
FheType: FheWait,
for<'a> &'a FheType: Add<&'a FheType, Output = FheType>
+ Sub<&'a FheType, Output = FheType>
+ Mul<&'a FheType, Output = FheType>
+ BitAnd<&'a FheType, Output = FheType>
+ BitOr<&'a FheType, Output = FheType>
+ BitXor<&'a FheType, Output = FheType>
+ Shl<&'a FheType, Output = FheType>
+ Shr<&'a FheType, Output = FheType>
+ RotateLeft<&'a FheType, Output = FheType>
+ RotateRight<&'a FheType, Output = FheType>
+ OverflowingAdd<&'a FheType, Output = FheType>
+ OverflowingSub<&'a FheType, Output = FheType>,
for<'a> FheType: FheMin<&'a FheType, Output = FheType> + FheMax<&'a FheType, Output = FheType>,
{
let mut bench_group = c.benchmark_group(type_name);
let mut bench_prefix = "hlapi".to_string();
@@ -81,90 +71,170 @@ fn bench_fhe_type_op<FheType, F, R>(
let lhs = FheType::encrypt(rng.gen(), client_key);
let rhs = FheType::encrypt(rng.gen(), client_key);
let bench_id = format!("{bench_prefix}::{func_name}::{param_name}::{type_name}");
let mut bench_id;
bench_id = format!("{bench_prefix}::add::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = func(&lhs, &rhs);
res.wait_bench();
let res = &lhs + &rhs;
res.wait();
black_box(res)
})
});
write_record(bench_id, display_name);
write_record(bench_id, "add");
bench_id = format!("{bench_prefix}::overflowing_add::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let (res, flag) = lhs.overflowing_add(&rhs);
res.wait();
black_box((res, flag))
})
});
write_record(bench_id, "overflowing_add");
bench_id = format!("{bench_prefix}::overflowing_sub::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let (res, flag) = lhs.overflowing_sub(&rhs);
res.wait();
black_box((res, flag))
})
});
write_record(bench_id, "overflowing_sub");
bench_id = format!("{bench_prefix}::sub::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = &lhs - &rhs;
res.wait();
black_box(res)
})
});
write_record(bench_id, "sub");
bench_id = format!("{bench_prefix}::mul::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = &lhs * &rhs;
res.wait();
black_box(res)
})
});
write_record(bench_id, "mul");
bench_id = format!("{bench_prefix}::bitand::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = &lhs & &rhs;
res.wait();
black_box(res)
})
});
write_record(bench_id, "bitand");
bench_id = format!("{bench_prefix}::bitor::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = &lhs | &rhs;
res.wait();
black_box(res)
})
});
write_record(bench_id, "bitor");
bench_id = format!("{bench_prefix}::bitxor::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = &lhs ^ &rhs;
res.wait();
black_box(res)
})
});
write_record(bench_id, "bitxor");
bench_id = format!("{bench_prefix}::left_shift::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = &lhs << &rhs;
res.wait();
black_box(res)
})
});
write_record(bench_id, "left_shift");
bench_id = format!("{bench_prefix}::right_shift::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = &lhs >> &rhs;
res.wait();
black_box(res)
})
});
write_record(bench_id, "right_shift");
bench_id = format!("{bench_prefix}::left_rotate::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = (&lhs).rotate_left(&rhs);
res.wait();
black_box(res)
})
});
write_record(bench_id, "left_rotate");
bench_id = format!("{bench_prefix}::right_rotate::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = (&lhs).rotate_right(&rhs);
res.wait();
black_box(res)
})
});
write_record(bench_id, "right_rotate");
bench_id = format!("{bench_prefix}::min::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = lhs.min(&rhs);
res.wait();
black_box(res)
})
});
write_record(bench_id, "min");
bench_id = format!("{bench_prefix}::max::{param_name}::{type_name}");
bench_group.bench_function(&bench_id, |b| {
b.iter(|| {
let res = lhs.max(&rhs);
res.wait();
black_box(res)
})
});
write_record(bench_id, "max");
}
macro_rules! bench_type_op (
(type_name: $fhe_type:ident, display_name: $display_name:literal, operation: $op:ident) => {
macro_rules! bench_type {
($fhe_type:ident) => {
::paste::paste! {
fn [<bench_ $fhe_type:snake _ $op>](c: &mut Criterion, cks: &ClientKey) {
bench_fhe_type_op::<$fhe_type, _, _>(
c,
cks,
stringify!($fhe_type),
$fhe_type::num_bits(),
$display_name,
stringify!($op),
|lhs, rhs| lhs.$op(rhs)
);
fn [<bench_ $fhe_type:snake>](c: &mut Criterion, cks: &ClientKey) {
bench_fhe_type::<$fhe_type>(c, cks, stringify!($fhe_type), $fhe_type::num_bits());
}
}
};
);
macro_rules! generate_typed_benches {
($fhe_type:ident) => {
bench_type_op!(type_name: $fhe_type, display_name: "add", operation: add);
bench_type_op!(type_name: $fhe_type, display_name: "overflowing_add", operation: overflowing_add);
bench_type_op!(type_name: $fhe_type, display_name: "sub", operation: sub);
bench_type_op!(type_name: $fhe_type, display_name: "overflowing_sub", operation: overflowing_sub);
bench_type_op!(type_name: $fhe_type, display_name: "mul", operation: mul);
bench_type_op!(type_name: $fhe_type, display_name: "bitand", operation: bitand);
bench_type_op!(type_name: $fhe_type, display_name: "bitor", operation: bitor);
bench_type_op!(type_name: $fhe_type, display_name: "bitxor", operation: bitxor);
bench_type_op!(type_name: $fhe_type, display_name: "left_shift", operation: shl);
bench_type_op!(type_name: $fhe_type, display_name: "right_shift", operation: shr);
bench_type_op!(type_name: $fhe_type, display_name: "left_rotate", operation: rotate_left);
bench_type_op!(type_name: $fhe_type, display_name: "right_rotate", operation: rotate_right);
bench_type_op!(type_name: $fhe_type, display_name: "min", operation: min);
bench_type_op!(type_name: $fhe_type, display_name: "max", operation: max);
};
}
// Generate benches for all FheUint types
generate_typed_benches!(FheUint2);
generate_typed_benches!(FheUint4);
generate_typed_benches!(FheUint6);
generate_typed_benches!(FheUint8);
generate_typed_benches!(FheUint10);
generate_typed_benches!(FheUint12);
generate_typed_benches!(FheUint14);
generate_typed_benches!(FheUint16);
generate_typed_benches!(FheUint32);
generate_typed_benches!(FheUint64);
generate_typed_benches!(FheUint128);
macro_rules! run_benches {
($c:expr, $cks:expr, $($fhe_type:ident),+ $(,)?) => {
$(
::paste::paste! {
[<bench_ $fhe_type:snake _add>]($c, $cks);
[<bench_ $fhe_type:snake _overflowing_add>]($c, $cks);
[<bench_ $fhe_type:snake _sub>]($c, $cks);
[<bench_ $fhe_type:snake _overflowing_sub>]($c, $cks);
[<bench_ $fhe_type:snake _mul>]($c, $cks);
[<bench_ $fhe_type:snake _bitand>]($c, $cks);
[<bench_ $fhe_type:snake _bitor>]($c, $cks);
[<bench_ $fhe_type:snake _bitxor>]($c, $cks);
[<bench_ $fhe_type:snake _shl>]($c, $cks);
[<bench_ $fhe_type:snake _shr>]($c, $cks);
[<bench_ $fhe_type:snake _rotate_left>]($c, $cks);
[<bench_ $fhe_type:snake _rotate_right>]($c, $cks);
[<bench_ $fhe_type:snake _min>]($c, $cks);
[<bench_ $fhe_type:snake _max>]($c, $cks);
}
)+
};
}
bench_type!(FheUint2);
bench_type!(FheUint4);
bench_type!(FheUint6);
bench_type!(FheUint8);
bench_type!(FheUint10);
bench_type!(FheUint12);
bench_type!(FheUint14);
bench_type!(FheUint16);
bench_type!(FheUint32);
bench_type!(FheUint64);
bench_type!(FheUint128);
trait TypeDisplay {
fn fmt(f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -374,7 +444,7 @@ fn main() {
match env_config.bit_sizes_set {
BitSizesSet::Fast => {
run_benches!(&mut c, &cks, FheUint64);
bench_fhe_uint64(&mut c, &cks);
// KVStore Benches
if benched_device == tfhe::Device::Cpu {
@@ -382,11 +452,17 @@ fn main() {
}
}
_ => {
// Call all benchmarks for all types
run_benches!(
&mut c, &cks, FheUint2, FheUint4, FheUint6, FheUint8, FheUint10, FheUint12,
FheUint14, FheUint16, FheUint32, FheUint64, FheUint128
);
bench_fhe_uint2(&mut c, &cks);
bench_fhe_uint4(&mut c, &cks);
bench_fhe_uint6(&mut c, &cks);
bench_fhe_uint8(&mut c, &cks);
bench_fhe_uint10(&mut c, &cks);
bench_fhe_uint12(&mut c, &cks);
bench_fhe_uint14(&mut c, &cks);
bench_fhe_uint16(&mut c, &cks);
bench_fhe_uint32(&mut c, &cks);
bench_fhe_uint64(&mut c, &cks);
bench_fhe_uint128(&mut c, &cks);
// KVStore Benches
if benched_device == tfhe::Device::Cpu {
@@ -405,7 +481,5 @@ fn main() {
}
}
oprf_any_range2();
c.final_summary();
}

View File

@@ -1,44 +0,0 @@
use benchmark::params_aliases::BENCH_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M128;
use criterion::{black_box, criterion_group, Criterion};
use std::num::NonZeroU64;
use tfhe::{set_server_key, ClientKey, ConfigBuilder, FheUint64, RangeForRandom, Seed, ServerKey};
pub fn oprf_any_range(c: &mut Criterion) {
let bench_name = "hlapi::oprf_any_range";
let mut bench_group = c.benchmark_group(bench_name);
bench_group
.sample_size(15)
.measurement_time(std::time::Duration::from_secs(30));
let param = BENCH_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M128;
let config = ConfigBuilder::with_custom_parameters(param).build();
let cks = ClientKey::generate(config);
let sks = ServerKey::new(&cks);
rayon::broadcast(|_| set_server_key(sks.clone()));
set_server_key(sks);
for excluded_upper_bound in [3, 52] {
let range = RangeForRandom::new_from_excluded_upper_bound(
NonZeroU64::new(excluded_upper_bound).unwrap(),
);
let bench_id_oprf = format!("{bench_name}::bound_{excluded_upper_bound}");
bench_group.bench_function(&bench_id_oprf, |b| {
b.iter(|| {
_ = black_box(FheUint64::generate_oblivious_pseudo_random_custom_range(
Seed(0),
&range,
None,
));
})
});
}
bench_group.finish()
}
criterion_group!(oprf_any_range2, oprf_any_range);

View File

@@ -27,7 +27,6 @@ rand_distr = "0.4.3"
criterion = "0.5.1"
doc-comment = "0.3.3"
serde_json = "1.0.94"
num-bigint = "0.4.6"
# clap has to be pinned as its minimum supported rust version
# changes often between minor releases, which breaks our CI
clap = { version = "=4.5.30", features = ["derive"] }

View File

@@ -2,30 +2,14 @@
This document explains the mechanism and steps to generate an oblivious encrypted random value using only server keys.
The goal is to give to the server the possibility to generate a random value, which will be obtained in an encrypted format and will remain unknown to the server.
The goal is to give to the server the possibility to generate a random value, which will be obtained in an encrypted format and will remain unknown to the server. The implementation is based on [this article](https://eprint.iacr.org/2024/665).
The main method for this is `FheUint::generate_oblivious_pseudo_random_custom_range` which returns an integer in the given range.
Currently the range can only be in the form `[0, excluded_upper_bound[` with any `excluded_upper_bound` in `[1, 2^64[`
It follows a distribution close to the uniform.
This function guarantees the norm-1 distance (defined as ∆(P,Q) := 1/2 Sum[ω∈Ω] |P(ω) − Q(ω)|)
between the actual distribution and the target uniform distribution will be below the `max_distance` argument (which must be in ]0, 1[).
The higher the distance, the more dissimilar the actual distribution is from the target uniform distribution.
The default value for `max_distance` is `2^-128` if `None` is provided.
Higher values allow better performance but must be considered carefully in the context of their target application as it may have serious unintended consequences.
If the range is a power of 2, the distribution is uniform (for any `max_distance`) and the cost is smaller.
For powers of 2 specifically there are two methods on `FheUint` and `FheInt` (based on [this article](https://eprint.iacr.org/2024/665)):
This is possible through two methods on `FheUint` and `FheInt`:
- `generate_oblivious_pseudo_random` which returns an integer taken uniformly in the full integer range (`[0; 2^N[` for a `FheUintN` and `[-2^(N-1); 2^(N-1)[` for a `FheIntN`).
- `generate_oblivious_pseudo_random_bounded` which returns an integer taken uniformly in `[0; 2^random_bits_count[`. For a `FheUintN`, we must have `random_bits_count <= N`. For a `FheIntN`, we must have `random_bits_count <= N - 1`.
These method functions take a seed `Seed` as input, which could be any `u128` value.
They rely on the use of the usual server key.
Both methods take a seed `Seed` as input, which can be any `u128` value.
They both rely on the use of the usual server key.
The output is reproducible, i.e., the function is deterministic from the inputs: assuming the same hardware, seed and server key, this function outputs the same random encrypted value.
@@ -34,8 +18,7 @@ Here is an example of the usage:
```rust
use tfhe::prelude::FheDecrypt;
use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheUint8, FheInt8, RangeForRandom, Seed};
use std::num::NonZeroU64;
use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheUint8, FheInt8, Seed};
pub fn main() {
let config = ConfigBuilder::default().build();
@@ -43,30 +26,23 @@ pub fn main() {
set_server_key(server_key);
let excluded_upper_bound = NonZeroU64::new(3).unwrap();
let range = RangeForRandom::new_from_excluded_upper_bound(excluded_upper_bound);
// in [0, excluded_upper_bound[ = {0, 1, 2}
let ct_res = FheUint8::generate_oblivious_pseudo_random_custom_range(Seed(0), &range, None);
let dec_result: u8 = ct_res.decrypt(&client_key);
let random_bits_count = 3;
// in [0, 2^8[
let ct_res = FheUint8::generate_oblivious_pseudo_random(Seed(0));
let dec_result: u8 = ct_res.decrypt(&client_key);
// in [0, 2^random_bits_count[ = [0, 8[
let ct_res = FheUint8::generate_oblivious_pseudo_random_bounded(Seed(0), random_bits_count);
let dec_result: u8 = ct_res.decrypt(&client_key);
assert!(dec_result < (1 << random_bits_count));
// in [-2^7, 2^7[
let ct_res = FheInt8::generate_oblivious_pseudo_random(Seed(0));
let dec_result: i8 = ct_res.decrypt(&client_key);
// in [0, 2^random_bits_count[ = [0, 8[
let ct_res = FheInt8::generate_oblivious_pseudo_random_bounded(Seed(0), random_bits_count);
let dec_result: i8 = ct_res.decrypt(&client_key);
assert!(dec_result < (1 << random_bits_count));
}

View File

@@ -540,12 +540,10 @@ pub fn sup_diff(cumulative_bins: &[u64], theoretical_cdf: &[f64]) -> f64 {
.iter()
.copied()
.zip_eq(theoretical_cdf.iter().copied())
.enumerate()
.map(|(i, (x, theoretical_cdf))| {
.map(|(x, theoretical_cdf)| {
let empirical_cdf = x as f64 / number_of_samples as f64;
if i == cumulative_bins.len() - 1 {
assert_eq!(theoretical_cdf, 1.0);
if theoretical_cdf == 1.0 {
assert_eq!(empirical_cdf, 1.0);
}

View File

@@ -4,9 +4,7 @@ use crate::high_level_api::keys::InternalServerKey;
use crate::high_level_api::re_randomization::ReRandomizationMetadata;
#[cfg(feature = "gpu")]
use crate::integer::gpu::ciphertext::{CudaSignedRadixCiphertext, CudaUnsignedRadixCiphertext};
use crate::shortint::MessageModulus;
use crate::{FheInt, Seed};
use std::num::NonZeroU64;
impl<Id: FheUintId> FheUint<Id> {
/// Generates an encrypted unsigned integer
@@ -94,7 +92,7 @@ impl<Id: FheUintId> FheUint<Id> {
}
})
}
/// Generates an encrypted unsigned integer
/// Generates an encrypted `num_block` blocks unsigned integer
/// taken uniformly in `[0, 2^random_bits_count[` using the given seed.
/// The encrypted value is oblivious to the server.
/// It can be useful to make server random generation deterministic.
@@ -152,103 +150,6 @@ impl<Id: FheUintId> FheUint<Id> {
}
})
}
/// Generates an encrypted unsigned integer
/// taken almost uniformly in the given range using the given seed.
/// Currently the range can only be in the form `[0, excluded_upper_bound[`
/// with any `excluded_upper_bound` in `[1, 2^64[`.
///
/// The encrypted value is oblivious to the server.
/// It can be useful to make server random generation deterministic.
///
/// This function guarantees the norm-1 distance
/// (defined as ∆(P,Q) := 1/2 Sum[ω∈Ω] |P(ω) − Q(ω)|)
/// between the actual distribution and the target uniform distribution
/// will be below the `max_distance` argument (which must be in ]0, 1[).
/// The higher the distance, the more dissimilar the actual distribution is
/// from the target uniform distribution.
///
/// The default value for `max_distance` is `2^-128` if `None` is provided.
///
/// Higher values allow better performance but must be considered carefully in the context of
/// their target application as it may have serious unintended consequences.
///
/// If the range is a power of 2, the distribution is uniform (for any `max_distance`) and
/// the cost is smaller.
///
/// ```rust
/// use std::num::NonZeroU64;
/// use tfhe::prelude::FheDecrypt;
/// use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheUint8, RangeForRandom, Seed};
///
/// let config = ConfigBuilder::default().build();
/// let (client_key, server_key) = generate_keys(config);
///
/// set_server_key(server_key);
///
/// let excluded_upper_bound = NonZeroU64::new(3).unwrap();
///
/// let range = RangeForRandom::new_from_excluded_upper_bound(excluded_upper_bound);
///
/// let ct_res = FheUint8::generate_oblivious_pseudo_random_custom_range(Seed(0), &range, None);
///
/// let dec_result: u16 = ct_res.decrypt(&client_key);
/// assert!(dec_result < excluded_upper_bound.get() as u16);
/// ```
pub fn generate_oblivious_pseudo_random_custom_range(
seed: Seed,
range: &RangeForRandom,
max_distance: Option<f64>,
) -> Self {
let excluded_upper_bound = range.excluded_upper_bound;
if excluded_upper_bound.is_power_of_two() {
let random_bits_count = excluded_upper_bound.ilog2() as u64;
Self::generate_oblivious_pseudo_random_bounded(seed, random_bits_count)
} else {
let max_distance = max_distance.unwrap_or_else(|| 2_f64.powi(-128));
assert!(
0_f64 < max_distance && max_distance < 1_f64,
"max_distance (={max_distance}) should be in ]0, 1["
);
global_state::with_internal_keys(|key| match key {
InternalServerKey::Cpu(key) => {
let message_modulus = key.message_modulus();
let num_input_random_bits = num_input_random_bits_for_max_distance(
excluded_upper_bound,
max_distance,
message_modulus,
);
let num_blocks_output = Id::num_blocks(key.message_modulus()) as u64;
let ct = key
.pbs_key()
.par_generate_oblivious_pseudo_random_unsigned_custom_range(
seed,
num_input_random_bits,
excluded_upper_bound,
num_blocks_output,
);
Self::new(ct, key.tag.clone(), ReRandomizationMetadata::default())
}
#[cfg(feature = "gpu")]
InternalServerKey::Cuda(_cuda_key) => {
panic!("Gpu does not support this operation yet.")
}
#[cfg(feature = "hpu")]
InternalServerKey::Hpu(_device) => {
panic!("Hpu does not support this operation yet.")
}
})
}
}
#[cfg(feature = "gpu")]
/// Returns the amount of memory required to execute generate_oblivious_pseudo_random_bounded
///
@@ -372,7 +273,7 @@ impl<Id: FheIntId> FheInt<Id> {
}
})
}
/// Generates an encrypted signed integer
/// Generates an encrypted `num_block` blocks signed integer
/// taken uniformly in `[0, 2^random_bits_count[` using the given seed.
/// The encrypted value is oblivious to the server.
/// It can be useful to make server random generation deterministic.
@@ -466,350 +367,10 @@ impl<Id: FheIntId> FheInt<Id> {
}
}
pub struct RangeForRandom {
excluded_upper_bound: NonZeroU64,
}
impl RangeForRandom {
pub fn new_from_excluded_upper_bound(excluded_upper_bound: NonZeroU64) -> Self {
Self {
excluded_upper_bound,
}
}
}
fn num_input_random_bits_for_max_distance(
excluded_upper_bound: NonZeroU64,
max_distance: f64,
message_modulus: MessageModulus,
) -> u64 {
assert!(message_modulus.0.is_power_of_two());
let log_message_modulus = message_modulus.0.ilog2() as u64;
let mut random_block_count = 1;
let random_block_count = loop {
let random_bit_count = random_block_count * log_message_modulus;
let distance = distance(excluded_upper_bound.get(), random_bit_count);
if distance < max_distance {
break random_block_count;
}
random_block_count += 1;
};
random_block_count * log_message_modulus
}
fn distance(excluded_upper_bound: u64, random_bit_count: u64) -> f64 {
let remainder = mod_pow_2(random_bit_count, excluded_upper_bound);
remainder as f64 * (excluded_upper_bound - remainder) as f64
/ (2_f64.powi(random_bit_count as i32) * excluded_upper_bound as f64)
}
// Computes 2^exponent % modulus
fn mod_pow_2(exponent: u64, modulus: u64) -> u64 {
assert_ne!(modulus, 0);
if modulus == 1 {
return 0;
}
let mut result: u128 = 1;
let mut base: u128 = 2; // We are calculating 2^i
// We cast exponent to u128 to match the loop, though u64 is fine
let mut exp = exponent;
let mod_val = modulus as u128;
while exp > 0 {
// If exponent is odd, multiply result with base
if exp % 2 == 1 {
result = (result * base) % mod_val;
}
// Square the base
base = (base * base) % mod_val;
// Divide exponent by 2
exp /= 2;
}
result as u64
}
#[cfg(test)]
mod test {
use super::*;
use crate::integer::server_key::radix_parallel::tests_unsigned::test_oprf::{
oprf_density_function, p_value_upper_bound_oprf_almost_uniformity_from_values,
probability_density_function_from_density,
};
use crate::prelude::FheDecrypt;
use crate::shortint::oprf::test::test_uniformity;
use crate::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS32_PBS_TUNIFORM_2M128;
use crate::{generate_keys, set_server_key, ClientKey, ConfigBuilder, FheUint8, Seed};
use num_bigint::BigUint;
use rand::{thread_rng, Rng};
use rayon::iter::{IntoParallelIterator, ParallelIterator};
// Helper: The "Oracle" implementation using BigInt
// This is slow but mathematically guaranteed to be correct.
fn oracle_mod_pow_2(exponent: u64, modulus: u64) -> u64 {
assert_ne!(modulus, 0);
if modulus == 1 {
return 0;
}
let base = BigUint::from(2u32);
let exp = BigUint::from(exponent);
let modu = BigUint::from(modulus);
let res = base.modpow(&exp, &modu);
res.iter_u64_digits().next().unwrap_or(0)
}
#[test]
fn test_edge_cases() {
// 2^0 % 10 = 1
assert_eq!(mod_pow_2(0, 10), 1, "Failed exponent 0");
// 2^10 % 1 = 0
assert_eq!(mod_pow_2(10, 1), 0, "Failed modulus 1");
// 2^1 % 10 = 2
assert_eq!(mod_pow_2(1, 10), 2, "Failed exponent 1");
// 2^3 % 5 = 8 % 5 = 3
assert_eq!(mod_pow_2(3, 5), 3, "Failed small calc");
}
#[test]
fn test_boundaries_and_overflow() {
assert_eq!(mod_pow_2(2, u64::MAX), 4);
assert_eq!(mod_pow_2(u64::MAX, 3), 2);
assert_eq!(mod_pow_2(5, 32), 0);
}
#[test]
fn test_against_oracle() {
let mut rng = thread_rng();
for _ in 0..1_000_000 {
let exp: u64 = rng.gen();
let mod_val: u64 = rng.gen();
let mod_val = if mod_val == 0 { 1 } else { mod_val };
let expected = oracle_mod_pow_2(exp, mod_val);
let actual = mod_pow_2(exp, mod_val);
assert_eq!(
actual, expected,
"Mismatch! 2^{exp} % {mod_val} => Ours: {actual}, Oracle: {expected}",
);
}
}
#[test]
fn test_distance_with_uniform() {
for excluded_upper_bound in 1..20 {
for num_input_random_bits in 0..20 {
let density = oprf_density_function(excluded_upper_bound, num_input_random_bits);
let theoretical_pdf = probability_density_function_from_density(&density);
let p_uniform = 1. / excluded_upper_bound as f64;
let actual_distance: f64 = 1. / 2.
* theoretical_pdf
.iter()
.map(|p| (*p - p_uniform).abs())
.sum::<f64>();
let theoretical_distance = distance(excluded_upper_bound, num_input_random_bits);
assert!(
(theoretical_distance - actual_distance).abs()
<= theoretical_distance / 1_000_000.,
"{theoretical_distance} != {actual_distance}"
);
}
}
}
#[test]
fn test_uniformity_scalar_mul_shift() {
let max_distance = 2_f64.powi(-20);
let message_modulus = MessageModulus(4);
let excluded_upper_bound = 3;
let num_input_random_bits = num_input_random_bits_for_max_distance(
NonZeroU64::new(excluded_upper_bound).unwrap(),
max_distance,
message_modulus,
);
let sample_count: usize = 10_000_000;
let p_value_limit: f64 = 0.001;
// The distribution is not exactly uniform
// This check ensures that, with the given low max_distance,
// the distribution is indistinguishable from uniform at the given sample count
test_uniformity(sample_count, p_value_limit, excluded_upper_bound, |_seed| {
oprf_clear_equivalent(excluded_upper_bound, num_input_random_bits)
});
}
fn oprf_clear_equivalent(excluded_upper_bound: u64, num_input_random_bits: u64) -> u64 {
let random_input_upper_bound = 1 << num_input_random_bits;
let random_input = thread_rng().gen_range(0..random_input_upper_bound);
(random_input * excluded_upper_bound) >> num_input_random_bits
}
#[test]
fn test_uniformity_generate_oblivious_pseudo_random_custom_range() {
let base_sample_count: usize = 10_000;
let p_value_limit: f64 = 0.001;
let params = PARAM_MESSAGE_2_CARRY_2_KS32_PBS_TUNIFORM_2M128;
let config = ConfigBuilder::with_custom_parameters(params).build();
let (cks, sks) = generate_keys(config);
rayon::broadcast(|_| set_server_key(sks.clone()));
let message_modulus = params.message_modulus;
// [0.7, 0.1] for `max_distance` chosen to have `num_input_random_bits` be [2, 4]
// for any of the listed `excluded_upper_bound`
for (expected_num_input_random_bits, max_distance, excluded_upper_bounds) in
[(2, 0.7, [3, 5, 6, 7]), (4, 0.1, [3, 5, 6, 7])]
{
for excluded_upper_bound in excluded_upper_bounds {
let sample_count = base_sample_count * excluded_upper_bound as usize;
let excluded_upper_bound = NonZeroU64::new(excluded_upper_bound).unwrap();
let num_input_random_bits = num_input_random_bits_for_max_distance(
excluded_upper_bound,
max_distance,
message_modulus,
);
assert_eq!(num_input_random_bits, expected_num_input_random_bits);
test_uniformity_generate_oblivious_pseudo_random_custom_range2(
sample_count,
p_value_limit,
message_modulus,
&cks,
excluded_upper_bound,
max_distance,
);
}
}
}
fn test_uniformity_generate_oblivious_pseudo_random_custom_range2(
sample_count: usize,
p_value_limit: f64,
message_modulus: MessageModulus,
cks: &ClientKey,
excluded_upper_bound: NonZeroU64,
max_distance: f64,
) {
let num_input_random_bits = num_input_random_bits_for_max_distance(
excluded_upper_bound,
max_distance,
message_modulus,
);
let range = RangeForRandom::new_from_excluded_upper_bound(excluded_upper_bound);
let real_values: Vec<u64> = (0..sample_count)
.into_par_iter()
.map(|_| {
let img = FheUint8::generate_oblivious_pseudo_random_custom_range(
Seed(rand::thread_rng().gen::<u128>()),
&range,
Some(max_distance),
);
img.decrypt(cks)
})
.collect();
let excluded_upper_bound = excluded_upper_bound.get();
let uniform_values: Vec<u64> = (0..sample_count)
.into_par_iter()
.map(|_| thread_rng().gen_range(0..excluded_upper_bound))
.collect();
let clear_oprf_value_lower_num_input_random_bits = (0..sample_count)
.into_par_iter()
.map(|_| oprf_clear_equivalent(excluded_upper_bound, num_input_random_bits - 1))
.collect();
let clear_oprf_value_same_num_input_random_bits = (0..sample_count)
.into_par_iter()
.map(|_| oprf_clear_equivalent(excluded_upper_bound, num_input_random_bits))
.collect();
let clear_oprf_value_higher_num_input_random_bits = (0..sample_count)
.into_par_iter()
.map(|_| oprf_clear_equivalent(excluded_upper_bound, num_input_random_bits + 1))
.collect();
for (values, should_have_low_p_value) in [
(&real_values, false),
// to test that the same distribution passes
(&clear_oprf_value_same_num_input_random_bits, false),
// to test that other distribution don't pass
// (makes sure the test is statistically powerful)
(&uniform_values, true),
(&clear_oprf_value_lower_num_input_random_bits, true),
(&clear_oprf_value_higher_num_input_random_bits, true),
] {
let p_value_upper_bound = p_value_upper_bound_oprf_almost_uniformity_from_values(
values,
num_input_random_bits,
excluded_upper_bound,
);
println!("p_value_upper_bound: {p_value_upper_bound}");
if should_have_low_p_value {
assert!(
p_value_upper_bound < p_value_limit,
"p_value_upper_bound (={p_value_upper_bound}) expected to be smaller than {p_value_limit}"
);
} else {
assert!(
p_value_limit < p_value_upper_bound ,
"p_value_upper_bound (={p_value_upper_bound}) expected to be bigger than {p_value_limit}"
);
}
}
}
}
#[cfg(test)]
#[cfg(feature = "gpu")]
#[allow(unused_imports)]
mod test_gpu {
mod test {
use crate::prelude::*;
use crate::{
generate_keys, set_server_key, ConfigBuilder, FheInt128, FheUint32, FheUint64, GpuIndex,

View File

@@ -48,7 +48,6 @@ macro_rules! export_concrete_array_types {
}
pub use crate::core_crypto::commons::math::random::{Seed, XofSeed};
pub use crate::high_level_api::integers::oprf::RangeForRandom;
pub use crate::integer::server_key::MatchValues;
use crate::{error, Error, Versionize};
use backward_compatibility::compressed_ciphertext_list::SquashedNoiseCiphertextStateVersions;

View File

@@ -2,7 +2,6 @@ use super::{RadixCiphertext, ServerKey, SignedRadixCiphertext};
use crate::core_crypto::commons::generators::DeterministicSeeder;
use crate::core_crypto::prelude::DefaultRandomGenerator;
use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
use std::num::NonZeroU64;
pub use tfhe_csprng::seeders::{Seed, Seeder};
@@ -164,7 +163,6 @@ impl ServerKey {
/// as `num_input_random_bits`
///
/// ```rust
/// use std::num::NonZeroU64;
/// use tfhe::integer::gen_keys_radix;
/// use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128;
/// use tfhe::Seed;
@@ -175,7 +173,7 @@ impl ServerKey {
/// let (cks, sks) = gen_keys_radix(PARAM_MESSAGE_2_CARRY_2_KS_PBS_GAUSSIAN_2M128, size);
///
/// let num_input_random_bits = 5;
/// let excluded_upper_bound = NonZeroU64::new(3).unwrap();
/// let excluded_upper_bound = 3;
/// let num_blocks_output = 8;
///
/// let ct_res = sks.par_generate_oblivious_pseudo_random_unsigned_custom_range(
@@ -188,17 +186,15 @@ impl ServerKey {
/// // Decrypt:
/// let dec_result: u64 = cks.decrypt(&ct_res);
///
/// assert!(dec_result < excluded_upper_bound.get());
/// assert!(dec_result < excluded_upper_bound);
/// ```
pub fn par_generate_oblivious_pseudo_random_unsigned_custom_range(
&self,
seed: Seed,
num_input_random_bits: u64,
excluded_upper_bound: NonZeroU64,
excluded_upper_bound: u64,
num_blocks_output: u64,
) -> RadixCiphertext {
let excluded_upper_bound = excluded_upper_bound.get();
assert!(self.message_modulus().0.is_power_of_two());
let message_bits_count = self.message_modulus().0.ilog2() as u64;

View File

@@ -10,7 +10,6 @@ use crate::integer::{BooleanBlock, IntegerKeyKind, RadixCiphertext, RadixClientK
use crate::shortint::parameters::*;
use crate::{ClientKey, CompressedServerKey, MatchValues, Seed, Tag};
use std::cmp::{max, min};
use std::num::NonZeroU64;
use std::sync::Arc;
create_parameterized_test!(random_op_sequence {
@@ -499,18 +498,7 @@ where
&ServerKey::par_generate_oblivious_pseudo_random_unsigned_integer_bounded,
);
let oprf_custom_range_executor = OpSequenceCpuFunctionExecutor::new(
&|sk: &ServerKey,
seed: Seed,
num_input_random_bits: u64,
excluded_upper_bound: u64,
num_blocks_output: u64| {
sk.par_generate_oblivious_pseudo_random_unsigned_custom_range(
seed,
num_input_random_bits,
NonZeroU64::new(excluded_upper_bound).unwrap_or(NonZeroU64::new(1).unwrap()),
num_blocks_output,
)
},
&ServerKey::par_generate_oblivious_pseudo_random_unsigned_custom_range,
);
let mut oprf_ops: Vec<(OprfExecutor, String)> = vec![(

View File

@@ -9,7 +9,6 @@ use crate::integer::{IntegerKeyKind, RadixCiphertext, RadixClientKey, ServerKey}
use crate::shortint::parameters::*;
use statrs::distribution::ContinuousCDF;
use std::collections::HashMap;
use std::num::NonZeroU64;
use std::sync::Arc;
use tfhe_csprng::seeders::Seed;
@@ -37,19 +36,9 @@ fn oprf_any_range_unsigned<P>(param: P)
where
P: Into<TestParameters>,
{
let executor =
CpuFunctionExecutor::new(&|sk: &ServerKey,
seed: Seed,
num_input_random_bits: u64,
excluded_upper_bound: u64,
num_blocks_output: u64| {
sk.par_generate_oblivious_pseudo_random_unsigned_custom_range(
seed,
num_input_random_bits,
NonZeroU64::new(excluded_upper_bound).unwrap(),
num_blocks_output,
)
});
let executor = CpuFunctionExecutor::new(
&ServerKey::par_generate_oblivious_pseudo_random_unsigned_custom_range,
);
oprf_any_range_test(param, executor);
}
@@ -57,19 +46,9 @@ fn oprf_almost_uniformity_unsigned<P>(param: P)
where
P: Into<TestParameters>,
{
let executor =
CpuFunctionExecutor::new(&|sk: &ServerKey,
seed: Seed,
num_input_random_bits: u64,
excluded_upper_bound: u64,
num_blocks_output: u64| {
sk.par_generate_oblivious_pseudo_random_unsigned_custom_range(
seed,
num_input_random_bits,
NonZeroU64::new(excluded_upper_bound).unwrap(),
num_blocks_output,
)
});
let executor = CpuFunctionExecutor::new(
&ServerKey::par_generate_oblivious_pseudo_random_unsigned_custom_range,
);
oprf_almost_uniformity_test(param, executor);
}
@@ -110,7 +89,7 @@ where
);
}
pub(crate) fn oprf_uniformity_test<P, E>(param: P, mut executor: E)
pub fn oprf_uniformity_test<P, E>(param: P, mut executor: E)
where
P: Into<TestParameters>,
E: for<'a> FunctionExecutor<(Seed, u64, u64), RadixCiphertext>,
@@ -134,7 +113,7 @@ where
});
}
pub(crate) fn oprf_any_range_test<P, E>(param: P, mut executor: E)
pub fn oprf_any_range_test<P, E>(param: P, mut executor: E)
where
P: Into<TestParameters>,
E: for<'a> FunctionExecutor<(Seed, u64, u64, u64), RadixCiphertext>,
@@ -170,7 +149,7 @@ where
}
}
pub(crate) fn oprf_almost_uniformity_test<P, E>(param: P, mut executor: E)
pub fn oprf_almost_uniformity_test<P, E>(param: P, mut executor: E)
where
P: Into<TestParameters>,
E: for<'a> FunctionExecutor<(Seed, u64, u64, u64), RadixCiphertext>,
@@ -186,70 +165,40 @@ where
let num_input_random_bits: u64 = 4;
let num_blocks_output = 64;
let excluded_upper_bound = 10;
let random_input_upper_bound = 1 << num_input_random_bits;
let mut density = vec![0_usize; excluded_upper_bound as usize];
for i in 0..random_input_upper_bound {
let index = ((i * excluded_upper_bound) as f64 / random_input_upper_bound as f64) as usize;
density[index] += 1;
}
let theoretical_pdf: Vec<f64> = density
.iter()
.map(|count| *count as f64 / random_input_upper_bound as f64)
.collect();
let values: Vec<u64> = (0..sample_count)
.map(|seed| {
let img = executor.execute((
Seed(seed as u128),
num_input_random_bits,
excluded_upper_bound,
excluded_upper_bound as u64,
num_blocks_output,
));
cks.decrypt(&img)
})
.collect();
let p_value_upper_bound = p_value_upper_bound_oprf_almost_uniformity_from_values(
&values,
num_input_random_bits,
excluded_upper_bound,
);
assert!(p_value_limit < p_value_upper_bound);
}
pub(crate) fn p_value_upper_bound_oprf_almost_uniformity_from_values(
values: &[u64],
num_input_random_bits: u64,
excluded_upper_bound: u64,
) -> f64 {
let density = oprf_density_function(excluded_upper_bound, num_input_random_bits);
let theoretical_pdf = probability_density_function_from_density(&density);
let mut bins = vec![0_u64; excluded_upper_bound as usize];
for value in values.iter().copied() {
for value in values {
bins[value as usize] += 1;
}
let cumulative_bins = cumulate(&bins);
let theoretical_cdf = cumulate(&theoretical_pdf);
let sup_diff = sup_diff(&cumulative_bins, &theoretical_cdf);
let p_value_upper_bound = dkw_alpha_from_epsilon(sample_count as f64, sup_diff);
dkw_alpha_from_epsilon(values.len() as f64, sup_diff)
}
pub(crate) fn oprf_density_function(
excluded_upper_bound: u64,
num_input_random_bits: u64,
) -> Vec<usize> {
let random_input_upper_bound = 1 << num_input_random_bits;
let mut density = vec![0_usize; excluded_upper_bound as usize];
for i in 0..random_input_upper_bound {
let output = ((i * excluded_upper_bound) >> num_input_random_bits) as usize;
density[output] += 1;
}
density
}
pub(crate) fn probability_density_function_from_density(density: &[usize]) -> Vec<f64> {
let total_count: usize = density.iter().copied().sum();
density
.iter()
.map(|count| *count as f64 / total_count as f64)
.collect()
assert!(p_value_limit < p_value_upper_bound);
}

View File

@@ -475,12 +475,8 @@ pub(crate) mod test {
}
}
pub(crate) fn test_uniformity<F>(
sample_count: usize,
p_value_limit: f64,
distinct_values: u64,
f: F,
) where
pub fn test_uniformity<F>(sample_count: usize, p_value_limit: f64, distinct_values: u64, f: F)
where
F: Sync + Fn(usize) -> u64,
{
let p_value = uniformity_p_value(f, sample_count, distinct_values);
@@ -491,7 +487,7 @@ pub(crate) mod test {
);
}
pub(crate) fn uniformity_p_value<F>(f: F, sample_count: usize, distinct_values: u64) -> f64
fn uniformity_p_value<F>(f: F, sample_count: usize, distinct_values: u64) -> f64
where
F: Sync + Fn(usize) -> u64,
{
@@ -499,11 +495,8 @@ pub(crate) mod test {
let mut values_count = HashMap::new();
for i in values.iter().copied() {
assert!(
i < distinct_values,
"i (={i}) is supposed to be smaller than distinct_values (={distinct_values})",
);
for i in &values {
assert!(*i < distinct_values, "i {} dv{}", *i, distinct_values);
*values_count.entry(i).or_insert(0) += 1;
}

View File

@@ -727,15 +727,8 @@ async function compactPublicKeyZeroKnowledgeBench() {
serialized_size = list.safe_serialize(BigInt(10000000)).length;
}
const mean = timing / bench_loops;
let base_bench_str = "compact_fhe_uint_proven_encryption_";
let supportsThreads = await threads();
if (!supportsThreads) {
base_bench_str += "unsafe_coop_";
}
const common_bench_str =
base_bench_str +
"compact_fhe_uint_proven_encryption_" +
params.zk_scheme +
"_" +
bits_to_encrypt +