mirror of
https://github.com/zama-ai/tfhe-rs.git
synced 2026-01-11 07:38:08 -05:00
Compare commits
223 Commits
0.3.0
...
am/refacto
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
58169f9365 | ||
|
|
e14be3b41a | ||
|
|
f1c21888a7 | ||
|
|
2624beb7fa | ||
|
|
e44c38a102 | ||
|
|
4535230874 | ||
|
|
a7b2d9b228 | ||
|
|
ab923a3ebc | ||
|
|
a0e85fb355 | ||
|
|
ecee305340 | ||
|
|
f08ea8cf85 | ||
|
|
096e320b97 | ||
|
|
95aac64c1c | ||
|
|
76aaa56691 | ||
|
|
a40489bdd2 | ||
|
|
4bf617eb10 | ||
|
|
070073d229 | ||
|
|
6c1ca8e32b | ||
|
|
6523610ca4 | ||
|
|
41c20e22f5 | ||
|
|
4a00d25cb1 | ||
|
|
8c9ee64612 | ||
|
|
bfdfbfac0f | ||
|
|
dbe7bdcd5c | ||
|
|
6d77ff18ad | ||
|
|
7d4d0e0b16 | ||
|
|
b27762232c | ||
|
|
f597d0f06f | ||
|
|
ee188448f3 | ||
|
|
ee49f048c7 | ||
|
|
a9b09ecc45 | ||
|
|
efc243edc9 | ||
|
|
bc34411d3f | ||
|
|
c7923ff3ed | ||
|
|
7534b68e5c | ||
|
|
655f7e6214 | ||
|
|
b8556ddbd4 | ||
|
|
cab7439064 | ||
|
|
f8a8780651 | ||
|
|
bb3c8e7d5d | ||
|
|
69536960c3 | ||
|
|
52a7c52a49 | ||
|
|
751c407ba5 | ||
|
|
492d348138 | ||
|
|
e7df7eb5ef | ||
|
|
380ee52986 | ||
|
|
439a28f68b | ||
|
|
2eb1e37ca7 | ||
|
|
eb1b136c45 | ||
|
|
1376bcba7c | ||
|
|
b5b4e54b9b | ||
|
|
23c2bd790a | ||
|
|
251ee9aa0e | ||
|
|
fad066a996 | ||
|
|
6ef1f22b33 | ||
|
|
8cc8dba1ab | ||
|
|
082328c91a | ||
|
|
fdb6faa0a8 | ||
|
|
856440386f | ||
|
|
2e8189514c | ||
|
|
29b2454cce | ||
|
|
9ed2589c7a | ||
|
|
36b71529e6 | ||
|
|
b738946d72 | ||
|
|
62f1425257 | ||
|
|
44e491b93f | ||
|
|
a470b26672 | ||
|
|
015409424c | ||
|
|
37be751188 | ||
|
|
2580a834af | ||
|
|
a029bd878e | ||
|
|
400e7930b6 | ||
|
|
40d07c6bc3 | ||
|
|
9dd2d39f1c | ||
|
|
4045a3bc2f | ||
|
|
b4ffeccd46 | ||
|
|
7fe3ad3b6e | ||
|
|
7fdd4f9532 | ||
|
|
81eef39ddb | ||
|
|
b6459e3cda | ||
|
|
f2ef78c348 | ||
|
|
aef8f31621 | ||
|
|
df78d178da | ||
|
|
9297a886a4 | ||
|
|
28b4f91a32 | ||
|
|
04fb46e41b | ||
|
|
53da809f37 | ||
|
|
723910c669 | ||
|
|
8ecf8879fb | ||
|
|
2427f744f8 | ||
|
|
422e1f23d5 | ||
|
|
30a5ade17f | ||
|
|
6cdd41c22f | ||
|
|
f369bec394 | ||
|
|
df4e9c69c7 | ||
|
|
0e3d129906 | ||
|
|
682e455c94 | ||
|
|
b553a68fa9 | ||
|
|
be95eadf79 | ||
|
|
0213a11a0c | ||
|
|
413fde3b3b | ||
|
|
40f8ac9adf | ||
|
|
2ab25c1084 | ||
|
|
86c62b70e5 | ||
|
|
18d790fc26 | ||
|
|
e9e3dae786 | ||
|
|
9b1dccbcb4 | ||
|
|
cef011dd91 | ||
|
|
19f7d5af5c | ||
|
|
95ca5a80dc | ||
|
|
b5fded34d1 | ||
|
|
0c3b09c83d | ||
|
|
85a19d30a9 | ||
|
|
f58132c391 | ||
|
|
099bff84aa | ||
|
|
42ad474a46 | ||
|
|
9f6827b803 | ||
|
|
d23c0df449 | ||
|
|
229bfeebe4 | ||
|
|
48aab9d494 | ||
|
|
e4769a8212 | ||
|
|
79bdaaba20 | ||
|
|
02a14fff7c | ||
|
|
72cce4c5b2 | ||
|
|
a317c4b9dd | ||
|
|
2e2bd5ba29 | ||
|
|
827d8d8708 | ||
|
|
bf434be347 | ||
|
|
ed83fbb460 | ||
|
|
0aad2e669b | ||
|
|
cd68a3bd1c | ||
|
|
b77286bcbc | ||
|
|
609f83bbff | ||
|
|
2a8ebb81d8 | ||
|
|
1a2a17a6ab | ||
|
|
0080caf95d | ||
|
|
c26238533b | ||
|
|
b29936d844 | ||
|
|
25914cc727 | ||
|
|
ca229e369b | ||
|
|
4a99e54c0d | ||
|
|
2383591351 | ||
|
|
dc464f398d | ||
|
|
ce70b5758a | ||
|
|
1c76a08373 | ||
|
|
9b19bd1e8b | ||
|
|
a3dde21240 | ||
|
|
005e1afe2f | ||
|
|
17c404b77d | ||
|
|
1403971d15 | ||
|
|
94ad69bfa3 | ||
|
|
bc129ba0ed | ||
|
|
462834a12e | ||
|
|
ebeee1d6f8 | ||
|
|
d0e1a582e1 | ||
|
|
546cb369a8 | ||
|
|
445af7ab97 | ||
|
|
23f8c69bae | ||
|
|
b8df207b68 | ||
|
|
03688aee4c | ||
|
|
5a3652f398 | ||
|
|
ae3c261d1e | ||
|
|
95dcf95e88 | ||
|
|
b92d6400f4 | ||
|
|
9ba27b4082 | ||
|
|
b164c90d75 | ||
|
|
ff893ca6ef | ||
|
|
2dd1e13dad | ||
|
|
4393fce861 | ||
|
|
4f10cfa6dd | ||
|
|
a6e4488de2 | ||
|
|
878f3fa448 | ||
|
|
90b887a56f | ||
|
|
7dc52cf4ef | ||
|
|
a69333ed37 | ||
|
|
ffad25449e | ||
|
|
6d471856c7 | ||
|
|
65749cb39b | ||
|
|
bf36316c12 | ||
|
|
d98bb0eb86 | ||
|
|
5747af6dce | ||
|
|
cf08436c7d | ||
|
|
241bddccaf | ||
|
|
8ce1984214 | ||
|
|
82ef430dfa | ||
|
|
2348303b26 | ||
|
|
a35386f740 | ||
|
|
3df542c5f8 | ||
|
|
37623eedf3 | ||
|
|
fa8cf73d57 | ||
|
|
872b20a4a1 | ||
|
|
4920e3b4df | ||
|
|
75a0881e9d | ||
|
|
f67effc359 | ||
|
|
e8eb82f7ae | ||
|
|
53018ddc36 | ||
|
|
9ef62acff1 | ||
|
|
12220b2a18 | ||
|
|
625c150dc1 | ||
|
|
304932a861 | ||
|
|
59181d4717 | ||
|
|
c5d93f4b38 | ||
|
|
7a465cd258 | ||
|
|
a0946ac509 | ||
|
|
5521f2a4a4 | ||
|
|
1a9e40c860 | ||
|
|
bc3e3e46a0 | ||
|
|
60c87b6d95 | ||
|
|
df4c9f511d | ||
|
|
69bfd6556f | ||
|
|
7fad91e429 | ||
|
|
0da30d5e58 | ||
|
|
97ce5b617a | ||
|
|
a4723b03f3 | ||
|
|
d24d484bcf | ||
|
|
9945cdd9b2 | ||
|
|
04a0aa0c31 | ||
|
|
898c305acd | ||
|
|
f5854db0b1 | ||
|
|
6a56af0b07 | ||
|
|
b7d830c57f | ||
|
|
9b983745da | ||
|
|
10b1305e66 |
@@ -5,13 +5,3 @@ failure-output = "final"
|
||||
fail-fast = false
|
||||
retries = 0
|
||||
slow-timeout = "5m"
|
||||
|
||||
|
||||
[[profile.ci.overrides]]
|
||||
filter = 'test(/^.*param_message_1_carry_[567]_ks_pbs$/) or test(/^.*param_message_4_carry_4_ks_pbs$/)'
|
||||
retries = 3
|
||||
|
||||
[[profile.ci.overrides]]
|
||||
filter = 'test(/^.*param_message_[23]_carry_[23]_ks_pbs$/)'
|
||||
retries = 1
|
||||
|
||||
|
||||
12
.github/workflows/aws_tfhe_fast_tests.yml
vendored
12
.github/workflows/aws_tfhe_fast_tests.yml
vendored
@@ -51,7 +51,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -66,6 +66,10 @@ jobs:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Run concrete-csprng tests
|
||||
run: |
|
||||
make test_concrete_csprng
|
||||
|
||||
- name: Run core tests
|
||||
run: |
|
||||
AVX512_SUPPORT=ON make test_core_crypto
|
||||
@@ -106,10 +110,14 @@ jobs:
|
||||
run: |
|
||||
make test_high_level_api
|
||||
|
||||
- name: Run safe deserialization tests
|
||||
run: |
|
||||
make test_safe_deserialization
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
6
.github/workflows/aws_tfhe_integer_tests.yml
vendored
6
.github/workflows/aws_tfhe_integer_tests.yml
vendored
@@ -50,7 +50,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -71,12 +71,12 @@ jobs:
|
||||
|
||||
- name: Run integer tests
|
||||
run: |
|
||||
BIG_TESTS_INSTANCE=TRUE make test_integer_ci
|
||||
AVX512_SUPPORT=ON BIG_TESTS_INSTANCE=TRUE make test_integer_ci
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -75,12 +75,12 @@ jobs:
|
||||
|
||||
- name: Run integer multi-bit tests
|
||||
run: |
|
||||
make test_integer_multi_bit_ci
|
||||
AVX512_SUPPORT=ON make test_integer_multi_bit_ci
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
8
.github/workflows/aws_tfhe_tests.yml
vendored
8
.github/workflows/aws_tfhe_tests.yml
vendored
@@ -50,7 +50,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -65,6 +65,10 @@ jobs:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Run concrete-csprng tests
|
||||
run: |
|
||||
make test_concrete_csprng
|
||||
|
||||
- name: Run core tests
|
||||
run: |
|
||||
AVX512_SUPPORT=ON make test_core_crypto
|
||||
@@ -100,7 +104,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
4
.github/workflows/aws_tfhe_wasm_tests.yml
vendored
4
.github/workflows/aws_tfhe_wasm_tests.yml
vendored
@@ -50,7 +50,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -77,7 +77,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
16
.github/workflows/boolean_benchmark.yml
vendored
16
.github/workflows/boolean_benchmark.yml
vendored
@@ -19,6 +19,14 @@ on:
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
# This input is not used in this workflow but still mandatory since a calling workflow could
|
||||
# use it. If a triggering command include a user_inputs field, then the triggered workflow
|
||||
# must include this very input, otherwise the workflow won't be called.
|
||||
# See start_full_benchmarks.yml as example.
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -43,7 +51,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -88,13 +96,13 @@ jobs:
|
||||
--append-results
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_boolean
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
@@ -117,7 +125,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
16
.github/workflows/cargo_build.yml
vendored
16
.github/workflows/cargo_build.yml
vendored
@@ -21,12 +21,26 @@ jobs:
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
- uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
|
||||
- name: Install and run newline linter checks
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
wget https://github.com/fernandrone/linelint/releases/download/0.0.6/linelint-linux-amd64
|
||||
echo "16b70fb7b471d6f95cbdc0b4e5dc2b0ac9e84ba9ecdc488f7bdf13df823aca4b linelint-linux-amd64" > checksum
|
||||
sha256sum -c checksum || exit 1
|
||||
chmod +x linelint-linux-amd64
|
||||
mv linelint-linux-amd64 /usr/local/bin/linelint
|
||||
make check_newline
|
||||
|
||||
- name: Run pcc checks
|
||||
run: |
|
||||
make pcc
|
||||
|
||||
- name: Build concrete-csprng
|
||||
run: |
|
||||
make build_concrete_csprng
|
||||
|
||||
- name: Build Release core
|
||||
run: |
|
||||
make build_core AVX512_SUPPORT=ON
|
||||
|
||||
2
.github/workflows/check_commit.yml
vendored
2
.github/workflows/check_commit.yml
vendored
@@ -10,7 +10,7 @@ jobs:
|
||||
- name: Check first line
|
||||
uses: gsactions/commit-message-checker@16fa2d5de096ae0d35626443bcd24f1e756cafee
|
||||
with:
|
||||
pattern: '^((feat|fix|chore|refactor|style|test|docs|doc)\(\w+\)\:) .+$'
|
||||
pattern: '^((feat|fix|chore|refactor|style|test|docs|doc)(\(\w+\))?\:) .+$'
|
||||
flags: "gs"
|
||||
error: 'Your first line has to contain a commit type and scope like "feat(my_feature): msg".'
|
||||
excludeDescription: "true" # optional: this excludes the description body of a pull request
|
||||
|
||||
112
.github/workflows/code_coverage.yml
vendored
Normal file
112
.github/workflows/code_coverage.yml
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
name: Code Coverage
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
RUSTFLAGS: "-C target-cpu=native"
|
||||
|
||||
on:
|
||||
# Allows you to run this workflow manually from the Actions tab as an alternative.
|
||||
workflow_dispatch:
|
||||
# All the inputs are provided by Slab
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "AWS instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "AWS instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "AWS instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: 'Slab request ID'
|
||||
type: string
|
||||
fork_repo:
|
||||
description: 'Name of forked repo as user/repo'
|
||||
type: string
|
||||
fork_git_sha:
|
||||
description: 'Git SHA to checkout from fork'
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
code-coverage:
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}_${{ github.ref }}_${{ inputs.instance_image_id }}_${{ inputs.instance_type }}
|
||||
cancel-in-progress: true
|
||||
runs-on: ${{ inputs.runner_name }}
|
||||
steps:
|
||||
# Step used for log purpose.
|
||||
- name: Instance configuration used
|
||||
run: |
|
||||
echo "ID: ${{ inputs.instance_id }}"
|
||||
echo "AMI: ${{ inputs.instance_image_id }}"
|
||||
echo "Type: ${{ inputs.instance_type }}"
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
echo "Fork repo: ${{ inputs.fork_repo }}"
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
|
||||
- name: Set up home
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install latest stable
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Check for file changes
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@408093d9ff9c134c33b974e0722ce06b9d6e8263
|
||||
with:
|
||||
files_yaml: |
|
||||
tfhe:
|
||||
- tfhe/src/**
|
||||
concrete_csprng:
|
||||
- concrete-csprng/src/**
|
||||
|
||||
- name: Generate Keys
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
run: |
|
||||
make GEN_KEY_CACHE_COVERAGE_ONLY=TRUE gen_key_cache
|
||||
|
||||
- name: Run coverage for boolean
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
run: |
|
||||
make test_boolean_cov
|
||||
|
||||
- name: Run coverage for shortint
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
run: |
|
||||
make test_shortint_cov
|
||||
|
||||
- name: Upload tfhe coverage to Codecov
|
||||
uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
directory: ./coverage/
|
||||
fail_ci_if_error: true
|
||||
files: shortint/cobertura.xml,boolean/cobertura.xml
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Code coverage finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
74
.github/workflows/csprng_randomness_testing.yml
vendored
Normal file
74
.github/workflows/csprng_randomness_testing.yml
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
name: CSPRNG randomness testing Workflow
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
RUSTFLAGS: "-C target-cpu=native"
|
||||
|
||||
on:
|
||||
# Allows you to run this workflow manually from the Actions tab as an alternative.
|
||||
workflow_dispatch:
|
||||
# All the inputs are provided by Slab
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "AWS instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "AWS instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "AWS instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: 'Slab request ID'
|
||||
type: string
|
||||
fork_repo:
|
||||
description: 'Name of forked repo as user/repo'
|
||||
type: string
|
||||
fork_git_sha:
|
||||
description: 'Git SHA to checkout from fork'
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
csprng-randomness-teting:
|
||||
name: CSPRNG randomness testing
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}_${{ github.ref }}_${{ inputs.instance_image_id }}_${{ inputs.instance_type }}
|
||||
cancel-in-progress: true
|
||||
runs-on: ${{ inputs.runner_name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
|
||||
- name: Set up home
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install latest stable
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Dieharder randomness test suite
|
||||
run: |
|
||||
make dieharder_csprng
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "concrete-csprng randomness check finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
12
.github/workflows/integer_benchmark.yml
vendored
12
.github/workflows/integer_benchmark.yml
vendored
@@ -44,7 +44,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -61,7 +61,7 @@ jobs:
|
||||
|
||||
- name: Run benchmarks with AVX512
|
||||
run: |
|
||||
make AVX512_SUPPORT=ON bench_integer
|
||||
make AVX512_SUPPORT=ON FAST_BENCH=TRUE bench_integer
|
||||
|
||||
- name: Parse benchmarks to csv
|
||||
run: |
|
||||
@@ -69,7 +69,7 @@ jobs:
|
||||
parse_integer_benches
|
||||
|
||||
- name: Upload csv results artifact
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_csv_integer
|
||||
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
|
||||
@@ -90,13 +90,13 @@ jobs:
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_integer
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
@@ -119,7 +119,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
157
.github/workflows/integer_full_benchmark.yml
vendored
Normal file
157
.github/workflows/integer_full_benchmark.yml
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
# Run all integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
|
||||
name: Integer full benchmarks
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "Instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "Instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "Instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
prepare-matrix:
|
||||
name: Prepare operations matrix
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
op_flavor: ${{ steps.set_op_flavor.outputs.op_flavor }}
|
||||
steps:
|
||||
- name: Weekly benchmarks
|
||||
if: ${{ github.event.inputs.user_inputs == 'weekly_benchmarks' }}
|
||||
run: |
|
||||
echo "OP_FLAVOR=[\"default\", \"default_comp\", \"default_scalar\", \"default_scalar_comp\"]" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Quarterly benchmarks
|
||||
if: ${{ github.event.inputs.user_inputs == 'quarterly_benchmarks' }}
|
||||
run: |
|
||||
echo "OP_FLAVOR=[\"default\", \"default_comp\", \"default_scalar\", \"default_scalar_comp\", \
|
||||
\"smart\", \"smart_comp\", \"smart_scalar\", \"smart_parallelized\", \"smart_parallelized_comp\", \"smart_scalar_parallelized\", \"smart_scalar_parallelized_comp\", \
|
||||
\"unchecked\", \"unchecked_comp\", \"unchecked_scalar\", \"unchecked_scalar_comp\", \
|
||||
\"misc\"]" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Set operation flavor output
|
||||
id: set_op_flavor
|
||||
run: |
|
||||
echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> ${GITHUB_OUTPUT}
|
||||
|
||||
integer-benchmarks:
|
||||
name: Execute integer benchmarks for all operations flavor
|
||||
needs: prepare-matrix
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ !cancelled() }}
|
||||
continue-on-error: true
|
||||
strategy:
|
||||
max-parallel: 1
|
||||
matrix:
|
||||
command: [ integer, integer_multi_bit]
|
||||
op_flavor: ${{ fromJson(needs.prepare-matrix.outputs.op_flavor) }}
|
||||
steps:
|
||||
- name: Instance configuration used
|
||||
run: |
|
||||
echo "IDs: ${{ inputs.instance_id }}"
|
||||
echo "AMI: ${{ inputs.instance_image_id }}"
|
||||
echo "Type: ${{ inputs.instance_type }}"
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Get benchmark details
|
||||
run: |
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})" >> "${GITHUB_ENV}"
|
||||
echo "COMMIT_HASH=$(git describe --tags --dirty)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Set up home
|
||||
# "Install rust" step require root user to have a HOME directory which is not set.
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install rust
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
|
||||
|
||||
- name: Run benchmarks with AVX512
|
||||
run: |
|
||||
make AVX512_SUPPORT=ON BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_${{ matrix.command }}
|
||||
|
||||
- name: Parse results
|
||||
run: |
|
||||
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
|
||||
--database tfhe_rs \
|
||||
--hardware ${{ inputs.instance_type }} \
|
||||
--project-version "${{ env.COMMIT_HASH }}" \
|
||||
--branch ${{ github.ref_name }} \
|
||||
--commit-date "${{ env.COMMIT_DATE }}" \
|
||||
--bench-date "${{ env.BENCH_DATE }}" \
|
||||
--walk-subdirs \
|
||||
--name-suffix avx512 \
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Send data to Slab
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Computing HMac on results file"
|
||||
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
|
||||
echo "Sending results to Slab..."
|
||||
curl -v -k \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Slab-Repository: ${{ github.repository }}" \
|
||||
-H "X-Slab-Command: store_data_v2" \
|
||||
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
|
||||
-d @${{ env.RESULTS_FILENAME }} \
|
||||
${{ secrets.SLAB_URL }}
|
||||
|
||||
slack-notification:
|
||||
name: Slack Notification
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ failure() }}
|
||||
needs: integer-benchmarks
|
||||
steps:
|
||||
- name: Notify
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Integer full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
@@ -44,7 +44,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -61,7 +61,7 @@ jobs:
|
||||
|
||||
- name: Run multi-bit benchmarks with AVX512
|
||||
run: |
|
||||
make AVX512_SUPPORT=ON bench_integer_multi_bit
|
||||
make AVX512_SUPPORT=ON FAST_BENCH=TRUE bench_integer_multi_bit
|
||||
|
||||
- name: Parse benchmarks to csv
|
||||
run: |
|
||||
@@ -69,7 +69,7 @@ jobs:
|
||||
parse_integer_benches
|
||||
|
||||
- name: Upload csv results artifact
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_csv_integer
|
||||
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
|
||||
@@ -90,13 +90,13 @@ jobs:
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_integer
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
@@ -119,7 +119,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
12
.github/workflows/m1_tests.yml
vendored
12
.github/workflows/m1_tests.yml
vendored
@@ -28,7 +28,7 @@ jobs:
|
||||
runs-on: ["self-hosted", "m1mac"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
- uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
|
||||
- name: Install latest stable
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
@@ -40,6 +40,10 @@ jobs:
|
||||
run: |
|
||||
make pcc
|
||||
|
||||
- name: Build concrete-csprng
|
||||
run: |
|
||||
make build_concrete_csprng
|
||||
|
||||
- name: Build Release core
|
||||
run: |
|
||||
make build_core
|
||||
@@ -64,6 +68,10 @@ jobs:
|
||||
run: |
|
||||
make build_c_api
|
||||
|
||||
- name: Run concrete-csprng tests
|
||||
run: |
|
||||
make test_concrete_csprng
|
||||
|
||||
- name: Run core tests
|
||||
run: |
|
||||
make test_core_crypto
|
||||
@@ -124,7 +132,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ needs.cargo-builds.result != 'skipped' }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ needs.cargo-builds.result }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
10
.github/workflows/make_release.yml
vendored
10
.github/workflows/make_release.yml
vendored
@@ -30,7 +30,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -49,7 +49,7 @@ jobs:
|
||||
|
||||
- name: Publish web package
|
||||
if: ${{ inputs.push_web_package }}
|
||||
uses: JS-DevTools/npm-publish@5a85faf05d2ade2d5b6682bfe5359915d5159c6c
|
||||
uses: JS-DevTools/npm-publish@fe72237be0920f7a0cafd6a966c9b929c9466e9b
|
||||
with:
|
||||
token: ${{ secrets.NPM_TOKEN }}
|
||||
package: tfhe/pkg/package.json
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
|
||||
- name: Publish Node package
|
||||
if: ${{ inputs.push_node_package }}
|
||||
uses: JS-DevTools/npm-publish@5a85faf05d2ade2d5b6682bfe5359915d5159c6c
|
||||
uses: JS-DevTools/npm-publish@fe72237be0920f7a0cafd6a966c9b929c9466e9b
|
||||
with:
|
||||
token: ${{ secrets.NPM_TOKEN }}
|
||||
package: tfhe/pkg/package.json
|
||||
@@ -74,11 +74,11 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_MESSAGE: "tfhe release failed: (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
|
||||
42
.github/workflows/make_release_concrete_csprng.yml
vendored
Normal file
42
.github/workflows/make_release_concrete_csprng.yml
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
# Publish new release of tfhe-rs on various platform.
|
||||
name: Publish concrete-csprng release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
dry_run:
|
||||
description: "Dry-run"
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
env:
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
publish_release:
|
||||
name: Publish concrete-csprng Release
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Publish crate.io package
|
||||
env:
|
||||
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
|
||||
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
|
||||
run: |
|
||||
cargo publish -p concrete-csprng --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "concrete-csprng release failed: (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
51
.github/workflows/parameters_check.yml
vendored
Normal file
51
.github/workflows/parameters_check.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
# Perform a security check on all the cryptographic parameters set
|
||||
name: Parameters curves security check
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
RUSTFLAGS: "-C target-cpu=native"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "main"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
params-curves-security-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
|
||||
- name: Checkout lattice-estimator
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: malb/lattice-estimator
|
||||
path: lattice_estimator
|
||||
|
||||
- name: Install Sage
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install -y sagemath
|
||||
|
||||
- name: Collect parameters
|
||||
run: |
|
||||
CARGO_PROFILE=devo make write_params_to_file
|
||||
|
||||
- name: Perform security check
|
||||
run: |
|
||||
PYTHONPATH=lattice_estimator sage ci/lattice_estimator.sage
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Security check for parameters finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
16
.github/workflows/pbs_benchmark.yml
vendored
16
.github/workflows/pbs_benchmark.yml
vendored
@@ -19,6 +19,14 @@ on:
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
# This input is not used in this workflow but still mandatory since a calling workflow could
|
||||
# use it. If a triggering command include a user_inputs field, then the triggered workflow
|
||||
# must include this very input, otherwise the workflow won't be called.
|
||||
# See start_full_benchmarks.yml as example.
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -43,7 +51,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -78,13 +86,13 @@ jobs:
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_pbs
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
@@ -107,7 +115,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
14
.github/workflows/placeholder_workflow.yml
vendored
Normal file
14
.github/workflows/placeholder_workflow.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Placeholder workflow file allowing running it without having to merge to main first
|
||||
name: Placeholder Workflow
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
placeholder:
|
||||
name: Placeholder
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- run: |
|
||||
echo "Hello this is a Placeholder Workflow"
|
||||
8
.github/workflows/shortint_benchmark.yml
vendored
8
.github/workflows/shortint_benchmark.yml
vendored
@@ -43,7 +43,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -88,13 +88,13 @@ jobs:
|
||||
--append-results
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_shortint
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
@@ -117,7 +117,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
149
.github/workflows/shortint_full_benchmark.yml
vendored
Normal file
149
.github/workflows/shortint_full_benchmark.yml
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
# Run all shortint benchmarks on an AWS instance and return parsed results to Slab CI bot.
|
||||
name: Shortint full benchmarks
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "Instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "Instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "Instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
# This input is not used in this workflow but still mandatory since a calling workflow could
|
||||
# use it. If a triggering command include a user_inputs field, then the triggered workflow
|
||||
# must include this very input, otherwise the workflow won't be called.
|
||||
# See start_full_benchmarks.yml as example.
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
shortint-benchmarks:
|
||||
name: Execute shortint benchmarks for all operations flavor
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ !cancelled() }}
|
||||
strategy:
|
||||
max-parallel: 1
|
||||
matrix:
|
||||
op_flavor: [ default, smart, unchecked ]
|
||||
steps:
|
||||
- name: Instance configuration used
|
||||
run: |
|
||||
echo "IDs: ${{ inputs.instance_id }}"
|
||||
echo "AMI: ${{ inputs.instance_image_id }}"
|
||||
echo "Type: ${{ inputs.instance_type }}"
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Get benchmark details
|
||||
run: |
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})" >> "${GITHUB_ENV}"
|
||||
echo "COMMIT_HASH=$(git describe --tags --dirty)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Set up home
|
||||
# "Install rust" step require root user to have a HOME directory which is not set.
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install rust
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
|
||||
|
||||
- name: Run benchmarks with AVX512
|
||||
run: |
|
||||
make AVX512_SUPPORT=ON BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_shortint
|
||||
|
||||
- name: Parse results
|
||||
run: |
|
||||
COMMIT_DATE="$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})"
|
||||
COMMIT_HASH="$(git describe --tags --dirty)"
|
||||
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
|
||||
--database tfhe_rs \
|
||||
--hardware ${{ inputs.instance_type }} \
|
||||
--project-version "${COMMIT_HASH}" \
|
||||
--branch ${{ github.ref_name }} \
|
||||
--commit-date "${COMMIT_DATE}" \
|
||||
--bench-date "${{ env.BENCH_DATE }}" \
|
||||
--walk-subdirs \
|
||||
--name-suffix avx512 \
|
||||
--throughput
|
||||
|
||||
# This small benchmark needs to be executed only once.
|
||||
- name: Measure key sizes
|
||||
if: matrix.op_flavor == 'default'
|
||||
run: |
|
||||
make measure_shortint_key_sizes
|
||||
|
||||
- name: Parse key sizes results
|
||||
if: matrix.op_flavor == 'default'
|
||||
run: |
|
||||
python3 ./ci/benchmark_parser.py tfhe/shortint_key_sizes.csv ${{ env.RESULTS_FILENAME }} \
|
||||
--key-sizes \
|
||||
--append-results
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_shortint_${{ matrix.op_flavor }}
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Send data to Slab
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Computing HMac on results file"
|
||||
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
|
||||
echo "Sending results to Slab..."
|
||||
curl -v -k \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Slab-Repository: ${{ github.repository }}" \
|
||||
-H "X-Slab-Command: store_data_v2" \
|
||||
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
|
||||
-d @${{ env.RESULTS_FILENAME }} \
|
||||
${{ secrets.SLAB_URL }}
|
||||
|
||||
slack-notification:
|
||||
name: Slack Notification
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ failure() }}
|
||||
needs: shortint-benchmarks
|
||||
steps:
|
||||
- name: Notify
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Shortint full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
6
.github/workflows/start_benchmarks.yml
vendored
6
.github/workflows/start_benchmarks.yml
vendored
@@ -42,13 +42,13 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Check for file changes
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@de0eba32790fb9bf87471b32855a30fc8f9d5fc6
|
||||
uses: tj-actions/changed-files@408093d9ff9c134c33b974e0722ce06b9d6e8263
|
||||
with:
|
||||
files_yaml: |
|
||||
common_benches:
|
||||
@@ -85,7 +85,7 @@ jobs:
|
||||
- .github/workflows/wasm_client_benchmark.yml
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
|
||||
64
.github/workflows/start_full_benchmarks.yml
vendored
Normal file
64
.github/workflows/start_full_benchmarks.yml
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
# Start all benchmark jobs, including full shortint and integer, on Slab CI bot.
|
||||
name: Start full suite benchmarks
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Weekly benchmarks will be triggered each Saturday at 1a.m.
|
||||
- cron: '0 1 * * 6'
|
||||
# Quarterly benchmarks will be triggered right before end of quarter, the 25th of the current month at 4a.m.
|
||||
# These benchmarks are far longer to execute hence the reason to run them only four time a year.
|
||||
- cron: '0 4 25 MAR,JUN,SEP,DEC *'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
benchmark_type:
|
||||
description: 'Benchmark type'
|
||||
required: true
|
||||
default: 'weekly'
|
||||
type: choice
|
||||
options:
|
||||
- weekly
|
||||
- quarterly
|
||||
|
||||
jobs:
|
||||
start-benchmarks:
|
||||
if: ${{ (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') || github.event_name == 'workflow_dispatch' }}
|
||||
strategy:
|
||||
matrix:
|
||||
command: [ boolean_bench, shortint_full_bench, integer_full_bench, pbs_bench, wasm_client_bench ]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
|
||||
|
||||
- name: Set benchmarks type as weekly
|
||||
if: (github.event_name == 'workflow_dispatch' && inputs.benchmark_type == 'weekly') || github.event.schedule == '0 1 * * 6'
|
||||
run: |
|
||||
echo "BENCH_TYPE=weekly_benchmarks" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Set benchmarks type as quarterly
|
||||
if: (github.event_name == 'workflow_dispatch' && inputs.benchmark_type == 'quarterly') || github.event.schedule == '0 4 25 MAR,JUN,SEP,DEC *'
|
||||
run: |
|
||||
echo "BENCH_TYPE=quarterly_benchmarks" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Start AWS job in Slab
|
||||
shell: bash
|
||||
run: |
|
||||
echo -n '{"command": "${{ matrix.command }}", "git_ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "user_inputs": "${{ env.BENCH_TYPE }}"}' > command.json
|
||||
SIGNATURE="$(slab/scripts/hmac_calculator.sh command.json '${{ secrets.JOB_SECRET }}')"
|
||||
curl -v -k \
|
||||
--fail-with-body \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Slab-Repository: ${{ github.repository }}" \
|
||||
-H "X-Slab-Command: start_aws" \
|
||||
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
|
||||
-d @command.json \
|
||||
${{ secrets.SLAB_URL }}
|
||||
4
.github/workflows/sync_on_push.yml
vendored
4
.github/workflows/sync_on_push.yml
vendored
@@ -13,11 +13,11 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Save repo
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: repo-archive
|
||||
path: '.'
|
||||
|
||||
22
.github/workflows/trigger_aws_tests_on_pr.yml
vendored
22
.github/workflows/trigger_aws_tests_on_pr.yml
vendored
@@ -12,6 +12,16 @@ jobs:
|
||||
permissions:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Get current labels
|
||||
uses: snnaplab/get-labels-action@f426df40304808ace3b5282d4f036515f7609576
|
||||
|
||||
- name: Remove approved label
|
||||
if: ${{ github.event_name == 'pull_request' && contains(fromJSON(env.LABELS), 'approved') }}
|
||||
uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
labels: approved
|
||||
|
||||
- name: Launch fast tests
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
uses: mshick/add-pr-comment@a65df5f64fc741e91c59b8359a4bc56e57aaf5b1
|
||||
@@ -20,8 +30,17 @@ jobs:
|
||||
message: |
|
||||
@slab-ci cpu_fast_test
|
||||
|
||||
- name: Add approved label
|
||||
uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf
|
||||
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && !contains(fromJSON(env.LABELS), 'approved') }}
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
labels: approved
|
||||
|
||||
# PR label 'approved' presence is checked to avoid running the full test suite several times
|
||||
# in case of multiple approvals without new commits in between.
|
||||
- name: Launch full tests suite
|
||||
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' }}
|
||||
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && !contains(fromJSON(env.LABELS), 'approved') }}
|
||||
uses: mshick/add-pr-comment@a65df5f64fc741e91c59b8359a4bc56e57aaf5b1
|
||||
with:
|
||||
allow-repeats: true
|
||||
@@ -32,3 +51,4 @@ jobs:
|
||||
@slab-ci cpu_integer_test
|
||||
@slab-ci cpu_multi_bit_test
|
||||
@slab-ci cpu_wasm_test
|
||||
@slab-ci csprng_randomness_testing
|
||||
|
||||
16
.github/workflows/wasm_client_benchmark.yml
vendored
16
.github/workflows/wasm_client_benchmark.yml
vendored
@@ -19,6 +19,14 @@ on:
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
# This input is not used in this workflow but still mandatory since a calling workflow could
|
||||
# use it. If a triggering command include a user_inputs field, then the triggered workflow
|
||||
# must include this very input, otherwise the workflow won't be called.
|
||||
# See start_full_benchmarks.yml as example.
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -43,7 +51,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -89,13 +97,13 @@ jobs:
|
||||
--append-results
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_wasm
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9
|
||||
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
@@ -118,7 +126,7 @@ jobs:
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -13,3 +13,9 @@ target/
|
||||
# Some of our bench outputs
|
||||
/tfhe/benchmarks_parameters
|
||||
**/*.csv
|
||||
|
||||
# dieharder run log
|
||||
dieharder_run.log
|
||||
|
||||
# Coverage reports
|
||||
./coverage/
|
||||
|
||||
14
.linelint.yml
Normal file
14
.linelint.yml
Normal file
@@ -0,0 +1,14 @@
|
||||
ignore:
|
||||
- .git
|
||||
- target
|
||||
- tfhe/benchmarks_parameters
|
||||
- tfhe/web_wasm_parallel_tests/node_modules
|
||||
- tfhe/web_wasm_parallel_tests/dist
|
||||
- keys
|
||||
- coverage
|
||||
|
||||
rules:
|
||||
# checks if file ends in a newline character
|
||||
end-of-file:
|
||||
enable: true
|
||||
single-new-line: true
|
||||
@@ -1,6 +1,6 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = ["tfhe", "tasks", "apps/trivium"]
|
||||
members = ["tfhe", "tasks", "apps/trivium", "concrete-csprng"]
|
||||
|
||||
[profile.bench]
|
||||
lto = "fat"
|
||||
|
||||
181
Makefile
181
Makefile
@@ -3,8 +3,7 @@ OS:=$(shell uname)
|
||||
RS_CHECK_TOOLCHAIN:=$(shell cat toolchain.txt | tr -d '\n')
|
||||
CARGO_RS_CHECK_TOOLCHAIN:=+$(RS_CHECK_TOOLCHAIN)
|
||||
TARGET_ARCH_FEATURE:=$(shell ./scripts/get_arch_feature.sh)
|
||||
RS_BUILD_TOOLCHAIN:=$(shell \
|
||||
( (echo $(TARGET_ARCH_FEATURE) | grep -q x86) && echo stable) || echo $(RS_CHECK_TOOLCHAIN))
|
||||
RS_BUILD_TOOLCHAIN:=stable
|
||||
CARGO_RS_BUILD_TOOLCHAIN:=+$(RS_BUILD_TOOLCHAIN)
|
||||
CARGO_PROFILE?=release
|
||||
MIN_RUST_VERSION:=$(shell grep rust-version tfhe/Cargo.toml | cut -d '=' -f 2 | xargs)
|
||||
@@ -12,8 +11,10 @@ AVX512_SUPPORT?=OFF
|
||||
WASM_RUSTFLAGS:=
|
||||
BIG_TESTS_INSTANCE?=FALSE
|
||||
GEN_KEY_CACHE_MULTI_BIT_ONLY?=FALSE
|
||||
GEN_KEY_CACHE_COVERAGE_ONLY?=FALSE
|
||||
PARSE_INTEGER_BENCH_CSV_FILE?=tfhe_rs_integer_benches.csv
|
||||
FAST_TESTS?=FALSE
|
||||
FAST_BENCH?=FALSE
|
||||
BENCH_OP_FLAVOR?=DEFAULT
|
||||
# This is done to avoid forgetting it, we still precise the RUSTFLAGS in the commands to be able to
|
||||
# copy paste the command in the terminal and change them if required without forgetting the flags
|
||||
@@ -31,10 +32,32 @@ else
|
||||
MULTI_BIT_ONLY=
|
||||
endif
|
||||
|
||||
ifeq ($(GEN_KEY_CACHE_COVERAGE_ONLY),TRUE)
|
||||
COVERAGE_ONLY=--coverage-only
|
||||
else
|
||||
COVERAGE_ONLY=
|
||||
endif
|
||||
|
||||
# Variables used only for regex_engine example
|
||||
REGEX_STRING?=''
|
||||
REGEX_PATTERN?=''
|
||||
|
||||
# Exclude these files from coverage reports
|
||||
define COVERAGE_EXCLUDED_FILES
|
||||
--exclude-files apps/trivium/src/trivium/* \
|
||||
--exclude-files apps/trivium/src/kreyvium/* \
|
||||
--exclude-files apps/trivium/src/static_deque/* \
|
||||
--exclude-files apps/trivium/src/trans_ciphering/* \
|
||||
--exclude-files tasks/src/* \
|
||||
--exclude-files tfhe/benches/boolean/* \
|
||||
--exclude-files tfhe/benches/core_crypto/* \
|
||||
--exclude-files tfhe/benches/shortint/* \
|
||||
--exclude-files tfhe/benches/integer/* \
|
||||
--exclude-files tfhe/benches/* \
|
||||
--exclude-files tfhe/examples/regex_engine/* \
|
||||
--exclude-files tfhe/examples/utilities/*
|
||||
endef
|
||||
|
||||
.PHONY: rs_check_toolchain # Echo the rust toolchain used for checks
|
||||
rs_check_toolchain:
|
||||
@echo $(RS_CHECK_TOOLCHAIN)
|
||||
@@ -79,14 +102,42 @@ install_node:
|
||||
$(SHELL) -i -c 'nvm install node' || \
|
||||
( echo "Unable to install node, unknown error." && exit 1 )
|
||||
|
||||
.PHONY: install_dieharder # Install dieharder for apt distributions or macOS
|
||||
install_dieharder:
|
||||
@dieharder -h > /dev/null 2>&1 || \
|
||||
if [[ "$(OS)" == "Linux" ]]; then \
|
||||
sudo apt update && sudo apt install -y dieharder; \
|
||||
elif [[ "$(OS)" == "Darwin" ]]; then\
|
||||
brew install dieharder; \
|
||||
fi || ( echo "Unable to install dieharder, unknown error." && exit 1 )
|
||||
|
||||
.PHONY: install_tarpaulin # Install tarpaulin to perform code coverage
|
||||
install_tarpaulin: install_rs_build_toolchain
|
||||
@cargo tarpaulin --version > /dev/null 2>&1 || \
|
||||
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install cargo-tarpaulin --locked || \
|
||||
( echo "Unable to install cargo tarpaulin, unknown error." && exit 1 )
|
||||
|
||||
.PHONY: check_linelint_installed # Check if linelint newline linter is installed
|
||||
check_linelint_installed:
|
||||
@printf "\n" | linelint - > /dev/null 2>&1 || \
|
||||
( echo "Unable to locate linelint. Try installing it: https://github.com/fernandrone/linelint/releases" && exit 1 )
|
||||
|
||||
.PHONY: fmt # Format rust code
|
||||
fmt: install_rs_check_toolchain
|
||||
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt
|
||||
|
||||
.PHONT: check_fmt # Check rust code format
|
||||
.PHONY: check_fmt # Check rust code format
|
||||
check_fmt: install_rs_check_toolchain
|
||||
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt --check
|
||||
|
||||
.PHONY: fix_newline # Fix newline at end of file issues to be UNIX compliant
|
||||
fix_newline: check_linelint_installed
|
||||
linelint -a .
|
||||
|
||||
.PHONY: check_newline # Check for newline at end of file to be UNIX compliant
|
||||
check_newline: check_linelint_installed
|
||||
linelint .
|
||||
|
||||
.PHONY: clippy_core # Run clippy lints on core_crypto with and without experimental features
|
||||
clippy_core: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
@@ -137,25 +188,38 @@ clippy_tasks:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
-p tasks -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_trivium # Run clippy lints on Trivium app
|
||||
clippy_trivium: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy -p tfhe-trivium \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_all_targets # Run clippy lints on all targets (benches, examples, etc.)
|
||||
clippy_all_targets:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,safe-deserialization \
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_concrete_csprng # Run clippy lints on concrete-csprng
|
||||
clippy_concrete_csprng:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
|
||||
--features=$(TARGET_ARCH_FEATURE) \
|
||||
-p concrete-csprng -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_all # Run all clippy targets
|
||||
clippy_all: clippy clippy_boolean clippy_shortint clippy_integer clippy_all_targets clippy_c_api \
|
||||
clippy_js_wasm_api clippy_tasks clippy_core
|
||||
clippy_js_wasm_api clippy_tasks clippy_core clippy_concrete_csprng clippy_trivium
|
||||
|
||||
.PHONY: clippy_fast # Run main clippy targets
|
||||
clippy_fast: clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core
|
||||
clippy_fast: clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core \
|
||||
clippy_concrete_csprng
|
||||
|
||||
.PHONY: gen_key_cache # Run the script to generate keys and cache them for shortint tests
|
||||
gen_key_cache: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) run --profile $(CARGO_PROFILE) \
|
||||
--example generates_test_keys \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p tfhe -- \
|
||||
$(MULTI_BIT_ONLY)
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache -p tfhe -- \
|
||||
$(MULTI_BIT_ONLY) $(COVERAGE_ONLY)
|
||||
|
||||
.PHONY: build_core # Build core_crypto without experimental features
|
||||
build_core: install_rs_build_toolchain install_rs_check_toolchain
|
||||
@@ -198,13 +262,13 @@ build_tfhe_full: install_rs_build_toolchain
|
||||
.PHONY: build_c_api # Build the C API for boolean, shortint and integer
|
||||
build_c_api: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization \
|
||||
-p tfhe
|
||||
|
||||
.PHONY: build_c_api_experimental_deterministic_fft # Build the C API for boolean, shortint and integer with experimental deterministic FFT
|
||||
build_c_api_experimental_deterministic_fft: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,experimental-force_fft_algo_dif4 \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization,experimental-force_fft_algo_dif4 \
|
||||
-p tfhe
|
||||
|
||||
.PHONY: build_web_js_api # Build the js API targeting the web browser
|
||||
@@ -230,6 +294,11 @@ build_node_js_api: install_rs_build_toolchain install_wasm_pack
|
||||
wasm-pack build --release --target=nodejs \
|
||||
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api
|
||||
|
||||
.PHONY: build_concrete_csprng # Build concrete_csprng
|
||||
build_concrete_csprng: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-csprng --all-targets
|
||||
|
||||
.PHONY: test_core_crypto # Run the tests of the core_crypto module including experimental ones
|
||||
test_core_crypto: install_rs_build_toolchain install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
@@ -244,10 +313,18 @@ test_boolean: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean -p tfhe -- boolean::
|
||||
|
||||
.PHONY: test_boolean_cov # Run the tests of the boolean module with code coverage
|
||||
test_boolean_cov: install_rs_check_toolchain install_tarpaulin
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) tarpaulin --profile $(CARGO_PROFILE) \
|
||||
--out xml --output-dir coverage/boolean --line --engine llvm --timeout 500 \
|
||||
$(COVERAGE_EXCLUDED_FILES) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,internal-keycache,__coverage \
|
||||
-p tfhe -- boolean::
|
||||
|
||||
.PHONY: test_c_api_rs # Run the rust tests for the C API
|
||||
test_c_api_rs: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization \
|
||||
-p tfhe \
|
||||
c_api
|
||||
|
||||
@@ -277,19 +354,32 @@ test_shortint: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p tfhe -- shortint::
|
||||
|
||||
.PHONY: test_shortint_cov # Run the tests of the shortint module with code coverage
|
||||
test_shortint_cov: install_rs_check_toolchain install_tarpaulin
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) tarpaulin --profile $(CARGO_PROFILE) \
|
||||
--out xml --output-dir coverage/shortint --line --engine llvm --timeout 500 \
|
||||
$(COVERAGE_EXCLUDED_FILES) \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,__coverage \
|
||||
-p tfhe -- shortint::
|
||||
|
||||
.PHONY: test_integer_ci # Run the tests for integer ci
|
||||
test_integer_ci: install_rs_build_toolchain install_cargo_nextest
|
||||
test_integer_ci: install_rs_check_toolchain install_cargo_nextest
|
||||
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
|
||||
FAST_TESTS="$(FAST_TESTS)" \
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)"
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --avx512-support "$(AVX512_SUPPORT)"
|
||||
|
||||
.PHONY: test_integer_multi_bit_ci # Run the tests for integer ci running only multibit tests
|
||||
test_integer_multi_bit_ci: install_rs_build_toolchain install_cargo_nextest
|
||||
test_integer_multi_bit_ci: install_rs_check_toolchain install_cargo_nextest
|
||||
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
|
||||
FAST_TESTS="$(FAST_TESTS)" \
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --multi-bit
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --multi-bit --avx512-support "$(AVX512_SUPPORT)"
|
||||
|
||||
.PHONY: test_safe_deserialization # Run the tests for safe deserialization
|
||||
test_safe_deserialization: install_rs_build_toolchain install_cargo_nextest
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,safe-deserialization -p tfhe -- safe_deserialization::
|
||||
|
||||
.PHONY: test_integer # Run all the tests for integer
|
||||
test_integer: install_rs_build_toolchain
|
||||
@@ -326,24 +416,36 @@ test_examples: test_sha256_bool test_regex_engine
|
||||
.PHONY: test_trivium # Run tests for trivium
|
||||
test_trivium: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
trivium --features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
|
||||
-- --test-threads=1
|
||||
-p tfhe-trivium -- --test-threads=1 trivium::
|
||||
|
||||
.PHONY: test_kreyvium # Run tests for kreyvium
|
||||
test_kreyvium: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
kreyvium --features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
|
||||
-- --test-threads=1
|
||||
-p tfhe-trivium -- --test-threads=1 kreyvium::
|
||||
|
||||
.PHONY: test_concrete_csprng # Run concrete-csprng tests
|
||||
test_concrete_csprng:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-csprng
|
||||
|
||||
.PHONY: doc # Build rust doc
|
||||
doc: install_rs_check_toolchain
|
||||
RUSTDOCFLAGS="--html-in-header katex-header.html -Dwarnings" \
|
||||
RUSTDOCFLAGS="--html-in-header katex-header.html" \
|
||||
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" doc \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer --no-deps
|
||||
|
||||
.PHONY: docs # Build rust doc alias for doc
|
||||
docs: doc
|
||||
|
||||
.PHONY: lint_doc # Build rust doc with linting enabled
|
||||
lint_doc: install_rs_check_toolchain
|
||||
RUSTDOCFLAGS="--html-in-header katex-header.html -Dwarnings" \
|
||||
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" doc \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer --no-deps
|
||||
|
||||
.PHONY: lint_docs # Build rust doc with linting enabled alias for lint_doc
|
||||
lint_docs: lint_doc
|
||||
|
||||
.PHONY: format_doc_latex # Format the documentation latex equations to avoid broken rendering.
|
||||
format_doc_latex:
|
||||
cargo xtask format_latex_doc
|
||||
@@ -356,7 +458,7 @@ format_doc_latex:
|
||||
.PHONY: check_compile_tests # Build tests in debug without running them
|
||||
check_compile_tests:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --no-run \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,boolean,shortint,integer,internal-keycache \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,boolean,shortint,integer,internal-keycache,safe-deserialization \
|
||||
-p tfhe
|
||||
|
||||
@if [[ "$(OS)" == "Linux" || "$(OS)" == "Darwin" ]]; then \
|
||||
@@ -391,7 +493,8 @@ test_web_js_api_parallel: build_web_js_api_parallel
|
||||
.PHONY: ci_test_web_js_api_parallel # Run tests for the web wasm api
|
||||
ci_test_web_js_api_parallel: build_web_js_api_parallel
|
||||
source ~/.nvm/nvm.sh && \
|
||||
nvm use node && \
|
||||
nvm install 20 && \
|
||||
nvm use 20 && \
|
||||
$(MAKE) -C tfhe/web_wasm_parallel_tests test-ci
|
||||
|
||||
.PHONY: no_tfhe_typo # Check we did not invert the h and f in tfhe
|
||||
@@ -402,20 +505,25 @@ no_tfhe_typo:
|
||||
no_dbg_log:
|
||||
@./scripts/no_dbg_calls.sh
|
||||
|
||||
.PHONY: dieharder_csprng # Run the dieharder test suite on our CSPRNG implementation
|
||||
dieharder_csprng: install_dieharder build_concrete_csprng
|
||||
./scripts/dieharder_test.sh
|
||||
|
||||
#
|
||||
# Benchmarks
|
||||
#
|
||||
|
||||
.PHONY: bench_integer # Run benchmarks for integer
|
||||
bench_integer: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) \
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench integer-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p tfhe --
|
||||
|
||||
.PHONY: bench_integer_multi_bit # Run benchmarks for integer using multi-bit parameters
|
||||
bench_integer_multi_bit: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=MULTI_BIT __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) \
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=MULTI_BIT \
|
||||
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench integer-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p tfhe --
|
||||
@@ -427,6 +535,15 @@ bench_shortint: install_rs_check_toolchain
|
||||
--bench shortint-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p tfhe
|
||||
|
||||
.PHONY: bench_shortint_multi_bit # Run benchmarks for shortint using multi-bit parameters
|
||||
bench_shortint_multi_bit: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=MULTI_BIT \
|
||||
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench shortint-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p tfhe --
|
||||
|
||||
|
||||
.PHONY: bench_boolean # Run benchmarks for boolean
|
||||
bench_boolean: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
@@ -484,6 +601,12 @@ parse_wasm_benchmarks: install_rs_check_toolchain
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache \
|
||||
-- web_wasm_parallel_tests/test/benchmark_results
|
||||
|
||||
.PHONY: write_params_to_file # Gather all crypto parameters into a file with a Sage readable format.
|
||||
write_params_to_file: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) run --profile $(CARGO_PROFILE) \
|
||||
--example write_params_to_file \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache
|
||||
|
||||
#
|
||||
# Real use case examples
|
||||
#
|
||||
@@ -509,13 +632,13 @@ sha256_bool: install_rs_check_toolchain
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean
|
||||
|
||||
.PHONY: pcc # pcc stands for pre commit checks
|
||||
pcc: no_tfhe_typo no_dbg_log check_fmt doc clippy_all check_compile_tests
|
||||
pcc: no_tfhe_typo no_dbg_log check_fmt lint_doc clippy_all check_compile_tests
|
||||
|
||||
.PHONY: fpcc # pcc stands for pre commit checks, the f stands for fast
|
||||
fpcc: no_tfhe_typo no_dbg_log check_fmt doc clippy_fast check_compile_tests
|
||||
fpcc: no_tfhe_typo no_dbg_log check_fmt lint_doc clippy_fast check_compile_tests
|
||||
|
||||
.PHONY: conformance # Automatically fix problems that can be fixed
|
||||
conformance: fmt
|
||||
conformance: fix_newline fmt
|
||||
|
||||
.PHONY: help # Generate list of targets with descriptions
|
||||
help:
|
||||
|
||||
150
README.md
150
README.md
@@ -31,7 +31,9 @@ implementation. The goal is to have a stable, simple, high-performance, and
|
||||
production-ready library for all the advanced features of TFHE.
|
||||
|
||||
## Getting Started
|
||||
The steps to run a first example are described below.
|
||||
|
||||
### Cargo.toml configuration
|
||||
To use the latest version of `TFHE-rs` in your project, you first need to add it as a dependency in your `Cargo.toml`:
|
||||
|
||||
+ For x86_64-based machines running Unix-like OSes:
|
||||
@@ -45,7 +47,7 @@ tfhe = { version = "*", features = ["boolean", "shortint", "integer", "x86_64-un
|
||||
```toml
|
||||
tfhe = { version = "*", features = ["boolean", "shortint", "integer", "aarch64-unix"] }
|
||||
```
|
||||
Note: users with ARM devices must use `TFHE-rs` by compiling using the `nightly` toolchain.
|
||||
Note: users with ARM devices must compile `TFHE-rs` using a stable toolchain with version >= 1.72.
|
||||
|
||||
|
||||
+ For x86_64-based machines with the [`rdseed instruction`](https://en.wikipedia.org/wiki/RDRAND)
|
||||
@@ -57,95 +59,69 @@ tfhe = { version = "*", features = ["boolean", "shortint", "integer", "x86_64"]
|
||||
|
||||
Note: aarch64-based machines are not yet supported for Windows as it's currently missing an entropy source to be able to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in TFHE-rs
|
||||
|
||||
|
||||
## A simple example
|
||||
|
||||
Here is a full example:
|
||||
|
||||
``` rust
|
||||
use tfhe::prelude::*;
|
||||
use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheUint32, FheUint8};
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Basic configuration to use homomorphic integers
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
|
||||
// Key generation
|
||||
let (client_key, server_keys) = generate_keys(config);
|
||||
|
||||
let clear_a = 1344u32;
|
||||
let clear_b = 5u32;
|
||||
let clear_c = 7u8;
|
||||
|
||||
// Encrypting the input data using the (private) client_key
|
||||
// FheUint32: Encrypted equivalent to u32
|
||||
let mut encrypted_a = FheUint32::try_encrypt(clear_a, &client_key)?;
|
||||
let encrypted_b = FheUint32::try_encrypt(clear_b, &client_key)?;
|
||||
|
||||
// FheUint8: Encrypted equivalent to u8
|
||||
let encrypted_c = FheUint8::try_encrypt(clear_c, &client_key)?;
|
||||
|
||||
// On the server side:
|
||||
set_server_key(server_keys);
|
||||
|
||||
// Clear equivalent computations: 1344 * 5 = 6720
|
||||
let encrypted_res_mul = &encrypted_a * &encrypted_b;
|
||||
|
||||
// Clear equivalent computations: 1344 >> 5 = 42
|
||||
encrypted_a = &encrypted_res_mul >> &encrypted_b;
|
||||
|
||||
// Clear equivalent computations: let casted_a = a as u8;
|
||||
let casted_a: FheUint8 = encrypted_a.cast_into();
|
||||
|
||||
// Clear equivalent computations: min(42, 7) = 7
|
||||
let encrypted_res_min = &casted_a.min(&encrypted_c);
|
||||
|
||||
// Operation between clear and encrypted data:
|
||||
// Clear equivalent computations: 7 & 1 = 1
|
||||
let encrypted_res = encrypted_res_min & 1_u8;
|
||||
|
||||
// Decrypting on the client side:
|
||||
let clear_res: u8 = encrypted_res.decrypt(&client_key);
|
||||
assert_eq!(clear_res, 1_u8);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
To run this code, use the following command:
|
||||
<p align="center"> <code> cargo run --release </code> </p>
|
||||
|
||||
Note that when running code that uses `tfhe-rs`, it is highly recommended
|
||||
to run in release mode with cargo's `--release` flag to have the best performances possible,
|
||||
eg: `cargo run --release`.
|
||||
|
||||
Here is a full example evaluating a Boolean circuit:
|
||||
|
||||
```rust
|
||||
use tfhe::boolean::prelude::*;
|
||||
|
||||
fn main() {
|
||||
// We generate a set of client/server keys, using the default parameters:
|
||||
let (client_key, server_key) = gen_keys();
|
||||
|
||||
// We use the client secret key to encrypt two messages:
|
||||
let ct_1 = client_key.encrypt(true);
|
||||
let ct_2 = client_key.encrypt(false);
|
||||
|
||||
// We use the server public key to execute a boolean circuit:
|
||||
// if ((NOT ct_2) NAND (ct_1 AND ct_2)) then (NOT ct_2) else (ct_1 AND ct_2)
|
||||
let ct_3 = server_key.not(&ct_2);
|
||||
let ct_4 = server_key.and(&ct_1, &ct_2);
|
||||
let ct_5 = server_key.nand(&ct_3, &ct_4);
|
||||
let ct_6 = server_key.mux(&ct_5, &ct_3, &ct_4);
|
||||
|
||||
// We use the client key to decrypt the output of the circuit:
|
||||
let output = client_key.decrypt(&ct_6);
|
||||
assert_eq!(output, true);
|
||||
}
|
||||
```
|
||||
|
||||
Another example of how the library can be used with shortints:
|
||||
|
||||
```rust
|
||||
use tfhe::shortint::prelude::*;
|
||||
|
||||
fn main() {
|
||||
// Generate a set of client/server keys
|
||||
// with 2 bits of message and 2 bits of carry
|
||||
let (client_key, server_key) = gen_keys(PARAM_MESSAGE_2_CARRY_2_KS_PBS);
|
||||
|
||||
let msg1 = 3;
|
||||
let msg2 = 2;
|
||||
|
||||
// Encrypt two messages using the (private) client key:
|
||||
let ct_1 = client_key.encrypt(msg1);
|
||||
let ct_2 = client_key.encrypt(msg2);
|
||||
|
||||
// Homomorphically compute an addition
|
||||
let ct_add = server_key.unchecked_add(&ct_1, &ct_2);
|
||||
|
||||
// Define the Hamming weight function
|
||||
// f: x -> sum of the bits of x
|
||||
let f = |x:u64| x.count_ones() as u64;
|
||||
|
||||
// Generate the lookup table for the function
|
||||
let acc = server_key.generate_lookup_table(f);
|
||||
|
||||
// Compute the function over the ciphertext using the PBS
|
||||
let ct_res = server_key.apply_lookup_table(&ct_add, &acc);
|
||||
|
||||
// Decrypt the ciphertext using the (private) client key
|
||||
let output = client_key.decrypt(&ct_res);
|
||||
assert_eq!(output, f(msg1 + msg2));
|
||||
}
|
||||
```
|
||||
|
||||
An example using integer:
|
||||
|
||||
```rust
|
||||
use tfhe::integer::gen_keys_radix;
|
||||
use tfhe::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS;
|
||||
|
||||
fn main() {
|
||||
// We create keys to create 16 bits integers
|
||||
// using 8 blocks of 2 bits
|
||||
let (cks, sks) = gen_keys_radix(PARAM_MESSAGE_2_CARRY_2_KS_PBS, 8);
|
||||
|
||||
let clear_a = 2382u16;
|
||||
let clear_b = 29374u16;
|
||||
|
||||
let mut a = cks.encrypt(clear_a as u64);
|
||||
let mut b = cks.encrypt(clear_b as u64);
|
||||
|
||||
let encrypted_max = sks.smart_max_parallelized(&mut a, &mut b);
|
||||
let decrypted_max: u64 = cks.decrypt(&encrypted_max);
|
||||
|
||||
assert_eq!(decrypted_max as u16, clear_a.max(clear_b))
|
||||
}
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ path = "../../tfhe"
|
||||
features = [ "boolean", "shortint", "integer", "aarch64-unix" ]
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { version = "0.4", features = [ "html_reports" ]}
|
||||
criterion = { version = "0.5.1", features = [ "html_reports" ]}
|
||||
|
||||
[[bench]]
|
||||
name = "trivium"
|
||||
|
||||
@@ -120,7 +120,7 @@ fn main() {
|
||||
|
||||
# FHE byte Trivium implementation
|
||||
|
||||
The same objects have also been implemented to stream bytes insead of booleans. They can be constructed and used in the same way via the functions `TriviumStreamByte::<u8>::new` and
|
||||
The same objects have also been implemented to stream bytes instead of booleans. They can be constructed and used in the same way via the functions `TriviumStreamByte::<u8>::new` and
|
||||
`TriviumStreamByte::<FheUint8>::new` with the same arguments as before. The `FheUint8` version is significantly slower than the `FheBool` version, because not running
|
||||
with the same cryptographic parameters. Its interest lie in its trans-ciphering capabilities: `TriviumStreamByte<FheUint8>` implements the trait `TransCiphering`,
|
||||
meaning it implements the functions `trans_encrypt_64`. This function takes as input a `FheUint64` and outputs a `FheUint64`, the output being
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//! This module implements the Kreyvium stream cipher, using booleans or FheBool
|
||||
//! for the representaion of the inner bits.
|
||||
//! for the representation of the inner bits.
|
||||
|
||||
use crate::static_deque::StaticDeque;
|
||||
|
||||
@@ -35,7 +35,7 @@ pub struct KreyviumStream<T> {
|
||||
}
|
||||
|
||||
impl KreyviumStream<bool> {
|
||||
/// Contructor for `KreyviumStream<bool>`: arguments are the secret key and the input vector.
|
||||
/// Constructor for `KreyviumStream<bool>`: arguments are the secret key and the input vector.
|
||||
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
|
||||
/// returning)
|
||||
pub fn new(mut key: [bool; 128], mut iv: [bool; 128]) -> KreyviumStream<bool> {
|
||||
@@ -80,9 +80,9 @@ impl KreyviumStream<FheBool> {
|
||||
|
||||
// Initialization of Kreyvium registers: a has the secret key, b the input vector,
|
||||
// and c a few ones.
|
||||
let mut a_register = [false; 93].map(|x| FheBool::encrypt_trivial(x));
|
||||
let mut b_register = [false; 84].map(|x| FheBool::encrypt_trivial(x));
|
||||
let mut c_register = [false; 111].map(|x| FheBool::encrypt_trivial(x));
|
||||
let mut a_register = [false; 93].map(FheBool::encrypt_trivial);
|
||||
let mut b_register = [false; 84].map(FheBool::encrypt_trivial);
|
||||
let mut c_register = [false; 111].map(FheBool::encrypt_trivial);
|
||||
|
||||
for i in 0..93 {
|
||||
a_register[i] = key[128 - 93 + i].clone();
|
||||
@@ -99,7 +99,7 @@ impl KreyviumStream<FheBool> {
|
||||
|
||||
key.reverse();
|
||||
iv.reverse();
|
||||
let iv = iv.map(|x| FheBool::encrypt_trivial(x));
|
||||
let iv = iv.map(FheBool::encrypt_trivial);
|
||||
|
||||
unset_server_key();
|
||||
KreyviumStream::<FheBool>::new_from_registers(
|
||||
@@ -118,7 +118,7 @@ where
|
||||
T: KreyviumBoolInput<T> + std::marker::Send + std::marker::Sync,
|
||||
for<'a> &'a T: KreyviumBoolInput<T>,
|
||||
{
|
||||
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
|
||||
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
|
||||
/// server key
|
||||
fn new_from_registers(
|
||||
a_register: [T; 93],
|
||||
@@ -149,7 +149,7 @@ where
|
||||
}
|
||||
|
||||
/// Computes one turn of the stream, updating registers and outputting the new bit.
|
||||
pub fn next(&mut self) -> T {
|
||||
pub fn next_bool(&mut self) -> T {
|
||||
match &self.fhe_key {
|
||||
Some(sk) => set_server_key(sk.clone()),
|
||||
None => (),
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//! This module implements the Kreyvium stream cipher, using u8 or FheUint8
|
||||
//! for the representaion of the inner bits.
|
||||
//! for the representation of the inner bits.
|
||||
|
||||
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
|
||||
|
||||
@@ -31,7 +31,7 @@ impl KreyviumByteInput<FheUint8> for &FheUint8 {}
|
||||
/// representation of bits (u8 or FheUint8). To be able to compute FHE operations, it also owns
|
||||
/// an Option for a ServerKey.
|
||||
/// Since the original Kreyvium registers' sizes are not a multiple of 8, these registers (which
|
||||
/// store byte-like objects) have a size that is the eigth of the closest multiple of 8 above the
|
||||
/// store byte-like objects) have a size that is the eighth of the closest multiple of 8 above the
|
||||
/// originals' sizes.
|
||||
pub struct KreyviumStreamByte<T> {
|
||||
a_byte: StaticByteDeque<12, T>,
|
||||
@@ -43,7 +43,7 @@ pub struct KreyviumStreamByte<T> {
|
||||
}
|
||||
|
||||
impl KreyviumStreamByte<u8> {
|
||||
/// Contructor for `KreyviumStreamByte<u8>`: arguments are the secret key and the input vector.
|
||||
/// Constructor for `KreyviumStreamByte<u8>`: arguments are the secret key and the input vector.
|
||||
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
|
||||
/// returning)
|
||||
pub fn new(key_bytes: [u8; 16], iv_bytes: [u8; 16]) -> KreyviumStreamByte<u8> {
|
||||
@@ -54,18 +54,15 @@ impl KreyviumStreamByte<u8> {
|
||||
let mut c_byte_reg = [0u8; 14];
|
||||
|
||||
// Copy key bits into a register
|
||||
for b in 0..12 {
|
||||
a_byte_reg[b] = key_bytes[b + 4];
|
||||
}
|
||||
a_byte_reg.copy_from_slice(&key_bytes[4..]);
|
||||
|
||||
// Copy iv bits into a register
|
||||
for b in 0..11 {
|
||||
b_byte_reg[b] = iv_bytes[b + 5];
|
||||
}
|
||||
b_byte_reg.copy_from_slice(&iv_bytes[5..]);
|
||||
|
||||
// Copy a lot of ones in the c register
|
||||
c_byte_reg[0] = 252;
|
||||
for b in 1..8 {
|
||||
c_byte_reg[b] = 255;
|
||||
}
|
||||
c_byte_reg[1..8].fill(255);
|
||||
|
||||
// Copy iv bits in the c register
|
||||
c_byte_reg[8] = (iv_bytes[0] << 4) | 31;
|
||||
for b in 9..14 {
|
||||
@@ -100,23 +97,22 @@ impl KreyviumStreamByte<FheUint8> {
|
||||
|
||||
// Initialization of Kreyvium registers: a has the secret key, b the input vector,
|
||||
// and c a few ones.
|
||||
let mut a_byte_reg = [0u8; 12].map(|x| FheUint8::encrypt_trivial(x));
|
||||
let mut b_byte_reg = [0u8; 11].map(|x| FheUint8::encrypt_trivial(x));
|
||||
let mut c_byte_reg = [0u8; 14].map(|x| FheUint8::encrypt_trivial(x));
|
||||
let mut a_byte_reg = [0u8; 12].map(FheUint8::encrypt_trivial);
|
||||
let mut b_byte_reg = [0u8; 11].map(FheUint8::encrypt_trivial);
|
||||
let mut c_byte_reg = [0u8; 14].map(FheUint8::encrypt_trivial);
|
||||
|
||||
// Copy key bits into a register
|
||||
for b in 0..12 {
|
||||
a_byte_reg[b] = key_bytes[b + 4].clone();
|
||||
}
|
||||
a_byte_reg.clone_from_slice(&key_bytes[4..]);
|
||||
|
||||
// Copy iv bits into a register
|
||||
for b in 0..11 {
|
||||
b_byte_reg[b] = FheUint8::encrypt_trivial(iv_bytes[b + 5]);
|
||||
}
|
||||
// Copy a lot of ones in the c register
|
||||
c_byte_reg[0] = FheUint8::encrypt_trivial(252u8);
|
||||
for b in 1..8 {
|
||||
c_byte_reg[b] = FheUint8::encrypt_trivial(255u8);
|
||||
}
|
||||
|
||||
c_byte_reg[1..8].fill_with(|| FheUint8::encrypt_trivial(255u8));
|
||||
|
||||
// Copy iv bits in the c register
|
||||
c_byte_reg[8] = FheUint8::encrypt_trivial((&iv_bytes[0] << 4u8) | 31u8);
|
||||
for b in 9..14 {
|
||||
@@ -150,7 +146,7 @@ where
|
||||
T: KreyviumByteInput<T> + Send,
|
||||
for<'a> &'a T: KreyviumByteInput<T>,
|
||||
{
|
||||
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
|
||||
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
|
||||
/// server key
|
||||
fn new_from_registers(
|
||||
a_register: [T; 12],
|
||||
@@ -292,6 +288,6 @@ where
|
||||
|
||||
impl KreyviumStreamByte<FheUint8> {
|
||||
pub fn get_server_key(&self) -> &ServerKey {
|
||||
&self.fhe_key.as_ref().unwrap()
|
||||
self.fhe_key.as_ref().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ pub struct KreyviumStreamShortint {
|
||||
}
|
||||
|
||||
impl KreyviumStreamShortint {
|
||||
/// Contructor for KreyviumStreamShortint: arguments are the secret key and the input vector,
|
||||
/// Constructor for KreyviumStreamShortint: arguments are the secret key and the input vector,
|
||||
/// and a ServerKey reference. Outputs a KreyviumStream object already initialized (1152
|
||||
/// steps have been run before returning)
|
||||
pub fn new(
|
||||
@@ -75,7 +75,7 @@ impl KreyviumStreamShortint {
|
||||
}
|
||||
|
||||
/// Computes one turn of the stream, updating registers and outputting the new bit.
|
||||
pub fn next(&mut self) -> Ciphertext {
|
||||
pub fn next_ct(&mut self) -> Ciphertext {
|
||||
let [o, a, b, c] = self.get_output_and_values(0);
|
||||
|
||||
self.a.push(a);
|
||||
@@ -149,7 +149,7 @@ impl KreyviumStreamShortint {
|
||||
.unchecked_add_assign(&mut new_c, c5);
|
||||
self.internal_server_key
|
||||
.unchecked_add_assign(&mut new_c, &temp_b);
|
||||
self.internal_server_key.clear_carry_assign(&mut new_c);
|
||||
self.internal_server_key.message_extract_assign(&mut new_c);
|
||||
new_c
|
||||
},
|
||||
|| {
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#[allow(clippy::module_inception)]
|
||||
mod kreyvium;
|
||||
pub use kreyvium::KreyviumStream;
|
||||
|
||||
|
||||
@@ -56,7 +56,7 @@ fn get_hexadecimal_string_from_lsb_first_stream(a: Vec<bool>) -> String {
|
||||
_ => (),
|
||||
};
|
||||
}
|
||||
return hexadecimal;
|
||||
hexadecimal
|
||||
}
|
||||
|
||||
fn get_hexagonal_string_from_bytes(a: Vec<u8>) -> String {
|
||||
@@ -65,7 +65,7 @@ fn get_hexagonal_string_from_bytes(a: Vec<u8>) -> String {
|
||||
for test in a {
|
||||
hexadecimal.push_str(&format!("{:02X?}", test));
|
||||
}
|
||||
return hexadecimal;
|
||||
hexadecimal
|
||||
}
|
||||
|
||||
fn get_hexagonal_string_from_u64(a: Vec<u64>) -> String {
|
||||
@@ -73,7 +73,7 @@ fn get_hexagonal_string_from_u64(a: Vec<u64>) -> String {
|
||||
for test in a {
|
||||
hexadecimal.push_str(&format!("{:016X?}", test));
|
||||
}
|
||||
return hexadecimal;
|
||||
hexadecimal
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -86,7 +86,7 @@ fn kreyvium_test_1() {
|
||||
|
||||
let mut vec = Vec::<bool>::with_capacity(64);
|
||||
while vec.len() < 64 {
|
||||
vec.push(kreyvium.next());
|
||||
vec.push(kreyvium.next_bool());
|
||||
}
|
||||
|
||||
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
|
||||
@@ -105,7 +105,7 @@ fn kreyvium_test_2() {
|
||||
|
||||
let mut vec = Vec::<bool>::with_capacity(64);
|
||||
while vec.len() < 64 {
|
||||
vec.push(kreyvium.next());
|
||||
vec.push(kreyvium.next_bool());
|
||||
}
|
||||
|
||||
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
|
||||
@@ -124,7 +124,7 @@ fn kreyvium_test_3() {
|
||||
|
||||
let mut vec = Vec::<bool>::with_capacity(64);
|
||||
while vec.len() < 64 {
|
||||
vec.push(kreyvium.next());
|
||||
vec.push(kreyvium.next_bool());
|
||||
}
|
||||
|
||||
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
|
||||
@@ -161,7 +161,7 @@ fn kreyvium_test_4() {
|
||||
|
||||
let mut vec = Vec::<bool>::with_capacity(64);
|
||||
while vec.len() < 64 {
|
||||
vec.push(kreyvium.next());
|
||||
vec.push(kreyvium.next_bool());
|
||||
}
|
||||
|
||||
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#[allow(clippy::module_inception)]
|
||||
mod static_deque;
|
||||
pub use static_deque::StaticDeque;
|
||||
mod static_byte_deque;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//! This module implements the StaticByteDeque struct: a deque of bytes. The idea
|
||||
//! is that this is a wrapper around StaticDeque, but StaticByteDeque has an additional
|
||||
//! functionnality: it can construct the "intermediate" bytes, made of parts of other bytes.
|
||||
//! functionality: it can construct the "intermediate" bytes, made of parts of other bytes.
|
||||
//! This is pretending to store bits, and allows accessing bits in chunks of 8 consecutive.
|
||||
|
||||
use crate::static_deque::StaticDeque;
|
||||
@@ -77,7 +77,7 @@ where
|
||||
}
|
||||
|
||||
let byte_next: &T = &self.deque[i / 8 + 1];
|
||||
return (byte << bit_idx) | (byte_next >> (8 - bit_idx as u8));
|
||||
(byte << bit_idx) | (byte_next >> (8 - bit_idx))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,7 +101,7 @@ mod tests {
|
||||
assert!(deque.bit(7) == 0);
|
||||
|
||||
// second youngest: 128
|
||||
assert!(deque.bit(8 + 0) == 0);
|
||||
assert!(deque.bit(8) == 0);
|
||||
assert!(deque.bit(8 + 1) == 0);
|
||||
assert!(deque.bit(8 + 2) == 0);
|
||||
assert!(deque.bit(8 + 3) == 0);
|
||||
@@ -111,7 +111,7 @@ mod tests {
|
||||
assert!(deque.bit(8 + 7) > 0);
|
||||
|
||||
// oldest: 64
|
||||
assert!(deque.bit(16 + 0) == 0);
|
||||
assert!(deque.bit(16) == 0);
|
||||
assert!(deque.bit(16 + 1) == 0);
|
||||
assert!(deque.bit(16 + 2) == 0);
|
||||
assert!(deque.bit(16 + 3) == 0);
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
use core::ops::{Index, IndexMut};
|
||||
|
||||
/// StaticDeque: a struct implementing a deque whose size is known at compile time.
|
||||
/// It has 2 members: the static array conatining the data (never empty), and a cursor
|
||||
/// It has 2 members: the static array containing the data (never empty), and a cursor
|
||||
/// equal to the index of the oldest element (and the next one to be overwritten).
|
||||
#[derive(Clone)]
|
||||
pub struct StaticDeque<const N: usize, T> {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
mod trivium;
|
||||
pub use trivium::TriviumStream;
|
||||
mod trivium_bool;
|
||||
pub use trivium_bool::TriviumStream;
|
||||
|
||||
mod trivium_byte;
|
||||
pub use trivium_byte::TriviumStreamByte;
|
||||
|
||||
@@ -56,7 +56,7 @@ fn get_hexadecimal_string_from_lsb_first_stream(a: Vec<bool>) -> String {
|
||||
_ => (),
|
||||
};
|
||||
}
|
||||
return hexadecimal;
|
||||
hexadecimal
|
||||
}
|
||||
|
||||
fn get_hexagonal_string_from_bytes(a: Vec<u8>) -> String {
|
||||
@@ -65,7 +65,7 @@ fn get_hexagonal_string_from_bytes(a: Vec<u8>) -> String {
|
||||
for test in a {
|
||||
hexadecimal.push_str(&format!("{:02X?}", test));
|
||||
}
|
||||
return hexadecimal;
|
||||
hexadecimal
|
||||
}
|
||||
|
||||
fn get_hexagonal_string_from_u64(a: Vec<u64>) -> String {
|
||||
@@ -73,7 +73,7 @@ fn get_hexagonal_string_from_u64(a: Vec<u64>) -> String {
|
||||
for test in a {
|
||||
hexadecimal.push_str(&format!("{:016X?}", test));
|
||||
}
|
||||
return hexadecimal;
|
||||
hexadecimal
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -89,7 +89,7 @@ fn trivium_test_1() {
|
||||
|
||||
let mut vec = Vec::<bool>::with_capacity(512 * 8);
|
||||
while vec.len() < 512 * 8 {
|
||||
vec.push(trivium.next());
|
||||
vec.push(trivium.next_bool());
|
||||
}
|
||||
|
||||
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
|
||||
@@ -114,7 +114,7 @@ fn trivium_test_2() {
|
||||
|
||||
let mut vec = Vec::<bool>::with_capacity(512 * 8);
|
||||
while vec.len() < 512 * 8 {
|
||||
vec.push(trivium.next());
|
||||
vec.push(trivium.next_bool());
|
||||
}
|
||||
|
||||
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
|
||||
@@ -139,7 +139,7 @@ fn trivium_test_3() {
|
||||
|
||||
let mut vec = Vec::<bool>::with_capacity(512 * 8);
|
||||
while vec.len() < 512 * 8 {
|
||||
vec.push(trivium.next());
|
||||
vec.push(trivium.next_bool());
|
||||
}
|
||||
|
||||
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
|
||||
@@ -182,7 +182,7 @@ fn trivium_test_4() {
|
||||
|
||||
let mut vec = Vec::<bool>::with_capacity(131072 * 8);
|
||||
while vec.len() < 131072 * 8 {
|
||||
vec.push(trivium.next());
|
||||
vec.push(trivium.next_bool());
|
||||
}
|
||||
|
||||
let hexadecimal = get_hexadecimal_string_from_lsb_first_stream(vec);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//! This module implements the Trivium stream cipher, using booleans or FheBool
|
||||
//! for the representaion of the inner bits.
|
||||
//! for the representation of the inner bits.
|
||||
|
||||
use crate::static_deque::StaticDeque;
|
||||
|
||||
@@ -33,7 +33,7 @@ pub struct TriviumStream<T> {
|
||||
}
|
||||
|
||||
impl TriviumStream<bool> {
|
||||
/// Contructor for `TriviumStream<bool>`: arguments are the secret key and the input vector.
|
||||
/// Constructor for `TriviumStream<bool>`: arguments are the secret key and the input vector.
|
||||
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
|
||||
/// returning)
|
||||
pub fn new(key: [bool; 80], iv: [bool; 80]) -> TriviumStream<bool> {
|
||||
@@ -66,9 +66,9 @@ impl TriviumStream<FheBool> {
|
||||
|
||||
// Initialization of Trivium registers: a has the secret key, b the input vector,
|
||||
// and c a few ones.
|
||||
let mut a_register = [false; 93].map(|x| FheBool::encrypt_trivial(x));
|
||||
let mut b_register = [false; 84].map(|x| FheBool::encrypt_trivial(x));
|
||||
let mut c_register = [false; 111].map(|x| FheBool::encrypt_trivial(x));
|
||||
let mut a_register = [false; 93].map(FheBool::encrypt_trivial);
|
||||
let mut b_register = [false; 84].map(FheBool::encrypt_trivial);
|
||||
let mut c_register = [false; 111].map(FheBool::encrypt_trivial);
|
||||
|
||||
for i in 0..80 {
|
||||
a_register[93 - 80 + i] = key[i].clone();
|
||||
@@ -94,7 +94,7 @@ where
|
||||
T: TriviumBoolInput<T> + std::marker::Send + std::marker::Sync,
|
||||
for<'a> &'a T: TriviumBoolInput<T>,
|
||||
{
|
||||
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
|
||||
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
|
||||
/// server key
|
||||
fn new_from_registers(
|
||||
a_register: [T; 93],
|
||||
@@ -121,7 +121,7 @@ where
|
||||
}
|
||||
|
||||
/// Computes one turn of the stream, updating registers and outputting the new bit.
|
||||
pub fn next(&mut self) -> T {
|
||||
pub fn next_bool(&mut self) -> T {
|
||||
match &self.fhe_key {
|
||||
Some(sk) => set_server_key(sk.clone()),
|
||||
None => (),
|
||||
@@ -1,5 +1,5 @@
|
||||
//! This module implements the Trivium stream cipher, using u8 or FheUint8
|
||||
//! for the representaion of the inner bits.
|
||||
//! for the representation of the inner bits.
|
||||
|
||||
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
|
||||
|
||||
@@ -31,7 +31,7 @@ impl TriviumByteInput<FheUint8> for &FheUint8 {}
|
||||
/// representation of bits (u8 or FheUint8). To be able to compute FHE operations, it also owns
|
||||
/// an Option for a ServerKey.
|
||||
/// Since the original Trivium registers' sizes are not a multiple of 8, these registers (which
|
||||
/// store byte-like objects) have a size that is the eigth of the closest multiple of 8 above the
|
||||
/// store byte-like objects) have a size that is the eighth of the closest multiple of 8 above the
|
||||
/// originals' sizes.
|
||||
pub struct TriviumStreamByte<T> {
|
||||
a_byte: StaticByteDeque<12, T>,
|
||||
@@ -41,7 +41,7 @@ pub struct TriviumStreamByte<T> {
|
||||
}
|
||||
|
||||
impl TriviumStreamByte<u8> {
|
||||
/// Contructor for `TriviumStreamByte<u8>`: arguments are the secret key and the input vector.
|
||||
/// Constructor for `TriviumStreamByte<u8>`: arguments are the secret key and the input vector.
|
||||
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
|
||||
/// returning)
|
||||
pub fn new(key: [u8; 10], iv: [u8; 10]) -> TriviumStreamByte<u8> {
|
||||
@@ -81,9 +81,9 @@ impl TriviumStreamByte<FheUint8> {
|
||||
|
||||
// Initialization of Trivium registers: a has the secret key, b the input vector,
|
||||
// and c a few ones.
|
||||
let mut a_byte_reg = [0u8; 12].map(|x| FheUint8::encrypt_trivial(x));
|
||||
let mut b_byte_reg = [0u8; 11].map(|x| FheUint8::encrypt_trivial(x));
|
||||
let mut c_byte_reg = [0u8; 14].map(|x| FheUint8::encrypt_trivial(x));
|
||||
let mut a_byte_reg = [0u8; 12].map(FheUint8::encrypt_trivial);
|
||||
let mut b_byte_reg = [0u8; 11].map(FheUint8::encrypt_trivial);
|
||||
let mut c_byte_reg = [0u8; 14].map(FheUint8::encrypt_trivial);
|
||||
|
||||
for i in 0..10 {
|
||||
a_byte_reg[12 - 10 + i] = key[i].clone();
|
||||
@@ -111,7 +111,7 @@ where
|
||||
T: TriviumByteInput<T> + Send,
|
||||
for<'a> &'a T: TriviumByteInput<T>,
|
||||
{
|
||||
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
|
||||
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
|
||||
/// server key
|
||||
fn new_from_registers(
|
||||
a_register: [T; 12],
|
||||
@@ -236,6 +236,6 @@ where
|
||||
|
||||
impl TriviumStreamByte<FheUint8> {
|
||||
pub fn get_server_key(&self) -> &ServerKey {
|
||||
&self.fhe_key.as_ref().unwrap()
|
||||
self.fhe_key.as_ref().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,9 +17,9 @@ pub struct TriviumStreamShortint {
|
||||
}
|
||||
|
||||
impl TriviumStreamShortint {
|
||||
/// Contructor for TriviumStreamShortint: arguments are the secret key and the input vector, and
|
||||
/// a ServerKey reference. Outputs a TriviumStream object already initialized (1152 steps
|
||||
/// have been run before returning)
|
||||
/// Constructor for TriviumStreamShortint: arguments are the secret key and the input vector,
|
||||
/// and a ServerKey reference. Outputs a TriviumStream object already initialized (1152
|
||||
/// steps have been run before returning)
|
||||
pub fn new(
|
||||
key: [Ciphertext; 80],
|
||||
iv: [u64; 80],
|
||||
@@ -63,7 +63,7 @@ impl TriviumStreamShortint {
|
||||
}
|
||||
|
||||
/// Computes one turn of the stream, updating registers and outputting the new bit.
|
||||
pub fn next(&mut self) -> Ciphertext {
|
||||
pub fn next_ct(&mut self) -> Ciphertext {
|
||||
let [o, a, b, c] = self.get_output_and_values(0);
|
||||
|
||||
self.a.push(a);
|
||||
@@ -113,7 +113,7 @@ impl TriviumStreamShortint {
|
||||
.unchecked_add_assign(&mut new_a, a5);
|
||||
self.internal_server_key
|
||||
.unchecked_add_assign(&mut new_a, &temp_c);
|
||||
self.internal_server_key.clear_carry_assign(&mut new_a);
|
||||
self.internal_server_key.message_extract_assign(&mut new_a);
|
||||
new_a
|
||||
},
|
||||
|| {
|
||||
@@ -122,7 +122,7 @@ impl TriviumStreamShortint {
|
||||
.unchecked_add_assign(&mut new_b, b5);
|
||||
self.internal_server_key
|
||||
.unchecked_add_assign(&mut new_b, &temp_a);
|
||||
self.internal_server_key.clear_carry_assign(&mut new_b);
|
||||
self.internal_server_key.message_extract_assign(&mut new_b);
|
||||
new_b
|
||||
},
|
||||
)
|
||||
@@ -135,7 +135,7 @@ impl TriviumStreamShortint {
|
||||
.unchecked_add_assign(&mut new_c, c5);
|
||||
self.internal_server_key
|
||||
.unchecked_add_assign(&mut new_c, &temp_b);
|
||||
self.internal_server_key.clear_carry_assign(&mut new_c);
|
||||
self.internal_server_key.message_extract_assign(&mut new_c);
|
||||
new_c
|
||||
},
|
||||
|| {
|
||||
|
||||
@@ -108,12 +108,25 @@ def recursive_parse(directory, walk_subdirs=False, name_suffix="", compute_throu
|
||||
)
|
||||
)
|
||||
|
||||
# This is a special case where PBS are blasted as vector LWE ciphertext with
|
||||
# variable length to saturate the machine. To get the actual throughput we need to
|
||||
# multiply by the length of the vector.
|
||||
if "PBS_throughput" in test_name and "chunk" in test_name:
|
||||
try:
|
||||
multiplier = int(test_name.split("chunk")[0].split("_")[-1])
|
||||
except ValueError:
|
||||
parsing_failures.append((full_name,
|
||||
"failed to extract throughput multiplier"))
|
||||
continue
|
||||
else:
|
||||
multiplier = 1
|
||||
|
||||
if stat_name == "mean" and compute_throughput:
|
||||
test_suffix = "ops-per-sec"
|
||||
test_name_parts.append(test_suffix)
|
||||
result_values.append(
|
||||
_create_point(
|
||||
compute_ops_per_second(value),
|
||||
multiplier * compute_ops_per_second(value),
|
||||
"_".join(test_name_parts),
|
||||
bench_class,
|
||||
"throughput",
|
||||
@@ -129,7 +142,7 @@ def recursive_parse(directory, walk_subdirs=False, name_suffix="", compute_throu
|
||||
test_name_parts.append(test_suffix)
|
||||
result_values.append(
|
||||
_create_point(
|
||||
compute_ops_per_dollar(value, hardware_hourly_cost),
|
||||
multiplier * compute_ops_per_dollar(value, hardware_hourly_cost),
|
||||
"_".join(test_name_parts),
|
||||
bench_class,
|
||||
"throughput",
|
||||
|
||||
76
ci/lattice_estimator.sage
Executable file
76
ci/lattice_estimator.sage
Executable file
@@ -0,0 +1,76 @@
|
||||
"""
|
||||
lattice_estimator
|
||||
-----------------
|
||||
|
||||
Test cryptographic parameters set against several attacks to estimate their security level.
|
||||
"""
|
||||
import pathlib
|
||||
import sys
|
||||
sys.path.insert(1, 'lattice-estimator')
|
||||
from estimator import *
|
||||
|
||||
|
||||
model = RC.BDGL16
|
||||
|
||||
def check_security(filename):
|
||||
"""
|
||||
Run lattice estimator to determine if a parameters set is secure or not.
|
||||
|
||||
:param filename: name of the file containing parameters set
|
||||
|
||||
:return: :class:`list` of parameters to update
|
||||
"""
|
||||
filepath = pathlib.Path("ci", filename)
|
||||
load(filepath)
|
||||
print(f"Parsing parameters in {filepath}")
|
||||
|
||||
to_update = []
|
||||
|
||||
for param in all_params:
|
||||
if param.tag.startswith("TFHE_LIB_PARAMETERS"):
|
||||
# This third-party parameters set is known to be less secure, just skip the analysis.
|
||||
continue
|
||||
|
||||
print(f"\t{param.tag}...\t", end= "")
|
||||
|
||||
try:
|
||||
# The lattice estimator is not able to manage such large dimension.
|
||||
# If we have the security for smaller `n` then we have security for larger ones.
|
||||
if param.n > 16384:
|
||||
param = param.updated(n = 16384)
|
||||
|
||||
usvp_level = LWE.primal_usvp(param, red_cost_model = model)
|
||||
dual_level = LWE.dual_hybrid(param, red_cost_model = model)
|
||||
|
||||
estimator_level = log(min(usvp_level["rop"], dual_level["rop"]),2 )
|
||||
security_level = f"security level = {estimator_level} bits"
|
||||
if estimator_level < 127:
|
||||
print("FAIL\t({security_level})")
|
||||
reason = f"attained {security_level} target is 128 bits"
|
||||
to_update.append((param, reason))
|
||||
continue
|
||||
except Exception as err:
|
||||
print("FAIL")
|
||||
to_update.append((param, f"{repr(err)}"))
|
||||
else:
|
||||
print(f"OK\t({security_level})")
|
||||
|
||||
return to_update
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
params_to_update = []
|
||||
|
||||
for params_filename in ("boolean_parameters_lattice_estimator.sage",
|
||||
"shortint_classic_parameters_lattice_estimator.sage",
|
||||
"shortint_multi_bit_parameters_lattice_estimator.sage"):
|
||||
params_to_update.extend(check_security(params_filename))
|
||||
|
||||
if params_to_update:
|
||||
print("Some parameters need update")
|
||||
print("----------------------------")
|
||||
for param, reason in params_to_update:
|
||||
print(f"[{param.tag}] reason: {reason} (param)")
|
||||
sys.exit(int(1)) # Explicit conversion is needed to make this call work
|
||||
else:
|
||||
print("All parameters passed the security check")
|
||||
@@ -21,8 +21,15 @@ def main(args):
|
||||
|
||||
split = bench_function_id.split("::")
|
||||
(_, function_name, parameter_set, bits) = split
|
||||
(bits, _) = bits.split("_")
|
||||
bits = int(bits)
|
||||
|
||||
if "_scalar_" in bits:
|
||||
(bits, scalar) = bits.split("_bits_scalar_")
|
||||
bits = int(bits)
|
||||
scalar = int(scalar)
|
||||
else:
|
||||
(bits, _) = bits.split("_")
|
||||
bits = int(bits)
|
||||
scalar = None
|
||||
|
||||
estimate_mean_ms = estimate_data["mean"]["point_estimate"] / 1000000
|
||||
estimate_lower_bound_ms = (
|
||||
@@ -37,6 +44,7 @@ def main(args):
|
||||
function_name,
|
||||
parameter_set,
|
||||
bits,
|
||||
scalar,
|
||||
estimate_mean_ms,
|
||||
estimate_lower_bound_ms,
|
||||
estimate_upper_bound_ms,
|
||||
@@ -51,7 +59,7 @@ def main(args):
|
||||
|
||||
with open(output_file, "w", encoding="utf-8") as output:
|
||||
output.write(
|
||||
"function_name,parameter_set,bits,mean_ms,"
|
||||
"function_name,parameter_set,bits,scalar,mean_ms,"
|
||||
"confidence_interval_lower_bound_ms,confidence_interval_upper_bound_ms\n"
|
||||
)
|
||||
# Sort by func_name, bit width and then parameters
|
||||
@@ -62,12 +70,13 @@ def main(args):
|
||||
function_name,
|
||||
parameter_set,
|
||||
bits,
|
||||
scalar,
|
||||
estimate_mean_ms,
|
||||
estimate_lower_bound_ms,
|
||||
estimate_upper_bound_ms,
|
||||
) = dat
|
||||
output.write(
|
||||
f"{function_name},{parameter_set},{bits},{estimate_mean_ms},"
|
||||
f"{function_name},{parameter_set},{bits},{scalar},{estimate_mean_ms},"
|
||||
f"{estimate_lower_bound_ms},{estimate_upper_bound_ms}\n"
|
||||
)
|
||||
|
||||
|
||||
26
ci/slab.toml
26
ci/slab.toml
@@ -1,16 +1,16 @@
|
||||
[profile.cpu-big]
|
||||
region = "eu-west-3"
|
||||
image_id = "ami-0ab73f5bd11708a85"
|
||||
image_id = "ami-051942e4055555752"
|
||||
instance_type = "m6i.32xlarge"
|
||||
|
||||
[profile.cpu-small]
|
||||
region = "eu-west-3"
|
||||
image_id = "ami-0ab73f5bd11708a85"
|
||||
image_id = "ami-051942e4055555752"
|
||||
instance_type = "m6i.4xlarge"
|
||||
|
||||
[profile.bench]
|
||||
region = "eu-west-3"
|
||||
image_id = "ami-0ab73f5bd11708a85"
|
||||
image_id = "ami-051942e4055555752"
|
||||
instance_type = "m6i.metal"
|
||||
|
||||
[command.cpu_test]
|
||||
@@ -38,6 +38,11 @@ workflow = "aws_tfhe_fast_tests.yml"
|
||||
profile = "cpu-big"
|
||||
check_run_name = "CPU AWS Fast Tests"
|
||||
|
||||
[command.integer_full_bench]
|
||||
workflow = "integer_full_benchmark.yml"
|
||||
profile = "bench"
|
||||
check_run_name = "Integer CPU AWS Benchmarks Full Suite"
|
||||
|
||||
[command.integer_bench]
|
||||
workflow = "integer_benchmark.yml"
|
||||
profile = "bench"
|
||||
@@ -48,6 +53,11 @@ workflow = "integer_multi_bit_benchmark.yml"
|
||||
profile = "bench"
|
||||
check_run_name = "Integer multi bit CPU AWS Benchmarks"
|
||||
|
||||
[command.shortint_full_bench]
|
||||
workflow = "shortint_full_benchmark.yml"
|
||||
profile = "bench"
|
||||
check_run_name = "Shortint CPU AWS Benchmarks Full Suite"
|
||||
|
||||
[command.shortint_bench]
|
||||
workflow = "shortint_benchmark.yml"
|
||||
profile = "bench"
|
||||
@@ -67,3 +77,13 @@ check_run_name = "PBS CPU AWS Benchmarks"
|
||||
workflow = "wasm_client_benchmark.yml"
|
||||
profile = "cpu-small"
|
||||
check_run_name = "WASM Client AWS Benchmarks"
|
||||
|
||||
[command.csprng_randomness_testing]
|
||||
workflow = "csprng_randomness_testing.yml"
|
||||
profile = "cpu-small"
|
||||
check_run_name = "CSPRNG randomness testing"
|
||||
|
||||
[command.code_coverage]
|
||||
workflow = "code_coverage.yml"
|
||||
profile = "cpu-small"
|
||||
check_run_name = "Code coverage"
|
||||
|
||||
4
codecov.yml
Normal file
4
codecov.yml
Normal file
@@ -0,0 +1,4 @@
|
||||
coverage:
|
||||
status:
|
||||
# Disable patch checks in GitHub until all tfhe-rs layers have coverage implemented.
|
||||
patch: false
|
||||
53
concrete-csprng/Cargo.toml
Normal file
53
concrete-csprng/Cargo.toml
Normal file
@@ -0,0 +1,53 @@
|
||||
[package]
|
||||
name = "concrete-csprng"
|
||||
version = "0.4.0"
|
||||
edition = "2021"
|
||||
license = "BSD-3-Clause-Clear"
|
||||
description = "Cryptographically Secure PRNG used in the TFHE-rs library."
|
||||
homepage = "https://zama.ai/"
|
||||
documentation = "https://docs.zama.ai/tfhe-rs"
|
||||
repository = "https://github.com/zama-ai/tfhe-rs"
|
||||
readme = "README.md"
|
||||
keywords = ["fully", "homomorphic", "encryption", "fhe", "cryptography"]
|
||||
rust-version = "1.72"
|
||||
|
||||
[dependencies]
|
||||
aes = "0.8.2"
|
||||
rayon = { version = "1.5.0", optional = true }
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
libc = "0.2.133"
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.8.3"
|
||||
criterion = "0.5.1"
|
||||
clap = "=4.4.4"
|
||||
|
||||
[features]
|
||||
parallel = ["rayon"]
|
||||
seeder_x86_64_rdseed = []
|
||||
seeder_unix = []
|
||||
generator_x86_64_aesni = []
|
||||
generator_fallback = []
|
||||
generator_aarch64_aes = []
|
||||
|
||||
x86_64 = [
|
||||
"parallel",
|
||||
"seeder_x86_64_rdseed",
|
||||
"generator_x86_64_aesni",
|
||||
"generator_fallback",
|
||||
]
|
||||
x86_64-unix = ["x86_64", "seeder_unix"]
|
||||
aarch64 = ["parallel", "generator_aarch64_aes", "generator_fallback"]
|
||||
aarch64-unix = ["aarch64", "seeder_unix"]
|
||||
|
||||
[[bench]]
|
||||
name = "benchmark"
|
||||
path = "benches/benchmark.rs"
|
||||
harness = false
|
||||
required-features = ["seeder_x86_64_rdseed", "generator_x86_64_aesni"]
|
||||
|
||||
[[example]]
|
||||
name = "generate"
|
||||
path = "examples/generate.rs"
|
||||
required-features = ["seeder_unix", "generator_fallback"]
|
||||
28
concrete-csprng/LICENSE
Normal file
28
concrete-csprng/LICENSE
Normal file
@@ -0,0 +1,28 @@
|
||||
BSD 3-Clause Clear License
|
||||
|
||||
Copyright © 2023 ZAMA.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or other
|
||||
materials provided with the distribution.
|
||||
|
||||
3. Neither the name of ZAMA nor the names of its contributors may be used to endorse
|
||||
or promote products derived from this software without specific prior written permission.
|
||||
|
||||
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
|
||||
THIS SOFTWARE IS PROVIDED BY THE ZAMA AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
|
||||
ZAMA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
||||
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
23
concrete-csprng/README.md
Normal file
23
concrete-csprng/README.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# Concrete CSPRNG
|
||||
|
||||
This crate contains a fast *Cryptographically Secure Pseudoramdon Number Generator*, used in the
|
||||
['concrete-core'](https://crates.io/crates/concrete-core) library, you can find it [here](../concrete-core/) in this repo.
|
||||
|
||||
The implementation is based on the AES blockcipher used in CTR mode, as described in the ISO/IEC
|
||||
18033-4 standard.
|
||||
|
||||
Two implementations are available, an accelerated one on x86_64 CPUs with the `aes` feature and the `sse2` feature, and a pure software one that can be used on other platforms.
|
||||
|
||||
The crate also makes two seeders available, one needing the x86_64 feature `rdseed` and another one based on the Unix random device `/dev/random` the latter requires the user to provide a secret.
|
||||
|
||||
## Running the benchmarks
|
||||
|
||||
To execute the benchmarks on an x86_64 platform:
|
||||
```shell
|
||||
RUSTFLAGS="-Ctarget-cpu=native" cargo bench --features=seeder_x86_64_rdseed,generator_x86_64_aesni
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
This software is distributed under the BSD-3-Clause-Clear license. If you have any questions,
|
||||
please contact us at `hello@zama.ai`.
|
||||
54
concrete-csprng/benches/benchmark.rs
Normal file
54
concrete-csprng/benches/benchmark.rs
Normal file
@@ -0,0 +1,54 @@
|
||||
use concrete_csprng::generators::{
|
||||
AesniRandomGenerator, BytesPerChild, ChildrenCount, RandomGenerator,
|
||||
};
|
||||
use concrete_csprng::seeders::{RdseedSeeder, Seeder};
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
|
||||
// The number of bytes to generate during one benchmark iteration.
|
||||
const N_GEN: usize = 1_000_000;
|
||||
|
||||
fn parent_generate(c: &mut Criterion) {
|
||||
let mut seeder = RdseedSeeder;
|
||||
let mut generator = AesniRandomGenerator::new(seeder.seed());
|
||||
c.bench_function("parent_generate", |b| {
|
||||
b.iter(|| {
|
||||
(0..N_GEN).for_each(|_| {
|
||||
generator.next();
|
||||
})
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn child_generate(c: &mut Criterion) {
|
||||
let mut seeder = RdseedSeeder;
|
||||
let mut generator = AesniRandomGenerator::new(seeder.seed());
|
||||
let mut generator = generator
|
||||
.try_fork(ChildrenCount(1), BytesPerChild(N_GEN * 10_000))
|
||||
.unwrap()
|
||||
.next()
|
||||
.unwrap();
|
||||
c.bench_function("child_generate", |b| {
|
||||
b.iter(|| {
|
||||
(0..N_GEN).for_each(|_| {
|
||||
generator.next();
|
||||
})
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn fork(c: &mut Criterion) {
|
||||
let mut seeder = RdseedSeeder;
|
||||
let mut generator = AesniRandomGenerator::new(seeder.seed());
|
||||
c.bench_function("fork", |b| {
|
||||
b.iter(|| {
|
||||
black_box(
|
||||
generator
|
||||
.try_fork(ChildrenCount(2048), BytesPerChild(2048))
|
||||
.unwrap(),
|
||||
)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, parent_generate, child_generate, fork);
|
||||
criterion_main!(benches);
|
||||
112
concrete-csprng/build.rs
Normal file
112
concrete-csprng/build.rs
Normal file
@@ -0,0 +1,112 @@
|
||||
// To have clear error messages during compilation about why some piece of code may not be available
|
||||
// we decided to check the features compatibility with the target configuration in this script.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
|
||||
// See https://doc.rust-lang.org/reference/conditional-compilation.html#target_arch for various
|
||||
// compilation configuration
|
||||
|
||||
// Can be easily extended if needed
|
||||
pub struct FeatureRequirement {
|
||||
pub feature_name: &'static str,
|
||||
// target_arch requirement
|
||||
pub feature_req_target_arch: Option<&'static str>,
|
||||
// target_family requirement
|
||||
pub feature_req_target_family: Option<&'static str>,
|
||||
}
|
||||
|
||||
// We implement a version of default that is const which is not possible through the Default trait
|
||||
impl FeatureRequirement {
|
||||
// As we cannot use cfg!(feature = "feature_name") with something else than a literal, we need
|
||||
// a reference to the HashMap we populate with the enabled features
|
||||
fn is_activated(&self, build_activated_features: &HashMap<&'static str, bool>) -> bool {
|
||||
*build_activated_features.get(self.feature_name).unwrap()
|
||||
}
|
||||
|
||||
// panics if the requirements are not met
|
||||
fn check_requirements(&self) {
|
||||
let target_arch = get_target_arch_cfg();
|
||||
if let Some(feature_req_target_arch) = self.feature_req_target_arch {
|
||||
if feature_req_target_arch != target_arch {
|
||||
panic!(
|
||||
"Feature `{}` requires target_arch `{}`, current cfg: `{}`",
|
||||
self.feature_name, feature_req_target_arch, target_arch
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
let target_family = get_target_family_cfg();
|
||||
if let Some(feature_req_target_family) = self.feature_req_target_family {
|
||||
if feature_req_target_family != target_family {
|
||||
panic!(
|
||||
"Feature `{}` requires target_family `{}`, current cfg: `{}`",
|
||||
self.feature_name, feature_req_target_family, target_family
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// const vecs are not yet a thing so use a fixed size array (update the array size when adding
|
||||
// requirements)
|
||||
static FEATURE_REQUIREMENTS: [FeatureRequirement; 4] = [
|
||||
FeatureRequirement {
|
||||
feature_name: "seeder_x86_64_rdseed",
|
||||
feature_req_target_arch: Some("x86_64"),
|
||||
feature_req_target_family: None,
|
||||
},
|
||||
FeatureRequirement {
|
||||
feature_name: "generator_x86_64_aesni",
|
||||
feature_req_target_arch: Some("x86_64"),
|
||||
feature_req_target_family: None,
|
||||
},
|
||||
FeatureRequirement {
|
||||
feature_name: "seeder_unix",
|
||||
feature_req_target_arch: None,
|
||||
feature_req_target_family: Some("unix"),
|
||||
},
|
||||
FeatureRequirement {
|
||||
feature_name: "generator_aarch64_aes",
|
||||
feature_req_target_arch: Some("aarch64"),
|
||||
feature_req_target_family: None,
|
||||
},
|
||||
];
|
||||
|
||||
// For a "feature_name" feature_cfg!("feature_name") expands to
|
||||
// ("feature_name", cfg!(feature = "feature_name"))
|
||||
macro_rules! feature_cfg {
|
||||
($feat_name:literal) => {
|
||||
($feat_name, cfg!(feature = $feat_name))
|
||||
};
|
||||
}
|
||||
|
||||
// Static HashMap would require an additional crate (phf or lazy static e.g.), so we just write a
|
||||
// function that returns the HashMap we are interested in
|
||||
fn get_feature_enabled_status() -> HashMap<&'static str, bool> {
|
||||
HashMap::from([
|
||||
feature_cfg!("seeder_x86_64_rdseed"),
|
||||
feature_cfg!("generator_x86_64_aesni"),
|
||||
feature_cfg!("seeder_unix"),
|
||||
feature_cfg!("generator_aarch64_aes"),
|
||||
])
|
||||
}
|
||||
|
||||
// See https://stackoverflow.com/a/43435335/18088947 for the inspiration of this code
|
||||
fn get_target_arch_cfg() -> String {
|
||||
env::var("CARGO_CFG_TARGET_ARCH").expect("CARGO_CFG_TARGET_ARCH is not set")
|
||||
}
|
||||
|
||||
fn get_target_family_cfg() -> String {
|
||||
env::var("CARGO_CFG_TARGET_FAMILY").expect("CARGO_CFG_TARGET_FAMILY is not set")
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let feature_enabled_status = get_feature_enabled_status();
|
||||
|
||||
// This will panic if some requirements for a feature are not met
|
||||
FEATURE_REQUIREMENTS
|
||||
.iter()
|
||||
.filter(|&req| FeatureRequirement::is_activated(req, &feature_enabled_status))
|
||||
.for_each(FeatureRequirement::check_requirements);
|
||||
}
|
||||
113
concrete-csprng/examples/generate.rs
Normal file
113
concrete-csprng/examples/generate.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
//! This program uses the concrete csprng to generate an infinite stream of random bytes on
|
||||
//! the program stdout. It can also generate a fixed number of bytes by passing a value along the
|
||||
//! optional argument `--bytes_total`. For testing purpose.
|
||||
use clap::{value_parser, Arg, Command};
|
||||
#[cfg(feature = "generator_x86_64_aesni")]
|
||||
use concrete_csprng::generators::AesniRandomGenerator as ActivatedRandomGenerator;
|
||||
#[cfg(feature = "generator_aarch64_aes")]
|
||||
use concrete_csprng::generators::NeonAesRandomGenerator as ActivatedRandomGenerator;
|
||||
#[cfg(all(
|
||||
not(feature = "generator_x86_64_aesni"),
|
||||
not(feature = "generator_aarch64_aes"),
|
||||
feature = "generator_fallback"
|
||||
))]
|
||||
use concrete_csprng::generators::SoftwareRandomGenerator as ActivatedRandomGenerator;
|
||||
|
||||
use concrete_csprng::generators::RandomGenerator;
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
use concrete_csprng::seeders::AppleSecureEnclaveSeeder as ActivatedSeeder;
|
||||
#[cfg(all(not(target_os = "macos"), feature = "seeder_x86_64_rdseed"))]
|
||||
use concrete_csprng::seeders::RdseedSeeder as ActivatedSeeder;
|
||||
#[cfg(all(
|
||||
not(target_os = "macos"),
|
||||
not(feature = "seeder_x86_64_rdseed"),
|
||||
feature = "seeder_unix"
|
||||
))]
|
||||
use concrete_csprng::seeders::UnixSeeder as ActivatedSeeder;
|
||||
|
||||
use concrete_csprng::seeders::Seeder;
|
||||
|
||||
use std::io::prelude::*;
|
||||
use std::io::{stdout, StdoutLock};
|
||||
|
||||
fn write_bytes(
|
||||
buffer: &mut [u8],
|
||||
generator: &mut ActivatedRandomGenerator,
|
||||
stdout: &mut StdoutLock<'_>,
|
||||
) -> std::io::Result<()> {
|
||||
buffer.iter_mut().zip(generator).for_each(|(b, g)| *b = g);
|
||||
stdout.write_all(buffer)
|
||||
}
|
||||
|
||||
fn infinite_bytes_generation(
|
||||
buffer: &mut [u8],
|
||||
generator: &mut ActivatedRandomGenerator,
|
||||
stdout: &mut StdoutLock<'_>,
|
||||
) {
|
||||
while write_bytes(buffer, generator, stdout).is_ok() {}
|
||||
}
|
||||
|
||||
fn bytes_generation(
|
||||
bytes_total: usize,
|
||||
buffer: &mut [u8],
|
||||
generator: &mut ActivatedRandomGenerator,
|
||||
stdout: &mut StdoutLock<'_>,
|
||||
) {
|
||||
let quotient = bytes_total / buffer.len();
|
||||
let remaining = bytes_total % buffer.len();
|
||||
|
||||
for _ in 0..quotient {
|
||||
write_bytes(buffer, generator, stdout).unwrap();
|
||||
}
|
||||
|
||||
write_bytes(&mut buffer[0..remaining], generator, stdout).unwrap()
|
||||
}
|
||||
|
||||
pub fn main() {
|
||||
let matches = Command::new(
|
||||
"Generate a stream of random numbers, specify no flags for infinite generation",
|
||||
)
|
||||
.arg(
|
||||
Arg::new("bytes_total")
|
||||
.short('b')
|
||||
.long("bytes_total")
|
||||
.value_parser(value_parser!(usize))
|
||||
.help("Total number of bytes that has to be generated"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
// Ugly hack to be able to use UnixSeeder
|
||||
#[cfg(all(
|
||||
not(target_os = "macos"),
|
||||
not(feature = "seeder_x86_64_rdseed"),
|
||||
feature = "seeder_unix"
|
||||
))]
|
||||
let new_seeder = || ActivatedSeeder::new(0);
|
||||
#[cfg(not(all(
|
||||
not(target_os = "macos"),
|
||||
not(feature = "seeder_x86_64_rdseed"),
|
||||
feature = "seeder_unix"
|
||||
)))]
|
||||
let new_seeder = || ActivatedSeeder;
|
||||
|
||||
let mut seeder = new_seeder();
|
||||
let seed = seeder.seed();
|
||||
// Don't print on std out
|
||||
eprintln!("seed={seed:?}");
|
||||
let mut generator = ActivatedRandomGenerator::new(seed);
|
||||
let stdout = stdout();
|
||||
let mut buffer = [0u8; 16];
|
||||
|
||||
// lock stdout as there is a single thread running
|
||||
let mut stdout = stdout.lock();
|
||||
|
||||
match matches.get_one::<usize>("bytes_total") {
|
||||
Some(&total) => {
|
||||
bytes_generation(total, &mut buffer, &mut generator, &mut stdout);
|
||||
}
|
||||
None => {
|
||||
infinite_bytes_generation(&mut buffer, &mut generator, &mut stdout);
|
||||
}
|
||||
};
|
||||
}
|
||||
20
concrete-csprng/src/generators/aes_ctr/block_cipher.rs
Normal file
20
concrete-csprng/src/generators/aes_ctr/block_cipher.rs
Normal file
@@ -0,0 +1,20 @@
|
||||
use crate::generators::aes_ctr::index::AesIndex;
|
||||
use crate::generators::aes_ctr::BYTES_PER_BATCH;
|
||||
|
||||
/// Represents a key used in the AES block cipher.
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct AesKey(pub u128);
|
||||
|
||||
/// A trait for AES block ciphers.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// The block cipher is used in a batched manner (to reduce amortized cost on special hardware).
|
||||
/// For this reason we only expose a `generate_batch` method.
|
||||
pub trait AesBlockCipher: Clone + Send + Sync {
|
||||
/// Instantiate a new generator from a secret key.
|
||||
fn new(key: AesKey) -> Self;
|
||||
/// Generates the batch corresponding to the given index.
|
||||
fn generate_batch(&mut self, index: AesIndex) -> [u8; BYTES_PER_BATCH];
|
||||
}
|
||||
379
concrete-csprng/src/generators/aes_ctr/generic.rs
Normal file
379
concrete-csprng/src/generators/aes_ctr/generic.rs
Normal file
@@ -0,0 +1,379 @@
|
||||
use crate::generators::aes_ctr::block_cipher::{AesBlockCipher, AesKey};
|
||||
use crate::generators::aes_ctr::index::TableIndex;
|
||||
use crate::generators::aes_ctr::states::{BufferPointer, ShiftAction, State};
|
||||
use crate::generators::aes_ctr::BYTES_PER_BATCH;
|
||||
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError};
|
||||
|
||||
// Usually, to work with iterators and parallel iterators, we would use opaque types such as
|
||||
// `impl Iterator<..>`. Unfortunately, it is not yet possible to return existential types in
|
||||
// traits, which we would need for `RandomGenerator`. For this reason, we have to use the
|
||||
// full type name where needed. Hence the following trait aliases definition:
|
||||
|
||||
/// A type alias for the children iterator closure type.
|
||||
pub type ChildrenClosure<BlockCipher> =
|
||||
fn((usize, (Box<BlockCipher>, TableIndex, BytesPerChild))) -> AesCtrGenerator<BlockCipher>;
|
||||
|
||||
/// A type alias for the children iterator type.
|
||||
pub type ChildrenIterator<BlockCipher> = std::iter::Map<
|
||||
std::iter::Zip<
|
||||
std::ops::Range<usize>,
|
||||
std::iter::Repeat<(Box<BlockCipher>, TableIndex, BytesPerChild)>,
|
||||
>,
|
||||
ChildrenClosure<BlockCipher>,
|
||||
>;
|
||||
|
||||
/// A type implementing the `RandomGenerator` api using the AES block cipher in counter mode.
|
||||
#[derive(Clone)]
|
||||
pub struct AesCtrGenerator<BlockCipher: AesBlockCipher> {
|
||||
// The block cipher used in the background
|
||||
pub(crate) block_cipher: Box<BlockCipher>,
|
||||
// The state corresponding to the latest outputted byte.
|
||||
pub(crate) state: State,
|
||||
// The last legal index. This makes bound check faster.
|
||||
pub(crate) last: TableIndex,
|
||||
// The buffer containing the current batch of aes calls.
|
||||
pub(crate) buffer: [u8; BYTES_PER_BATCH],
|
||||
}
|
||||
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
impl<BlockCipher: AesBlockCipher> AesCtrGenerator<BlockCipher> {
|
||||
/// Generates a new csprng.
|
||||
///
|
||||
/// Note :
|
||||
/// ------
|
||||
///
|
||||
/// The `start_index` given as input, points to the first byte that will be outputted by the
|
||||
/// generator. If not given, this one is automatically set to the second table index. The
|
||||
/// first table index is not used to prevent an edge case from happening: since `state` is
|
||||
/// supposed to contain the index of the previous byte, the initial value must be decremented.
|
||||
/// Using the second value prevents wrapping to the max index, which would make the bound
|
||||
/// checking fail.
|
||||
///
|
||||
/// The `bound_index` given as input, points to the first byte that can __not__ be legally
|
||||
/// outputted by the generator. If not given, the bound is automatically set to the last
|
||||
/// table index.
|
||||
pub fn new(
|
||||
key: AesKey,
|
||||
start_index: Option<TableIndex>,
|
||||
bound_index: Option<TableIndex>,
|
||||
) -> AesCtrGenerator<BlockCipher> {
|
||||
AesCtrGenerator::from_block_cipher(
|
||||
Box::new(BlockCipher::new(key)),
|
||||
start_index.unwrap_or(TableIndex::SECOND),
|
||||
bound_index.unwrap_or(TableIndex::LAST),
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates a csprng from an existing block cipher.
|
||||
pub fn from_block_cipher(
|
||||
block_cipher: Box<BlockCipher>,
|
||||
start_index: TableIndex,
|
||||
bound_index: TableIndex,
|
||||
) -> AesCtrGenerator<BlockCipher> {
|
||||
assert!(start_index < bound_index);
|
||||
let last = bound_index.decremented();
|
||||
let buffer = [0u8; BYTES_PER_BATCH];
|
||||
let state = State::new(start_index);
|
||||
AesCtrGenerator {
|
||||
block_cipher,
|
||||
state,
|
||||
last,
|
||||
buffer,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the table index related to the previous random byte.
|
||||
pub fn table_index(&self) -> TableIndex {
|
||||
self.state.table_index()
|
||||
}
|
||||
|
||||
/// Returns the bound of the generator if any.
|
||||
///
|
||||
/// The bound is the table index of the first byte that can not be outputted by the generator.
|
||||
pub fn get_bound(&self) -> TableIndex {
|
||||
self.last.incremented()
|
||||
}
|
||||
|
||||
/// Returns whether the generator is bounded or not.
|
||||
pub fn is_bounded(&self) -> bool {
|
||||
self.get_bound() != TableIndex::LAST
|
||||
}
|
||||
|
||||
/// Computes the number of bytes that can still be outputted by the generator.
|
||||
///
|
||||
/// Note :
|
||||
/// ------
|
||||
///
|
||||
/// Note that `ByteCount` uses the `u128` datatype to store the byte count. Unfortunately, the
|
||||
/// number of remaining bytes is in ⟦0;2¹³² -1⟧. When the number is greater than 2¹²⁸ - 1,
|
||||
/// we saturate the count at 2¹²⁸ - 1.
|
||||
pub fn remaining_bytes(&self) -> ByteCount {
|
||||
TableIndex::distance(&self.last, &self.state.table_index()).unwrap()
|
||||
}
|
||||
|
||||
/// Outputs the next random byte.
|
||||
pub fn generate_next(&mut self) -> u8 {
|
||||
self.next()
|
||||
.expect("Tried to generate a byte after the bound.")
|
||||
}
|
||||
|
||||
/// Tries to fork the current generator into `n_child` generators each able to output
|
||||
/// `child_bytes` random bytes.
|
||||
pub fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<ChildrenIterator<BlockCipher>, ForkError> {
|
||||
if n_children.0 == 0 {
|
||||
return Err(ForkError::ZeroChildrenCount);
|
||||
}
|
||||
if n_bytes.0 == 0 {
|
||||
return Err(ForkError::ZeroBytesPerChild);
|
||||
}
|
||||
if !self.is_fork_in_bound(n_children, n_bytes) {
|
||||
return Err(ForkError::ForkTooLarge);
|
||||
}
|
||||
|
||||
// The state currently stored in the parent generator points to the table index of the last
|
||||
// generated byte. The first index to be generated is the next one:
|
||||
let first_index = self.state.table_index().incremented();
|
||||
let output = (0..n_children.0)
|
||||
.zip(std::iter::repeat((
|
||||
self.block_cipher.clone(),
|
||||
first_index,
|
||||
n_bytes,
|
||||
)))
|
||||
.map(
|
||||
// This map is a little weird because we need to cast the closure to a fn pointer
|
||||
// that matches the signature of `ChildrenIterator<BlockCipher>`.
|
||||
// Unfortunately, the compiler does not manage to coerce this one
|
||||
// automatically.
|
||||
(|(i, (block_cipher, first_index, n_bytes))| {
|
||||
// The first index to be outputted by the child is the `first_index` shifted by
|
||||
// the proper amount of `child_bytes`.
|
||||
let child_first_index = first_index.increased(n_bytes.0 * i);
|
||||
// The bound of the child is the first index of its next sibling.
|
||||
let child_bound_index = first_index.increased(n_bytes.0 * (i + 1));
|
||||
AesCtrGenerator::from_block_cipher(
|
||||
block_cipher,
|
||||
child_first_index,
|
||||
child_bound_index,
|
||||
)
|
||||
}) as ChildrenClosure<BlockCipher>,
|
||||
);
|
||||
// The parent next index is the bound of the last child.
|
||||
let next_index = first_index.increased(n_bytes.0 * n_children.0);
|
||||
self.state = State::new(next_index);
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
pub(crate) fn is_fork_in_bound(
|
||||
&self,
|
||||
n_child: ChildrenCount,
|
||||
child_bytes: BytesPerChild,
|
||||
) -> bool {
|
||||
let mut end = self.state.table_index();
|
||||
end.increase(n_child.0 * child_bytes.0);
|
||||
end <= self.last
|
||||
}
|
||||
}
|
||||
|
||||
impl<BlockCipher: AesBlockCipher> Iterator for AesCtrGenerator<BlockCipher> {
|
||||
type Item = u8;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.state.table_index() >= self.last {
|
||||
None
|
||||
} else {
|
||||
match self.state.increment() {
|
||||
ShiftAction::OutputByte(BufferPointer(ptr)) => Some(self.buffer[ptr]),
|
||||
ShiftAction::RefreshBatchAndOutputByte(aes_index, BufferPointer(ptr)) => {
|
||||
self.buffer = self.block_cipher.generate_batch(aes_index);
|
||||
Some(self.buffer[ptr])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod aes_ctr_generic_test {
|
||||
#![allow(unused)] // to please clippy when tests are not activated
|
||||
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::index::{AesIndex, ByteIndex};
|
||||
use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
const REPEATS: usize = 1_000_000;
|
||||
|
||||
pub fn any_table_index() -> impl Iterator<Item = TableIndex> {
|
||||
std::iter::repeat_with(|| {
|
||||
TableIndex::new(
|
||||
AesIndex(thread_rng().gen()),
|
||||
ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn any_usize() -> impl Iterator<Item = usize> {
|
||||
std::iter::repeat_with(|| thread_rng().gen())
|
||||
}
|
||||
|
||||
pub fn any_children_count() -> impl Iterator<Item = ChildrenCount> {
|
||||
std::iter::repeat_with(|| ChildrenCount(thread_rng().gen::<usize>() % 2048 + 1))
|
||||
}
|
||||
|
||||
pub fn any_bytes_per_child() -> impl Iterator<Item = BytesPerChild> {
|
||||
std::iter::repeat_with(|| BytesPerChild(thread_rng().gen::<usize>() % 2048 + 1))
|
||||
}
|
||||
|
||||
pub fn any_key() -> impl Iterator<Item = AesKey> {
|
||||
std::iter::repeat_with(|| AesKey(thread_rng().gen()))
|
||||
}
|
||||
|
||||
/// Output a valid fork:
|
||||
/// a table index t,
|
||||
/// a number of children nc,
|
||||
/// a number of bytes per children nb
|
||||
/// and a positive integer i such that:
|
||||
/// increase(t, nc*nb+i) < MAX with MAX the largest table index.
|
||||
///
|
||||
/// Put differently, if we initialize a parent generator at t and fork it with (nc, nb), our
|
||||
/// parent generator current index gets shifted to an index, distant of at least i bytes of
|
||||
/// the max index.
|
||||
pub fn any_valid_fork(
|
||||
) -> impl Iterator<Item = (TableIndex, ChildrenCount, BytesPerChild, usize)> {
|
||||
any_table_index()
|
||||
.zip(any_children_count())
|
||||
.zip(any_bytes_per_child())
|
||||
.zip(any_usize())
|
||||
.map(|(((t, nc), nb), i)| (t, nc, nb, i))
|
||||
.filter(|(t, nc, nb, i)| {
|
||||
TableIndex::distance(&TableIndex::LAST, t).unwrap().0 > (nc.0 * nb.0 + i) as u128
|
||||
})
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the table index of the first child is the same as the table index of
|
||||
/// the parent before the fork.
|
||||
pub fn prop_fork_first_state_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
let first_child = forked_generator.try_fork(nc, nb).unwrap().next().unwrap();
|
||||
assert_eq!(original_generator.table_index(), first_child.table_index());
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the table index of the first byte outputted by the parent after the
|
||||
/// fork, is the bound of the last child of the fork.
|
||||
pub fn prop_fork_last_bound_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let mut parent_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let last_child = parent_generator.try_fork(nc, nb).unwrap().last().unwrap();
|
||||
assert_eq!(
|
||||
parent_generator.table_index().incremented(),
|
||||
last_child.get_bound()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the bound of the parent does not change.
|
||||
pub fn prop_fork_parent_bound_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
forked_generator.try_fork(nc, nb).unwrap().last().unwrap();
|
||||
assert_eq!(original_generator.get_bound(), forked_generator.get_bound());
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the parent table index is increased of the number of children
|
||||
/// multiplied by the number of bytes per child.
|
||||
pub fn prop_fork_parent_state_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
forked_generator.try_fork(nc, nb).unwrap().last().unwrap();
|
||||
assert_eq!(
|
||||
forked_generator.table_index(),
|
||||
// Decrement accounts for the fact that the table index stored is the previous one
|
||||
t.increased(nc.0 * nb.0).decremented()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the bytes outputted by the children in the fork order form the same
|
||||
/// sequence the parent would have had yielded no fork had happened.
|
||||
pub fn prop_fork<G: AesBlockCipher>() {
|
||||
for _ in 0..1000 {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let bytes_to_go = nc.0 * nb.0;
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
let initial_output: Vec<u8> = original_generator.take(bytes_to_go).collect();
|
||||
let forked_output: Vec<u8> = forked_generator
|
||||
.try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.flat_map(|child| child.collect::<Vec<_>>())
|
||||
.collect();
|
||||
assert_eq!(initial_output, forked_output);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, all children got a number of remaining bytes equals to the number of
|
||||
/// bytes per child given as fork input.
|
||||
pub fn prop_fork_children_remaining_bytes<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let mut generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
assert!(generator
|
||||
.try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.all(|c| c.remaining_bytes().0 == nb.0 as u128));
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the number of remaining bybtes of the parent is reduced by the number
|
||||
/// of children multiplied by the number of bytes per child.
|
||||
pub fn prop_fork_parent_remaining_bytes<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let bytes_to_go = nc.0 * nb.0;
|
||||
let mut generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let before_remaining_bytes = generator.remaining_bytes();
|
||||
let _ = generator.try_fork(nc, nb).unwrap();
|
||||
let after_remaining_bytes = generator.remaining_bytes();
|
||||
assert_eq!(
|
||||
before_remaining_bytes.0 - after_remaining_bytes.0,
|
||||
bytes_to_go as u128
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
389
concrete-csprng/src/generators/aes_ctr/index.rs
Normal file
389
concrete-csprng/src/generators/aes_ctr/index.rs
Normal file
@@ -0,0 +1,389 @@
|
||||
use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
|
||||
use crate::generators::ByteCount;
|
||||
use std::cmp::Ordering;
|
||||
|
||||
/// A structure representing an [aes index](#coarse-grained-pseudo-random-table-lookup).
|
||||
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
|
||||
pub struct AesIndex(pub u128);
|
||||
|
||||
/// A structure representing a [byte index](#fine-grained-pseudo-random-table-lookup).
|
||||
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
|
||||
pub struct ByteIndex(pub usize);
|
||||
|
||||
/// A structure representing a [table index](#fine-grained-pseudo-random-table-lookup)
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct TableIndex {
|
||||
pub(crate) aes_index: AesIndex,
|
||||
pub(crate) byte_index: ByteIndex,
|
||||
}
|
||||
|
||||
impl TableIndex {
|
||||
/// The first table index.
|
||||
pub const FIRST: TableIndex = TableIndex {
|
||||
aes_index: AesIndex(0),
|
||||
byte_index: ByteIndex(0),
|
||||
};
|
||||
|
||||
/// The second table index.
|
||||
pub const SECOND: TableIndex = TableIndex {
|
||||
aes_index: AesIndex(0),
|
||||
byte_index: ByteIndex(1),
|
||||
};
|
||||
|
||||
/// The last table index.
|
||||
pub const LAST: TableIndex = TableIndex {
|
||||
aes_index: AesIndex(u128::MAX),
|
||||
byte_index: ByteIndex(BYTES_PER_AES_CALL - 1),
|
||||
};
|
||||
|
||||
/// Creates a table index from an aes index and a byte index.
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
pub fn new(aes_index: AesIndex, byte_index: ByteIndex) -> Self {
|
||||
assert!(byte_index.0 < BYTES_PER_AES_CALL);
|
||||
TableIndex {
|
||||
aes_index,
|
||||
byte_index,
|
||||
}
|
||||
}
|
||||
|
||||
/// Shifts the table index forward of `shift` bytes.
|
||||
pub fn increase(&mut self, shift: usize) {
|
||||
// Compute full shifts to avoid overflows
|
||||
let full_aes_shifts = shift / BYTES_PER_AES_CALL;
|
||||
let shift_remainder = shift % BYTES_PER_AES_CALL;
|
||||
|
||||
// Get the additional shift if any
|
||||
let new_byte_index = self.byte_index.0 + shift_remainder;
|
||||
let full_aes_shifts = full_aes_shifts + new_byte_index / BYTES_PER_AES_CALL;
|
||||
|
||||
// Store the reaminder in the byte index
|
||||
self.byte_index.0 = new_byte_index % BYTES_PER_AES_CALL;
|
||||
|
||||
self.aes_index.0 = self.aes_index.0.wrapping_add(full_aes_shifts as u128);
|
||||
}
|
||||
|
||||
/// Shifts the table index backward of `shift` bytes.
|
||||
pub fn decrease(&mut self, shift: usize) {
|
||||
let remainder = shift % BYTES_PER_AES_CALL;
|
||||
if remainder <= self.byte_index.0 {
|
||||
self.aes_index.0 = self
|
||||
.aes_index
|
||||
.0
|
||||
.wrapping_sub((shift / BYTES_PER_AES_CALL) as u128);
|
||||
self.byte_index.0 -= remainder;
|
||||
} else {
|
||||
self.aes_index.0 = self
|
||||
.aes_index
|
||||
.0
|
||||
.wrapping_sub((shift / BYTES_PER_AES_CALL) as u128 + 1);
|
||||
self.byte_index.0 += BYTES_PER_AES_CALL - remainder;
|
||||
}
|
||||
}
|
||||
|
||||
/// Shifts the table index forward of one byte.
|
||||
pub fn increment(&mut self) {
|
||||
self.increase(1)
|
||||
}
|
||||
|
||||
/// Shifts the table index backward of one byte.
|
||||
pub fn decrement(&mut self) {
|
||||
self.decrease(1)
|
||||
}
|
||||
|
||||
/// Returns the table index shifted forward by `shift` bytes.
|
||||
pub fn increased(mut self, shift: usize) -> Self {
|
||||
self.increase(shift);
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the table index shifted backward by `shift` bytes.
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
pub fn decreased(mut self, shift: usize) -> Self {
|
||||
self.decrease(shift);
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the table index to the next byte.
|
||||
pub fn incremented(mut self) -> Self {
|
||||
self.increment();
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the table index to the previous byte.
|
||||
pub fn decremented(mut self) -> Self {
|
||||
self.decrement();
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the distance between two table indices in bytes.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// This method assumes that the `larger` input is, well, larger than the `smaller` input. If
|
||||
/// this is not the case, the method returns `None`. Also, note that `ByteCount` uses the
|
||||
/// `u128` datatype to store the byte count. Unfortunately, the number of bytes between two
|
||||
/// table indices is in ⟦0;2¹³² -1⟧. When the distance is greater than 2¹²⁸ - 1, we saturate
|
||||
/// the count at 2¹²⁸ - 1.
|
||||
pub fn distance(larger: &Self, smaller: &Self) -> Option<ByteCount> {
|
||||
match std::cmp::Ord::cmp(larger, smaller) {
|
||||
Ordering::Less => None,
|
||||
Ordering::Equal => Some(ByteCount(0)),
|
||||
Ordering::Greater => {
|
||||
let mut result = larger.aes_index.0 - smaller.aes_index.0;
|
||||
result = result.saturating_mul(BYTES_PER_AES_CALL as u128);
|
||||
result = result.saturating_add(larger.byte_index.0 as u128);
|
||||
result = result.saturating_sub(smaller.byte_index.0 as u128);
|
||||
Some(ByteCount(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for TableIndex {}
|
||||
|
||||
impl PartialEq<Self> for TableIndex {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
matches!(self.partial_cmp(other), Some(Ordering::Equal))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd<Self> for TableIndex {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for TableIndex {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
match self.aes_index.cmp(&other.aes_index) {
|
||||
Ordering::Equal => self.byte_index.cmp(&other.byte_index),
|
||||
other => other,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
const REPEATS: usize = 1_000_000;
|
||||
|
||||
fn any_table_index() -> impl Iterator<Item = TableIndex> {
|
||||
std::iter::repeat_with(|| {
|
||||
TableIndex::new(
|
||||
AesIndex(thread_rng().gen()),
|
||||
ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn any_usize() -> impl Iterator<Item = usize> {
|
||||
std::iter::repeat_with(|| thread_rng().gen())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
/// Verifies that the constructor of `TableIndex` panics when the byte index is too large.
|
||||
fn test_table_index_new_panic() {
|
||||
TableIndex::new(AesIndex(12), ByteIndex(144));
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Verifies that the `TableIndex` wraps nicely with predecessor
|
||||
fn test_table_index_predecessor_edge() {
|
||||
assert_eq!(TableIndex::FIRST.decremented(), TableIndex::LAST);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Verifies that the `TableIndex` wraps nicely with successor
|
||||
fn test_table_index_successor_edge() {
|
||||
assert_eq!(TableIndex::LAST.incremented(), TableIndex::FIRST);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check that the table index distance saturates nicely.
|
||||
fn prop_table_index_distance_saturates() {
|
||||
assert_eq!(
|
||||
TableIndex::distance(&TableIndex::LAST, &TableIndex::FIRST)
|
||||
.unwrap()
|
||||
.0,
|
||||
u128::MAX
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t,
|
||||
/// distance(t, t) = Some(0).
|
||||
fn prop_table_index_distance_zero() {
|
||||
for _ in 0..REPEATS {
|
||||
let t = any_table_index().next().unwrap();
|
||||
assert_eq!(TableIndex::distance(&t, &t), Some(ByteCount(0)));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t1, t2 such that t1 < t2,
|
||||
/// distance(t1, t2) = None.
|
||||
fn prop_table_index_distance_wrong_order_none() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t1, t2) = any_table_index()
|
||||
.zip(any_table_index())
|
||||
.find(|(t1, t2)| t1 < t2)
|
||||
.unwrap();
|
||||
assert_eq!(TableIndex::distance(&t1, &t2), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t1, t2 such that t1 > t2,
|
||||
/// distance(t1, t2) = Some(v) where v is strictly positive.
|
||||
fn prop_table_index_distance_some_positive() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t1, t2) = any_table_index()
|
||||
.zip(any_table_index())
|
||||
.find(|(t1, t2)| t1 > t2)
|
||||
.unwrap();
|
||||
assert!(matches!(TableIndex::distance(&t1, &t2), Some(ByteCount(v)) if v > 0));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive i such that i < distance (MAX, t) with MAX the largest
|
||||
/// table index,
|
||||
/// distance(t.increased(i), t) = Some(i).
|
||||
fn prop_table_index_distance_increase() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, inc) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.find(|(t, inc)| {
|
||||
(*inc as u128) < TableIndex::distance(&TableIndex::LAST, t).unwrap().0
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
TableIndex::distance(&t.increased(inc), &t).unwrap().0 as usize,
|
||||
inc
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, t =? t = true.
|
||||
fn prop_table_index_equality() {
|
||||
for _ in 0..REPEATS {
|
||||
let t = any_table_index().next().unwrap();
|
||||
assert_eq!(
|
||||
std::cmp::PartialOrd::partial_cmp(&t, &t),
|
||||
Some(std::cmp::Ordering::Equal)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive i such that i < distance (MAX, t) with MAX the largest
|
||||
/// table index,
|
||||
/// t.increased(i) >? t = true.
|
||||
fn prop_table_index_greater() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, inc) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.find(|(t, inc)| {
|
||||
(*inc as u128) < TableIndex::distance(&TableIndex::LAST, t).unwrap().0
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
std::cmp::PartialOrd::partial_cmp(&t.increased(inc), &t),
|
||||
Some(std::cmp::Ordering::Greater),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive i such that i < distance (t, 0) with MAX the largest
|
||||
/// table index,
|
||||
/// t.decreased(i) <? t = true.
|
||||
fn prop_table_index_less() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, inc) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.find(|(t, inc)| {
|
||||
(*inc as u128) < TableIndex::distance(t, &TableIndex::FIRST).unwrap().0
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
std::cmp::PartialOrd::partial_cmp(&t.decreased(inc), &t),
|
||||
Some(std::cmp::Ordering::Less)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t,
|
||||
/// successor(predecessor(t)) = t.
|
||||
fn prop_table_index_decrement_increment() {
|
||||
for _ in 0..REPEATS {
|
||||
let t = any_table_index().next().unwrap();
|
||||
assert_eq!(t.decremented().incremented(), t);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t,
|
||||
/// predecessor(successor(t)) = t.
|
||||
fn prop_table_index_increment_decrement() {
|
||||
for _ in 0..REPEATS {
|
||||
let t = any_table_index().next().unwrap();
|
||||
assert_eq!(t.incremented().decremented(), t);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive integer i,
|
||||
/// increase(decrease(t, i), i) = t.
|
||||
fn prop_table_index_increase_decrease() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, i) = any_table_index().zip(any_usize()).next().unwrap();
|
||||
assert_eq!(t.increased(i).decreased(i), t);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive integer i,
|
||||
/// decrease(increase(t, i), i) = t.
|
||||
fn prop_table_index_decrease_increase() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, i) = any_table_index().zip(any_usize()).next().unwrap();
|
||||
assert_eq!(t.decreased(i).increased(i), t);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check that a big increase does not overflow
|
||||
fn prop_table_increase_max_no_overflow() {
|
||||
let first = TableIndex::FIRST;
|
||||
// Increase so that ByteIndex is at 1usize
|
||||
let second = first.increased(1);
|
||||
|
||||
// Now increase by usize::MAX, as the underlying byte index stores a usize this may overflow
|
||||
// depending on implementation, ensure it does not overflow
|
||||
let big_increase = second.increased(usize::MAX);
|
||||
let total_full_aes_shifts = (1u128 + usize::MAX as u128) / BYTES_PER_AES_CALL as u128;
|
||||
|
||||
assert_eq!(
|
||||
big_increase,
|
||||
TableIndex::new(AesIndex(total_full_aes_shifts), ByteIndex(0))
|
||||
);
|
||||
}
|
||||
}
|
||||
223
concrete-csprng/src/generators/aes_ctr/mod.rs
Normal file
223
concrete-csprng/src/generators/aes_ctr/mod.rs
Normal file
@@ -0,0 +1,223 @@
|
||||
//! A module implementing the random generator api with batched aes calls.
|
||||
//!
|
||||
//! This module provides a generic [`AesCtrGenerator`] structure which implements the
|
||||
//! [`super::RandomGenerator`] api using the AES block cipher in counter mode. That is, the
|
||||
//! generator holds a state (i.e. counter) which is incremented iteratively, to produce the stream
|
||||
//! of random values:
|
||||
//! ```ascii
|
||||
//! state=0 state=1 state=2
|
||||
//! ╔══↧══╗ ╔══↧══╗ ╔══↧══╗
|
||||
//! key ↦ AES ║ key ↦ AES ║ key ↦ AES ║ ...
|
||||
//! ╚══↧══╝ ╚══↧══╝ ╚══↧══╝
|
||||
//! output0 output1 output2
|
||||
//!
|
||||
//! t=0 t=1 t=2
|
||||
//! ```
|
||||
//!
|
||||
//! The [`AesCtrGenerator`] structure is generic over the AES block ciphers, which are
|
||||
//! represented by the [`AesBlockCipher`] trait. Consequently, implementers only need to implement
|
||||
//! the `AesBlockCipher` trait, to benefit from the whole api of the `AesCtrGenerator` structure.
|
||||
//!
|
||||
//! In the following section, we give details on the implementation of this generic generator.
|
||||
//!
|
||||
//! Coarse-grained pseudo-random lookup table
|
||||
//! =========================================
|
||||
//!
|
||||
//! To generate random values, we use the AES block cipher in counter mode. If we denote f the aes
|
||||
//! encryption function, we have:
|
||||
//! ```ascii
|
||||
//! f: ⟦0;2¹²⁸ -1⟧ X ⟦0;2¹²⁸ -1⟧ ↦ ⟦0;2¹²⁸ -1⟧
|
||||
//! f(secret_key, input) ↦ output
|
||||
//! ```
|
||||
|
||||
//! If we fix the secret key to a value k, we have a function fₖ from ⟦0;2¹²⁸ -1⟧ to ⟦0;2¹²⁸-1⟧,
|
||||
//! transforming the state of the counter into a pseudo random value. Essentially, this fₖ
|
||||
//! function can be considered as a the following lookup table, containing 2¹²⁸ pseudo-random
|
||||
//! values:
|
||||
//! ```ascii
|
||||
//! ╭──────────────┬──────────────┬─────┬──────────────╮
|
||||
//! │ 0 │ 1 │ │ 2¹²⁸ -1 │
|
||||
//! ├──────────────┼──────────────┼─────┼──────────────┤
|
||||
//! │ fₖ(0) │ fₖ(1) │ │ fₖ(2¹²⁸ -1) │
|
||||
//! ╔═══════↧══════╦═══════↧══════╦═════╦═══════↧══════╗
|
||||
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
|
||||
//! ║┃ u128 ┃║┃ u128 ┃║ ... ║┃ u128 ┃║
|
||||
//! ║┗━━━━━━━━━━━━┛║┗━━━━━━━━━━━━┛║ ║┗━━━━━━━━━━━━┛║
|
||||
//! ╚══════════════╩══════════════╩═════╩══════════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! An input to the fₖ function is called an _aes index_ (also called state or counter in the
|
||||
//! standards) of the pseudo-random table. The [`AesIndex`] structure defined in this module
|
||||
//! represents such an index in the code.
|
||||
//!
|
||||
//! Fine-grained pseudo-random table lookup
|
||||
//! =======================================
|
||||
//!
|
||||
//! Since we want to deliver the pseudo-random bytes one by one, we have to come with a finer
|
||||
//! grained indexing. Fortunately, each `u128` value outputted by fₖ can be seen as a table of 16
|
||||
//! `u8`:
|
||||
//! ```ascii
|
||||
//! ╭──────────────┬──────────────┬─────┬──────────────╮
|
||||
//! │ 0 │ 1 │ │ 2¹²⁸ -1 │
|
||||
//! ├──────────────┼──────────────┼─────┼──────────────┤
|
||||
//! │ fₖ(0) │ fₖ(1) │ │ fₖ(2¹²⁸ -1) │
|
||||
//! ╔═══════↧══════╦═══════↧══════╦═════╦═══════↧══════╗
|
||||
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
|
||||
//! ║┃ u128 ┃║┃ u128 ┃║ ║┃ u128 ┃║
|
||||
//! ║┣━━┯━━┯━━━┯━━┫║┣━━┯━━┯━━━┯━━┫║ ... ║┣━━┯━━┯━━━┯━━┫║
|
||||
//! ║┃u8│u8│...│u8┃║┃u8│u8│...│u8┃║ ║┃u8│u8│...│u8┃║
|
||||
//! ║┗━━┷━━┷━━━┷━━┛║┗━━┷━━┷━━━┷━━┛║ ║┗━━┷━━┷━━━┷━━┛║
|
||||
//! ╚══════════════╩══════════════╩═════╩══════════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! We introduce a second function to select a chunk of 8 bits:
|
||||
//! ```ascii
|
||||
//! g: ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ ↦ ⟦0;2⁸ -1⟧
|
||||
//! g(big_int, index) ↦ byte
|
||||
//! ```
|
||||
//!
|
||||
//! If we fix the `u128` value to a value e, we have a function gₑ from ⟦0;15⟧ to ⟦0;2⁸ -1⟧
|
||||
//! transforming an index into a pseudo-random byte:
|
||||
//! ```ascii
|
||||
//! ┏━━━━━━━━┯━━━━━━━━┯━━━┯━━━━━━━━┓
|
||||
//! ┃ u8 │ u8 │...│ u8 ┃
|
||||
//! ┗━━━━━━━━┷━━━━━━━━┷━━━┷━━━━━━━━┛
|
||||
//! │ gₑ(0) │ gₑ(1) │ │ gₑ(15) │
|
||||
//! ╰────────┴─────-──┴───┴────────╯
|
||||
//! ```
|
||||
//!
|
||||
//! We call this input to the gₑ function, a _byte index_ of the pseudo-random table. The
|
||||
//! [`ByteIndex`] structure defined in this module represents such an index in the code.
|
||||
//!
|
||||
//! By using both the g and the fₖ functions, we can define a new function l which allows to index
|
||||
//! any byte of the pseudo-random table:
|
||||
//! ```ascii
|
||||
//! l: ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ ↦ ⟦0;2⁸ -1⟧
|
||||
//! l(aes_index, byte_index) ↦ g(fₖ(aes_index), byte_index)
|
||||
//! ```
|
||||
//!
|
||||
//! In this sense, any member of ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ uniquely defines a byte in this pseudo-random
|
||||
//! table:
|
||||
//! ```ascii
|
||||
//! e = fₖ(a)
|
||||
//! ╔══════════════╦═══════↧══════╦═════╦══════════════╗
|
||||
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
|
||||
//! ║┃ u128 ┃║┃ u128 ┃║ ║┃ u128 ┃║
|
||||
//! ║┣━━┯━━┯━━━┯━━┫║┣━━┯━━┯━━━┯━━┫║ ... ║┣━━┯━━┯━━━┯━━┫║
|
||||
//! ║┃u8│u8│...│u8┃║┃u8│u8│...│u8┃║ ║┃u8│u8│...│u8┃║
|
||||
//! ║┗━━┷━━┷━━━┷━━┛║┗━━┷↥━┷━━━┷━━┛║ ║┗━━┷━━┷━━━┷━━┛║
|
||||
//! ║ ║│ gₑ(b) │║ ║ ║
|
||||
//! ║ ║╰───-────────╯║ ║ ║
|
||||
//! ╚══════════════╩══════════════╩═════╩══════════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! We call this input to the l function, a _table index_ of the pseudo-random table. The
|
||||
//! [`TableIndex`] structure defined in this module represents such an index in the code.
|
||||
//!
|
||||
//! Prngs current table index
|
||||
//! =========================
|
||||
//!
|
||||
//! When created, a prng is given an initial _table index_, denoted (a₀, b₀), which identifies the
|
||||
//! first byte of the table to be outputted by the prng. Then, each time the prng is queried for a
|
||||
//! new value, the byte corresponding to the current _table index_ is returned, and the current
|
||||
//! _table index_ is incremented:
|
||||
//! ```ascii
|
||||
//! e = fₖ(a₀) e = fₖ(a₁)
|
||||
//! ╔═════↧═════╦═══════════╦═════╦═══════════╗ ╔═══════════╦═════↧═════╦═════╦═══════════╗
|
||||
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║ ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
|
||||
//! ║┃ │ │...│ ┃║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║
|
||||
//! ║┗━┷━┷━━━┷↥┛║┗━┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ → ║┗━┷━┷━━━┷━┛║┗↥┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║
|
||||
//! ║│ gₑ(b₀) │║ ║ ║ ║ ║ ║│ gₑ(b₁) │║ ║ ║
|
||||
//! ║╰─────────╯║ ║ ║ ║ ║ ║╰─────────╯║ ║ ║
|
||||
//! ╚═══════════╩═══════════╩═════╩═══════════╝ ╚═══════════╩═══════════╩═════╩═══════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! Prng bound
|
||||
//! ==========
|
||||
//!
|
||||
//! When created, a prng is also given a _bound_ (aₘ, bₘ) , that is a table index which it is not
|
||||
//! allowed to exceed:
|
||||
//! ```ascii
|
||||
//! e = fₖ(a₀)
|
||||
//! ╔═════↧═════╦═══════════╦═════╦═══════════╗
|
||||
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
|
||||
//! ║┃ │ │...│ ┃║┃ │╳│...│╳┃║ ║┃╳│╳│...│╳┃║
|
||||
//! ║┗━┷━┷━━━┷↥┛║┗━┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ The current byte can be returned.
|
||||
//! ║│ gₑ(b₀) │║ ║ ║ ║
|
||||
//! ║╰─────────╯║ ║ ║ ║
|
||||
//! ╚═══════════╩═══════════╩═════╩═══════════╝
|
||||
//!
|
||||
//! e = fₖ(aₘ)
|
||||
//! ╔═══════════╦═════↧═════╦═════╦═══════════╗
|
||||
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
|
||||
//! ║┃ │ │...│ ┃║┃ │╳│...│╳┃║ ║┃╳│╳│...│╳┃║ The table index reached the bound,
|
||||
//! ║┗━┷━┷━━━┷━┛║┗━┷↥┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ the current byte can not be
|
||||
//! ║ ║│ gₑ(bₘ) │║ ║ ║ returned.
|
||||
//! ║ ║╰─────────╯║ ║ ║
|
||||
//! ╚═══════════╩═══════════╩═════╩═══════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! Buffering
|
||||
//! =========
|
||||
//!
|
||||
//! Calling the aes function every time we need to output a single byte would be a huge waste of
|
||||
//! resources. In practice, we call aes 8 times in a row, for 8 successive values of aes index, and
|
||||
//! store the results in a buffer. For platforms which have a dedicated aes chip, this allows to
|
||||
//! fill the unit pipeline and reduces the amortized cost of the aes function.
|
||||
//!
|
||||
//! Together with the current table index of the prng, we also store a pointer p (initialized at
|
||||
//! p₀=b₀) to the current byte in the buffer. If we denote v the lookup function we have :
|
||||
//! ```ascii
|
||||
//! e = fₖ(a₀) Buffer(length=128)
|
||||
//! ╔═════╦═══════════╦═════↧═════╦═══════════╦═════╗ ┏━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓
|
||||
//! ║ ... ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║ ┃▓│▓│▓│▓│▓│▓│▓│▓│...│▓┃
|
||||
//! ║ ║┃ │ │...│ ┃║┃▓│▓│...│▓┃║┃▓│▓│...│▓┃║ ║ ┗━┷↥┷━┷━┷━┷━┷━┷━┷━━━┷━┛
|
||||
//! ║ ║┗━┷━┷━━━┷━┛║┗━┷↥┷━━━┷━┛║┗━┷━┷━━━┷━┛║ ║ │ v(p₀) │
|
||||
//! ║ ║ ║│ gₑ(b₀) │║ ║ ║ ╰─────────────────────╯
|
||||
//! ║ ║ ║╰─────────╯║ ║ ║
|
||||
//! ╚═════╩═══════════╩═══════════╩═══════════╩═════╝
|
||||
//! ```
|
||||
//!
|
||||
//! We call this input to the v function, a _buffer pointer_. The [`BufferPointer`] structure
|
||||
//! defined in this module represents such a pointer in the code.
|
||||
//!
|
||||
//! When the table index is incremented, the buffer pointer is incremented alongside:
|
||||
//! ```ascii
|
||||
//! e = fₖ(a) Buffer(length=128)
|
||||
//! ╔═════╦═══════════╦═════↧═════╦═══════════╦═════╗ ┏━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓
|
||||
//! ║ ... ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║ ┃▓│▓│▓│▓│▓│▓│▓│▓│...│▓┃
|
||||
//! ║ ║┃ │ │...│ ┃║┃▓│▓│...│▓┃║┃▓│▓│...│▓┃║ ║ ┗━┷━┷↥┷━┷━┷━┷━┷━┷━━━┷━┛
|
||||
//! ║ ║┗━┷━┷━━━┷━┛║┗━┷━┷↥━━┷━┛║┗━┷━┷━━━┷━┛║ ║ │ v(p) │
|
||||
//! ║ ║ ║│ gₑ(b) │║ ║ ║ ╰─────────────────────╯
|
||||
//! ║ ║ ║╰─────────╯║ ║ ║
|
||||
//! ╚═════╩═══════════╩═══════════╩═══════════╩═════╝
|
||||
//! ```
|
||||
//!
|
||||
//! When the buffer pointer is incremented it is checked against the size of the buffer, and if
|
||||
//! necessary, a new batch of aes index values is generated.
|
||||
|
||||
pub const AES_CALLS_PER_BATCH: usize = 8;
|
||||
pub const BYTES_PER_AES_CALL: usize = 128 / 8;
|
||||
pub const BYTES_PER_BATCH: usize = BYTES_PER_AES_CALL * AES_CALLS_PER_BATCH;
|
||||
|
||||
/// A module containing structures to manage table indices.
|
||||
mod index;
|
||||
pub use index::*;
|
||||
|
||||
/// A module containing structures to manage table indices and buffer pointers together properly.
|
||||
mod states;
|
||||
pub use states::*;
|
||||
|
||||
/// A module containing an abstraction for aes block ciphers.
|
||||
mod block_cipher;
|
||||
pub use block_cipher::*;
|
||||
|
||||
/// A module containing a generic implementation of a random generator.
|
||||
mod generic;
|
||||
pub use generic::*;
|
||||
|
||||
/// A module extending `generic` to the `rayon` paradigm.
|
||||
#[cfg(feature = "parallel")]
|
||||
mod parallel;
|
||||
#[cfg(feature = "parallel")]
|
||||
pub use parallel::*;
|
||||
222
concrete-csprng/src/generators/aes_ctr/parallel.rs
Normal file
222
concrete-csprng/src/generators/aes_ctr/parallel.rs
Normal file
@@ -0,0 +1,222 @@
|
||||
use crate::generators::aes_ctr::{
|
||||
AesBlockCipher, AesCtrGenerator, ChildrenClosure, State, TableIndex,
|
||||
};
|
||||
use crate::generators::{BytesPerChild, ChildrenCount, ForkError};
|
||||
|
||||
/// A type alias for the parallel children iterator type.
|
||||
pub type ParallelChildrenIterator<BlockCipher> = rayon::iter::Map<
|
||||
rayon::iter::Zip<
|
||||
rayon::range::Iter<usize>,
|
||||
rayon::iter::RepeatN<(Box<BlockCipher>, TableIndex, BytesPerChild)>,
|
||||
>,
|
||||
fn((usize, (Box<BlockCipher>, TableIndex, BytesPerChild))) -> AesCtrGenerator<BlockCipher>,
|
||||
>;
|
||||
|
||||
impl<BlockCipher: AesBlockCipher> AesCtrGenerator<BlockCipher> {
|
||||
/// Tries to fork the current generator into `n_child` generators each able to output
|
||||
/// `child_bytes` random bytes as a parallel iterator.
|
||||
///
|
||||
/// # Notes
|
||||
///
|
||||
/// This method necessitate the "multithread" feature.
|
||||
pub fn par_try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<ParallelChildrenIterator<BlockCipher>, ForkError>
|
||||
where
|
||||
BlockCipher: Send + Sync,
|
||||
{
|
||||
use rayon::prelude::*;
|
||||
|
||||
if n_children.0 == 0 {
|
||||
return Err(ForkError::ZeroChildrenCount);
|
||||
}
|
||||
if n_bytes.0 == 0 {
|
||||
return Err(ForkError::ZeroBytesPerChild);
|
||||
}
|
||||
if !self.is_fork_in_bound(n_children, n_bytes) {
|
||||
return Err(ForkError::ForkTooLarge);
|
||||
}
|
||||
|
||||
// The state currently stored in the parent generator points to the table index of the last
|
||||
// generated byte. The first index to be generated is the next one :
|
||||
let first_index = self.state.table_index().incremented();
|
||||
let output = (0..n_children.0)
|
||||
.into_par_iter()
|
||||
.zip(rayon::iter::repeatn(
|
||||
(self.block_cipher.clone(), first_index, n_bytes),
|
||||
n_children.0,
|
||||
))
|
||||
.map(
|
||||
// This map is a little weird because we need to cast the closure to a fn pointer
|
||||
// that matches the signature of `ChildrenIterator<BlockCipher>`. Unfortunately,
|
||||
// the compiler does not manage to coerce this one automatically.
|
||||
(|(i, (block_cipher, first_index, n_bytes))| {
|
||||
// The first index to be outputted by the child is the `first_index` shifted by
|
||||
// the proper amount of `child_bytes`.
|
||||
let child_first_index = first_index.increased(n_bytes.0 * i);
|
||||
// The bound of the child is the first index of its next sibling.
|
||||
let child_bound_index = first_index.increased(n_bytes.0 * (i + 1));
|
||||
AesCtrGenerator::from_block_cipher(
|
||||
block_cipher,
|
||||
child_first_index,
|
||||
child_bound_index,
|
||||
)
|
||||
}) as ChildrenClosure<BlockCipher>,
|
||||
);
|
||||
// The parent next index is the bound of the last child.
|
||||
let next_index = first_index.increased(n_bytes.0 * n_children.0);
|
||||
self.state = State::new(next_index);
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod aes_ctr_parallel_generic_tests {
|
||||
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::aes_ctr_generic_test::{any_key, any_valid_fork};
|
||||
use rayon::prelude::*;
|
||||
|
||||
const REPEATS: usize = 1_000_000;
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the table index of the first child is the same as the table index of
|
||||
/// the parent before the fork.
|
||||
pub fn prop_fork_first_state_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
let first_child = forked_generator
|
||||
.par_try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.find_first(|_| true)
|
||||
.unwrap();
|
||||
assert_eq!(original_generator.table_index(), first_child.table_index());
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the table index of the first byte outputted by the parent after the
|
||||
/// fork, is the bound of the last child of the fork.
|
||||
pub fn prop_fork_last_bound_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let mut parent_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let last_child = parent_generator
|
||||
.par_try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.find_last(|_| true)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
parent_generator.table_index().incremented(),
|
||||
last_child.get_bound()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the bound of the parent does not change.
|
||||
pub fn prop_fork_parent_bound_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
forked_generator
|
||||
.par_try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.find_last(|_| true)
|
||||
.unwrap();
|
||||
assert_eq!(original_generator.get_bound(), forked_generator.get_bound());
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the parent table index is increased of the number of children
|
||||
/// multiplied by the number of bytes per child.
|
||||
pub fn prop_fork_parent_state_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
forked_generator
|
||||
.par_try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.find_last(|_| true)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
forked_generator.table_index(),
|
||||
// Decrement accounts for the fact that the table index stored is the previous one
|
||||
t.increased(nc.0 * nb.0).decremented()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the bytes outputted by the children in the fork order form the same
|
||||
/// sequence the parent would have had outputted no fork had happened.
|
||||
pub fn prop_fork<G: AesBlockCipher>() {
|
||||
for _ in 0..1000 {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let bytes_to_go = nc.0 * nb.0;
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
let initial_output: Vec<u8> = original_generator.take(bytes_to_go).collect();
|
||||
let forked_output: Vec<u8> = forked_generator
|
||||
.par_try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.flat_map(|child| child.collect::<Vec<_>>())
|
||||
.collect();
|
||||
assert_eq!(initial_output, forked_output);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, all children got a number of remaining bytes equals to the number of
|
||||
/// bytes per child given as fork input.
|
||||
pub fn prop_fork_children_remaining_bytes<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let mut generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
assert!(generator
|
||||
.par_try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.all(|c| c.remaining_bytes().0 == nb.0 as u128));
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the number of remaining bytes of the parent is reduced by the
|
||||
/// number of children multiplied by the number of bytes per child.
|
||||
pub fn prop_fork_parent_remaining_bytes<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let bytes_to_go = nc.0 * nb.0;
|
||||
let mut generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let before_remaining_bytes = generator.remaining_bytes();
|
||||
let _ = generator.par_try_fork(nc, nb).unwrap();
|
||||
let after_remaining_bytes = generator.remaining_bytes();
|
||||
assert_eq!(
|
||||
before_remaining_bytes.0 - after_remaining_bytes.0,
|
||||
bytes_to_go as u128
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
176
concrete-csprng/src/generators/aes_ctr/states.rs
Normal file
176
concrete-csprng/src/generators/aes_ctr/states.rs
Normal file
@@ -0,0 +1,176 @@
|
||||
use crate::generators::aes_ctr::index::{AesIndex, TableIndex};
|
||||
use crate::generators::aes_ctr::BYTES_PER_BATCH;
|
||||
|
||||
/// A pointer to the next byte to be outputted by the generator.
|
||||
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
|
||||
pub struct BufferPointer(pub usize);
|
||||
|
||||
/// A structure representing the current state of generator using batched aes-ctr approach.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct State {
|
||||
table_index: TableIndex,
|
||||
buffer_pointer: BufferPointer,
|
||||
}
|
||||
|
||||
/// A structure representing the action to be taken by the generator after shifting its state.
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub enum ShiftAction {
|
||||
/// Outputs the byte pointed to by the 0-th field.
|
||||
OutputByte(BufferPointer),
|
||||
/// Refresh the buffer starting from the 0-th field, and output the byte pointed to by the 0-th
|
||||
/// field.
|
||||
RefreshBatchAndOutputByte(AesIndex, BufferPointer),
|
||||
}
|
||||
|
||||
impl State {
|
||||
/// Creates a new state from the initial table index.
|
||||
///
|
||||
/// Note :
|
||||
/// ------
|
||||
///
|
||||
/// The `table_index` input, is the __first__ table index that will be outputted on the next
|
||||
/// call to `increment`. Put differently, the current table index of the newly created state
|
||||
/// is the predecessor of this one.
|
||||
pub fn new(table_index: TableIndex) -> Self {
|
||||
// We ensure that the table index is not the first one, to prevent wrapping on `decrement`,
|
||||
// and outputting `RefreshBatchAndOutputByte(AesIndex::MAX, ...)` on the first increment
|
||||
// (which would lead to loading a non continuous batch).
|
||||
assert_ne!(table_index, TableIndex::FIRST);
|
||||
State {
|
||||
// To ensure that the first outputted table index is the proper one, we decrement the
|
||||
// table index.
|
||||
table_index: table_index.decremented(),
|
||||
// To ensure that the first `ShiftAction` will be a `RefreshBatchAndOutputByte`, we set
|
||||
// the buffer to the last allowed value.
|
||||
buffer_pointer: BufferPointer(BYTES_PER_BATCH - 1),
|
||||
}
|
||||
}
|
||||
|
||||
/// Shifts the state forward of `shift` bytes.
|
||||
pub fn increase(&mut self, shift: usize) -> ShiftAction {
|
||||
self.table_index.increase(shift);
|
||||
let total_batch_index = self.buffer_pointer.0 + shift;
|
||||
if total_batch_index > BYTES_PER_BATCH - 1 {
|
||||
self.buffer_pointer.0 = self.table_index.byte_index.0;
|
||||
ShiftAction::RefreshBatchAndOutputByte(self.table_index.aes_index, self.buffer_pointer)
|
||||
} else {
|
||||
self.buffer_pointer.0 = total_batch_index;
|
||||
ShiftAction::OutputByte(self.buffer_pointer)
|
||||
}
|
||||
}
|
||||
|
||||
/// Shifts the state forward of one byte.
|
||||
pub fn increment(&mut self) -> ShiftAction {
|
||||
self.increase(1)
|
||||
}
|
||||
|
||||
/// Returns the current table index.
|
||||
pub fn table_index(&self) -> TableIndex {
|
||||
self.table_index
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for State {
|
||||
fn default() -> Self {
|
||||
State::new(TableIndex::FIRST)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::index::ByteIndex;
|
||||
use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
const REPEATS: usize = 1_000_000;
|
||||
|
||||
fn any_table_index() -> impl Iterator<Item = TableIndex> {
|
||||
std::iter::repeat_with(|| {
|
||||
TableIndex::new(
|
||||
AesIndex(thread_rng().gen()),
|
||||
ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn any_usize() -> impl Iterator<Item = usize> {
|
||||
std::iter::repeat_with(|| thread_rng().gen())
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t,
|
||||
/// State::new(t).increment() = RefreshBatchAndOutputByte(t.aes_index, t.byte_index)
|
||||
fn prop_state_new_increment() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, mut s) = any_table_index()
|
||||
.map(|t| (t, State::new(t)))
|
||||
.next()
|
||||
.unwrap();
|
||||
assert!(matches!(
|
||||
s.increment(),
|
||||
ShiftAction::RefreshBatchAndOutputByte(t_, BufferPointer(p_)) if t_ == t.aes_index && p_ == t.byte_index.0
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all states s, table indices t, positive integer i
|
||||
/// if s = State::new(t), then t.increased(i) = s.increased(i-1).table_index().
|
||||
fn prop_state_increase_table_index() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, mut s, i) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.map(|(t, i)| (t, State::new(t), i))
|
||||
.next()
|
||||
.unwrap();
|
||||
s.increase(i);
|
||||
assert_eq!(s.table_index(), t.increased(i - 1))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive integer i such as t.byte_index + i < 127,
|
||||
/// if s = State::new(t), and s.increment() was executed, then
|
||||
/// s.increase(i) = OutputByte(t.byte_index + i).
|
||||
fn prop_state_increase_small() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, mut s, i) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.map(|(t, i)| (t, State::new(t), i % BYTES_PER_BATCH))
|
||||
.find(|(t, _, i)| t.byte_index.0 + i < BYTES_PER_BATCH - 1)
|
||||
.unwrap();
|
||||
s.increment();
|
||||
assert!(matches!(
|
||||
s.increase(i),
|
||||
ShiftAction::OutputByte(BufferPointer(p_)) if p_ == t.byte_index.0 + i
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive integer i such as t.byte_index + i >= 127,
|
||||
/// if s = State::new(t), and s.increment() was executed, then
|
||||
/// s.increase(i) = RefreshBatchAndOutputByte(
|
||||
/// t.increased(i).aes_index,
|
||||
/// t.increased(i).byte_index).
|
||||
fn prop_state_increase_large() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, mut s, i) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.map(|(t, i)| (t, State::new(t), i))
|
||||
.find(|(t, _, i)| t.byte_index.0 + i >= BYTES_PER_BATCH - 1)
|
||||
.unwrap();
|
||||
s.increment();
|
||||
assert!(matches!(
|
||||
s.increase(i),
|
||||
ShiftAction::RefreshBatchAndOutputByte(t_, BufferPointer(p_))
|
||||
if t_ == t.increased(i).aes_index && p_ == t.increased(i).byte_index.0
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
184
concrete-csprng/src/generators/implem/aarch64/block_cipher.rs
Normal file
184
concrete-csprng/src/generators/implem/aarch64/block_cipher.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
use crate::generators::aes_ctr::{AesBlockCipher, AesIndex, AesKey, BYTES_PER_BATCH};
|
||||
use core::arch::aarch64::{
|
||||
uint8x16_t, vaeseq_u8, vaesmcq_u8, vdupq_n_u32, vdupq_n_u8, veorq_u8, vgetq_lane_u32,
|
||||
vreinterpretq_u32_u8, vreinterpretq_u8_u32,
|
||||
};
|
||||
use std::arch::is_aarch64_feature_detected;
|
||||
use std::mem::transmute;
|
||||
|
||||
const RCONS: [u32; 10] = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36];
|
||||
const NUM_WORDS_IN_KEY: usize = 4;
|
||||
const NUM_ROUNDS: usize = 10;
|
||||
const NUM_ROUND_KEYS: usize = NUM_ROUNDS + 1;
|
||||
|
||||
/// An aes block cipher implementation which uses `neon` and `aes` instructions.
|
||||
#[derive(Clone)]
|
||||
pub struct ArmAesBlockCipher {
|
||||
round_keys: [uint8x16_t; NUM_ROUND_KEYS],
|
||||
}
|
||||
|
||||
impl AesBlockCipher for ArmAesBlockCipher {
|
||||
fn new(key: AesKey) -> ArmAesBlockCipher {
|
||||
let aes_detected = is_aarch64_feature_detected!("aes");
|
||||
let neon_detected = is_aarch64_feature_detected!("neon");
|
||||
|
||||
if !(aes_detected && neon_detected) {
|
||||
panic!(
|
||||
"The ArmAesBlockCipher requires both aes and neon aarch64 CPU features.\n\
|
||||
aes feature available: {}\nneon feature available: {}\n.",
|
||||
aes_detected, neon_detected
|
||||
)
|
||||
}
|
||||
|
||||
let round_keys = unsafe { generate_round_keys(key) };
|
||||
ArmAesBlockCipher { round_keys }
|
||||
}
|
||||
|
||||
fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
|
||||
#[target_feature(enable = "aes,neon")]
|
||||
unsafe fn implementation(
|
||||
this: &ArmAesBlockCipher,
|
||||
AesIndex(aes_ctr): AesIndex,
|
||||
) -> [u8; BYTES_PER_BATCH] {
|
||||
let mut output = [0u8; BYTES_PER_BATCH];
|
||||
// We want 128 bytes of output, the ctr gives 128 bit message (16 bytes)
|
||||
for (i, out) in output.chunks_exact_mut(16).enumerate() {
|
||||
// Safe because we prevent the user from creating the Generator
|
||||
// on non-supported hardware
|
||||
let encrypted = encrypt(aes_ctr + (i as u128), &this.round_keys);
|
||||
out.copy_from_slice(&encrypted.to_ne_bytes());
|
||||
}
|
||||
output
|
||||
}
|
||||
// SAFETY: we checked for aes and neon availability in `Self::new`
|
||||
unsafe { implementation(self, AesIndex(aes_ctr)) }
|
||||
}
|
||||
}
|
||||
|
||||
/// Does the AES SubWord operation for the Key Expansion step
|
||||
///
|
||||
/// # SAFETY
|
||||
///
|
||||
/// You must make sure the CPU's arch is`aarch64` and has
|
||||
/// `neon` and `aes` features.
|
||||
#[inline(always)]
|
||||
unsafe fn sub_word(word: u32) -> u32 {
|
||||
let data = vreinterpretq_u8_u32(vdupq_n_u32(word));
|
||||
let zero_key = vdupq_n_u8(0u8);
|
||||
let temp = vaeseq_u8(data, zero_key);
|
||||
// vaeseq_u8 does SubBytes(ShiftRow(XOR(data, key))
|
||||
// But because we used a zero aes key,the XOR did not alter data
|
||||
// We now have temp = SubBytes(ShiftRow(data))
|
||||
|
||||
// Since in AES ShiftRow operation, the first row is not shifted
|
||||
// We can just get that one to have our SubWord(word) result
|
||||
vgetq_lane_u32::<0>(vreinterpretq_u32_u8(temp))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn uint8x16_t_to_u128(input: uint8x16_t) -> u128 {
|
||||
unsafe { transmute(input) }
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn u128_to_uint8x16_t(input: u128) -> uint8x16_t {
|
||||
unsafe { transmute(input) }
|
||||
}
|
||||
|
||||
#[target_feature(enable = "aes,neon")]
|
||||
unsafe fn generate_round_keys(key: AesKey) -> [uint8x16_t; NUM_ROUND_KEYS] {
|
||||
let mut round_keys: [uint8x16_t; NUM_ROUND_KEYS] = std::mem::zeroed();
|
||||
round_keys[0] = u128_to_uint8x16_t(key.0);
|
||||
|
||||
let words = std::slice::from_raw_parts_mut(
|
||||
round_keys.as_mut_ptr() as *mut u32,
|
||||
NUM_ROUND_KEYS * NUM_WORDS_IN_KEY,
|
||||
);
|
||||
|
||||
debug_assert_eq!(words.len(), 44);
|
||||
|
||||
// Skip the words of the first key, its already done
|
||||
for i in NUM_WORDS_IN_KEY..words.len() {
|
||||
if (i % NUM_WORDS_IN_KEY) == 0 {
|
||||
words[i] = words[i - NUM_WORDS_IN_KEY]
|
||||
^ sub_word(words[i - 1]).rotate_right(8)
|
||||
^ RCONS[(i / NUM_WORDS_IN_KEY) - 1];
|
||||
} else {
|
||||
words[i] = words[i - NUM_WORDS_IN_KEY] ^ words[i - 1];
|
||||
}
|
||||
// Note: there is also a special thing to do when
|
||||
// i mod SElf::NUM_WORDS_IN_KEY == 4 but it cannot happen on 128 bits keys
|
||||
}
|
||||
|
||||
round_keys
|
||||
}
|
||||
|
||||
/// Encrypts a 128-bit message
|
||||
///
|
||||
/// # SAFETY
|
||||
///
|
||||
/// You must make sure the CPU's arch is`aarch64` and has
|
||||
/// `neon` and `aes` features.
|
||||
#[inline(always)]
|
||||
unsafe fn encrypt(message: u128, keys: &[uint8x16_t; NUM_ROUND_KEYS]) -> u128 {
|
||||
// Notes:
|
||||
// According the [ARM Manual](https://developer.arm.com/documentation/ddi0487/gb/):
|
||||
// `vaeseq_u8` is the following AES operations:
|
||||
// 1. AddRoundKey (XOR)
|
||||
// 2. ShiftRows
|
||||
// 3. SubBytes
|
||||
// `vaesmcq_u8` is MixColumns
|
||||
let mut data: uint8x16_t = u128_to_uint8x16_t(message);
|
||||
|
||||
for &key in keys.iter().take(NUM_ROUNDS - 1) {
|
||||
data = vaesmcq_u8(vaeseq_u8(data, key));
|
||||
}
|
||||
|
||||
data = vaeseq_u8(data, keys[NUM_ROUNDS - 1]);
|
||||
data = veorq_u8(data, keys[NUM_ROUND_KEYS - 1]);
|
||||
|
||||
uint8x16_t_to_u128(data)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    // Test vector for aes128, from the FIPS publication 197
    const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
    // Expected expanded round keys for CIPHER_KEY (FIPS-197 appendix A.1).
    const KEY_SCHEDULE: [u128; 11] = [
        u128::from_be(0x000102030405060708090a0b0c0d0e0f),
        u128::from_be(0xd6aa74fdd2af72fadaa678f1d6ab76fe),
        u128::from_be(0xb692cf0b643dbdf1be9bc5006830b3fe),
        u128::from_be(0xb6ff744ed2c2c9bf6c590cbf0469bf41),
        u128::from_be(0x47f7f7bc95353e03f96c32bcfd058dfd),
        u128::from_be(0x3caaa3e8a99f9deb50f3af57adf622aa),
        u128::from_be(0x5e390f7df7a69296a7553dc10aa31f6b),
        u128::from_be(0x14f9701ae35fe28c440adf4d4ea9c026),
        u128::from_be(0x47438735a41c65b9e016baf4aebf7ad2),
        u128::from_be(0x549932d1f08557681093ed9cbe2c974e),
        u128::from_be(0x13111d7fe3944a17f307a78b4d2b30c5),
    ];
    const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
    const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);

    #[test]
    fn test_generate_key_schedule() {
        // Checks that the round keys are correctly generated from the sample key from FIPS
        let key = AesKey(CIPHER_KEY);
        let keys = unsafe { generate_round_keys(key) };
        for (expected, actual) in KEY_SCHEDULE.iter().zip(keys.iter()) {
            assert_eq!(*expected, uint8x16_t_to_u128(*actual));
        }
    }

    #[test]
    fn test_encrypt_message() {
        // Checks that encrypting the FIPS sample plaintext yields the expected ciphertext.
        let message = PLAINTEXT;
        let key = AesKey(CIPHER_KEY);
        let keys = unsafe { generate_round_keys(key) };
        let ciphertext = unsafe { encrypt(message, &keys) };
        assert_eq!(CIPHERTEXT, ciphertext);
    }
}
|
||||
110
concrete-csprng/src/generators/implem/aarch64/generator.rs
Normal file
110
concrete-csprng/src/generators/implem/aarch64/generator.rs
Normal file
@@ -0,0 +1,110 @@
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
|
||||
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
|
||||
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
|
||||
use crate::seeders::Seed;
|
||||
|
||||
/// A random number generator using the `aesni` instructions.
|
||||
pub struct NeonAesRandomGenerator(pub(super) AesCtrGenerator<ArmAesBlockCipher>);
|
||||
|
||||
/// The children iterator used by [`NeonAesRandomGenerator`].
|
||||
///
|
||||
/// Outputs children generators one by one.
|
||||
pub struct ArmAesChildrenIterator(ChildrenIterator<ArmAesBlockCipher>);
|
||||
|
||||
impl Iterator for ArmAesChildrenIterator {
|
||||
type Item = NeonAesRandomGenerator;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next().map(NeonAesRandomGenerator)
|
||||
}
|
||||
}
|
||||
|
||||
impl RandomGenerator for NeonAesRandomGenerator {
|
||||
type ChildrenIter = ArmAesChildrenIterator;
|
||||
fn new(seed: Seed) -> Self {
|
||||
NeonAesRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
|
||||
}
|
||||
fn remaining_bytes(&self) -> ByteCount {
|
||||
self.0.remaining_bytes()
|
||||
}
|
||||
fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ChildrenIter, ForkError> {
|
||||
self.0
|
||||
.try_fork(n_children, n_bytes)
|
||||
.map(ArmAesChildrenIterator)
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for NeonAesRandomGenerator {
|
||||
type Item = u8;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::generators::aes_ctr::aes_ctr_generic_test;
    use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
    use crate::generators::{generator_generic_test, NeonAesRandomGenerator};

    // Runs the generic AES-CTR fork/state property tests against the NEON
    // block cipher, plus the generic generator behavior tests.

    #[test]
    fn prop_fork_first_state_table_index() {
        aes_ctr_generic_test::prop_fork_first_state_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_last_bound_table_index() {
        aes_ctr_generic_test::prop_fork_last_bound_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_bound_table_index() {
        aes_ctr_generic_test::prop_fork_parent_bound_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_state_table_index() {
        aes_ctr_generic_test::prop_fork_parent_state_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork() {
        aes_ctr_generic_test::prop_fork::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_children_remaining_bytes() {
        aes_ctr_generic_test::prop_fork_children_remaining_bytes::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_remaining_bytes() {
        aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<ArmAesBlockCipher>();
    }

    #[test]
    fn test_roughly_uniform() {
        generator_generic_test::test_roughly_uniform::<NeonAesRandomGenerator>();
    }

    #[test]
    fn test_generator_determinism() {
        generator_generic_test::test_generator_determinism::<NeonAesRandomGenerator>();
    }

    #[test]
    fn test_fork() {
        generator_generic_test::test_fork_children::<NeonAesRandomGenerator>();
    }

    #[test]
    #[should_panic(expected = "expected test panic")]
    fn test_bounded_panic() {
        generator_generic_test::test_bounded_none_should_panic::<NeonAesRandomGenerator>();
    }
}
|
||||
16
concrete-csprng/src/generators/implem/aarch64/mod.rs
Normal file
16
concrete-csprng/src/generators/implem/aarch64/mod.rs
Normal file
@@ -0,0 +1,16 @@
|
||||
//! A module implementing a random number generator, using the aarch64 `neon` and `aes`
|
||||
//! instructions.
|
||||
//!
|
||||
//! This module implements a cryptographically secure pseudorandom number generator
|
||||
//! (CS-PRNG), using a fast block cipher. The implementation is based on the
|
||||
//! [intel aesni white paper 323641-001 revision 3.0](https://www.intel.com/content/dam/doc/white-paper/advanced-encryption-standard-new-instructions-set-paper.pdf).
|
||||
|
||||
mod block_cipher;
|
||||
|
||||
mod generator;
|
||||
pub use generator::*;
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
mod parallel;
|
||||
#[cfg(feature = "parallel")]
|
||||
pub use parallel::*;
|
||||
95
concrete-csprng/src/generators/implem/aarch64/parallel.rs
Normal file
95
concrete-csprng/src/generators/implem/aarch64/parallel.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
|
||||
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
|
||||
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
|
||||
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rayon::prelude::*;
|
||||
|
||||
/// The parallel children iterator used by [`NeonAesRandomGenerator`].
///
/// Outputs the children generators one by one.
#[allow(clippy::type_complexity)]
pub struct ParallelArmAesChildrenIterator(
    // Rayon iterator mapping raw AES-CTR generators to the public newtype;
    // a plain `fn` pointer keeps the type nameable.
    rayon::iter::Map<
        ParallelChildrenIterator<ArmAesBlockCipher>,
        fn(AesCtrGenerator<ArmAesBlockCipher>) -> NeonAesRandomGenerator,
    >,
);

impl ParallelIterator for ParallelArmAesChildrenIterator {
    type Item = NeonAesRandomGenerator;
    // Delegate parallel driving to the wrapped rayon iterator.
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        self.0.drive_unindexed(consumer)
    }
}

impl IndexedParallelIterator for ParallelArmAesChildrenIterator {
    // Exact number of children produced by the fork.
    fn len(&self) -> usize {
        self.0.len()
    }
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
        self.0.drive(consumer)
    }
    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
        self.0.with_producer(callback)
    }
}
|
||||
|
||||
impl ParallelRandomGenerator for NeonAesRandomGenerator {
    type ParChildrenIter = ParallelArmAesChildrenIterator;

    // Forks into `n_children` generators of `n_bytes` each, yielded by a
    // parallel (rayon) iterator instead of a sequential one.
    fn par_try_fork(
        &mut self,
        n_children: ChildrenCount,
        n_bytes: BytesPerChild,
    ) -> Result<Self::ParChildrenIter, ForkError> {
        self.0
            .par_try_fork(n_children, n_bytes)
            .map(|iterator| ParallelArmAesChildrenIterator(iterator.map(NeonAesRandomGenerator)))
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
    use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;

    // Runs the generic parallel AES-CTR fork property tests against the
    // NEON block cipher.

    #[test]
    fn prop_fork_first_state_table_index() {
        aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_last_bound_table_index() {
        aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_bound_table_index() {
        aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_state_table_index() {
        aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<ArmAesBlockCipher>();
    }

    // Renamed from `prop_fork_ttt` (leftover debug suffix) for consistency
    // with the sequential test module.
    #[test]
    fn prop_fork() {
        aes_ctr_parallel_generic_tests::prop_fork::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_children_remaining_bytes() {
        aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_remaining_bytes() {
        aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<ArmAesBlockCipher>();
    }
}
|
||||
231
concrete-csprng/src/generators/implem/aesni/block_cipher.rs
Normal file
231
concrete-csprng/src/generators/implem/aesni/block_cipher.rs
Normal file
@@ -0,0 +1,231 @@
|
||||
use crate::generators::aes_ctr::{AesBlockCipher, AesIndex, AesKey, BYTES_PER_BATCH};
|
||||
use std::arch::x86_64::{
|
||||
__m128i, _mm_aesenc_si128, _mm_aesenclast_si128, _mm_aeskeygenassist_si128, _mm_shuffle_epi32,
|
||||
_mm_slli_si128, _mm_store_si128, _mm_xor_si128,
|
||||
};
|
||||
use std::mem::transmute;
|
||||
|
||||
/// An aes block cipher implementation which uses `aesni` instructions.
#[derive(Clone)]
pub struct AesniBlockCipher {
    // The set of round keys used for the aes encryption
    // (11 round keys for AES-128, expanded once in `new`).
    round_keys: [__m128i; 11],
}
|
||||
|
||||
impl AesBlockCipher for AesniBlockCipher {
|
||||
fn new(key: AesKey) -> AesniBlockCipher {
|
||||
let aes_detected = is_x86_feature_detected!("aes");
|
||||
let sse2_detected = is_x86_feature_detected!("sse2");
|
||||
|
||||
if !(aes_detected && sse2_detected) {
|
||||
panic!(
|
||||
"The AesniBlockCipher requires both aes and sse2 x86 CPU features.\n\
|
||||
aes feature available: {}\nsse2 feature available: {}\n.",
|
||||
aes_detected, sse2_detected
|
||||
)
|
||||
}
|
||||
|
||||
// SAFETY: we checked for aes and sse2 availability
|
||||
let round_keys = unsafe { generate_round_keys(key) };
|
||||
AesniBlockCipher { round_keys }
|
||||
}
|
||||
|
||||
fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
|
||||
#[target_feature(enable = "sse2,aes")]
|
||||
unsafe fn implementation(
|
||||
this: &AesniBlockCipher,
|
||||
AesIndex(aes_ctr): AesIndex,
|
||||
) -> [u8; BYTES_PER_BATCH] {
|
||||
si128arr_to_u8arr(aes_encrypt_many(
|
||||
u128_to_si128(aes_ctr),
|
||||
u128_to_si128(aes_ctr + 1),
|
||||
u128_to_si128(aes_ctr + 2),
|
||||
u128_to_si128(aes_ctr + 3),
|
||||
u128_to_si128(aes_ctr + 4),
|
||||
u128_to_si128(aes_ctr + 5),
|
||||
u128_to_si128(aes_ctr + 6),
|
||||
u128_to_si128(aes_ctr + 7),
|
||||
&this.round_keys,
|
||||
))
|
||||
}
|
||||
// SAFETY: we checked for aes and sse2 availability in `Self::new`
|
||||
unsafe { implementation(self, AesIndex(aes_ctr)) }
|
||||
}
|
||||
}
|
||||
|
||||
#[target_feature(enable = "sse2,aes")]
|
||||
unsafe fn generate_round_keys(key: AesKey) -> [__m128i; 11] {
|
||||
let key = u128_to_si128(key.0);
|
||||
let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
|
||||
aes_128_key_expansion(key, &mut keys);
|
||||
keys
|
||||
}
|
||||
|
||||
// Uses aes to encrypt many values at once. This allows a substantial speedup (around 30%)
// compared to the naive approach: the 8 independent blocks keep the AES
// pipeline busy.
#[allow(clippy::too_many_arguments)]
#[inline(always)]
fn aes_encrypt_many(
    message_1: __m128i,
    message_2: __m128i,
    message_3: __m128i,
    message_4: __m128i,
    message_5: __m128i,
    message_6: __m128i,
    message_7: __m128i,
    message_8: __m128i,
    keys: &[__m128i; 11],
) -> [__m128i; 8] {
    unsafe {
        // Initial whitening: xor every block with the first round key.
        let mut b0 = _mm_xor_si128(message_1, keys[0]);
        let mut b1 = _mm_xor_si128(message_2, keys[0]);
        let mut b2 = _mm_xor_si128(message_3, keys[0]);
        let mut b3 = _mm_xor_si128(message_4, keys[0]);
        let mut b4 = _mm_xor_si128(message_5, keys[0]);
        let mut b5 = _mm_xor_si128(message_6, keys[0]);
        let mut b6 = _mm_xor_si128(message_7, keys[0]);
        let mut b7 = _mm_xor_si128(message_8, keys[0]);

        // Rounds 1..=9, interleaved across the 8 blocks.
        for round in 1..10 {
            let key = keys[round];
            b0 = _mm_aesenc_si128(b0, key);
            b1 = _mm_aesenc_si128(b1, key);
            b2 = _mm_aesenc_si128(b2, key);
            b3 = _mm_aesenc_si128(b3, key);
            b4 = _mm_aesenc_si128(b4, key);
            b5 = _mm_aesenc_si128(b5, key);
            b6 = _mm_aesenc_si128(b6, key);
            b7 = _mm_aesenc_si128(b7, key);
        }

        // Final round (no MixColumns).
        b0 = _mm_aesenclast_si128(b0, keys[10]);
        b1 = _mm_aesenclast_si128(b1, keys[10]);
        b2 = _mm_aesenclast_si128(b2, keys[10]);
        b3 = _mm_aesenclast_si128(b3, keys[10]);
        b4 = _mm_aesenclast_si128(b4, keys[10]);
        b5 = _mm_aesenclast_si128(b5, keys[10]);
        b6 = _mm_aesenclast_si128(b6, keys[10]);
        b7 = _mm_aesenclast_si128(b7, keys[10]);

        [b0, b1, b2, b3, b4, b5, b6, b7]
    }
}
|
||||
|
||||
// Computes the next AES-128 round key from the previous one (`temp1`) and the
// `_mm_aeskeygenassist_si128` output (`temp2`), following the key-expansion
// helper of the Intel AES-NI white paper. The exact shift/xor sequence is
// order-sensitive: do not reorder.
fn aes_128_assist(temp1: __m128i, temp2: __m128i) -> __m128i {
    let mut temp3: __m128i;
    let mut temp2 = temp2;
    let mut temp1 = temp1;
    unsafe {
        // Broadcast the word carrying the SubWord/RotWord/Rcon result.
        temp2 = _mm_shuffle_epi32(temp2, 0xff);
        // Xor-chain the previous key's words via successive 4-byte shifts.
        temp3 = _mm_slli_si128(temp1, 0x4);
        temp1 = _mm_xor_si128(temp1, temp3);
        temp3 = _mm_slli_si128(temp3, 0x4);
        temp1 = _mm_xor_si128(temp1, temp3);
        temp3 = _mm_slli_si128(temp3, 0x4);
        temp1 = _mm_xor_si128(temp1, temp3);
        temp1 = _mm_xor_si128(temp1, temp2);
    }
    temp1
}
|
||||
|
||||
// Expands `key` into the 11 AES-128 round keys, storing them in `keys`.
// The rcon immediate of `_mm_aeskeygenassist_si128` must be a literal
// constant, so the ten steps cannot be folded into a loop.
#[inline(always)]
fn aes_128_key_expansion(key: __m128i, keys: &mut [__m128i; 11]) {
    let (mut temp1, mut temp2): (__m128i, __m128i);
    temp1 = key;
    unsafe {
        // Round key 0 is the cipher key itself.
        _mm_store_si128(keys.as_mut_ptr(), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x01);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(1), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x02);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(2), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x04);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(3), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x08);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(4), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x10);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(5), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x20);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(6), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x40);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(7), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x80);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(8), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x1b);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(9), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x36);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(10), temp1);
    }
}
|
||||
|
||||
/// Reinterprets a native-endian `u128` as an SSE 128-bit register.
#[inline(always)]
fn u128_to_si128(input: u128) -> __m128i {
    // SAFETY: both types are exactly 128 bits wide.
    unsafe { transmute::<u128, __m128i>(input) }
}
|
||||
|
||||
/// Reinterprets an SSE 128-bit register as a native-endian `u128`.
#[allow(unused)] // to please clippy when tests are not activated
fn si128_to_u128(input: __m128i) -> u128 {
    // SAFETY: both types are exactly 128 bits wide.
    unsafe { transmute::<__m128i, u128>(input) }
}
|
||||
|
||||
#[inline(always)]
|
||||
fn si128arr_to_u8arr(input: [__m128i; 8]) -> [u8; BYTES_PER_BATCH] {
|
||||
unsafe { transmute(input) }
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    // Test vector for aes128, from the FIPS publication 197
    const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
    // Expected expanded round keys for CIPHER_KEY (FIPS-197 appendix A.1).
    const KEY_SCHEDULE: [u128; 11] = [
        u128::from_be(0x000102030405060708090a0b0c0d0e0f),
        u128::from_be(0xd6aa74fdd2af72fadaa678f1d6ab76fe),
        u128::from_be(0xb692cf0b643dbdf1be9bc5006830b3fe),
        u128::from_be(0xb6ff744ed2c2c9bf6c590cbf0469bf41),
        u128::from_be(0x47f7f7bc95353e03f96c32bcfd058dfd),
        u128::from_be(0x3caaa3e8a99f9deb50f3af57adf622aa),
        u128::from_be(0x5e390f7df7a69296a7553dc10aa31f6b),
        u128::from_be(0x14f9701ae35fe28c440adf4d4ea9c026),
        u128::from_be(0x47438735a41c65b9e016baf4aebf7ad2),
        u128::from_be(0x549932d1f08557681093ed9cbe2c974e),
        u128::from_be(0x13111d7fe3944a17f307a78b4d2b30c5),
    ];
    const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
    const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);

    #[test]
    fn test_generate_key_schedule() {
        // Checks that the round keys are correctly generated from the sample key from FIPS
        let key = u128_to_si128(CIPHER_KEY);
        let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
        aes_128_key_expansion(key, &mut keys);
        for (expected, actual) in KEY_SCHEDULE.iter().zip(keys.iter()) {
            assert_eq!(*expected, si128_to_u128(*actual));
        }
    }

    #[test]
    fn test_encrypt_many_messages() {
        // Checks that encrypting many plaintext at the same time gives the correct output.
        let message = u128_to_si128(PLAINTEXT);
        let key = u128_to_si128(CIPHER_KEY);
        let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
        aes_128_key_expansion(key, &mut keys);
        let ciphertexts = aes_encrypt_many(
            message, message, message, message, message, message, message, message, &keys,
        );
        for ct in &ciphertexts {
            assert_eq!(CIPHERTEXT, si128_to_u128(*ct));
        }
    }
}
|
||||
110
concrete-csprng/src/generators/implem/aesni/generator.rs
Normal file
110
concrete-csprng/src/generators/implem/aesni/generator.rs
Normal file
@@ -0,0 +1,110 @@
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
|
||||
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
|
||||
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
|
||||
use crate::seeders::Seed;
|
||||
|
||||
/// A random number generator using the `aesni` instructions.
//
// Newtype over the generic AES-CTR generator, specialized to the x86_64
// hardware block cipher.
pub struct AesniRandomGenerator(pub(super) AesCtrGenerator<AesniBlockCipher>);
|
||||
|
||||
/// The children iterator used by [`AesniRandomGenerator`].
|
||||
///
|
||||
/// Outputs children generators one by one.
|
||||
pub struct AesniChildrenIterator(ChildrenIterator<AesniBlockCipher>);
|
||||
|
||||
impl Iterator for AesniChildrenIterator {
|
||||
type Item = AesniRandomGenerator;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next().map(AesniRandomGenerator)
|
||||
}
|
||||
}
|
||||
|
||||
impl RandomGenerator for AesniRandomGenerator {
|
||||
type ChildrenIter = AesniChildrenIterator;
|
||||
fn new(seed: Seed) -> Self {
|
||||
AesniRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
|
||||
}
|
||||
fn remaining_bytes(&self) -> ByteCount {
|
||||
self.0.remaining_bytes()
|
||||
}
|
||||
fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ChildrenIter, ForkError> {
|
||||
self.0
|
||||
.try_fork(n_children, n_bytes)
|
||||
.map(AesniChildrenIterator)
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for AesniRandomGenerator {
|
||||
type Item = u8;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::generators::aes_ctr::aes_ctr_generic_test;
    use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
    use crate::generators::{generator_generic_test, AesniRandomGenerator};

    // Runs the generic AES-CTR fork/state property tests against the AES-NI
    // block cipher, plus the generic generator behavior tests.

    #[test]
    fn prop_fork_first_state_table_index() {
        aes_ctr_generic_test::prop_fork_first_state_table_index::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_last_bound_table_index() {
        aes_ctr_generic_test::prop_fork_last_bound_table_index::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_bound_table_index() {
        aes_ctr_generic_test::prop_fork_parent_bound_table_index::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_state_table_index() {
        aes_ctr_generic_test::prop_fork_parent_state_table_index::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork() {
        aes_ctr_generic_test::prop_fork::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_children_remaining_bytes() {
        aes_ctr_generic_test::prop_fork_children_remaining_bytes::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_remaining_bytes() {
        aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<AesniBlockCipher>();
    }

    #[test]
    fn test_roughly_uniform() {
        generator_generic_test::test_roughly_uniform::<AesniRandomGenerator>();
    }

    #[test]
    fn test_generator_determinism() {
        generator_generic_test::test_generator_determinism::<AesniRandomGenerator>();
    }

    #[test]
    fn test_fork() {
        generator_generic_test::test_fork_children::<AesniRandomGenerator>();
    }

    #[test]
    #[should_panic(expected = "expected test panic")]
    fn test_bounded_panic() {
        generator_generic_test::test_bounded_none_should_panic::<AesniRandomGenerator>();
    }
}
|
||||
15
concrete-csprng/src/generators/implem/aesni/mod.rs
Normal file
15
concrete-csprng/src/generators/implem/aesni/mod.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
//! A module implementing a random number generator, using the x86_64 `aesni` instructions.
|
||||
//!
|
||||
//! This module implements a cryptographically secure pseudorandom number generator
|
||||
//! (CS-PRNG), using a fast block cipher. The implementation is based on the
|
||||
//! [intel aesni white paper 323641-001 revision 3.0](https://www.intel.com/content/dam/doc/white-paper/advanced-encryption-standard-new-instructions-set-paper.pdf).
|
||||
|
||||
mod block_cipher;
|
||||
|
||||
mod generator;
|
||||
pub use generator::*;
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
mod parallel;
|
||||
#[cfg(feature = "parallel")]
|
||||
pub use parallel::*;
|
||||
95
concrete-csprng/src/generators/implem/aesni/parallel.rs
Normal file
95
concrete-csprng/src/generators/implem/aesni/parallel.rs
Normal file
@@ -0,0 +1,95 @@
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
|
||||
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
|
||||
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
|
||||
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rayon::prelude::*;
|
||||
|
||||
/// The parallel children iterator used by [`AesniRandomGenerator`].
///
/// Outputs the children generators one by one.
#[allow(clippy::type_complexity)]
pub struct ParallelAesniChildrenIterator(
    // Rayon iterator mapping raw AES-CTR generators to the public newtype;
    // a plain `fn` pointer keeps the type nameable.
    rayon::iter::Map<
        ParallelChildrenIterator<AesniBlockCipher>,
        fn(AesCtrGenerator<AesniBlockCipher>) -> AesniRandomGenerator,
    >,
);

impl ParallelIterator for ParallelAesniChildrenIterator {
    type Item = AesniRandomGenerator;
    // Delegate parallel driving to the wrapped rayon iterator.
    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        self.0.drive_unindexed(consumer)
    }
}

impl IndexedParallelIterator for ParallelAesniChildrenIterator {
    // Exact number of children produced by the fork.
    fn len(&self) -> usize {
        self.0.len()
    }
    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
        self.0.drive(consumer)
    }
    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
        self.0.with_producer(callback)
    }
}
|
||||
|
||||
impl ParallelRandomGenerator for AesniRandomGenerator {
    type ParChildrenIter = ParallelAesniChildrenIterator;

    // Forks into `n_children` generators of `n_bytes` each, yielded by a
    // parallel (rayon) iterator instead of a sequential one.
    fn par_try_fork(
        &mut self,
        n_children: ChildrenCount,
        n_bytes: BytesPerChild,
    ) -> Result<Self::ParChildrenIter, ForkError> {
        self.0
            .par_try_fork(n_children, n_bytes)
            .map(|iterator| ParallelAesniChildrenIterator(iterator.map(AesniRandomGenerator)))
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
    use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;

    // Runs the generic parallel AES-CTR fork property tests against the
    // AES-NI block cipher.

    #[test]
    fn prop_fork_first_state_table_index() {
        aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_last_bound_table_index() {
        aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_bound_table_index() {
        aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_state_table_index() {
        aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<AesniBlockCipher>();
    }

    // Renamed from `prop_fork_ttt` (leftover debug suffix) for consistency
    // with the sequential test module.
    #[test]
    fn prop_fork() {
        aes_ctr_parallel_generic_tests::prop_fork::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_children_remaining_bytes() {
        aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<AesniBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_remaining_bytes() {
        aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<AesniBlockCipher>();
    }
}
|
||||
14
concrete-csprng/src/generators/implem/mod.rs
Normal file
14
concrete-csprng/src/generators/implem/mod.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
#[cfg(feature = "generator_x86_64_aesni")]
|
||||
mod aesni;
|
||||
#[cfg(feature = "generator_x86_64_aesni")]
|
||||
pub use aesni::*;
|
||||
|
||||
#[cfg(feature = "generator_aarch64_aes")]
|
||||
mod aarch64;
|
||||
#[cfg(feature = "generator_aarch64_aes")]
|
||||
pub use aarch64::*;
|
||||
|
||||
#[cfg(feature = "generator_fallback")]
|
||||
mod soft;
|
||||
#[cfg(feature = "generator_fallback")]
|
||||
pub use soft::*;
|
||||
114
concrete-csprng/src/generators/implem/soft/block_cipher.rs
Normal file
114
concrete-csprng/src/generators/implem/soft/block_cipher.rs
Normal file
@@ -0,0 +1,114 @@
|
||||
use crate::generators::aes_ctr::{
|
||||
AesBlockCipher, AesIndex, AesKey, AES_CALLS_PER_BATCH, BYTES_PER_AES_CALL, BYTES_PER_BATCH,
|
||||
};
|
||||
use aes::cipher::generic_array::GenericArray;
|
||||
use aes::cipher::{BlockEncrypt, KeyInit};
|
||||
use aes::Aes128;
|
||||
|
||||
/// A portable software AES block cipher backed by the `aes` crate.
#[derive(Clone)]
pub struct SoftwareBlockCipher {
    // Aes structure
    // (AES-128 cipher with its key schedule precomputed in `new`).
    aes: Aes128,
}
|
||||
|
||||
impl AesBlockCipher for SoftwareBlockCipher {
|
||||
fn new(key: AesKey) -> SoftwareBlockCipher {
|
||||
let key: [u8; BYTES_PER_AES_CALL] = key.0.to_ne_bytes();
|
||||
let key = GenericArray::clone_from_slice(&key[..]);
|
||||
let aes = Aes128::new(&key);
|
||||
SoftwareBlockCipher { aes }
|
||||
}
|
||||
|
||||
fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
|
||||
aes_encrypt_many(
|
||||
aes_ctr,
|
||||
aes_ctr + 1,
|
||||
aes_ctr + 2,
|
||||
aes_ctr + 3,
|
||||
aes_ctr + 4,
|
||||
aes_ctr + 5,
|
||||
aes_ctr + 6,
|
||||
aes_ctr + 7,
|
||||
&self.aes,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Uses aes to encrypt many values at once. This allows a substantial speedup (around 30%)
|
||||
// compared to the naive approach.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn aes_encrypt_many(
|
||||
message_1: u128,
|
||||
message_2: u128,
|
||||
message_3: u128,
|
||||
message_4: u128,
|
||||
message_5: u128,
|
||||
message_6: u128,
|
||||
message_7: u128,
|
||||
message_8: u128,
|
||||
cipher: &Aes128,
|
||||
) -> [u8; BYTES_PER_BATCH] {
|
||||
let mut b1 = GenericArray::clone_from_slice(&message_1.to_ne_bytes()[..]);
|
||||
let mut b2 = GenericArray::clone_from_slice(&message_2.to_ne_bytes()[..]);
|
||||
let mut b3 = GenericArray::clone_from_slice(&message_3.to_ne_bytes()[..]);
|
||||
let mut b4 = GenericArray::clone_from_slice(&message_4.to_ne_bytes()[..]);
|
||||
let mut b5 = GenericArray::clone_from_slice(&message_5.to_ne_bytes()[..]);
|
||||
let mut b6 = GenericArray::clone_from_slice(&message_6.to_ne_bytes()[..]);
|
||||
let mut b7 = GenericArray::clone_from_slice(&message_7.to_ne_bytes()[..]);
|
||||
let mut b8 = GenericArray::clone_from_slice(&message_8.to_ne_bytes()[..]);
|
||||
|
||||
cipher.encrypt_block(&mut b1);
|
||||
cipher.encrypt_block(&mut b2);
|
||||
cipher.encrypt_block(&mut b3);
|
||||
cipher.encrypt_block(&mut b4);
|
||||
cipher.encrypt_block(&mut b5);
|
||||
cipher.encrypt_block(&mut b6);
|
||||
cipher.encrypt_block(&mut b7);
|
||||
cipher.encrypt_block(&mut b8);
|
||||
|
||||
let output_array: [[u8; BYTES_PER_AES_CALL]; AES_CALLS_PER_BATCH] = [
|
||||
b1.into(),
|
||||
b2.into(),
|
||||
b3.into(),
|
||||
b4.into(),
|
||||
b5.into(),
|
||||
b6.into(),
|
||||
b7.into(),
|
||||
b8.into(),
|
||||
];
|
||||
|
||||
unsafe { *{ output_array.as_ptr() as *const [u8; BYTES_PER_BATCH] } }
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use std::convert::TryInto;

    // Test vector for aes128, from the FIPS publication 197
    const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
    const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
    const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);

    #[test]
    fn test_encrypt_many_messages() {
        // Checks that encrypting many plaintext at the same time gives the correct output.
        let key: [u8; BYTES_PER_AES_CALL] = CIPHER_KEY.to_ne_bytes();
        let aes = Aes128::new(&GenericArray::from(key));
        let ciphertexts = aes_encrypt_many(
            PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT,
            &aes,
        );
        let ciphertexts: [u8; BYTES_PER_BATCH] = ciphertexts[..].try_into().unwrap();
        // Every 16-byte slot of the batch must hold the FIPS sample ciphertext.
        for i in 0..8 {
            assert_eq!(
                u128::from_ne_bytes(
                    ciphertexts[BYTES_PER_AES_CALL * i..BYTES_PER_AES_CALL * (i + 1)]
                        .try_into()
                        .unwrap()
                ),
                CIPHERTEXT
            );
        }
    }
}
|
||||
110
concrete-csprng/src/generators/implem/soft/generator.rs
Normal file
110
concrete-csprng/src/generators/implem/soft/generator.rs
Normal file
@@ -0,0 +1,110 @@
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
|
||||
use crate::generators::implem::soft::block_cipher::SoftwareBlockCipher;
|
||||
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
|
||||
use crate::seeders::Seed;
|
||||
|
||||
/// A random number generator using a software implementation.
|
||||
pub struct SoftwareRandomGenerator(pub(super) AesCtrGenerator<SoftwareBlockCipher>);
|
||||
|
||||
/// The children iterator used by [`SoftwareRandomGenerator`].
|
||||
///
|
||||
/// Outputs children generators one by one.
|
||||
pub struct SoftwareChildrenIterator(ChildrenIterator<SoftwareBlockCipher>);
|
||||
|
||||
impl Iterator for SoftwareChildrenIterator {
|
||||
type Item = SoftwareRandomGenerator;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next().map(SoftwareRandomGenerator)
|
||||
}
|
||||
}
|
||||
|
||||
impl RandomGenerator for SoftwareRandomGenerator {
|
||||
type ChildrenIter = SoftwareChildrenIterator;
|
||||
fn new(seed: Seed) -> Self {
|
||||
SoftwareRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
|
||||
}
|
||||
fn remaining_bytes(&self) -> ByteCount {
|
||||
self.0.remaining_bytes()
|
||||
}
|
||||
fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ChildrenIter, ForkError> {
|
||||
self.0
|
||||
.try_fork(n_children, n_bytes)
|
||||
.map(SoftwareChildrenIterator)
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for SoftwareRandomGenerator {
|
||||
type Item = u8;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::aes_ctr_generic_test;
|
||||
use crate::generators::generator_generic_test;
|
||||
|
||||
#[test]
|
||||
fn prop_fork_first_state_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_first_state_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_last_bound_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_last_bound_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_bound_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_parent_bound_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_state_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_parent_state_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork() {
|
||||
aes_ctr_generic_test::prop_fork::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_children_remaining_bytes() {
|
||||
aes_ctr_generic_test::prop_fork_children_remaining_bytes::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_remaining_bytes() {
|
||||
aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_roughly_uniform() {
|
||||
generator_generic_test::test_roughly_uniform::<SoftwareRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fork() {
|
||||
generator_generic_test::test_fork_children::<SoftwareRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generator_determinism() {
|
||||
generator_generic_test::test_generator_determinism::<SoftwareRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "expected test panic")]
|
||||
fn test_bounded_panic() {
|
||||
generator_generic_test::test_bounded_none_should_panic::<SoftwareRandomGenerator>();
|
||||
}
|
||||
}
|
||||
11
concrete-csprng/src/generators/implem/soft/mod.rs
Normal file
11
concrete-csprng/src/generators/implem/soft/mod.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
//! A module using a software fallback implementation of random number generator.
|
||||
|
||||
mod block_cipher;
|
||||
|
||||
mod generator;
|
||||
pub use generator::*;
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
mod parallel;
|
||||
#[cfg(feature = "parallel")]
|
||||
pub use parallel::*;
|
||||
94
concrete-csprng/src/generators/implem/soft/parallel.rs
Normal file
94
concrete-csprng/src/generators/implem/soft/parallel.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
|
||||
use crate::generators::implem::soft::block_cipher::SoftwareBlockCipher;
|
||||
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
|
||||
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rayon::prelude::*;
|
||||
|
||||
/// The parallel children iterator used by [`SoftwareRandomGenerator`].
|
||||
///
|
||||
/// Outputs the children generators one by one.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub struct ParallelSoftwareChildrenIterator(
|
||||
rayon::iter::Map<
|
||||
ParallelChildrenIterator<SoftwareBlockCipher>,
|
||||
fn(AesCtrGenerator<SoftwareBlockCipher>) -> SoftwareRandomGenerator,
|
||||
>,
|
||||
);
|
||||
|
||||
impl ParallelIterator for ParallelSoftwareChildrenIterator {
|
||||
type Item = SoftwareRandomGenerator;
|
||||
fn drive_unindexed<C>(self, consumer: C) -> C::Result
|
||||
where
|
||||
C: UnindexedConsumer<Self::Item>,
|
||||
{
|
||||
self.0.drive_unindexed(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexedParallelIterator for ParallelSoftwareChildrenIterator {
|
||||
fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
|
||||
self.0.drive(consumer)
|
||||
}
|
||||
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
|
||||
self.0.with_producer(callback)
|
||||
}
|
||||
}
|
||||
|
||||
impl ParallelRandomGenerator for SoftwareRandomGenerator {
|
||||
type ParChildrenIter = ParallelSoftwareChildrenIterator;
|
||||
|
||||
fn par_try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ParChildrenIter, ForkError> {
|
||||
self.0
|
||||
.par_try_fork(n_children, n_bytes)
|
||||
.map(|iterator| ParallelSoftwareChildrenIterator(iterator.map(SoftwareRandomGenerator)))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
|
||||
|
||||
#[test]
|
||||
fn prop_fork_first_state_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_last_bound_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_bound_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_state_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_children_remaining_bytes() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_remaining_bytes() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<SoftwareBlockCipher>();
|
||||
}
|
||||
}
|
||||
235
concrete-csprng/src/generators/mod.rs
Normal file
235
concrete-csprng/src/generators/mod.rs
Normal file
@@ -0,0 +1,235 @@
|
||||
//! A module containing random generators objects.
|
||||
//!
|
||||
//! See [crate-level](`crate`) explanations.
|
||||
use crate::seeders::Seed;
|
||||
use std::error::Error;
|
||||
use std::fmt::{Display, Formatter};
|
||||
|
||||
/// The number of children created when a generator is forked.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct ChildrenCount(pub usize);
|
||||
|
||||
/// The number of bytes each child can generate, when a generator is forked.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct BytesPerChild(pub usize);
|
||||
|
||||
/// A structure representing the number of bytes between two table indices.
|
||||
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
|
||||
pub struct ByteCount(pub u128);
|
||||
|
||||
/// An error occurring during a generator fork.
|
||||
#[derive(Debug)]
|
||||
pub enum ForkError {
|
||||
ForkTooLarge,
|
||||
ZeroChildrenCount,
|
||||
ZeroBytesPerChild,
|
||||
}
|
||||
|
||||
impl Display for ForkError {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
ForkError::ForkTooLarge => {
|
||||
write!(
|
||||
f,
|
||||
"The children generators would output bytes after the parent bound. "
|
||||
)
|
||||
}
|
||||
ForkError::ZeroChildrenCount => {
|
||||
write!(
|
||||
f,
|
||||
"The number of children in the fork must be greater than zero."
|
||||
)
|
||||
}
|
||||
ForkError::ZeroBytesPerChild => {
|
||||
write!(
|
||||
f,
|
||||
"The number of bytes per child must be greater than zero."
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl Error for ForkError {}
|
||||
|
||||
/// A trait for cryptographically secure pseudo-random generators.
|
||||
///
|
||||
/// See the [crate-level](#crate) documentation for details.
|
||||
pub trait RandomGenerator: Iterator<Item = u8> {
|
||||
/// The iterator over children generators, returned by `try_fork` in case of success.
|
||||
type ChildrenIter: Iterator<Item = Self>;
|
||||
|
||||
/// Creates a new generator from a seed.
|
||||
///
|
||||
/// This operation is usually costly to perform, as the aes round keys need to be generated from
|
||||
/// the seed.
|
||||
fn new(seed: Seed) -> Self;
|
||||
|
||||
/// Returns the number of bytes that can still be outputted by the generator before reaching its
|
||||
/// bound.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// A fresh generator can generate 2¹³² bytes. Unfortunately, no rust integer type in is able
|
||||
/// to encode such a large number. Consequently [`ByteCount`] uses the largest integer type
|
||||
/// available to encode this value: the `u128` type. For this reason, this method does not
|
||||
/// effectively return the number of remaining bytes, but instead
|
||||
/// `min(2¹²⁸-1, remaining_bytes)`.
|
||||
fn remaining_bytes(&self) -> ByteCount;
|
||||
|
||||
/// Returns the next byte of the stream, if the generator did not yet reach its bound.
|
||||
fn next_byte(&mut self) -> Option<u8> {
|
||||
self.next()
|
||||
}
|
||||
|
||||
/// Tries to fork the generator into an iterator of `n_children` new generators, each able to
|
||||
/// output `n_bytes` bytes.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// To be successful, the number of remaining bytes for the parent generator must be larger than
|
||||
/// `n_children*n_bytes`.
|
||||
fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ChildrenIter, ForkError>;
|
||||
}
|
||||
|
||||
/// A trait extending [`RandomGenerator`] to the parallel iterators of `rayon`.
|
||||
#[cfg(feature = "parallel")]
|
||||
pub trait ParallelRandomGenerator: RandomGenerator + Send {
|
||||
/// The iterator over children generators, returned by `par_try_fork` in case of success.
|
||||
type ParChildrenIter: rayon::prelude::IndexedParallelIterator<Item = Self>;
|
||||
|
||||
/// Tries to fork the generator into a parallel iterator of `n_children` new generators, each
|
||||
/// able to output `n_bytes` bytes.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// To be successful, the number of remaining bytes for the parent generator must be larger than
|
||||
/// `n_children*n_bytes`.
|
||||
fn par_try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ParChildrenIter, ForkError>;
|
||||
}
|
||||
|
||||
mod aes_ctr;
|
||||
|
||||
mod implem;
|
||||
pub use implem::*;
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod generator_generic_test {
|
||||
#![allow(unused)] // to please clippy when tests are not activated
|
||||
use super::*;
|
||||
use rand::Rng;
|
||||
|
||||
const REPEATS: usize = 1_000;
|
||||
|
||||
fn any_seed() -> impl Iterator<Item = Seed> {
|
||||
std::iter::repeat_with(|| Seed(rand::thread_rng().gen()))
|
||||
}
|
||||
|
||||
fn some_children_count() -> impl Iterator<Item = ChildrenCount> {
|
||||
std::iter::repeat_with(|| ChildrenCount(rand::thread_rng().gen::<usize>() % 16 + 1))
|
||||
}
|
||||
|
||||
fn some_bytes_per_child() -> impl Iterator<Item = BytesPerChild> {
|
||||
std::iter::repeat_with(|| BytesPerChild(rand::thread_rng().gen::<usize>() % 128 + 1))
|
||||
}
|
||||
|
||||
/// Checks that the PRNG roughly generates uniform numbers.
|
||||
///
|
||||
/// To do that, we perform an histogram of the occurrences of each byte value, over a fixed
|
||||
/// number of samples and check that the empirical probabilities of the bins are close to
|
||||
/// the theoretical probabilities.
|
||||
pub fn test_roughly_uniform<G: RandomGenerator>() {
|
||||
// Number of bins to use for the histogram.
|
||||
const N_BINS: usize = u8::MAX as usize + 1;
|
||||
// Number of samples to use for the histogram.
|
||||
let n_samples = 10_000_000_usize;
|
||||
// Theoretical probability of a each bins.
|
||||
let expected_prob: f64 = 1. / N_BINS as f64;
|
||||
// Absolute error allowed on the empirical probabilities.
|
||||
// This value was tuned to make the test pass on an arguably correct state of
|
||||
// implementation. 10^-4 precision is arguably pretty fine for this rough test, but it would
|
||||
// be interesting to improve this test.
|
||||
let precision = 10f64.powi(-3);
|
||||
|
||||
for _ in 0..REPEATS {
|
||||
// We instantiate a new generator.
|
||||
let seed = any_seed().next().unwrap();
|
||||
let mut generator = G::new(seed);
|
||||
// We create a new histogram
|
||||
let mut counts = [0usize; N_BINS];
|
||||
// We fill the histogram.
|
||||
for _ in 0..n_samples {
|
||||
counts[generator.next_byte().unwrap() as usize] += 1;
|
||||
}
|
||||
// We check that the empirical probabilities are close enough to the theoretical one.
|
||||
counts
|
||||
.iter()
|
||||
.map(|a| (*a as f64) / (n_samples as f64))
|
||||
.for_each(|a| assert!((a - expected_prob).abs() < precision))
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks that given a state and a key, the PRNG is determinist.
|
||||
pub fn test_generator_determinism<G: RandomGenerator>() {
|
||||
for _ in 0..REPEATS {
|
||||
let seed = any_seed().next().unwrap();
|
||||
let mut first_generator = G::new(seed);
|
||||
let mut second_generator = G::new(seed);
|
||||
for _ in 0..1024 {
|
||||
assert_eq!(first_generator.next(), second_generator.next());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks that forks returns a bounded child, and that the proper number of bytes can be
|
||||
/// generated.
|
||||
pub fn test_fork_children<G: RandomGenerator>() {
|
||||
for _ in 0..REPEATS {
|
||||
let ((seed, n_children), n_bytes) = any_seed()
|
||||
.zip(some_children_count())
|
||||
.zip(some_bytes_per_child())
|
||||
.next()
|
||||
.unwrap();
|
||||
let mut gen = G::new(seed);
|
||||
let mut bounded = gen.try_fork(n_children, n_bytes).unwrap().next().unwrap();
|
||||
assert_eq!(bounded.remaining_bytes(), ByteCount(n_bytes.0 as u128));
|
||||
for _ in 0..n_bytes.0 {
|
||||
bounded.next().unwrap();
|
||||
}
|
||||
|
||||
// Assert we are at the bound
|
||||
assert!(bounded.next().is_none());
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks that a bounded prng returns none when exceeding the allowed number of bytes.
|
||||
///
|
||||
/// To properly check for panic use `#[should_panic(expected = "expected test panic")]` as an
|
||||
/// attribute on the test function.
|
||||
pub fn test_bounded_none_should_panic<G: RandomGenerator>() {
|
||||
let ((seed, n_children), n_bytes) = any_seed()
|
||||
.zip(some_children_count())
|
||||
.zip(some_bytes_per_child())
|
||||
.next()
|
||||
.unwrap();
|
||||
let mut gen = G::new(seed);
|
||||
let mut bounded = gen.try_fork(n_children, n_bytes).unwrap().next().unwrap();
|
||||
assert_eq!(bounded.remaining_bytes(), ByteCount(n_bytes.0 as u128));
|
||||
for _ in 0..n_bytes.0 {
|
||||
assert!(bounded.next().is_some());
|
||||
}
|
||||
|
||||
// One call too many, should panic
|
||||
bounded.next().ok_or("expected test panic").unwrap();
|
||||
}
|
||||
}
|
||||
114
concrete-csprng/src/lib.rs
Normal file
114
concrete-csprng/src/lib.rs
Normal file
@@ -0,0 +1,114 @@
|
||||
#![deny(rustdoc::broken_intra_doc_links)]
|
||||
//! Cryptographically secure pseudo random number generator.
|
||||
//!
|
||||
//! Welcome to the `concrete-csprng` documentation.
|
||||
//!
|
||||
//! This crate provides a fast cryptographically secure pseudo-random number generator, suited to
|
||||
//! work in a multithreaded setting.
|
||||
//!
|
||||
//! Random Generators
|
||||
//! =================
|
||||
//!
|
||||
//! The central abstraction of this crate is the [`RandomGenerator`](generators::RandomGenerator)
|
||||
//! trait, which is implemented by different types, each supporting a different platform. In
|
||||
//! essence, a type implementing [`RandomGenerator`](generators::RandomGenerator) is a type that
|
||||
//! outputs a new pseudo-random byte at each call to
|
||||
//! [`next_byte`](generators::RandomGenerator::next_byte). Such a generator `g` can be seen as
|
||||
//! enclosing a growing index into an imaginary array of pseudo-random bytes:
|
||||
//! ```ascii
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M-1 │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃ │ │ │ │ │ │ │ │ │ │...│ ┃ │
|
||||
//! ┗↥┷━┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
|
||||
//! g │
|
||||
//! │
|
||||
//! g.next_byte() │
|
||||
//! │
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M-1 │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃╳│ │ │ │ │ │ │ │ │ │...│ ┃ │
|
||||
//! ┗━┷↥┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
|
||||
//! g │
|
||||
//! │
|
||||
//! g.next_byte() │ legend:
|
||||
//! │ -------
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M-1 │ ↥ : next byte to be outputted by g
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │ │: byte not yet outputted by g
|
||||
//! ┃╳│╳│ │ │ │ │ │ │ │ │...│ ┃ │ │╳│: byte already outputted by g
|
||||
//! ┗━┷━┷↥┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
|
||||
//! g 🭭
|
||||
//! ```
|
||||
//!
|
||||
//! While being large, this imaginary array is still bounded to M = 2¹³² bytes. Consequently, a
|
||||
//! generator is always bounded to a maximal index. That is, there is always a max amount of
|
||||
//! elements of this array that can be outputted by the generator. By default, generators created
|
||||
//! via [`new`](generators::RandomGenerator::new) are always bounded to M-1.
|
||||
//!
|
||||
//! Tree partition of the pseudo-random stream
|
||||
//! ==========================================
|
||||
//!
|
||||
//! One particularity of this implementation is that you can use the
|
||||
//! [`try_fork`](generators::RandomGenerator::try_fork) method to create an arbitrary partition tree
|
||||
//! of a region of this array. Indeed, calling `try_fork(nc, nb)` outputs `nc` new generators, each
|
||||
//! able to output `nb` bytes. The `try_fork` method ensures that the states and bounds of the
|
||||
//! parent and children generators are set so as to prevent the same substream to be outputted
|
||||
//! twice:
|
||||
//! ```ascii
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃P│P│P│P│P│P│P│P│P│P│...│P┃ │
|
||||
//! ┗↥┷━┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
|
||||
//! p │
|
||||
//! │
|
||||
//! (a,b) = p.fork(2,4) │
|
||||
//! │
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃A│A│A│A│B│B│B│B│P│P│...│P┃ │
|
||||
//! ┗↥┷━┷━┷━┷↥┷━┷━┷━┷↥┷━┷━━━┷━┛ │
|
||||
//! a b p │
|
||||
//! │ legend:
|
||||
//! (c,d) = b.fork(2, 1) │ -------
|
||||
//! │ ↥ : next byte to be outputted by p
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │ │P│: byte to be outputted by p
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │╳│: byte already outputted
|
||||
//! ┃A│A│A│A│C│D│B│B│P│P│...│P┃ │
|
||||
//! ┗↥┷━┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
|
||||
//! a c d b p 🭭
|
||||
//! ```
|
||||
//!
|
||||
//! This makes it possible to consume the stream at different places. This is particularly useful in
|
||||
//! a multithreaded setting, in which we want to use the same generator from different independent
|
||||
//! threads:
|
||||
//!
|
||||
//! ```ascii
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃A│A│A│A│C│D│B│B│P│P│...│P┃ │
|
||||
//! ┗↥┷━┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
|
||||
//! a c d b p │
|
||||
//! │
|
||||
//! a.next_byte() │
|
||||
//! │
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃╳│A│A│A│C│D│B│B│P│P│...│P┃ │
|
||||
//! ┗━┷↥┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
|
||||
//! a c d b p │
|
||||
//! │ legend:
|
||||
//! b.next_byte() │ -------
|
||||
//! │ ↥ : next byte to be outputted by p
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │ │P│: byte to be outputted by p
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │╳│: byte already outputted
|
||||
//! ┃╳│A│A│A│C│D│╳│B│P│P│...│P┃ │
|
||||
//! ┗━┷↥┷━┷━┷↥┷↥┷━┷↥┷↥┷━┷━━━┷━┛ │
|
||||
//! a c d b p 🭭
|
||||
//! ```
|
||||
//!
|
||||
//! Implementation
|
||||
//! ==============
|
||||
//!
|
||||
//! The implementation is based on the AES blockcipher used in counter (CTR) mode, as presented
|
||||
//! in the ISO/IEC 18033-4 document.
|
||||
pub mod generators;
|
||||
pub mod seeders;
|
||||
@@ -0,0 +1,141 @@
|
||||
use crate::seeders::{Seed, Seeder};
|
||||
use libc;
|
||||
use std::cmp::Ordering;
|
||||
|
||||
/// There is no `rseed` equivalent in the ARM specification until `ARMv8.5-A`.
|
||||
/// However it seems that these instructions are not exposed in `core::arch::aarch64`.
|
||||
///
|
||||
/// Our primary interest for supporting aarch64 targets is AppleSilicon support
|
||||
/// which for the M1 macs available, they are based on the `ARMv8.4-A` set.
|
||||
///
|
||||
/// So we fall back to using a function from Apple's API which
|
||||
/// uses the [Secure Enclave] to generate cryptographically secure random bytes.
|
||||
///
|
||||
/// [Secure Enclave]: https://support.apple.com/fr-fr/guide/security/sec59b0b31ff/web
|
||||
mod secure_enclave {
|
||||
pub enum __SecRandom {}
|
||||
pub type SecRandomRef = *const __SecRandom;
|
||||
use libc::{c_int, c_void};
|
||||
|
||||
#[link(name = "Security", kind = "framework")]
|
||||
extern "C" {
|
||||
pub static kSecRandomDefault: SecRandomRef;
|
||||
|
||||
pub fn SecRandomCopyBytes(rnd: SecRandomRef, count: usize, bytes: *mut c_void) -> c_int;
|
||||
}
|
||||
|
||||
pub fn generate_random_bytes(bytes: &mut [u8]) -> std::io::Result<()> {
|
||||
// As per Apple's documentation:
|
||||
// - https://developer.apple.com/documentation/security/randomization_services?language=objc
|
||||
// - https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc
|
||||
//
|
||||
// The `SecRandomCopyBytes` "Generate cryptographically secure random numbers"
|
||||
unsafe {
|
||||
let res = SecRandomCopyBytes(
|
||||
kSecRandomDefault,
|
||||
bytes.len(),
|
||||
bytes.as_mut_ptr() as *mut c_void,
|
||||
);
|
||||
if res != 0 {
|
||||
Err(std::io::Error::last_os_error())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A seeder which uses the `SecRandomCopyBytes` function from Apple's `Security` framework.
|
||||
///
|
||||
/// <https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc>
|
||||
pub struct AppleSecureEnclaveSeeder;
|
||||
|
||||
impl Seeder for AppleSecureEnclaveSeeder {
|
||||
fn seed(&mut self) -> Seed {
|
||||
// 16 bytes == 128 bits
|
||||
let mut bytes = [0u8; 16];
|
||||
secure_enclave::generate_random_bytes(&mut bytes)
|
||||
.expect("Failure while using Apple secure enclave: {err:?}");
|
||||
|
||||
Seed(u128::from_le_bytes(bytes))
|
||||
}
|
||||
|
||||
fn is_available() -> bool {
|
||||
let os_version_sysctl_name = match std::ffi::CString::new("kern.osproductversion") {
|
||||
Ok(c_str) => c_str,
|
||||
_ => return false,
|
||||
};
|
||||
|
||||
// Big enough buffer to get a version output as an ASCII string
|
||||
const OUTPUT_BUFFER_SIZE: usize = 64;
|
||||
let mut output_buffer_size = OUTPUT_BUFFER_SIZE;
|
||||
let mut output_buffer = [0u8; OUTPUT_BUFFER_SIZE];
|
||||
let res = unsafe {
|
||||
libc::sysctlbyname(
|
||||
os_version_sysctl_name.as_ptr() as *const _ as *const _,
|
||||
&mut output_buffer as *mut _ as *mut _,
|
||||
&mut output_buffer_size as *mut _ as *mut _,
|
||||
std::ptr::null_mut(),
|
||||
0,
|
||||
)
|
||||
};
|
||||
|
||||
if res != 0 {
|
||||
return false;
|
||||
}
|
||||
|
||||
let result_c_str =
|
||||
match std::ffi::CStr::from_bytes_with_nul(&output_buffer[..output_buffer_size]) {
|
||||
Ok(c_str) => c_str,
|
||||
_ => return false,
|
||||
};
|
||||
|
||||
let result_string = match result_c_str.to_str() {
|
||||
Ok(str) => str,
|
||||
_ => return false,
|
||||
};
|
||||
|
||||
// Normally we get a major version and minor version
|
||||
let split_string: Vec<&str> = result_string.split('.').collect();
|
||||
|
||||
let mut major = -1;
|
||||
let mut minor = -1;
|
||||
|
||||
// Major part of the version string
|
||||
if !split_string.is_empty() {
|
||||
major = match split_string[0].parse() {
|
||||
Ok(major_from_str) => major_from_str,
|
||||
_ => return false,
|
||||
};
|
||||
}
|
||||
|
||||
// SecRandomCopyBytes is available starting with mac OS 10.7
|
||||
// https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc
|
||||
// This match pattern is recommended by clippy, so we oblige here
|
||||
match major.cmp(&10) {
|
||||
Ordering::Greater => true,
|
||||
Ordering::Equal => {
|
||||
// Minor part of the version string
|
||||
if split_string.len() >= 2 {
|
||||
minor = match split_string[1].parse() {
|
||||
Ok(minor_from_str) => minor_from_str,
|
||||
_ => return false,
|
||||
};
|
||||
}
|
||||
minor >= 7
|
||||
}
|
||||
Ordering::Less => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
|
||||
|
||||
#[test]
|
||||
fn check_bounded_sequence_difference() {
|
||||
check_seeder_fixed_sequences_different(|_| AppleSecureEnclaveSeeder);
|
||||
}
|
||||
}
|
||||
14
concrete-csprng/src/seeders/implem/mod.rs
Normal file
14
concrete-csprng/src/seeders/implem/mod.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
#[cfg(target_os = "macos")]
|
||||
mod apple_secure_enclave_seeder;
|
||||
#[cfg(target_os = "macos")]
|
||||
pub use apple_secure_enclave_seeder::AppleSecureEnclaveSeeder;
|
||||
|
||||
#[cfg(feature = "seeder_x86_64_rdseed")]
|
||||
mod rdseed;
|
||||
#[cfg(feature = "seeder_x86_64_rdseed")]
|
||||
pub use rdseed::RdseedSeeder;
|
||||
|
||||
#[cfg(feature = "seeder_unix")]
|
||||
mod unix;
|
||||
#[cfg(feature = "seeder_unix")]
|
||||
pub use unix::UnixSeeder;
|
||||
51
concrete-csprng/src/seeders/implem/rdseed.rs
Normal file
51
concrete-csprng/src/seeders/implem/rdseed.rs
Normal file
@@ -0,0 +1,51 @@
|
||||
use crate::seeders::{Seed, Seeder};
|
||||
|
||||
/// A seeder which uses the `rdseed` x86_64 instruction.
|
||||
///
|
||||
/// The `rdseed` instruction allows to deliver seeds from a hardware source of entropy see
|
||||
/// <https://www.felixcloutier.com/x86/rdseed> .
|
||||
pub struct RdseedSeeder;
|
||||
|
||||
impl Seeder for RdseedSeeder {
|
||||
fn seed(&mut self) -> Seed {
|
||||
Seed(unsafe { rdseed_random_m128() })
|
||||
}
|
||||
|
||||
fn is_available() -> bool {
|
||||
is_x86_feature_detected!("rdseed")
|
||||
}
|
||||
}
|
||||
|
||||
// Generates a random 128 bits value from rdseed
|
||||
#[target_feature(enable = "rdseed")]
|
||||
unsafe fn rdseed_random_m128() -> u128 {
|
||||
let mut rand1: u64 = 0;
|
||||
let mut rand2: u64 = 0;
|
||||
let mut output_bytes = [0u8; 16];
|
||||
unsafe {
|
||||
loop {
|
||||
if core::arch::x86_64::_rdseed64_step(&mut rand1) == 1 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
loop {
|
||||
if core::arch::x86_64::_rdseed64_step(&mut rand2) == 1 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
output_bytes[0..8].copy_from_slice(&rand1.to_ne_bytes());
|
||||
output_bytes[8..16].copy_from_slice(&rand2.to_ne_bytes());
|
||||
u128::from_ne_bytes(output_bytes)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
|
||||
|
||||
#[test]
|
||||
fn check_bounded_sequence_difference() {
|
||||
check_seeder_fixed_sequences_different(|_| RdseedSeeder);
|
||||
}
|
||||
}
|
||||
72
concrete-csprng/src/seeders/implem/unix.rs
Normal file
72
concrete-csprng/src/seeders/implem/unix.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
use crate::seeders::{Seed, Seeder};
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
|
||||
/// A seeder which uses the `/dev/random` source on unix-like systems.
|
||||
pub struct UnixSeeder {
|
||||
counter: u128,
|
||||
secret: u128,
|
||||
file: File,
|
||||
}
|
||||
|
||||
impl UnixSeeder {
|
||||
/// Creates a new seeder from a user defined secret.
|
||||
///
|
||||
/// Important:
|
||||
/// ----------
|
||||
///
|
||||
/// This secret is used to ensure the quality of the seed in scenarios where `/dev/random` may
|
||||
/// be compromised.
|
||||
///
|
||||
/// The attack hypotheses are as follow:
|
||||
/// - `/dev/random` output can be predicted by a process running on the machine by just
|
||||
/// observing various states of the machine
|
||||
/// - The attacker cannot read data from the process where `concrete-csprng` is running
|
||||
///
|
||||
/// Using a secret in `concrete-csprng` allows to generate values that the attacker cannot
|
||||
/// predict, making this seeder secure on systems were `/dev/random` outputs can be
|
||||
/// predicted.
|
||||
pub fn new(secret: u128) -> UnixSeeder {
|
||||
let file = std::fs::File::open("/dev/random").expect("Failed to open /dev/random .");
|
||||
let counter = std::time::UNIX_EPOCH
|
||||
.elapsed()
|
||||
.expect("Failed to initialize unix seeder.")
|
||||
.as_nanos();
|
||||
UnixSeeder {
|
||||
secret,
|
||||
counter,
|
||||
file,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Seeder for UnixSeeder {
|
||||
fn seed(&mut self) -> Seed {
|
||||
let output = self.secret ^ self.counter ^ dev_random(&mut self.file);
|
||||
self.counter = self.counter.wrapping_add(1);
|
||||
Seed(output)
|
||||
}
|
||||
|
||||
fn is_available() -> bool {
|
||||
cfg!(target_family = "unix")
|
||||
}
|
||||
}
|
||||
|
||||
fn dev_random(random: &mut File) -> u128 {
|
||||
let mut buf = [0u8; 16];
|
||||
random
|
||||
.read_exact(&mut buf[..])
|
||||
.expect("Failed to read from /dev/random .");
|
||||
u128::from_ne_bytes(buf)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
|
||||
|
||||
#[test]
|
||||
fn check_bounded_sequence_difference() {
|
||||
check_seeder_fixed_sequences_different(UnixSeeder::new);
|
||||
}
|
||||
}
|
||||
47
concrete-csprng/src/seeders/mod.rs
Normal file
47
concrete-csprng/src/seeders/mod.rs
Normal file
@@ -0,0 +1,47 @@
|
||||
//! A module containing seeders objects.
|
||||
//!
|
||||
//! When initializing a generator, one needs to provide a [`Seed`], which is then used as key to the
|
||||
//! AES blockcipher. As a consequence, the quality of the outputs of the generator is directly
|
||||
//! conditioned by the quality of this seed. This module proposes different mechanisms to deliver
|
||||
//! seeds that can accommodate varying scenarios.
|
||||
|
||||
/// A seed value, used to initialize a generator.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub struct Seed(pub u128);
|
||||
|
||||
/// A trait representing a seeding strategy.
|
||||
pub trait Seeder {
|
||||
/// Generates a new seed.
|
||||
fn seed(&mut self) -> Seed;
|
||||
|
||||
/// Check whether the seeder can be used on the current machine. This function may check if some
|
||||
/// required CPU features are available or if some OS features are available for example.
|
||||
fn is_available() -> bool
|
||||
where
|
||||
Self: Sized;
|
||||
}
|
||||
|
||||
mod implem;
|
||||
pub use implem::*;
|
||||
|
||||
#[cfg(test)]
|
||||
mod generic_tests {
|
||||
use crate::seeders::Seeder;
|
||||
|
||||
/// Naively verifies that two fixed-size sequences generated by repeatedly calling the seeder
|
||||
/// are different.
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
pub fn check_seeder_fixed_sequences_different<S: Seeder, F: Fn(u128) -> S>(
|
||||
construct_seeder: F,
|
||||
) {
|
||||
const SEQUENCE_SIZE: usize = 500;
|
||||
const REPEATS: usize = 10_000;
|
||||
for i in 0..REPEATS {
|
||||
let mut seeder = construct_seeder(i as u128);
|
||||
let orig_seed = seeder.seed();
|
||||
for _ in 0..SEQUENCE_SIZE {
|
||||
assert_ne!(seeder.seed(), orig_seed);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -34,6 +34,6 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs > install-rustup.s
|
||||
chmod +x install-node.sh && \
|
||||
./install-node.sh && \
|
||||
. "$HOME/.nvm/nvm.sh" && \
|
||||
bash -i -c 'nvm install node && nvm use node'
|
||||
bash -i -c 'nvm install 20 && nvm use 20'
|
||||
|
||||
WORKDIR /tfhe-wasm-tests/tfhe-rs/
|
||||
|
||||
77
scripts/dieharder_test.sh
Executable file
77
scripts/dieharder_test.sh
Executable file
@@ -0,0 +1,77 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# dieharder does not support running a subset of its tests, so we'll check which ones are not good
|
||||
# and ignore the output from those tests in the final log
|
||||
|
||||
set -e
|
||||
|
||||
DIEHARDER_RUN_LOG_FILE="dieharder_run.log"
|
||||
|
||||
bad_tests="$(dieharder -l | \
|
||||
# select lines with the -d
|
||||
grep -w '\-d' | \
|
||||
# forget about the good tests
|
||||
grep -v -i 'good' | \
|
||||
# get the test id
|
||||
cut -d ' ' -f 4 | \
|
||||
# nice formatting
|
||||
xargs)"
|
||||
|
||||
|
||||
bad_test_filter=""
|
||||
for bad_test in ${bad_tests}; do
|
||||
bad_test_filter="${bad_test_filter:+${bad_test_filter}|}$(dieharder -d "${bad_test}" -t 1 -p 1 -D test_name | xargs)"
|
||||
done
|
||||
|
||||
echo "The following tests will be ignored as they are marked as either 'suspect' or 'do not use': "
|
||||
echo ""
|
||||
echo "${bad_test_filter}"
|
||||
echo ""
|
||||
|
||||
# by default we may have no pv just forward the input
|
||||
pv="cat"
|
||||
if which pv > /dev/null; then
|
||||
pv="pv -t -a -b"
|
||||
fi
|
||||
|
||||
rm -f "${DIEHARDER_RUN_LOG_FILE}"
|
||||
|
||||
# ignore potential errors and parse the log afterwards
|
||||
set +e
|
||||
|
||||
# We are writing in both cases
|
||||
# shellcheck disable=SC2094
|
||||
./target/release/examples/generate 2>"${DIEHARDER_RUN_LOG_FILE}" | \
|
||||
$pv | \
|
||||
# -a: all tests
|
||||
# -g 200: get random bytes from input
|
||||
# -Y 1: disambiguate results, i.e. if a weak result appear check if it's a random failure/weakness
|
||||
# -k 2: better maths formulas to determine some test statistics
|
||||
dieharder -a -g 200 -Y 1 -k 2 | \
|
||||
tee -a "${DIEHARDER_RUN_LOG_FILE}"
|
||||
set -e
|
||||
|
||||
printf "\n\n"
|
||||
|
||||
cat "${DIEHARDER_RUN_LOG_FILE}"
|
||||
|
||||
if ! grep -q -i "failed" < "${DIEHARDER_RUN_LOG_FILE}"; then
|
||||
echo "All tests passed!"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
printf "\n\n"
|
||||
|
||||
failed_tests="$(grep -i "failed" < "${DIEHARDER_RUN_LOG_FILE}")"
|
||||
true_failed_test="$(grep -i "failed" < "${DIEHARDER_RUN_LOG_FILE}" | { grep -v -E "${bad_test_filter}" || true; } | sed -z '$ s/\n$//')"
|
||||
|
||||
if [[ "${true_failed_test}" == "" ]]; then
|
||||
echo "There were test failures, but the tests were either marked as 'suspect' or 'do not use'"
|
||||
echo "${failed_tests}"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "The following tests failed:"
|
||||
echo "${true_failed_test}"
|
||||
|
||||
exit 1
|
||||
@@ -9,6 +9,7 @@ function usage() {
|
||||
echo "--rust-toolchain The toolchain to run the tests with default: stable"
|
||||
echo "--multi-bit Run multi-bit tests only: default off"
|
||||
echo "--cargo-profile The cargo profile used to build tests"
|
||||
echo "--avx512-support Set to ON to enable avx512"
|
||||
echo
|
||||
}
|
||||
|
||||
@@ -16,6 +17,7 @@ RUST_TOOLCHAIN="+stable"
|
||||
multi_bit=""
|
||||
not_multi_bit="_multi_bit"
|
||||
cargo_profile="release"
|
||||
avx512_feature=""
|
||||
|
||||
while [ -n "$1" ]
|
||||
do
|
||||
@@ -40,6 +42,13 @@ do
|
||||
cargo_profile="$1"
|
||||
;;
|
||||
|
||||
"--avx512-support" )
|
||||
shift
|
||||
if [[ "$1" == "ON" ]]; then
|
||||
avx512_feature=nightly-avx512
|
||||
fi
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Unknown param : $1"
|
||||
exit 1
|
||||
@@ -104,7 +113,7 @@ and not test(/.*default_add_sequence_multi_thread_param_message_3_carry_3_ks_pbs
|
||||
--cargo-profile "${cargo_profile}" \
|
||||
--package tfhe \
|
||||
--profile ci \
|
||||
--features="${ARCH_FEATURE}",integer,internal-keycache \
|
||||
--features="${ARCH_FEATURE}",integer,internal-keycache,"${avx512_feature}" \
|
||||
--test-threads "${n_threads}" \
|
||||
-E "$filter_expression"
|
||||
|
||||
@@ -112,7 +121,7 @@ and not test(/.*default_add_sequence_multi_thread_param_message_3_carry_3_ks_pbs
|
||||
cargo "${RUST_TOOLCHAIN}" test \
|
||||
--profile "${cargo_profile}" \
|
||||
--package tfhe \
|
||||
--features="${ARCH_FEATURE}",integer,internal-keycache \
|
||||
--features="${ARCH_FEATURE}",integer,internal-keycache,"${avx512_feature}" \
|
||||
--doc \
|
||||
-- integer::
|
||||
fi
|
||||
@@ -142,13 +151,13 @@ and not test(/.*default_add_sequence_multi_thread_param_message_3_carry_3_ks_pbs
|
||||
fi
|
||||
|
||||
num_cpu_threads="$(${nproc_bin})"
|
||||
num_threads=$((num_cpu_threads * 2 / 3))
|
||||
num_threads=$((num_cpu_threads * 1 / 2))
|
||||
cargo "${RUST_TOOLCHAIN}" nextest run \
|
||||
--tests \
|
||||
--cargo-profile "${cargo_profile}" \
|
||||
--package tfhe \
|
||||
--profile ci \
|
||||
--features="${ARCH_FEATURE}",integer,internal-keycache \
|
||||
--features="${ARCH_FEATURE}",integer,internal-keycache,"${avx512_feature}" \
|
||||
--test-threads $num_threads \
|
||||
-E "$filter_expression"
|
||||
|
||||
@@ -156,7 +165,7 @@ and not test(/.*default_add_sequence_multi_thread_param_message_3_carry_3_ks_pbs
|
||||
cargo "${RUST_TOOLCHAIN}" test \
|
||||
--profile "${cargo_profile}" \
|
||||
--package tfhe \
|
||||
--features="${ARCH_FEATURE}",integer,internal-keycache \
|
||||
--features="${ARCH_FEATURE}",integer,internal-keycache,"${avx512_feature}" \
|
||||
--doc \
|
||||
-- --test-threads="$(${nproc_bin})" integer::
|
||||
fi
|
||||
|
||||
@@ -6,7 +6,7 @@ edition = "2021"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
clap = "3.1"
|
||||
clap = "=4.4.4"
|
||||
lazy_static = "1.4"
|
||||
log = "0.4"
|
||||
simplelog = "0.12"
|
||||
|
||||
2
tfhe/.gitignore
vendored
2
tfhe/.gitignore
vendored
@@ -1 +1 @@
|
||||
build/
|
||||
build/
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "tfhe"
|
||||
version = "0.3.0"
|
||||
version = "0.4.0"
|
||||
edition = "2021"
|
||||
readme = "../README.md"
|
||||
keywords = ["fully", "homomorphic", "encryption", "fhe", "cryptography"]
|
||||
@@ -17,7 +17,7 @@ exclude = [
|
||||
"/js_on_wasm_tests/",
|
||||
"/web_wasm_parallel_tests/",
|
||||
]
|
||||
rust-version = "1.67"
|
||||
rust-version = "1.72"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
@@ -26,27 +26,30 @@ rand = "0.8.5"
|
||||
rand_distr = "0.4.3"
|
||||
paste = "1.0.7"
|
||||
lazy_static = { version = "1.4.0" }
|
||||
criterion = "0.4.0"
|
||||
criterion = "0.5.1"
|
||||
doc-comment = "0.3.3"
|
||||
serde_json = "1.0.94"
|
||||
clap = { version = "4.2.7", features = ["derive"] }
|
||||
# clap has to be pinned as its minimum supported rust version
|
||||
# changes often between minor releases, which breaks our CI
|
||||
clap = { version = "=4.4.4", features = ["derive"] }
|
||||
# Used in user documentation
|
||||
bincode = "1.3.3"
|
||||
fs2 = { version = "0.4.3" }
|
||||
itertools = "0.10.5"
|
||||
num_cpus = "1.15"
|
||||
itertools = "0.11.0"
|
||||
# For erf and normality test
|
||||
libm = "0.2.6"
|
||||
# Begin regex-engine deps
|
||||
test-case = "3.1.0"
|
||||
combine = "4.6.6"
|
||||
env_logger = "0.10.0"
|
||||
log = "0.4.19"
|
||||
# End regex-engine deps
|
||||
|
||||
[build-dependencies]
|
||||
cbindgen = { version = "0.24.3", optional = true }
|
||||
cbindgen = { version = "0.26.0", optional = true }
|
||||
|
||||
[dependencies]
|
||||
concrete-csprng = { version = "0.3.0", features = [
|
||||
concrete-csprng = { version = "0.4.0", path= "../concrete-csprng", features = [
|
||||
"generator_fallback",
|
||||
"parallel",
|
||||
] }
|
||||
@@ -54,15 +57,14 @@ lazy_static = { version = "1.4.0", optional = true }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
rayon = { version = "1.5.0" }
|
||||
bincode = { version = "1.3.3", optional = true }
|
||||
concrete-fft = { version = "0.2.1", features = ["serde", "fft128"] }
|
||||
pulp = "0.11"
|
||||
concrete-fft = { version = "0.3.0", features = ["serde", "fft128"] }
|
||||
pulp = "0.13"
|
||||
aligned-vec = { version = "0.5", features = ["serde"] }
|
||||
dyn-stack = { version = "0.9" }
|
||||
once_cell = "1.13"
|
||||
paste = "1.0.7"
|
||||
paste = { version = "1.0.7", optional = true }
|
||||
fs2 = { version = "0.4.3", optional = true }
|
||||
# While we wait for repeat_n in rust standard library
|
||||
itertools = "0.10.5"
|
||||
itertools = "0.11.0"
|
||||
|
||||
# wasm deps
|
||||
wasm-bindgen = { version = "0.2.86", features = [
|
||||
@@ -71,22 +73,24 @@ wasm-bindgen = { version = "0.2.86", features = [
|
||||
wasm-bindgen-rayon = { version = "1.0", optional = true }
|
||||
js-sys = { version = "0.3", optional = true }
|
||||
console_error_panic_hook = { version = "0.1.7", optional = true }
|
||||
serde-wasm-bindgen = { version = "0.4", optional = true }
|
||||
serde-wasm-bindgen = { version = "0.6.0", optional = true }
|
||||
getrandom = { version = "0.2.8", optional = true }
|
||||
bytemuck = "1.13.1"
|
||||
|
||||
[features]
|
||||
boolean = []
|
||||
shortint = []
|
||||
integer = ["shortint"]
|
||||
internal-keycache = ["lazy_static", "fs2", "bincode"]
|
||||
# paste is used by the HL API
|
||||
boolean = ["dep:paste"]
|
||||
shortint = ["dep:paste"]
|
||||
integer = ["shortint", "dep:paste"]
|
||||
internal-keycache = ["lazy_static", "dep:fs2", "dep:bincode", "dep:paste"]
|
||||
safe-deserialization = ["dep:bincode"]
|
||||
|
||||
# Experimental section
|
||||
experimental = []
|
||||
experimental-force_fft_algo_dif4 = []
|
||||
# End experimental section
|
||||
|
||||
__c_api = ["cbindgen", "bincode"]
|
||||
__c_api = ["cbindgen", "dep:bincode", "dep:paste"]
|
||||
boolean-c-api = ["boolean", "__c_api"]
|
||||
shortint-c-api = ["shortint", "__c_api"]
|
||||
high-level-c-api = ["boolean-c-api", "shortint-c-api", "integer", "__c_api"]
|
||||
@@ -98,7 +102,8 @@ __wasm_api = [
|
||||
"serde-wasm-bindgen",
|
||||
"getrandom",
|
||||
"getrandom/js",
|
||||
"bincode",
|
||||
"dep:bincode",
|
||||
"safe-deserialization",
|
||||
]
|
||||
boolean-client-js-wasm-api = ["boolean", "__wasm_api"]
|
||||
shortint-client-js-wasm-api = ["shortint", "__wasm_api"]
|
||||
@@ -118,6 +123,7 @@ generator_aarch64_aes = ["concrete-csprng/generator_aarch64_aes"]
|
||||
|
||||
# Private features
|
||||
__profiling = []
|
||||
__coverage = []
|
||||
|
||||
seeder_unix = ["concrete-csprng/seeder_unix"]
|
||||
seeder_x86_64_rdseed = ["concrete-csprng/seeder_x86_64_rdseed"]
|
||||
@@ -178,6 +184,12 @@ path = "benches/integer/bench.rs"
|
||||
harness = false
|
||||
required-features = ["integer", "internal-keycache"]
|
||||
|
||||
[[bench]]
|
||||
name = "integer-signed-bench"
|
||||
path = "benches/integer/signed_bench.rs"
|
||||
harness = false
|
||||
required-features = ["integer", "internal-keycache"]
|
||||
|
||||
[[bench]]
|
||||
name = "keygen"
|
||||
path = "benches/keygen/bench.rs"
|
||||
@@ -222,6 +234,11 @@ name = "micro_bench_and"
|
||||
path = "examples/utilities/micro_bench_and.rs"
|
||||
required-features = ["boolean"]
|
||||
|
||||
[[example]]
|
||||
name = "write_params_to_file"
|
||||
path = "examples/utilities/params_to_file.rs"
|
||||
required-features = ["boolean", "shortint", "internal-keycache"]
|
||||
|
||||
# Real use-case examples
|
||||
|
||||
[[example]]
|
||||
|
||||
@@ -4,14 +4,21 @@ use crate::utilities::{write_to_json, CryptoParametersRecord, OperatorType};
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use tfhe::boolean::client_key::ClientKey;
|
||||
use tfhe::boolean::parameters::{BooleanParameters, DEFAULT_PARAMETERS, TFHE_LIB_PARAMETERS};
|
||||
use tfhe::boolean::parameters::{
|
||||
BooleanParameters, DEFAULT_PARAMETERS, DEFAULT_PARAMETERS_KS_PBS,
|
||||
PARAMETERS_ERROR_PROB_2_POW_MINUS_165, PARAMETERS_ERROR_PROB_2_POW_MINUS_165_KS_PBS,
|
||||
TFHE_LIB_PARAMETERS,
|
||||
};
|
||||
use tfhe::boolean::prelude::BinaryBooleanGates;
|
||||
use tfhe::boolean::server_key::ServerKey;
|
||||
|
||||
criterion_group!(
|
||||
gates_benches,
|
||||
bench_default_parameters,
|
||||
bench_tfhe_lib_parameters
|
||||
bench_default_parameters_ks_pbs,
|
||||
bench_low_prob_parameters,
|
||||
bench_low_prob_parameters_ks_pbs,
|
||||
bench_tfhe_lib_parameters,
|
||||
);
|
||||
|
||||
criterion_main!(gates_benches);
|
||||
@@ -36,7 +43,7 @@ pub fn write_to_json_boolean<T: Into<CryptoParametersRecord<u32>>>(
|
||||
|
||||
// Put all `bench_function` in one place
|
||||
// so the keygen is only run once per parameters saving time.
|
||||
fn benchs(c: &mut Criterion, params: BooleanParameters, parameter_name: &str) {
|
||||
fn benches(c: &mut Criterion, params: BooleanParameters, parameter_name: &str) {
|
||||
let mut bench_group = c.benchmark_group("gates_benches");
|
||||
|
||||
let cks = ClientKey::new(¶ms);
|
||||
@@ -76,9 +83,29 @@ fn benchs(c: &mut Criterion, params: BooleanParameters, parameter_name: &str) {
|
||||
}
|
||||
|
||||
fn bench_default_parameters(c: &mut Criterion) {
|
||||
benchs(c, DEFAULT_PARAMETERS, "DEFAULT_PARAMETERS");
|
||||
benches(c, DEFAULT_PARAMETERS, "DEFAULT_PARAMETERS");
|
||||
}
|
||||
|
||||
fn bench_default_parameters_ks_pbs(c: &mut Criterion) {
|
||||
benches(c, DEFAULT_PARAMETERS_KS_PBS, "DEFAULT_PARAMETERS_KS_PBS");
|
||||
}
|
||||
|
||||
fn bench_low_prob_parameters(c: &mut Criterion) {
|
||||
benches(
|
||||
c,
|
||||
PARAMETERS_ERROR_PROB_2_POW_MINUS_165,
|
||||
"PARAMETERS_ERROR_PROB_2_POW_MINUS_165",
|
||||
);
|
||||
}
|
||||
|
||||
fn bench_low_prob_parameters_ks_pbs(c: &mut Criterion) {
|
||||
benches(
|
||||
c,
|
||||
PARAMETERS_ERROR_PROB_2_POW_MINUS_165_KS_PBS,
|
||||
"PARAMETERS_ERROR_PROB_2_POW_MINUS_165_KS_PBS",
|
||||
);
|
||||
}
|
||||
|
||||
fn bench_tfhe_lib_parameters(c: &mut Criterion) {
|
||||
benchs(c, TFHE_LIB_PARAMETERS, "TFHE_LIB_PARAMETERS");
|
||||
benches(c, TFHE_LIB_PARAMETERS, " TFHE_LIB_PARAMETERS");
|
||||
}
|
||||
|
||||
@@ -5,13 +5,15 @@ use rayon::prelude::*;
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use serde::Serialize;
|
||||
use tfhe::boolean::parameters::{BooleanParameters, DEFAULT_PARAMETERS, TFHE_LIB_PARAMETERS};
|
||||
use tfhe::boolean::parameters::{
|
||||
BooleanParameters, DEFAULT_PARAMETERS, PARAMETERS_ERROR_PROB_2_POW_MINUS_165,
|
||||
};
|
||||
use tfhe::core_crypto::prelude::*;
|
||||
use tfhe::shortint::keycache::NamedParam;
|
||||
use tfhe::keycache::NamedParam;
|
||||
use tfhe::shortint::parameters::*;
|
||||
use tfhe::shortint::ClassicPBSParameters;
|
||||
|
||||
const SHORTINT_BENCH_PARAMS: [ClassicPBSParameters; 15] = [
|
||||
const SHORTINT_BENCH_PARAMS: [ClassicPBSParameters; 19] = [
|
||||
PARAM_MESSAGE_1_CARRY_0_KS_PBS,
|
||||
PARAM_MESSAGE_1_CARRY_1_KS_PBS,
|
||||
PARAM_MESSAGE_2_CARRY_0_KS_PBS,
|
||||
@@ -27,11 +29,18 @@ const SHORTINT_BENCH_PARAMS: [ClassicPBSParameters; 15] = [
|
||||
PARAM_MESSAGE_6_CARRY_0_KS_PBS,
|
||||
PARAM_MESSAGE_7_CARRY_0_KS_PBS,
|
||||
PARAM_MESSAGE_8_CARRY_0_KS_PBS,
|
||||
PARAM_MESSAGE_1_CARRY_1_PBS_KS,
|
||||
PARAM_MESSAGE_2_CARRY_2_PBS_KS,
|
||||
PARAM_MESSAGE_3_CARRY_3_PBS_KS,
|
||||
PARAM_MESSAGE_4_CARRY_4_PBS_KS,
|
||||
];
|
||||
|
||||
const BOOLEAN_BENCH_PARAMS: [(&str, BooleanParameters); 2] = [
|
||||
("BOOLEAN_DEFAULT_PARAMS", DEFAULT_PARAMETERS),
|
||||
("BOOLEAN_TFHE_LIB_PARAMS", TFHE_LIB_PARAMETERS),
|
||||
(
|
||||
"BOOLEAN_TFHE_LIB_PARAMS",
|
||||
PARAMETERS_ERROR_PROB_2_POW_MINUS_165,
|
||||
),
|
||||
];
|
||||
|
||||
criterion_group!(
|
||||
@@ -57,8 +66,8 @@ criterion_group!(
|
||||
|
||||
criterion_main!(pbs_group, multi_bit_pbs_group, pbs_throughput_group);
|
||||
|
||||
fn benchmark_parameters<Scalar: UnsignedInteger>(
|
||||
) -> Vec<(&'static str, CryptoParametersRecord<Scalar>)> {
|
||||
fn benchmark_parameters<Scalar: UnsignedInteger>() -> Vec<(String, CryptoParametersRecord<Scalar>)>
|
||||
{
|
||||
if Scalar::BITS == 64 {
|
||||
SHORTINT_BENCH_PARAMS
|
||||
.iter()
|
||||
@@ -74,7 +83,7 @@ fn benchmark_parameters<Scalar: UnsignedInteger>(
|
||||
} else if Scalar::BITS == 32 {
|
||||
BOOLEAN_BENCH_PARAMS
|
||||
.iter()
|
||||
.map(|(name, params)| (*name, params.to_owned().into()))
|
||||
.map(|(name, params)| (name.to_string(), params.to_owned().into()))
|
||||
.collect()
|
||||
} else {
|
||||
vec![]
|
||||
@@ -82,7 +91,7 @@ fn benchmark_parameters<Scalar: UnsignedInteger>(
|
||||
}
|
||||
|
||||
fn throughput_benchmark_parameters<Scalar: UnsignedInteger>(
|
||||
) -> Vec<(&'static str, CryptoParametersRecord<Scalar>)> {
|
||||
) -> Vec<(String, CryptoParametersRecord<Scalar>)> {
|
||||
if Scalar::BITS == 64 {
|
||||
vec![
|
||||
PARAM_MESSAGE_1_CARRY_1_KS_PBS,
|
||||
@@ -102,18 +111,15 @@ fn throughput_benchmark_parameters<Scalar: UnsignedInteger>(
|
||||
} else if Scalar::BITS == 32 {
|
||||
BOOLEAN_BENCH_PARAMS
|
||||
.iter()
|
||||
.map(|(name, params)| (*name, params.to_owned().into()))
|
||||
.map(|(name, params)| (name.to_string(), params.to_owned().into()))
|
||||
.collect()
|
||||
} else {
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
|
||||
fn multi_bit_benchmark_parameters<Scalar: UnsignedInteger + Default>() -> Vec<(
|
||||
&'static str,
|
||||
CryptoParametersRecord<Scalar>,
|
||||
LweBskGroupingFactor,
|
||||
)> {
|
||||
fn multi_bit_benchmark_parameters<Scalar: UnsignedInteger + Default>(
|
||||
) -> Vec<(String, CryptoParametersRecord<Scalar>, LweBskGroupingFactor)> {
|
||||
if Scalar::BITS == 64 {
|
||||
vec![
|
||||
PARAM_MULTI_BIT_MESSAGE_1_CARRY_1_GROUP_2_KS_PBS,
|
||||
@@ -234,7 +240,7 @@ fn mem_optimized_pbs<Scalar: UnsignedTorus + CastInto<usize> + Serialize>(c: &mu
|
||||
write_to_json(
|
||||
&id,
|
||||
*params,
|
||||
*name,
|
||||
name,
|
||||
"pbs",
|
||||
&OperatorType::Atomic,
|
||||
bit_size,
|
||||
@@ -323,7 +329,7 @@ fn multi_bit_pbs<
|
||||
write_to_json(
|
||||
&id,
|
||||
*params,
|
||||
*name,
|
||||
name,
|
||||
"pbs",
|
||||
&OperatorType::Atomic,
|
||||
bit_size,
|
||||
@@ -412,7 +418,7 @@ fn multi_bit_deterministic_pbs<
|
||||
write_to_json(
|
||||
&id,
|
||||
*params,
|
||||
*name,
|
||||
name,
|
||||
"pbs",
|
||||
&OperatorType::Atomic,
|
||||
bit_size,
|
||||
@@ -469,8 +475,6 @@ fn pbs_throughput<Scalar: UnsignedTorus + CastInto<usize> + Sync + Send + Serial
|
||||
params.ciphertext_modulus.unwrap(),
|
||||
);
|
||||
|
||||
let lwe_vec = lwe_vec;
|
||||
|
||||
let fft = Fft::new(params.polynomial_size.unwrap());
|
||||
let fft = fft.as_view();
|
||||
|
||||
@@ -534,7 +538,7 @@ fn pbs_throughput<Scalar: UnsignedTorus + CastInto<usize> + Sync + Send + Serial
|
||||
write_to_json(
|
||||
&id,
|
||||
*params,
|
||||
*name,
|
||||
name,
|
||||
"pbs",
|
||||
&OperatorType::Atomic,
|
||||
bit_size,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
811
tfhe/benches/integer/signed_bench.rs
Normal file
811
tfhe/benches/integer/signed_bench.rs
Normal file
@@ -0,0 +1,811 @@
|
||||
#[path = "../utilities.rs"]
|
||||
mod utilities;
|
||||
|
||||
use crate::utilities::{write_to_json, OperatorType};
|
||||
use std::env;
|
||||
|
||||
use criterion::{criterion_group, Criterion};
|
||||
use itertools::iproduct;
|
||||
use rand::prelude::*;
|
||||
use rand::Rng;
|
||||
use std::vec::IntoIter;
|
||||
use tfhe::integer::keycache::KEY_CACHE;
|
||||
use tfhe::integer::{RadixCiphertext, ServerKey, SignedRadixCiphertext, I256};
|
||||
use tfhe::keycache::NamedParam;
|
||||
|
||||
use tfhe::shortint::parameters::{
|
||||
PARAM_MESSAGE_2_CARRY_2_KS_PBS, PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_2_KS_PBS,
|
||||
};
|
||||
|
||||
fn gen_random_i256(rng: &mut ThreadRng) -> I256 {
|
||||
let clearlow = rng.gen::<u128>();
|
||||
let clearhigh = rng.gen::<u128>();
|
||||
|
||||
tfhe::integer::I256::from((clearlow, clearhigh))
|
||||
}
|
||||
|
||||
/// An iterator that yields a succession of combinations
|
||||
/// of parameters and a num_block to achieve a certain bit_size ciphertext
|
||||
/// in radix decomposition
|
||||
struct ParamsAndNumBlocksIter {
|
||||
params_and_bit_sizes:
|
||||
itertools::Product<IntoIter<tfhe::shortint::PBSParameters>, IntoIter<usize>>,
|
||||
}
|
||||
|
||||
impl Default for ParamsAndNumBlocksIter {
|
||||
fn default() -> Self {
|
||||
let is_multi_bit = match env::var("__TFHE_RS_BENCH_TYPE") {
|
||||
Ok(val) => val.to_lowercase() == "multi_bit",
|
||||
Err(_) => false,
|
||||
};
|
||||
|
||||
let is_fast_bench = match env::var("__TFHE_RS_FAST_BENCH") {
|
||||
Ok(val) => val.to_lowercase() == "true",
|
||||
Err(_) => false,
|
||||
};
|
||||
|
||||
if is_multi_bit {
|
||||
let params = vec![PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_2_KS_PBS.into()];
|
||||
|
||||
let bit_sizes = if is_fast_bench {
|
||||
vec![32]
|
||||
} else {
|
||||
vec![8, 16, 32, 40, 64]
|
||||
};
|
||||
|
||||
let params_and_bit_sizes = iproduct!(params, bit_sizes);
|
||||
Self {
|
||||
params_and_bit_sizes,
|
||||
}
|
||||
} else {
|
||||
// FIXME One set of parameter is tested since we want to benchmark only quickest
|
||||
// operations.
|
||||
let params = vec![
|
||||
PARAM_MESSAGE_2_CARRY_2_KS_PBS.into(),
|
||||
// PARAM_MESSAGE_3_CARRY_3_KS_PBS.into(),
|
||||
// PARAM_MESSAGE_4_CARRY_4_KS_PBS.into(),
|
||||
];
|
||||
|
||||
let bit_sizes = if is_fast_bench {
|
||||
vec![32]
|
||||
} else {
|
||||
vec![8, 16, 32, 40, 64, 128, 256]
|
||||
};
|
||||
|
||||
let params_and_bit_sizes = iproduct!(params, bit_sizes);
|
||||
Self {
|
||||
params_and_bit_sizes,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for ParamsAndNumBlocksIter {
|
||||
type Item = (tfhe::shortint::PBSParameters, usize, usize);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let (param, bit_size) = self.params_and_bit_sizes.next()?;
|
||||
let num_block =
|
||||
(bit_size as f64 / param.message_modulus().0.ilog2() as f64).ceil() as usize;
|
||||
|
||||
Some((param, num_block, bit_size))
|
||||
}
|
||||
}
|
||||
|
||||
/// Base function to bench a server key function that is a binary operation, input ciphertext will
|
||||
/// contain only zero carries
|
||||
fn bench_server_key_signed_binary_function_clean_inputs<F>(
|
||||
c: &mut Criterion,
|
||||
bench_name: &str,
|
||||
display_name: &str,
|
||||
binary_op: F,
|
||||
sample_size: usize,
|
||||
) where
|
||||
F: Fn(&ServerKey, &SignedRadixCiphertext, &SignedRadixCiphertext),
|
||||
{
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
bench_group
|
||||
.sample_size(sample_size)
|
||||
.measurement_time(std::time::Duration::from_secs(60));
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
|
||||
let param_name = param.name();
|
||||
|
||||
let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits");
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
let (cks, sks) = KEY_CACHE.get_from_params(param);
|
||||
|
||||
let encrypt_two_values = || {
|
||||
let ct_0 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
|
||||
let ct_1 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
|
||||
|
||||
(ct_0, ct_1)
|
||||
};
|
||||
|
||||
b.iter_batched(
|
||||
encrypt_two_values,
|
||||
|(ct_0, ct_1)| {
|
||||
binary_op(&sks, &ct_0, &ct_1);
|
||||
},
|
||||
criterion::BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
bit_size as u32,
|
||||
vec![param.message_modulus().0.ilog2(); num_block],
|
||||
);
|
||||
}
|
||||
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
/// Shifts and rotations require a special function as the rhs,
|
||||
/// i.e. the shift amount has to be a positive radix type.
|
||||
fn bench_server_key_signed_shift_function_clean_inputs<F>(
|
||||
c: &mut Criterion,
|
||||
bench_name: &str,
|
||||
display_name: &str,
|
||||
binary_op: F,
|
||||
) where
|
||||
F: Fn(&ServerKey, &SignedRadixCiphertext, &RadixCiphertext),
|
||||
{
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
bench_group
|
||||
.sample_size(15)
|
||||
.measurement_time(std::time::Duration::from_secs(60));
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
|
||||
let param_name = param.name();
|
||||
|
||||
let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits");
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
let (cks, sks) = KEY_CACHE.get_from_params(param);
|
||||
|
||||
let encrypt_two_values = || {
|
||||
let clear_1 = rng.gen_range(0u128..bit_size as u128);
|
||||
|
||||
let ct_0 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
|
||||
let ct_1 = cks.encrypt_radix(clear_1, num_block);
|
||||
|
||||
(ct_0, ct_1)
|
||||
};
|
||||
|
||||
b.iter_batched(
|
||||
encrypt_two_values,
|
||||
|(ct_0, ct_1)| {
|
||||
binary_op(&sks, &ct_0, &ct_1);
|
||||
},
|
||||
criterion::BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
bit_size as u32,
|
||||
vec![param.message_modulus().0.ilog2(); num_block],
|
||||
);
|
||||
}
|
||||
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
/// Base function to bench a server key function that is a unary operation, input ciphertext will
|
||||
/// contain only zero carries
|
||||
fn bench_server_key_unary_function_clean_inputs<F>(
|
||||
c: &mut Criterion,
|
||||
bench_name: &str,
|
||||
display_name: &str,
|
||||
unary_fn: F,
|
||||
) where
|
||||
F: Fn(&ServerKey, &SignedRadixCiphertext),
|
||||
{
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
bench_group
|
||||
.sample_size(15)
|
||||
.measurement_time(std::time::Duration::from_secs(60));
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
|
||||
let param_name = param.name();
|
||||
|
||||
let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits");
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
let (cks, sks) = KEY_CACHE.get_from_params(param);
|
||||
|
||||
let encrypt_one_value =
|
||||
|| cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
|
||||
|
||||
b.iter_batched(
|
||||
encrypt_one_value,
|
||||
|ct_0| {
|
||||
unary_fn(&sks, &ct_0);
|
||||
},
|
||||
criterion::BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
bit_size as u32,
|
||||
vec![param.message_modulus().0.ilog2(); num_block],
|
||||
);
|
||||
}
|
||||
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn signed_if_then_else_parallelized(c: &mut Criterion) {
|
||||
let bench_name = "integer::signed::if_then_else_parallelized";
|
||||
let display_name = "if_then_else";
|
||||
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
bench_group
|
||||
.sample_size(15)
|
||||
.measurement_time(std::time::Duration::from_secs(60));
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
|
||||
let param_name = param.name();
|
||||
|
||||
let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits");
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
let (cks, sks) = KEY_CACHE.get_from_params(param);
|
||||
|
||||
let encrypt_tree_values = || {
|
||||
let ct_0 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
|
||||
let ct_1 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
|
||||
|
||||
let cond = sks.create_trivial_radix(rng.gen_bool(0.5) as u64, num_block);
|
||||
|
||||
(cond, ct_0, ct_1)
|
||||
};
|
||||
|
||||
b.iter_batched(
|
||||
encrypt_tree_values,
|
||||
|(condition, true_ct, false_ct)| {
|
||||
sks.if_then_else_parallelized(&condition, &true_ct, &false_ct)
|
||||
},
|
||||
criterion::BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
bit_size as u32,
|
||||
vec![param.message_modulus().0.ilog2(); num_block],
|
||||
);
|
||||
}
|
||||
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
macro_rules! define_server_key_bench_binary_signed_clean_inputs_fn (
|
||||
(method_name: $server_key_method:ident, display_name:$name:ident $(,)?) => {
|
||||
fn $server_key_method(c: &mut Criterion) {
|
||||
bench_server_key_signed_binary_function_clean_inputs(
|
||||
c,
|
||||
concat!("integer::signed::", stringify!($server_key_method)),
|
||||
stringify!($name),
|
||||
|server_key, lhs, rhs| {
|
||||
server_key.$server_key_method(lhs, rhs);
|
||||
},
|
||||
15 /* sample_size */
|
||||
)
|
||||
}
|
||||
};
|
||||
(
|
||||
method_name: $server_key_method:ident,
|
||||
display_name:$name:ident,
|
||||
sample_size: $sample_size:expr $(,)?
|
||||
) => {
|
||||
fn $server_key_method(c: &mut Criterion) {
|
||||
bench_server_key_signed_binary_function_clean_inputs(
|
||||
c,
|
||||
concat!("integer::signed::", stringify!($server_key_method)),
|
||||
stringify!($name),
|
||||
|server_key, lhs, rhs| {
|
||||
server_key.$server_key_method(lhs, rhs);
|
||||
},
|
||||
$sample_size
|
||||
)
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
macro_rules! define_server_key_bench_unary_signed_clean_input_fn (
|
||||
(method_name: $server_key_method:ident, display_name:$name:ident $(,)?) => {
|
||||
fn $server_key_method(c: &mut Criterion) {
|
||||
bench_server_key_unary_function_clean_inputs(
|
||||
c,
|
||||
concat!("integer::signed::", stringify!($server_key_method)),
|
||||
stringify!($name),
|
||||
|server_key, lhs| {
|
||||
server_key.$server_key_method(lhs);
|
||||
},
|
||||
)
|
||||
}
|
||||
};
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_mul_parallelized,
|
||||
display_name: mul
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_bitand_parallelized,
|
||||
display_name: bitand
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_bitor_parallelized,
|
||||
display_name: bitand
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_bitxor_parallelized,
|
||||
display_name: bitand
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_eq_parallelized,
|
||||
display_name: eq
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_ne_parallelized,
|
||||
display_name: ne
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_le_parallelized,
|
||||
display_name: le
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_lt_parallelized,
|
||||
display_name: lt
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_ge_parallelized,
|
||||
display_name: ge
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_gt_parallelized,
|
||||
display_name: gt
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_max_parallelized,
|
||||
display_name: max
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_min_parallelized,
|
||||
display_name: min
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_div_rem_parallelized,
|
||||
display_name: div_rem,
|
||||
sample_size: 10,
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_signed_clean_inputs_fn!(
|
||||
method_name: unchecked_div_rem_floor_parallelized,
|
||||
display_name: div_rem_floor,
|
||||
sample_size: 10,
|
||||
);
|
||||
|
||||
fn unchecked_left_shift_parallelized(c: &mut Criterion) {
|
||||
bench_server_key_signed_shift_function_clean_inputs(
|
||||
c,
|
||||
concat!("integer::signed::", "unchecked_left_shift_parallelized"),
|
||||
"left_shift",
|
||||
|server_key, lhs, rhs| {
|
||||
server_key.unchecked_left_shift_parallelized(lhs, rhs);
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn unchecked_right_shift_parallelized(c: &mut Criterion) {
|
||||
bench_server_key_signed_shift_function_clean_inputs(
|
||||
c,
|
||||
concat!("integer::signed::", "unchecked_right_shift_parallelized"),
|
||||
"right_shift",
|
||||
|server_key, lhs, rhs| {
|
||||
server_key.unchecked_right_shift_parallelized(lhs, rhs);
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn unchecked_rotate_left_parallelized(c: &mut Criterion) {
|
||||
bench_server_key_signed_shift_function_clean_inputs(
|
||||
c,
|
||||
concat!("integer::signed::", "unchecked_rotate_left_parallelized"),
|
||||
"rotate_left",
|
||||
|server_key, lhs, rhs| {
|
||||
server_key.unchecked_rotate_left_parallelized(lhs, rhs);
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
fn unchecked_rotate_right_parallelized(c: &mut Criterion) {
|
||||
bench_server_key_signed_shift_function_clean_inputs(
|
||||
c,
|
||||
concat!("integer::signed::", "unchecked_rotate_right_parallelized"),
|
||||
"rotate_right",
|
||||
|server_key, lhs, rhs| {
|
||||
server_key.unchecked_rotate_right_parallelized(lhs, rhs);
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
define_server_key_bench_unary_signed_clean_input_fn!(
|
||||
method_name: unchecked_abs_parallelized,
|
||||
display_name: abs,
|
||||
);
|
||||
|
||||
criterion_group!(
|
||||
unchecked_ops,
|
||||
unchecked_mul_parallelized,
|
||||
unchecked_left_shift_parallelized,
|
||||
unchecked_right_shift_parallelized,
|
||||
unchecked_rotate_left_parallelized,
|
||||
unchecked_rotate_right_parallelized,
|
||||
unchecked_bitand_parallelized,
|
||||
unchecked_bitor_parallelized,
|
||||
unchecked_bitxor_parallelized,
|
||||
unchecked_abs_parallelized,
|
||||
unchecked_div_rem_parallelized,
|
||||
unchecked_div_rem_floor_parallelized,
|
||||
);
|
||||
|
||||
criterion_group!(
|
||||
unchecked_ops_comp,
|
||||
unchecked_eq_parallelized,
|
||||
unchecked_ne_parallelized,
|
||||
unchecked_ge_parallelized,
|
||||
unchecked_gt_parallelized,
|
||||
unchecked_le_parallelized,
|
||||
unchecked_lt_parallelized,
|
||||
unchecked_max_parallelized,
|
||||
unchecked_min_parallelized,
|
||||
);
|
||||
|
||||
//================================================================================
|
||||
// Scalar Benches
|
||||
//================================================================================
|
||||
|
||||
type ScalarType = I256;
|
||||
|
||||
fn bench_server_key_binary_scalar_function_clean_inputs<F, G>(
|
||||
c: &mut Criterion,
|
||||
bench_name: &str,
|
||||
display_name: &str,
|
||||
binary_op: F,
|
||||
rng_func: G,
|
||||
) where
|
||||
F: Fn(&ServerKey, &mut SignedRadixCiphertext, ScalarType),
|
||||
G: Fn(&mut ThreadRng, usize) -> ScalarType,
|
||||
{
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
bench_group
|
||||
.sample_size(15)
|
||||
.measurement_time(std::time::Duration::from_secs(60));
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
|
||||
if bit_size > ScalarType::BITS as usize {
|
||||
break;
|
||||
}
|
||||
let param_name = param.name();
|
||||
|
||||
let range = range_for_signed_bit_size(bit_size);
|
||||
|
||||
let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits_scalar_{bit_size}");
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
let (cks, sks) = KEY_CACHE.get_from_params(param);
|
||||
|
||||
let encrypt_one_value = || {
|
||||
let ct_0 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
|
||||
|
||||
let clear_1 = rng_func(&mut rng, bit_size);
|
||||
assert!(
|
||||
range.contains(&clear_1),
|
||||
"{:?} is not within the range {:?}",
|
||||
clear_1,
|
||||
range
|
||||
);
|
||||
|
||||
(ct_0, clear_1)
|
||||
};
|
||||
|
||||
b.iter_batched(
|
||||
encrypt_one_value,
|
||||
|(mut ct_0, clear_1)| {
|
||||
binary_op(&sks, &mut ct_0, clear_1);
|
||||
},
|
||||
criterion::BatchSize::SmallInput,
|
||||
)
|
||||
});
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
bit_size as u32,
|
||||
vec![param.message_modulus().0.ilog2(); num_block],
|
||||
);
|
||||
}
|
||||
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn range_for_signed_bit_size(bit_size: usize) -> std::ops::RangeInclusive<ScalarType> {
|
||||
assert!(bit_size <= ScalarType::BITS as usize);
|
||||
assert!(bit_size > 0);
|
||||
let modulus = ScalarType::ONE << (bit_size - 1);
|
||||
// if clear_bit_size == ScalarType::BITS then modulus==T::MIN
|
||||
// -T::MIN = -T::MIN so we sill have our correct lower value
|
||||
// (in two's complement which rust uses)
|
||||
let lowest = modulus.wrapping_neg();
|
||||
// if clear_bit_size == 128 then modulus==T::MIN
|
||||
// T::MIN - 1 = T::MAX (in two's complement which rust uses)
|
||||
let highest = modulus.wrapping_sub(ScalarType::ONE);
|
||||
|
||||
lowest..=highest
|
||||
}
|
||||
|
||||
/// Creates a bitmask where bit_size bits are 1s, rest are 0s
|
||||
/// Only works if ScalarType in signed
|
||||
fn positive_bit_mask_for_bit_size(bit_size: usize) -> ScalarType {
|
||||
assert!(bit_size <= ScalarType::BITS as usize);
|
||||
assert!(bit_size > 0);
|
||||
let minus_one = -ScalarType::ONE; // (In two's complement this is full of 1s)
|
||||
// The last bit of bit_size can only be set for when value is positive
|
||||
let bitmask = (minus_one) >> (ScalarType::BITS as usize - bit_size - 1);
|
||||
// flib msb as they would still be one due to '>>' being arithmetic shift
|
||||
bitmask ^ ((minus_one) << (bit_size - 1))
|
||||
}
|
||||
|
||||
fn negative_bit_mask_for_bit_size(bit_size: usize) -> ScalarType {
|
||||
assert!(bit_size <= ScalarType::BITS as usize);
|
||||
assert!(bit_size > 0);
|
||||
let minus_one = -ScalarType::ONE; // (In two's complement this is full of 1s)
|
||||
let bitmask = (minus_one) >> (ScalarType::BITS as usize - bit_size);
|
||||
// flib msb as they would still be one due to '>>' being arithmetic shift
|
||||
bitmask ^ ((minus_one) << bit_size)
|
||||
}
|
||||
|
||||
// We have to do this complex stuff because we cannot impl
|
||||
// rand::distributions::Distribution<I256> because benches are considered out of the crate
|
||||
// so neither I256 nor rand::distributions::Distribution belong to the benches.
|
||||
//
|
||||
// rand::distributions::Distribution can't be implemented in tfhe sources
|
||||
// in a way that it becomes available to the benches, because rand is a dev dependency
|
||||
fn gen_random_i256_in_range(rng: &mut ThreadRng, bit_size: usize) -> I256 {
|
||||
let value = gen_random_i256(rng);
|
||||
if value >= I256::ZERO {
|
||||
value & positive_bit_mask_for_bit_size(bit_size)
|
||||
} else {
|
||||
(value & negative_bit_mask_for_bit_size(bit_size)) | -I256::ONE
|
||||
}
|
||||
}
|
||||
|
||||
// Functions used to apply different way of selecting a scalar based on the context.
|
||||
fn default_scalar(rng: &mut ThreadRng, clear_bit_size: usize) -> ScalarType {
|
||||
gen_random_i256_in_range(rng, clear_bit_size)
|
||||
}
|
||||
|
||||
fn shift_scalar(_rng: &mut ThreadRng, _clear_bit_size: usize) -> ScalarType {
|
||||
// Shifting by one is the worst case scenario.
|
||||
ScalarType::ONE
|
||||
}
|
||||
|
||||
fn div_scalar(rng: &mut ThreadRng, clear_bit_size: usize) -> ScalarType {
|
||||
loop {
|
||||
let scalar = gen_random_i256_in_range(rng, clear_bit_size);
|
||||
if scalar != ScalarType::ZERO {
|
||||
return scalar;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! define_server_key_bench_binary_scalar_clean_inputs_fn (
|
||||
(method_name: $server_key_method:ident, display_name:$name:ident, rng_func:$($rng_fn:tt)*) => {
|
||||
fn $server_key_method(c: &mut Criterion) {
|
||||
bench_server_key_binary_scalar_function_clean_inputs(
|
||||
c,
|
||||
concat!("integer::", stringify!($server_key_method)),
|
||||
stringify!($name),
|
||||
|server_key, lhs, rhs| {
|
||||
server_key.$server_key_method(lhs, rhs);
|
||||
}, $($rng_fn)*)
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_left_shift_parallelized,
|
||||
display_name: scalar_left_shift,
|
||||
rng_func: shift_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_right_shift_parallelized,
|
||||
display_name: scalar_right_shift,
|
||||
rng_func: shift_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_rotate_right_parallelized,
|
||||
display_name: scalar_rotate_right,
|
||||
rng_func: shift_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_rotate_left_parallelized,
|
||||
display_name: scalar_rotate_left,
|
||||
rng_func: shift_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_mul_parallelized,
|
||||
display_name: scalar_mul,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_bitand_parallelized,
|
||||
display_name: scalar_bitand,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_bitor_parallelized,
|
||||
display_name: scalar_bitor,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_bitxor_parallelized,
|
||||
display_name: scalar_bitxor,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_eq_parallelized,
|
||||
display_name: scalar_eq,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_ne_parallelized,
|
||||
display_name: scalar_ne,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_le_parallelized,
|
||||
display_name: scalar_le,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_lt_parallelized,
|
||||
display_name: scalar_lt,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_ge_parallelized,
|
||||
display_name: scalar_ge,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_gt_parallelized,
|
||||
display_name: scalar_gt,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_max_parallelized,
|
||||
display_name: scalar_max,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_scalar_min_parallelized,
|
||||
display_name: scalar_min,
|
||||
rng_func: default_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_signed_scalar_div_rem_parallelized,
|
||||
display_name: scalar_div_rem,
|
||||
rng_func: div_scalar
|
||||
);
|
||||
|
||||
define_server_key_bench_binary_scalar_clean_inputs_fn!(
|
||||
method_name: unchecked_signed_scalar_div_parallelized,
|
||||
display_name: scalar_div,
|
||||
rng_func: div_scalar
|
||||
);
|
||||
|
||||
criterion_group!(
|
||||
unchecked_scalar_ops,
|
||||
unchecked_scalar_left_shift_parallelized,
|
||||
unchecked_scalar_right_shift_parallelized,
|
||||
unchecked_scalar_rotate_right_parallelized,
|
||||
unchecked_scalar_rotate_left_parallelized,
|
||||
unchecked_scalar_bitand_parallelized,
|
||||
unchecked_scalar_bitor_parallelized,
|
||||
unchecked_scalar_bitxor_parallelized,
|
||||
unchecked_scalar_mul_parallelized,
|
||||
unchecked_signed_scalar_div_rem_parallelized,
|
||||
unchecked_signed_scalar_div_parallelized,
|
||||
unchecked_div_rem_floor_parallelized,
|
||||
);
|
||||
|
||||
criterion_group!(
|
||||
unchecked_scalar_ops_comp,
|
||||
unchecked_scalar_eq_parallelized,
|
||||
unchecked_scalar_ne_parallelized,
|
||||
unchecked_scalar_le_parallelized,
|
||||
unchecked_scalar_lt_parallelized,
|
||||
unchecked_scalar_ge_parallelized,
|
||||
unchecked_scalar_gt_parallelized,
|
||||
unchecked_scalar_max_parallelized,
|
||||
unchecked_scalar_min_parallelized,
|
||||
);
|
||||
|
||||
criterion_group!(default_ops, signed_if_then_else_parallelized,);
|
||||
|
||||
fn main() {
|
||||
match env::var("__TFHE_RS_BENCH_OP_FLAVOR") {
|
||||
Ok(val) => {
|
||||
match val.to_lowercase().as_str() {
|
||||
"unchecked" => unchecked_ops(),
|
||||
"unchecked_comp" => unchecked_ops_comp(),
|
||||
"unchecked_scalar" => unchecked_scalar_ops(),
|
||||
"unchecked_scalar_comp" => unchecked_scalar_ops_comp(),
|
||||
_ => panic!("unknown benchmark operations flavor"),
|
||||
};
|
||||
}
|
||||
Err(_) => {
|
||||
unchecked_ops();
|
||||
unchecked_ops_comp();
|
||||
unchecked_scalar_ops();
|
||||
unchecked_scalar_ops_comp();
|
||||
}
|
||||
};
|
||||
|
||||
Criterion::default().configure_from_args().final_summary();
|
||||
}
|
||||
@@ -5,14 +5,15 @@ use crate::utilities::{write_to_json, OperatorType};
|
||||
use std::env;
|
||||
|
||||
use criterion::{criterion_group, Criterion};
|
||||
use tfhe::shortint::keycache::NamedParam;
|
||||
use tfhe::keycache::NamedParam;
|
||||
use tfhe::shortint::parameters::*;
|
||||
use tfhe::shortint::{Ciphertext, ClassicPBSParameters, ServerKey, ShortintParameterSet};
|
||||
use tfhe::shortint::{
|
||||
Ciphertext, ClassicPBSParameters, CompressedServerKey, ServerKey, ShortintParameterSet,
|
||||
};
|
||||
|
||||
use rand::Rng;
|
||||
use tfhe::shortint::keycache::KEY_CACHE;
|
||||
use tfhe::shortint::keycache::{KEY_CACHE, KEY_CACHE_WOPBS};
|
||||
|
||||
use tfhe::shortint::keycache::KEY_CACHE_WOPBS;
|
||||
use tfhe::shortint::parameters::parameters_wopbs::WOPBS_PARAM_MESSAGE_4_NORM2_6_KS_PBS;
|
||||
|
||||
const SERVER_KEY_BENCH_PARAMS: [ClassicPBSParameters; 4] = [
|
||||
@@ -40,20 +41,59 @@ const SERVER_KEY_BENCH_PARAMS_EXTENDED: [ClassicPBSParameters; 15] = [
|
||||
PARAM_MESSAGE_8_CARRY_0_KS_PBS,
|
||||
];
|
||||
|
||||
const SERVER_KEY_MULTI_BIT_BENCH_PARAMS: [MultiBitPBSParameters; 2] = [
|
||||
PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_2_KS_PBS,
|
||||
PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_3_KS_PBS,
|
||||
];
|
||||
|
||||
const SERVER_KEY_MULTI_BIT_BENCH_PARAMS_EXTENDED: [MultiBitPBSParameters; 6] = [
|
||||
PARAM_MULTI_BIT_MESSAGE_1_CARRY_1_GROUP_2_KS_PBS,
|
||||
PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_2_KS_PBS,
|
||||
PARAM_MULTI_BIT_MESSAGE_3_CARRY_3_GROUP_2_KS_PBS,
|
||||
PARAM_MULTI_BIT_MESSAGE_1_CARRY_1_GROUP_3_KS_PBS,
|
||||
PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_3_KS_PBS,
|
||||
PARAM_MULTI_BIT_MESSAGE_3_CARRY_3_GROUP_3_KS_PBS,
|
||||
];
|
||||
|
||||
enum BenchParamsSet {
|
||||
Standard,
|
||||
Extended,
|
||||
}
|
||||
|
||||
fn benchmark_parameters(params_set: BenchParamsSet) -> Vec<PBSParameters> {
|
||||
let is_multi_bit = match env::var("__TFHE_RS_BENCH_TYPE") {
|
||||
Ok(val) => val.to_lowercase() == "multi_bit",
|
||||
Err(_) => false,
|
||||
};
|
||||
|
||||
if is_multi_bit {
|
||||
let params = match params_set {
|
||||
BenchParamsSet::Standard => SERVER_KEY_MULTI_BIT_BENCH_PARAMS.to_vec(),
|
||||
BenchParamsSet::Extended => SERVER_KEY_MULTI_BIT_BENCH_PARAMS_EXTENDED.to_vec(),
|
||||
};
|
||||
params.iter().map(|p| (*p).into()).collect()
|
||||
} else {
|
||||
let params = match params_set {
|
||||
BenchParamsSet::Standard => SERVER_KEY_BENCH_PARAMS.to_vec(),
|
||||
BenchParamsSet::Extended => SERVER_KEY_BENCH_PARAMS_EXTENDED.to_vec(),
|
||||
};
|
||||
params.iter().map(|p| (*p).into()).collect()
|
||||
}
|
||||
}
|
||||
|
||||
fn bench_server_key_unary_function<F>(
|
||||
c: &mut Criterion,
|
||||
bench_name: &str,
|
||||
display_name: &str,
|
||||
unary_op: F,
|
||||
params: &[ClassicPBSParameters],
|
||||
params_set: BenchParamsSet,
|
||||
) where
|
||||
F: Fn(&ServerKey, &mut Ciphertext),
|
||||
{
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
|
||||
for param in params.iter() {
|
||||
let param: PBSParameters = (*param).into();
|
||||
let keys = KEY_CACHE.get_from_param(param);
|
||||
for param in benchmark_parameters(params_set).iter() {
|
||||
let keys = KEY_CACHE.get_from_param(*param);
|
||||
let (cks, sks) = (keys.client_key(), keys.server_key());
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
@@ -73,7 +113,7 @@ fn bench_server_key_unary_function<F>(
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
*param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
@@ -90,15 +130,14 @@ fn bench_server_key_binary_function<F>(
|
||||
bench_name: &str,
|
||||
display_name: &str,
|
||||
binary_op: F,
|
||||
params: &[ClassicPBSParameters],
|
||||
params_set: BenchParamsSet,
|
||||
) where
|
||||
F: Fn(&ServerKey, &mut Ciphertext, &mut Ciphertext),
|
||||
{
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
|
||||
for param in params.iter() {
|
||||
let param: PBSParameters = (*param).into();
|
||||
let keys = KEY_CACHE.get_from_param(param);
|
||||
for param in benchmark_parameters(params_set).iter() {
|
||||
let keys = KEY_CACHE.get_from_param(*param);
|
||||
let (cks, sks) = (keys.client_key(), keys.server_key());
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
@@ -120,7 +159,7 @@ fn bench_server_key_binary_function<F>(
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
*param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
@@ -137,15 +176,14 @@ fn bench_server_key_binary_scalar_function<F>(
|
||||
bench_name: &str,
|
||||
display_name: &str,
|
||||
binary_op: F,
|
||||
params: &[ClassicPBSParameters],
|
||||
params_set: BenchParamsSet,
|
||||
) where
|
||||
F: Fn(&ServerKey, &mut Ciphertext, u8),
|
||||
{
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
|
||||
for param in params {
|
||||
let param: PBSParameters = (*param).into();
|
||||
let keys = KEY_CACHE.get_from_param(param);
|
||||
for param in benchmark_parameters(params_set).iter() {
|
||||
let keys = KEY_CACHE.get_from_param(*param);
|
||||
let (cks, sks) = (keys.client_key(), keys.server_key());
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
@@ -166,7 +204,7 @@ fn bench_server_key_binary_scalar_function<F>(
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
*param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
@@ -183,15 +221,14 @@ fn bench_server_key_binary_scalar_division_function<F>(
|
||||
bench_name: &str,
|
||||
display_name: &str,
|
||||
binary_op: F,
|
||||
params: &[ClassicPBSParameters],
|
||||
params_set: BenchParamsSet,
|
||||
) where
|
||||
F: Fn(&ServerKey, &mut Ciphertext, u8),
|
||||
{
|
||||
let mut bench_group = c.benchmark_group(bench_name);
|
||||
|
||||
for param in params {
|
||||
let param: PBSParameters = (*param).into();
|
||||
let keys = KEY_CACHE.get_from_param(param);
|
||||
for param in benchmark_parameters(params_set).iter() {
|
||||
let keys = KEY_CACHE.get_from_param(*param);
|
||||
let (cks, sks) = (keys.client_key(), keys.server_key());
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
@@ -216,7 +253,7 @@ fn bench_server_key_binary_scalar_division_function<F>(
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
*param,
|
||||
param.name(),
|
||||
display_name,
|
||||
&OperatorType::Atomic,
|
||||
@@ -228,12 +265,11 @@ fn bench_server_key_binary_scalar_division_function<F>(
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn carry_extract(c: &mut Criterion) {
|
||||
fn carry_extract_bench(c: &mut Criterion, params_set: BenchParamsSet) {
|
||||
let mut bench_group = c.benchmark_group("carry_extract");
|
||||
|
||||
for param in SERVER_KEY_BENCH_PARAMS {
|
||||
let param: PBSParameters = param.into();
|
||||
let keys = KEY_CACHE.get_from_param(param);
|
||||
for param in benchmark_parameters(params_set).iter() {
|
||||
let keys = KEY_CACHE.get_from_param(*param);
|
||||
let (cks, sks) = (keys.client_key(), keys.server_key());
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
@@ -244,7 +280,7 @@ fn carry_extract(c: &mut Criterion) {
|
||||
|
||||
let ct_0 = cks.encrypt(clear_0);
|
||||
|
||||
let bench_id = format!("ServerKey::carry_extract::{}", param.name());
|
||||
let bench_id = format!("shortint::carry_extract::{}", param.name());
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
let _ = sks.carry_extract(&ct_0);
|
||||
@@ -253,7 +289,7 @@ fn carry_extract(c: &mut Criterion) {
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
*param,
|
||||
param.name(),
|
||||
"carry_extract",
|
||||
&OperatorType::Atomic,
|
||||
@@ -265,12 +301,11 @@ fn carry_extract(c: &mut Criterion) {
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn programmable_bootstrapping(c: &mut Criterion) {
|
||||
fn programmable_bootstrapping_bench(c: &mut Criterion, params_set: BenchParamsSet) {
|
||||
let mut bench_group = c.benchmark_group("programmable_bootstrap");
|
||||
|
||||
for param in SERVER_KEY_BENCH_PARAMS {
|
||||
let param: PBSParameters = param.into();
|
||||
let keys = KEY_CACHE.get_from_param(param);
|
||||
for param in benchmark_parameters(params_set).iter() {
|
||||
let keys = KEY_CACHE.get_from_param(*param);
|
||||
let (cks, sks) = (keys.client_key(), keys.server_key());
|
||||
|
||||
let mut rng = rand::thread_rng();
|
||||
@@ -283,7 +318,7 @@ fn programmable_bootstrapping(c: &mut Criterion) {
|
||||
|
||||
let ctxt = cks.encrypt(clear_0);
|
||||
|
||||
let bench_id = format!("ServerKey::programmable_bootstrap::{}", param.name());
|
||||
let bench_id = format!("shortint::programmable_bootstrap::{}", param.name());
|
||||
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
@@ -293,7 +328,7 @@ fn programmable_bootstrapping(c: &mut Criterion) {
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
param,
|
||||
*param,
|
||||
param.name(),
|
||||
"pbs",
|
||||
&OperatorType::Atomic,
|
||||
@@ -305,6 +340,54 @@ fn programmable_bootstrapping(c: &mut Criterion) {
|
||||
bench_group.finish();
|
||||
}
|
||||
|
||||
fn server_key_from_compressed_key(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("uncompress_key");
|
||||
bench_group
|
||||
.sample_size(10)
|
||||
.measurement_time(std::time::Duration::from_secs(60));
|
||||
|
||||
let mut params = SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
.iter()
|
||||
.map(|p| (*p).into())
|
||||
.collect::<Vec<PBSParameters>>();
|
||||
let multi_bit_params = SERVER_KEY_MULTI_BIT_BENCH_PARAMS_EXTENDED
|
||||
.iter()
|
||||
.map(|p| (*p).into())
|
||||
.collect::<Vec<PBSParameters>>();
|
||||
params.extend(&multi_bit_params);
|
||||
|
||||
for param in params.iter() {
|
||||
let keys = KEY_CACHE.get_from_param(*param);
|
||||
let sks_compressed = CompressedServerKey::new(keys.client_key());
|
||||
|
||||
let bench_id = format!("shortint::uncompress_key::{}", param.name());
|
||||
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
let clone_compressed_key = || sks_compressed.clone();
|
||||
|
||||
b.iter_batched(
|
||||
clone_compressed_key,
|
||||
|sks_cloned| {
|
||||
let _ = ServerKey::from(sks_cloned);
|
||||
},
|
||||
criterion::BatchSize::PerIteration,
|
||||
)
|
||||
});
|
||||
|
||||
write_to_json::<u64, _>(
|
||||
&bench_id,
|
||||
*param,
|
||||
param.name(),
|
||||
"uncompress_key",
|
||||
&OperatorType::Atomic,
|
||||
param.message_modulus().0.ilog2(),
|
||||
vec![param.message_modulus().0.ilog2()],
|
||||
);
|
||||
}
|
||||
|
||||
bench_group.finish();
|
||||
}
|
||||
|
||||
// TODO: remove?
|
||||
fn _bench_wopbs_param_message_8_norm2_5(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("programmable_bootstrap");
|
||||
@@ -334,57 +417,69 @@ fn _bench_wopbs_param_message_8_norm2_5(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
macro_rules! define_server_key_unary_bench_fn (
|
||||
(method_name:$server_key_method:ident, display_name:$name:ident, $params:expr) => {
|
||||
(method_name:$server_key_method:ident, display_name:$name:ident, $params_set:expr) => {
|
||||
fn $server_key_method(c: &mut Criterion) {
|
||||
bench_server_key_unary_function(
|
||||
c,
|
||||
concat!("ServerKey::", stringify!($server_key_method)),
|
||||
concat!("shortint::", stringify!($server_key_method)),
|
||||
stringify!($name),
|
||||
|server_key, lhs| {
|
||||
let _ = server_key.$server_key_method(lhs);},
|
||||
$params)
|
||||
$params_set)
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
macro_rules! define_server_key_bench_fn (
|
||||
(method_name:$server_key_method:ident, display_name:$name:ident, $params:expr) => {
|
||||
(method_name:$server_key_method:ident, display_name:$name:ident, $params_set:expr) => {
|
||||
fn $server_key_method(c: &mut Criterion) {
|
||||
bench_server_key_binary_function(
|
||||
c,
|
||||
concat!("ServerKey::", stringify!($server_key_method)),
|
||||
concat!("shortint::", stringify!($server_key_method)),
|
||||
stringify!($name),
|
||||
|server_key, lhs, rhs| {
|
||||
let _ = server_key.$server_key_method(lhs, rhs);},
|
||||
$params)
|
||||
$params_set)
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
macro_rules! define_server_key_scalar_bench_fn (
|
||||
(method_name:$server_key_method:ident, display_name:$name:ident, $params:expr) => {
|
||||
(method_name:$server_key_method:ident, display_name:$name:ident, $params_set:expr) => {
|
||||
fn $server_key_method(c: &mut Criterion) {
|
||||
bench_server_key_binary_scalar_function(
|
||||
c,
|
||||
concat!("ServerKey::", stringify!($server_key_method)),
|
||||
concat!("shortint::", stringify!($server_key_method)),
|
||||
stringify!($name),
|
||||
|server_key, lhs, rhs| {
|
||||
let _ = server_key.$server_key_method(lhs, rhs);},
|
||||
$params)
|
||||
$params_set)
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
macro_rules! define_server_key_scalar_div_bench_fn (
|
||||
(method_name:$server_key_method:ident, display_name:$name:ident, $params:expr) => {
|
||||
(method_name:$server_key_method:ident, display_name:$name:ident, $params_set:expr) => {
|
||||
fn $server_key_method(c: &mut Criterion) {
|
||||
bench_server_key_binary_scalar_division_function(
|
||||
c,
|
||||
concat!("ServerKey::", stringify!($server_key_method)),
|
||||
concat!("shortint::", stringify!($server_key_method)),
|
||||
stringify!($name),
|
||||
|server_key, lhs, rhs| {
|
||||
let _ = server_key.$server_key_method(lhs, rhs);},
|
||||
$params)
|
||||
$params_set)
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
macro_rules! define_custom_bench_fn (
|
||||
(function_name:$function:ident, $params_set:expr) => {
|
||||
fn $function(c: &mut Criterion) {
|
||||
::paste::paste! {
|
||||
[<$function _bench>](
|
||||
c,
|
||||
$params_set)
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
@@ -392,251 +487,258 @@ macro_rules! define_server_key_scalar_div_bench_fn (
|
||||
define_server_key_unary_bench_fn!(
|
||||
method_name: unchecked_neg,
|
||||
display_name: negation,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
|
||||
define_server_key_bench_fn!(
|
||||
method_name: unchecked_add,
|
||||
display_name: add,
|
||||
&SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
BenchParamsSet::Extended
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: unchecked_sub,
|
||||
display_name: sub,
|
||||
&SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
BenchParamsSet::Extended
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: unchecked_mul_lsb,
|
||||
display_name: mul,
|
||||
&SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
BenchParamsSet::Extended
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: unchecked_mul_msb,
|
||||
display_name: mul,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: unchecked_div,
|
||||
display_name: div,
|
||||
&SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
BenchParamsSet::Extended
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: smart_bitand,
|
||||
display_name: bitand,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: smart_bitor,
|
||||
display_name: bitor,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: smart_bitxor,
|
||||
display_name: bitxor,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: smart_add,
|
||||
display_name: add,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: smart_sub,
|
||||
display_name: sub,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: smart_mul_lsb,
|
||||
display_name: mul,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: bitand,
|
||||
display_name: bitand,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: bitor,
|
||||
display_name: bitor,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: bitxor,
|
||||
display_name: bitxor,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: add,
|
||||
display_name: add,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: sub,
|
||||
display_name: sub,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: mul,
|
||||
display_name: mul,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: div,
|
||||
display_name: div,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: greater,
|
||||
display_name: greater,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: greater_or_equal,
|
||||
display_name: greater_or_equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: less,
|
||||
display_name: less,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: less_or_equal,
|
||||
display_name: less_or_equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: equal,
|
||||
display_name: equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: not_equal,
|
||||
display_name: not_equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_unary_bench_fn!(
|
||||
method_name: neg,
|
||||
display_name: negation,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: unchecked_greater,
|
||||
display_name: greater_than,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: unchecked_less,
|
||||
display_name: less_than,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_bench_fn!(
|
||||
method_name: unchecked_equal,
|
||||
display_name: equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: unchecked_scalar_add,
|
||||
display_name: add,
|
||||
&SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
BenchParamsSet::Extended
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: unchecked_scalar_sub,
|
||||
display_name: sub,
|
||||
&SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
BenchParamsSet::Extended
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: unchecked_scalar_mul,
|
||||
display_name: mul,
|
||||
&SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
BenchParamsSet::Extended
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: unchecked_scalar_left_shift,
|
||||
display_name: left_shift,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: unchecked_scalar_right_shift,
|
||||
display_name: right_shift,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
|
||||
define_server_key_scalar_div_bench_fn!(
|
||||
method_name: unchecked_scalar_div,
|
||||
display_name: div,
|
||||
&SERVER_KEY_BENCH_PARAMS_EXTENDED
|
||||
BenchParamsSet::Extended
|
||||
);
|
||||
define_server_key_scalar_div_bench_fn!(
|
||||
method_name: unchecked_scalar_mod,
|
||||
display_name: modulo,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_add,
|
||||
display_name: add,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_sub,
|
||||
display_name: sub,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_mul,
|
||||
display_name: mul,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_left_shift,
|
||||
display_name: left_shift,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_right_shift,
|
||||
display_name: right_shift,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
|
||||
define_server_key_scalar_div_bench_fn!(
|
||||
method_name: scalar_div,
|
||||
display_name: div,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_div_bench_fn!(
|
||||
method_name: scalar_mod,
|
||||
display_name: modulo,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_greater,
|
||||
display_name: greater,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_greater_or_equal,
|
||||
display_name: greater_or_equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_less,
|
||||
display_name: less,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_bench_fn!(
|
||||
method_name: scalar_less_or_equal,
|
||||
display_name: less_or_equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_div_bench_fn!(
|
||||
method_name: scalar_equal,
|
||||
display_name: equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
define_server_key_scalar_div_bench_fn!(
|
||||
method_name: scalar_not_equal,
|
||||
display_name: not_equal,
|
||||
&SERVER_KEY_BENCH_PARAMS
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
|
||||
define_custom_bench_fn!(function_name: carry_extract, BenchParamsSet::Standard);
|
||||
|
||||
define_custom_bench_fn!(
|
||||
function_name: programmable_bootstrapping,
|
||||
BenchParamsSet::Standard
|
||||
);
|
||||
|
||||
criterion_group!(
|
||||
@@ -710,6 +812,8 @@ criterion_group!(
|
||||
scalar_not_equal
|
||||
);
|
||||
|
||||
criterion_group!(misc, server_key_from_compressed_key);
|
||||
|
||||
mod casting;
|
||||
criterion_group!(
|
||||
casting,
|
||||
@@ -723,6 +827,7 @@ fn main() {
|
||||
casting();
|
||||
default_ops();
|
||||
default_scalar_ops();
|
||||
misc();
|
||||
}
|
||||
|
||||
match env::var("__TFHE_RS_BENCH_OP_FLAVOR") {
|
||||
|
||||
@@ -37,4 +37,3 @@ foreach (testsourcefile ${TEST_CASES})
|
||||
# Enabled asserts even in release mode
|
||||
add_definitions(-UNDEBUG)
|
||||
endforeach (testsourcefile ${TEST_CASES})
|
||||
|
||||
|
||||
@@ -110,6 +110,134 @@ int uint256_public_key(const ClientKey *client_key, const PublicKey *public_key)
|
||||
return ok;
|
||||
}
|
||||
|
||||
int int256_client_key(const ClientKey *client_key) {
|
||||
int ok;
|
||||
FheInt256 *lhs = NULL;
|
||||
FheInt256 *rhs = NULL;
|
||||
FheInt256 *result = NULL;
|
||||
FheInt64 *cast_result = NULL;
|
||||
// This is +1
|
||||
I256 lhs_clear = {1, 0, 0, 0};
|
||||
// This is -1
|
||||
I256 rhs_clear = {UINT64_MAX, UINT64_MAX, UINT64_MAX, UINT64_MAX};
|
||||
I256 result_clear = {0};
|
||||
|
||||
ok = fhe_int256_try_encrypt_with_client_key_i256(lhs_clear, client_key, &lhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_try_encrypt_with_client_key_i256(rhs_clear, client_key, &rhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_add(lhs, rhs, &result);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_decrypt(result, client_key, &result_clear);
|
||||
assert(ok == 0);
|
||||
|
||||
// We did 1 + (-1), so we expect 0
|
||||
assert(result_clear.w0 == 0);
|
||||
assert(result_clear.w1 == 0);
|
||||
assert(result_clear.w2 == 0);
|
||||
assert(result_clear.w3 == 0);
|
||||
|
||||
fhe_int256_destroy(result);
|
||||
ok = fhe_int256_sub(lhs, rhs, &result);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_decrypt(result, client_key, &result_clear);
|
||||
assert(ok == 0);
|
||||
|
||||
// We did 1 - (-1), so we expect 2
|
||||
assert(result_clear.w0 == 2);
|
||||
assert(result_clear.w1 == 0);
|
||||
assert(result_clear.w2 == 0);
|
||||
assert(result_clear.w3 == 0);
|
||||
|
||||
// try some casting
|
||||
ok = fhe_int256_cast_into_fhe_int64(result, &cast_result);
|
||||
assert(ok == 0);
|
||||
int64_t u64_clear;
|
||||
ok = fhe_int64_decrypt(cast_result, client_key, &u64_clear);
|
||||
assert(ok == 0);
|
||||
assert(u64_clear == 2);
|
||||
|
||||
fhe_int256_destroy(lhs);
|
||||
fhe_int256_destroy(rhs);
|
||||
fhe_int256_destroy(result);
|
||||
fhe_int64_destroy(cast_result);
|
||||
return ok;
|
||||
}
|
||||
|
||||
int int256_encrypt_trivial(const ClientKey *client_key) {
|
||||
int ok;
|
||||
FheInt256 *lhs = NULL;
|
||||
FheInt256 *rhs = NULL;
|
||||
FheInt256 *result = NULL;
|
||||
I256 lhs_clear = {1, 2, 3, 4};
|
||||
I256 rhs_clear = {5, 6, 7, 8};
|
||||
I256 result_clear = {0};
|
||||
|
||||
ok = fhe_int256_try_encrypt_trivial_i256(lhs_clear, &lhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_try_encrypt_trivial_i256(rhs_clear, &rhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_add(lhs, rhs, &result);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_decrypt(result, client_key, &result_clear);
|
||||
assert(ok == 0);
|
||||
|
||||
assert(result_clear.w0 == 6);
|
||||
assert(result_clear.w1 == 8);
|
||||
assert(result_clear.w2 == 10);
|
||||
assert(result_clear.w3 == 12);
|
||||
|
||||
fhe_int256_destroy(lhs);
|
||||
fhe_int256_destroy(rhs);
|
||||
fhe_int256_destroy(result);
|
||||
return ok;
|
||||
}
|
||||
|
||||
|
||||
int int256_public_key(const ClientKey *client_key, const PublicKey *public_key) {
|
||||
int ok;
|
||||
FheInt256 *lhs = NULL;
|
||||
FheInt256 *rhs = NULL;
|
||||
FheInt256 *result = NULL;
|
||||
// This is +1
|
||||
I256 lhs_clear = {1, 0, 0, 0};
|
||||
// This is -1
|
||||
I256 rhs_clear = {UINT64_MAX, UINT64_MAX, UINT64_MAX, UINT64_MAX};
|
||||
I256 result_clear = {0};
|
||||
|
||||
ok = fhe_int256_try_encrypt_with_public_key_i256(lhs_clear, public_key, &lhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_try_encrypt_with_public_key_i256(rhs_clear, public_key, &rhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_sub(lhs, rhs, &result);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int256_decrypt(result, client_key, &result_clear);
|
||||
assert(ok == 0);
|
||||
|
||||
// We did 1 - (-1), so we expect 2
|
||||
assert(result_clear.w0 == 2);
|
||||
assert(result_clear.w1 == 0);
|
||||
assert(result_clear.w2 == 0);
|
||||
assert(result_clear.w3 == 0);
|
||||
|
||||
fhe_int256_destroy(lhs);
|
||||
fhe_int256_destroy(rhs);
|
||||
fhe_int256_destroy(result);
|
||||
return ok;
|
||||
}
|
||||
|
||||
|
||||
|
||||
int main(void) {
|
||||
int ok = 0;
|
||||
ConfigBuilder *builder;
|
||||
@@ -132,6 +260,10 @@ int main(void) {
|
||||
uint256_encrypt_trivial(client_key);
|
||||
uint256_public_key(client_key, public_key);
|
||||
|
||||
int256_client_key(client_key);
|
||||
int256_encrypt_trivial(client_key);
|
||||
int256_public_key(client_key, public_key);
|
||||
|
||||
client_key_destroy(client_key);
|
||||
public_key_destroy(public_key);
|
||||
server_key_destroy(server_key);
|
||||
|
||||
@@ -78,7 +78,7 @@ int uint256_encrypt_trivial(const ClientKey *client_key) {
|
||||
return ok;
|
||||
}
|
||||
|
||||
int uint256_public_key(const ClientKey *client_key,
|
||||
int uint256_compact_public_key(const ClientKey *client_key,
|
||||
const CompressedCompactPublicKey *compressed_public_key) {
|
||||
int ok;
|
||||
CompactPublicKey *public_key = NULL;
|
||||
@@ -112,7 +112,7 @@ int uint256_public_key(const ClientKey *client_key,
|
||||
lhs = expand_output[0];
|
||||
rhs = expand_output[1];
|
||||
// We can destroy the compact list
|
||||
// The expanded ciphertext are independant from it
|
||||
// The expanded ciphertext are independent from it
|
||||
compact_fhe_uint256_list_destroy(list);
|
||||
|
||||
ok = fhe_uint256_sub(lhs, rhs, &result);
|
||||
@@ -158,6 +158,80 @@ int uint256_public_key(const ClientKey *client_key,
|
||||
return ok;
|
||||
}
|
||||
|
||||
int int32_compact_public_key(const ClientKey *client_key,
|
||||
const CompressedCompactPublicKey *compressed_public_key) {
|
||||
int ok;
|
||||
CompactPublicKey *public_key = NULL;
|
||||
FheInt32 *lhs = NULL;
|
||||
FheInt32 *rhs = NULL;
|
||||
FheInt32 *result = NULL;
|
||||
CompactFheInt32List *list = NULL;
|
||||
|
||||
int32_t result_clear = 0;
|
||||
int32_t clears[2] = {-9482394, 98712234};
|
||||
|
||||
ok = compressed_compact_public_key_decompress(compressed_public_key, &public_key);
|
||||
assert(ok == 0);
|
||||
|
||||
// Compact list example
|
||||
{
|
||||
ok = compact_fhe_int32_list_try_encrypt_with_compact_public_key_i32(&clears[0], 2,
|
||||
public_key, &list);
|
||||
assert(ok == 0);
|
||||
|
||||
size_t len = 0;
|
||||
ok = compact_fhe_int32_list_len(list, &len);
|
||||
assert(ok == 0);
|
||||
assert(len == 2);
|
||||
|
||||
FheInt32 *expand_output[2] = {NULL};
|
||||
ok = compact_fhe_int32_list_expand(list, &expand_output[0], 2);
|
||||
assert(ok == 0);
|
||||
|
||||
// transfer ownership
|
||||
lhs = expand_output[0];
|
||||
rhs = expand_output[1];
|
||||
// We can destroy the compact list
|
||||
// The expanded ciphertext are independent from it
|
||||
compact_fhe_int32_list_destroy(list);
|
||||
|
||||
ok = fhe_int32_sub(lhs, rhs, &result);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int32_decrypt(result, client_key, &result_clear);
|
||||
assert(ok == 0);
|
||||
|
||||
assert(result_clear == clears[0] - clears[1]);
|
||||
|
||||
fhe_int32_destroy(lhs);
|
||||
fhe_int32_destroy(rhs);
|
||||
fhe_int32_destroy(result);
|
||||
}
|
||||
|
||||
{
|
||||
ok = fhe_int32_try_encrypt_with_compact_public_key_i32(clears[0], public_key, &lhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int32_try_encrypt_with_compact_public_key_i32(clears[1], public_key, &rhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int32_add(lhs, rhs, &result);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = fhe_int32_decrypt(result, client_key, &result_clear);
|
||||
assert(ok == 0);
|
||||
|
||||
assert(result_clear == clears[0] + clears[1]);
|
||||
|
||||
fhe_int32_destroy(lhs);
|
||||
fhe_int32_destroy(rhs);
|
||||
fhe_int32_destroy(result);
|
||||
}
|
||||
|
||||
compact_public_key_destroy(public_key);
|
||||
return ok;
|
||||
}
|
||||
|
||||
int main(void) {
|
||||
int ok = 0;
|
||||
{
|
||||
@@ -180,7 +254,8 @@ int main(void) {
|
||||
|
||||
uint256_client_key(client_key);
|
||||
uint256_encrypt_trivial(client_key);
|
||||
uint256_public_key(client_key, compressed_public_key);
|
||||
uint256_compact_public_key(client_key, compressed_public_key);
|
||||
int32_compact_public_key(client_key, compressed_public_key);
|
||||
|
||||
client_key_destroy(client_key);
|
||||
compressed_compact_public_key_destroy(compressed_public_key);
|
||||
@@ -207,7 +282,8 @@ int main(void) {
|
||||
|
||||
uint256_client_key(client_key);
|
||||
uint256_encrypt_trivial(client_key);
|
||||
uint256_public_key(client_key, compressed_public_key);
|
||||
uint256_compact_public_key(client_key, compressed_public_key);
|
||||
int32_compact_public_key(client_key, compressed_public_key);
|
||||
|
||||
client_key_destroy(client_key);
|
||||
compressed_compact_public_key_destroy(compressed_public_key);
|
||||
|
||||
@@ -64,6 +64,67 @@ int uint8_public_key(const ClientKey *client_key, const PublicKey *public_key) {
|
||||
return ok;
|
||||
}
|
||||
|
||||
int uint8_safe_serialization(const ClientKey *client_key, const ServerKey *server_key) {
|
||||
int ok;
|
||||
CompactFheUint8 *lhs = NULL;
|
||||
CompactFheUint8 *deserialized_lhs = NULL;
|
||||
CompactFheUint8 *result = NULL;
|
||||
Buffer value_buffer = {.pointer = NULL, .length = 0};
|
||||
Buffer cks_buffer = {.pointer = NULL, .length = 0};
|
||||
BufferView deser_view = {.pointer = NULL, .length = 0};
|
||||
ClientKey *deserialized_client_key = NULL;
|
||||
|
||||
const uint64_t max_serialization_size = UINT64_C(1) << UINT64_C(20);
|
||||
|
||||
uint8_t lhs_clear = 123;
|
||||
|
||||
ok = client_key_serialize(client_key, &cks_buffer);
|
||||
assert(ok == 0);
|
||||
|
||||
deser_view.pointer = cks_buffer.pointer;
|
||||
deser_view.length = cks_buffer.length;
|
||||
ok = client_key_deserialize(deser_view, &deserialized_client_key);
|
||||
assert(ok == 0);
|
||||
|
||||
struct CompactPublicKey *public_key;
|
||||
|
||||
ok = compact_public_key_new(deserialized_client_key, &public_key);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = compact_fhe_uint8_try_encrypt_with_compact_public_key_u8(lhs_clear, public_key, &lhs);
|
||||
assert(ok == 0);
|
||||
|
||||
ok = compact_fhe_uint8_safe_serialize(lhs, &value_buffer, max_serialization_size);
|
||||
assert(ok == 0);
|
||||
|
||||
deser_view.pointer = value_buffer.pointer;
|
||||
deser_view.length = value_buffer.length;
|
||||
ok = compact_fhe_uint8_safe_deserialize_conformant(deser_view, max_serialization_size, server_key,
|
||||
&deserialized_lhs);
|
||||
assert(ok == 0);
|
||||
|
||||
FheUint8 *expanded = NULL;
|
||||
|
||||
ok = compact_fhe_uint8_expand(deserialized_lhs, &expanded);
|
||||
assert(ok == 0);
|
||||
|
||||
uint8_t clear;
|
||||
ok = fhe_uint8_decrypt(expanded, deserialized_client_key, &clear);
|
||||
assert(ok == 0);
|
||||
|
||||
assert(clear == lhs_clear);
|
||||
|
||||
if (value_buffer.pointer != NULL) {
|
||||
destroy_buffer(&value_buffer);
|
||||
}
|
||||
compact_fhe_uint8_destroy(lhs);
|
||||
compact_fhe_uint8_destroy(deserialized_lhs);
|
||||
compact_fhe_uint8_destroy(result);
|
||||
fhe_uint8_destroy(expanded);
|
||||
|
||||
return ok;
|
||||
}
|
||||
|
||||
int uint8_serialization(const ClientKey *client_key) {
|
||||
int ok;
|
||||
FheUint8 *lhs = NULL;
|
||||
@@ -159,6 +220,8 @@ int main(void) {
|
||||
assert(ok == 0);
|
||||
ok = uint8_serialization(client_key);
|
||||
assert(ok == 0);
|
||||
ok = uint8_safe_serialization(client_key, server_key);
|
||||
assert(ok == 0);
|
||||
ok = uint8_compressed(client_key);
|
||||
assert(ok == 0);
|
||||
|
||||
|
||||
@@ -5,13 +5,13 @@
|
||||
## Getting Started
|
||||
* [Installation](getting_started/installation.md)
|
||||
* [Quick Start](getting_started/quick_start.md)
|
||||
* [Operations](getting_started/operations.md)
|
||||
* [Types & Operations](getting_started/operations.md)
|
||||
* [Benchmarks](getting_started/benchmarks.md)
|
||||
* [Security and Cryptography](getting_started/security_and_cryptography.md)
|
||||
|
||||
## Tutorials
|
||||
## Tutorials
|
||||
* [Homomorphic Parity Bit](tutorials/parity_bit.md)
|
||||
* [Homomorphic Case Changing on Latin String](tutorials/latin_fhe_string.md)
|
||||
* [Homomorphic Case Changing on Ascii String](tutorials/ascii_fhe_string.md)
|
||||
|
||||
## How To
|
||||
* [Configure Rust](how_to/rust_configuration.md)
|
||||
@@ -19,23 +19,24 @@
|
||||
* [Compress Ciphertexts/Keys](how_to/compress.md)
|
||||
* [Use Public Key Encryption](how_to/public_key.md)
|
||||
* [Use Trivial Ciphertext](how_to/trivial_ciphertext.md)
|
||||
* [Generic Function Bounds](how_to/trait_bounds.md)
|
||||
* [Use Parallelized PBS](how_to/parallelized_pbs.md)
|
||||
* [Use the C API](how_to/c_api.md)
|
||||
* [Use the JS on WASM API](how_to/js_on_wasm_api.md)
|
||||
|
||||
## Fine-grained APIs
|
||||
* [Quick Start](fine_grained_api/quick_start.md)
|
||||
* [Boolean](fine_grained_api/Boolean/tutorial.md)
|
||||
* [Boolean](fine_grained_api/Boolean/readme.md)
|
||||
* [Operations](fine_grained_api/Boolean/operations.md)
|
||||
* [Cryptographic Parameters](fine_grained_api/Boolean/parameters.md)
|
||||
* [Serialization/Deserialization](fine_grained_api/Boolean/serialization.md)
|
||||
|
||||
* [Shortint](fine_grained_api/shortint/tutorial.md)
|
||||
* [Shortint](fine_grained_api/shortint/readme.md)
|
||||
* [Operations](fine_grained_api/shortint/operations.md)
|
||||
* [Cryptographic Parameters](fine_grained_api/shortint/parameters.md)
|
||||
* [Serialization/Deserialization](fine_grained_api/shortint/serialization.md)
|
||||
|
||||
* [Integer](fine_grained_api/integer/tutorial.md)
|
||||
* [Integer](fine_grained_api/integer/readme.md)
|
||||
* [Operations](fine_grained_api/integer/operations.md)
|
||||
* [Cryptographic Parameters](fine_grained_api/integer/parameters.md)
|
||||
* [Serialization/Deserialization](fine_grained_api/integer/serialization.md)
|
||||
@@ -55,4 +56,3 @@
|
||||
|
||||
## API references
|
||||
* [docs.rs](https://docs.rs/tfhe/)
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user