Compare commits

..

12 Commits

Author SHA1 Message Date
Mayeul@Zama
ce625df1f6 needed here too ? 2023-09-08 16:00:25 +02:00
Mayeul@Zama
5f16704fa8 fix(shortint): add LUT generation without carry 2023-09-08 16:00:25 +02:00
Mayeul@Zama
d686cd86b5 remove now invalid asserts 2023-09-08 16:00:15 +02:00
Mayeul@Zama
a2a3f10341 fix(shortint): fix degree after lookup_table 2023-09-08 16:00:15 +02:00
Mayeul@Zama
d7ac3b6d15 feat(shortint): add asserts in smart ops 2023-09-08 16:00:15 +02:00
Mayeul@Zama
44b9241c83 feat(shortint): cleanup input if necessary in ops 2023-09-08 16:00:15 +02:00
Mayeul@Zama
9df1725efc feat(shortint): cleanup input if necessary in apply_lookup_table_bivariate 2023-09-08 16:00:15 +02:00
Mayeul@Zama
55d97ed794 feat(shortint): no PBS in message/carry extract if useless 2023-09-08 16:00:15 +02:00
Mayeul@Zama
d10c11720e style(shortint): smart always take mut references 2023-09-08 16:00:15 +02:00
Mayeul@Zama
f1e0742b9f feat(shortint): replace apply_msg_identity_lut_assign by message_extract 2023-09-08 16:00:15 +02:00
Mayeul@Zama
e6136ca418 feat(shortint): replace clear_carry by message_extract 2023-09-08 16:00:15 +02:00
Mayeul@Zama
0805cd69e0 chore(shortint): remove comp_assign operations 2023-09-08 16:00:15 +02:00
496 changed files with 13520 additions and 52388 deletions

View File

@@ -51,7 +51,7 @@ jobs:
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: ${{ inputs.fork_repo }}
ref: ${{ inputs.fork_git_sha }}
@@ -66,10 +66,6 @@ jobs:
toolchain: stable
default: true
- name: Run concrete-csprng tests
run: |
make test_concrete_csprng
- name: Run core tests
run: |
AVX512_SUPPORT=ON make test_core_crypto
@@ -110,14 +106,6 @@ jobs:
run: |
make test_high_level_api
- name: Run safe deserialization tests
run: |
make test_safe_deserialization
- name: Run forward compatibility tests
run: |
make test_forward_compatibility
- name: Slack Notification
if: ${{ always() }}
continue-on-error: true

View File

@@ -50,7 +50,7 @@ jobs:
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: ${{ inputs.fork_repo }}
ref: ${{ inputs.fork_git_sha }}

View File

@@ -50,7 +50,7 @@ jobs:
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: ${{ inputs.fork_repo }}
ref: ${{ inputs.fork_git_sha }}

View File

@@ -50,7 +50,7 @@ jobs:
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: ${{ inputs.fork_repo }}
ref: ${{ inputs.fork_git_sha }}
@@ -65,10 +65,6 @@ jobs:
toolchain: stable
default: true
- name: Run concrete-csprng tests
run: |
make test_concrete_csprng
- name: Run core tests
run: |
AVX512_SUPPORT=ON make test_core_crypto
@@ -81,10 +77,6 @@ jobs:
run: |
make test_c_api
- name: Run C API tests with forward_compatibility
run: |
FORWARD_COMPAT=ON make test_c_api
- name: Run user docs tests
run: |
make test_user_doc

View File

@@ -50,7 +50,7 @@ jobs:
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: ${{ inputs.fork_repo }}
ref: ${{ inputs.fork_git_sha }}

View File

@@ -19,14 +19,6 @@ on:
request_id:
description: "Slab request ID"
type: string
# This input is not used in this workflow but still mandatory since a calling workflow could
# use it. If a triggering command include a user_inputs field, then the triggered workflow
# must include this very input, otherwise the workflow won't be called.
# See start_full_benchmarks.yml as example.
user_inputs:
description: "Type of benchmarks to run"
type: string
default: "weekly_benchmarks"
env:
CARGO_TERM_COLOR: always
@@ -51,7 +43,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -96,13 +88,13 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_boolean
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab

View File

@@ -21,26 +21,12 @@ jobs:
fail-fast: false
steps:
- uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
- name: Install and run newline linter checks
if: matrix.os == 'ubuntu-latest'
run: |
wget https://github.com/fernandrone/linelint/releases/download/0.0.6/linelint-linux-amd64
echo "16b70fb7b471d6f95cbdc0b4e5dc2b0ac9e84ba9ecdc488f7bdf13df823aca4b linelint-linux-amd64" > checksum
sha256sum -c checksum || exit 1
chmod +x linelint-linux-amd64
mv linelint-linux-amd64 /usr/local/bin/linelint
make check_newline
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
- name: Run pcc checks
run: |
make pcc
- name: Build concrete-csprng
run: |
make build_concrete_csprng
- name: Build Release core
run: |
make build_core AVX512_SUPPORT=ON

View File

@@ -1,112 +0,0 @@
name: Code Coverage
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
# All the inputs are provided by Slab
inputs:
instance_id:
description: "AWS instance ID"
type: string
instance_image_id:
description: "AWS instance AMI ID"
type: string
instance_type:
description: "AWS instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: 'Slab request ID'
type: string
fork_repo:
description: 'Name of forked repo as user/repo'
type: string
fork_git_sha:
description: 'Git SHA to checkout from fork'
type: string
jobs:
code-coverage:
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}_${{ inputs.instance_image_id }}_${{ inputs.instance_type }}
cancel-in-progress: true
runs-on: ${{ inputs.runner_name }}
steps:
# Step used for log purpose.
- name: Instance configuration used
run: |
echo "ID: ${{ inputs.instance_id }}"
echo "AMI: ${{ inputs.instance_image_id }}"
echo "Type: ${{ inputs.instance_type }}"
echo "Request ID: ${{ inputs.request_id }}"
echo "Fork repo: ${{ inputs.fork_repo }}"
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
with:
repository: ${{ inputs.fork_repo }}
ref: ${{ inputs.fork_git_sha }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: stable
default: true
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@408093d9ff9c134c33b974e0722ce06b9d6e8263
with:
files_yaml: |
tfhe:
- tfhe/src/**
concrete_csprng:
- concrete-csprng/src/**
- name: Generate Keys
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
run: |
make GEN_KEY_CACHE_COVERAGE_ONLY=TRUE gen_key_cache
- name: Run coverage for boolean
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
run: |
make test_boolean_cov
- name: Run coverage for shortint
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
run: |
make test_shortint_cov
- name: Upload tfhe coverage to Codecov
uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
with:
token: ${{ secrets.CODECOV_TOKEN }}
directory: ./coverage/
fail_ci_if_error: true
files: shortint/cobertura.xml,boolean/cobertura.xml
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "Code coverage finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -1,74 +0,0 @@
name: CSPRNG randomness testing Workflow
env:
CARGO_TERM_COLOR: always
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
RUSTFLAGS: "-C target-cpu=native"
on:
# Allows you to run this workflow manually from the Actions tab as an alternative.
workflow_dispatch:
# All the inputs are provided by Slab
inputs:
instance_id:
description: "AWS instance ID"
type: string
instance_image_id:
description: "AWS instance AMI ID"
type: string
instance_type:
description: "AWS instance product type"
type: string
runner_name:
description: "Action runner name"
type: string
request_id:
description: 'Slab request ID'
type: string
fork_repo:
description: 'Name of forked repo as user/repo'
type: string
fork_git_sha:
description: 'Git SHA to checkout from fork'
type: string
jobs:
csprng-randomness-teting:
name: CSPRNG randomness testing
concurrency:
group: ${{ github.workflow }}_${{ github.ref }}_${{ inputs.instance_image_id }}_${{ inputs.instance_type }}
cancel-in-progress: true
runs-on: ${{ inputs.runner_name }}
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
with:
repository: ${{ inputs.fork_repo }}
ref: ${{ inputs.fork_git_sha }}
- name: Set up home
run: |
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
- name: Install latest stable
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
with:
toolchain: stable
default: true
- name: Dieharder randomness test suite
run: |
make dieharder_csprng
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "concrete-csprng randomness check finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -44,7 +44,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -69,7 +69,7 @@ jobs:
parse_integer_benches
- name: Upload csv results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_csv_integer
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
@@ -90,13 +90,13 @@ jobs:
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_integer
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab

View File

@@ -19,10 +19,6 @@ on:
request_id:
description: "Slab request ID"
type: string
user_inputs:
description: "Type of benchmarks to run"
type: string
default: "weekly_benchmarks"
env:
CARGO_TERM_COLOR: always
@@ -30,33 +26,8 @@ env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
jobs:
prepare-matrix:
name: Prepare operations matrix
runs-on: ubuntu-latest
outputs:
op_flavor: ${{ steps.set_op_flavor.outputs.op_flavor }}
steps:
- name: Weekly benchmarks
if: ${{ github.event.inputs.user_inputs == 'weekly_benchmarks' }}
run: |
echo "OP_FLAVOR=[\"default\", \"default_comp\", \"default_scalar\", \"default_scalar_comp\"]" >> ${GITHUB_ENV}
- name: Quarterly benchmarks
if: ${{ github.event.inputs.user_inputs == 'quarterly_benchmarks' }}
run: |
echo "OP_FLAVOR=[\"default\", \"default_comp\", \"default_scalar\", \"default_scalar_comp\", \
\"smart\", \"smart_comp\", \"smart_scalar\", \"smart_parallelized\", \"smart_parallelized_comp\", \"smart_scalar_parallelized\", \"smart_scalar_parallelized_comp\", \
\"unchecked\", \"unchecked_comp\", \"unchecked_scalar\", \"unchecked_scalar_comp\", \
\"misc\"]" >> ${GITHUB_ENV}
- name: Set operation flavor output
id: set_op_flavor
run: |
echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> ${GITHUB_OUTPUT}
integer-benchmarks:
name: Execute integer benchmarks for all operations flavor
needs: prepare-matrix
runs-on: ${{ github.event.inputs.runner_name }}
if: ${{ !cancelled() }}
continue-on-error: true
@@ -64,7 +35,7 @@ jobs:
max-parallel: 1
matrix:
command: [ integer, integer_multi_bit]
op_flavor: ${{ fromJson(needs.prepare-matrix.outputs.op_flavor) }}
op_flavor: [ default, default_comp, default_scalar, default_scalar_comp, smart, smart_comp, smart_scalar, smart_parallelized, smart_parallelized_comp, smart_scalar_parallelized, unchecked, unchecked_comp, unchecked_scalar, unchecked_scalar_comp, misc ]
steps:
- name: Instance configuration used
run: |
@@ -74,7 +45,7 @@ jobs:
echo "Request ID: ${{ inputs.request_id }}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -96,7 +67,7 @@ jobs:
override: true
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab
@@ -120,7 +91,7 @@ jobs:
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -44,7 +44,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -69,7 +69,7 @@ jobs:
parse_integer_benches
- name: Upload csv results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_csv_integer
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
@@ -90,13 +90,13 @@ jobs:
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_integer
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab

View File

@@ -28,7 +28,7 @@ jobs:
runs-on: ["self-hosted", "m1mac"]
steps:
- uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
- name: Install latest stable
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
@@ -40,10 +40,6 @@ jobs:
run: |
make pcc
- name: Build concrete-csprng
run: |
make build_concrete_csprng
- name: Build Release core
run: |
make build_core
@@ -68,10 +64,6 @@ jobs:
run: |
make build_c_api
- name: Run concrete-csprng tests
run: |
make test_concrete_csprng
- name: Run core tests
run: |
make test_core_crypto

View File

@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -49,7 +49,7 @@ jobs:
- name: Publish web package
if: ${{ inputs.push_web_package }}
uses: JS-DevTools/npm-publish@fe72237be0920f7a0cafd6a966c9b929c9466e9b
uses: JS-DevTools/npm-publish@5a85faf05d2ade2d5b6682bfe5359915d5159c6c
with:
token: ${{ secrets.NPM_TOKEN }}
package: tfhe/pkg/package.json
@@ -65,7 +65,7 @@ jobs:
- name: Publish Node package
if: ${{ inputs.push_node_package }}
uses: JS-DevTools/npm-publish@fe72237be0920f7a0cafd6a966c9b929c9466e9b
uses: JS-DevTools/npm-publish@5a85faf05d2ade2d5b6682bfe5359915d5159c6c
with:
token: ${{ secrets.NPM_TOKEN }}
package: tfhe/pkg/package.json
@@ -79,6 +79,6 @@ jobs:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "tfhe release failed: (${{ env.ACTION_RUN_URL }})"
SLACK_MESSAGE: "Integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -1,42 +0,0 @@
# Publish new release of tfhe-rs on various platform.
name: Publish concrete-csprng release
on:
workflow_dispatch:
inputs:
dry_run:
description: "Dry-run"
type: boolean
default: true
env:
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
jobs:
publish_release:
name: Publish concrete-csprng Release
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
with:
fetch-depth: 0
- name: Publish crate.io package
env:
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
run: |
cargo publish -p concrete-csprng --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
- name: Slack Notification
if: ${{ failure() }}
continue-on-error: true
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
env:
SLACK_COLOR: ${{ job.status }}
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
SLACK_MESSAGE: "concrete-csprng release failed: (${{ env.ACTION_RUN_URL }})"
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

View File

@@ -17,10 +17,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
- name: Checkout lattice-estimator
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: malb/lattice-estimator
path: lattice_estimator
@@ -32,7 +32,7 @@ jobs:
- name: Collect parameters
run: |
CARGO_PROFILE=devo make write_params_to_file
make write_params_to_file
- name: Perform security check
run: |

View File

@@ -19,14 +19,6 @@ on:
request_id:
description: "Slab request ID"
type: string
# This input is not used in this workflow but still mandatory since a calling workflow could
# use it. If a triggering command include a user_inputs field, then the triggered workflow
# must include this very input, otherwise the workflow won't be called.
# See start_full_benchmarks.yml as example.
user_inputs:
description: "Type of benchmarks to run"
type: string
default: "weekly_benchmarks"
env:
CARGO_TERM_COLOR: always
@@ -51,7 +43,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -66,17 +58,9 @@ jobs:
toolchain: nightly
override: true
- name: bench_ac2023_CJP
- name: Run benchmarks with AVX512
run: |
make AVX512_SUPPORT=ON bench_ac2023_CJP
- name: bench_ac2023_stairKS
run: |
make AVX512_SUPPORT=ON bench_ac2023_stairKS
- name: bench_ac2023_fastKS
run: |
make AVX512_SUPPORT=ON bench_ac2023_fastKS
make AVX512_SUPPORT=ON bench_pbs
- name: Parse results
run: |
@@ -94,13 +78,13 @@ jobs:
--throughput
- name: Upload parsed results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_pbs
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab

View File

@@ -43,7 +43,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -88,13 +88,13 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_shortint
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab

View File

@@ -19,14 +19,6 @@ on:
request_id:
description: "Slab request ID"
type: string
# This input is not used in this workflow but still mandatory since a calling workflow could
# use it. If a triggering command include a user_inputs field, then the triggered workflow
# must include this very input, otherwise the workflow won't be called.
# See start_full_benchmarks.yml as example.
user_inputs:
description: "Type of benchmarks to run"
type: string
default: "weekly_benchmarks"
env:
CARGO_TERM_COLOR: always
@@ -51,7 +43,7 @@ jobs:
echo "Request ID: ${{ inputs.request_id }}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -73,7 +65,7 @@ jobs:
override: true
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab
@@ -112,7 +104,7 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_shortint_${{ matrix.op_flavor }}
path: ${{ env.RESULTS_FILENAME }}

View File

@@ -42,13 +42,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
- name: Check for file changes
id: changed-files
uses: tj-actions/changed-files@408093d9ff9c134c33b974e0722ce06b9d6e8263
uses: tj-actions/changed-files@2f7246cb26e8bb6709b6cbfc1fec7febfe82e96a
with:
files_yaml: |
common_benches:
@@ -85,7 +85,7 @@ jobs:
- .github/workflows/wasm_client_benchmark.yml
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab

View File

@@ -3,21 +3,9 @@ name: Start full suite benchmarks
on:
schedule:
# Weekly benchmarks will be triggered each Saturday at 1a.m.
# Job will be triggered each Saturday at 1a.m.
- cron: '0 1 * * 6'
# Quarterly benchmarks will be triggered right before end of quarter, the 25th of the current month at 4a.m.
# These benchmarks are far longer to execute hence the reason to run them only four time a year.
- cron: '0 4 25 MAR,JUN,SEP,DEC *'
workflow_dispatch:
inputs:
benchmark_type:
description: 'Benchmark type'
required: true
default: 'weekly'
type: choice
options:
- weekly
- quarterly
jobs:
start-benchmarks:
@@ -28,31 +16,21 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout tfhe-rs
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
- name: Set benchmarks type as weekly
if: (github.event_name == 'workflow_dispatch' && inputs.benchmark_type == 'weekly') || github.event.schedule == '0 1 * * 6'
run: |
echo "BENCH_TYPE=weekly_benchmarks" >> "${GITHUB_ENV}"
- name: Set benchmarks type as quarterly
if: (github.event_name == 'workflow_dispatch' && inputs.benchmark_type == 'quarterly') || github.event.schedule == '0 4 25 MAR,JUN,SEP,DEC *'
run: |
echo "BENCH_TYPE=quarterly_benchmarks" >> "${GITHUB_ENV}"
- name: Start AWS job in Slab
shell: bash
run: |
echo -n '{"command": "${{ matrix.command }}", "git_ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "user_inputs": "${{ env.BENCH_TYPE }}"}' > command.json
echo -n '{"command": "${{ matrix.command }}", "git_ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' > command.json
SIGNATURE="$(slab/scripts/hmac_calculator.sh command.json '${{ secrets.JOB_SECRET }}')"
curl -v -k \
--fail-with-body \

View File

@@ -13,11 +13,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
- name: Save repo
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: repo-archive
path: '.'

View File

@@ -12,16 +12,6 @@ jobs:
permissions:
pull-requests: write
steps:
- name: Get current labels
uses: snnaplab/get-labels-action@f426df40304808ace3b5282d4f036515f7609576
- name: Remove approved label
if: ${{ github.event_name == 'pull_request' && contains(fromJSON(env.LABELS), 'approved') }}
uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
labels: approved
- name: Launch fast tests
if: ${{ github.event_name == 'pull_request' }}
uses: mshick/add-pr-comment@a65df5f64fc741e91c59b8359a4bc56e57aaf5b1
@@ -30,17 +20,8 @@ jobs:
message: |
@slab-ci cpu_fast_test
- name: Add approved label
uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && !contains(fromJSON(env.LABELS), 'approved') }}
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
labels: approved
# PR label 'approved' presence is checked to avoid running the full test suite several times
# in case of multiple approvals without new commits in between.
- name: Launch full tests suite
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && !contains(fromJSON(env.LABELS), 'approved') }}
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' }}
uses: mshick/add-pr-comment@a65df5f64fc741e91c59b8359a4bc56e57aaf5b1
with:
allow-repeats: true
@@ -51,4 +32,3 @@ jobs:
@slab-ci cpu_integer_test
@slab-ci cpu_multi_bit_test
@slab-ci cpu_wasm_test
@slab-ci csprng_randomness_testing

View File

@@ -19,14 +19,6 @@ on:
request_id:
description: "Slab request ID"
type: string
# This input is not used in this workflow but still mandatory since a calling workflow could
# use it. If a triggering command include a user_inputs field, then the triggered workflow
# must include this very input, otherwise the workflow won't be called.
# See start_full_benchmarks.yml as example.
user_inputs:
description: "Type of benchmarks to run"
type: string
default: "weekly_benchmarks"
env:
CARGO_TERM_COLOR: always
@@ -51,7 +43,7 @@ jobs:
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
- name: Checkout tfhe-rs repo with tags
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
fetch-depth: 0
@@ -97,13 +89,13 @@ jobs:
--append-results
- name: Upload parsed results artifact
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
with:
name: ${{ github.sha }}_wasm
path: ${{ env.RESULTS_FILENAME }}
- name: Checkout Slab repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
with:
repository: zama-ai/slab
path: slab

6
.gitignore vendored
View File

@@ -13,9 +13,3 @@ target/
# Some of our bench outputs
/tfhe/benchmarks_parameters
**/*.csv
# dieharder run log
dieharder_run.log
# Coverage reports
./coverage/

View File

@@ -1,14 +0,0 @@
ignore:
- .git
- target
- tfhe/benchmarks_parameters
- tfhe/web_wasm_parallel_tests/node_modules
- tfhe/web_wasm_parallel_tests/dist
- keys
- coverage
rules:
# checks if file ends in a newline character
end-of-file:
enable: true
single-new-line: true

View File

@@ -1,6 +1,6 @@
[workspace]
resolver = "2"
members = ["tfhe", "tasks", "apps/trivium", "concrete-csprng"]
members = ["tfhe", "tasks", "apps/trivium"]
[profile.bench]
lto = "fat"

299
Makefile
View File

@@ -3,25 +3,19 @@ OS:=$(shell uname)
RS_CHECK_TOOLCHAIN:=$(shell cat toolchain.txt | tr -d '\n')
CARGO_RS_CHECK_TOOLCHAIN:=+$(RS_CHECK_TOOLCHAIN)
TARGET_ARCH_FEATURE:=$(shell ./scripts/get_arch_feature.sh)
RS_BUILD_TOOLCHAIN:=stable
RS_BUILD_TOOLCHAIN:=$(shell \
( (echo $(TARGET_ARCH_FEATURE) | grep -q x86) && echo stable) || echo $(RS_CHECK_TOOLCHAIN))
CARGO_RS_BUILD_TOOLCHAIN:=+$(RS_BUILD_TOOLCHAIN)
CARGO_PROFILE?=release
MIN_RUST_VERSION:=$(shell grep '^rust-version[[:space:]]*=' tfhe/Cargo.toml | cut -d '=' -f 2 | xargs)
MIN_RUST_VERSION:=$(shell grep rust-version tfhe/Cargo.toml | cut -d '=' -f 2 | xargs)
AVX512_SUPPORT?=OFF
WASM_RUSTFLAGS:=
BIG_TESTS_INSTANCE?=FALSE
GEN_KEY_CACHE_MULTI_BIT_ONLY?=FALSE
GEN_KEY_CACHE_COVERAGE_ONLY?=FALSE
PARSE_INTEGER_BENCH_CSV_FILE?=tfhe_rs_integer_benches.csv
FAST_TESTS?=FALSE
FAST_BENCH?=FALSE
BENCH_OP_FLAVOR?=DEFAULT
NODE_VERSION=20
FORWARD_COMPAT?=OFF
TFHE_CURRENT_VERSION:=$(shell grep '^version[[:space:]]*=' tfhe/Cargo.toml | cut -d '=' -f 2 | xargs)
# Cargo has a hard time distinguishing between our package from the workspace and a package that
# could be a dependency, so we build an unambiguous spec here
TFHE_SPEC:=tfhe@$(TFHE_CURRENT_VERSION)
# This is done to avoid forgetting it, we still precise the RUSTFLAGS in the commands to be able to
# copy paste the command in the terminal and change them if required without forgetting the flags
export RUSTFLAGS?=-C target-cpu=native
@@ -38,38 +32,10 @@ else
MULTI_BIT_ONLY=
endif
ifeq ($(GEN_KEY_CACHE_COVERAGE_ONLY),TRUE)
COVERAGE_ONLY=--coverage-only
else
COVERAGE_ONLY=
endif
ifeq ($(FORWARD_COMPAT),ON)
FORWARD_COMPAT_FEATURE=forward_compatibility
else
FORWARD_COMPAT_FEATURE=
endif
# Variables used only for regex_engine example
REGEX_STRING?=''
REGEX_PATTERN?=''
# Exclude these files from coverage reports
define COVERAGE_EXCLUDED_FILES
--exclude-files apps/trivium/src/trivium/* \
--exclude-files apps/trivium/src/kreyvium/* \
--exclude-files apps/trivium/src/static_deque/* \
--exclude-files apps/trivium/src/trans_ciphering/* \
--exclude-files tasks/src/* \
--exclude-files tfhe/benches/boolean/* \
--exclude-files tfhe/benches/core_crypto/* \
--exclude-files tfhe/benches/shortint/* \
--exclude-files tfhe/benches/integer/* \
--exclude-files tfhe/benches/* \
--exclude-files tfhe/examples/regex_engine/* \
--exclude-files tfhe/examples/utilities/*
endef
.PHONY: rs_check_toolchain # Echo the rust toolchain used for checks
rs_check_toolchain:
@echo $(RS_CHECK_TOOLCHAIN)
@@ -111,197 +77,136 @@ install_wasm_pack: install_rs_build_toolchain
install_node:
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | $(SHELL)
source ~/.bashrc
$(SHELL) -i -c 'nvm install $(NODE_VERSION)' || \
$(SHELL) -i -c 'nvm install node' || \
( echo "Unable to install node, unknown error." && exit 1 )
.PHONY: install_dieharder # Install dieharder for apt distributions or macOS
install_dieharder:
@dieharder -h > /dev/null 2>&1 || \
if [[ "$(OS)" == "Linux" ]]; then \
sudo apt update && sudo apt install -y dieharder; \
elif [[ "$(OS)" == "Darwin" ]]; then\
brew install dieharder; \
fi || ( echo "Unable to install dieharder, unknown error." && exit 1 )
.PHONY: install_tarpaulin # Install tarpaulin to perform code coverage
install_tarpaulin: install_rs_build_toolchain
@cargo tarpaulin --version > /dev/null 2>&1 || \
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install cargo-tarpaulin --locked || \
( echo "Unable to install cargo tarpaulin, unknown error." && exit 1 )
.PHONY: check_linelint_installed # Check if linelint newline linter is installed
check_linelint_installed:
@printf "\n" | linelint - > /dev/null 2>&1 || \
( echo "Unable to locate linelint. Try installing it: https://github.com/fernandrone/linelint/releases" && exit 1 )
.PHONY: fmt # Format rust code
fmt: install_rs_check_toolchain
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt
.PHONY: check_fmt # Check rust code format
.PHONT: check_fmt # Check rust code format
check_fmt: install_rs_check_toolchain
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt --check
.PHONY: fix_newline # Fix newline at end of file issues to be UNIX compliant
fix_newline: check_linelint_installed
linelint -a .
.PHONY: check_newline # Check for newline at end of file to be UNIX compliant
check_newline: check_linelint_installed
linelint .
.PHONY: clippy_core # Run clippy lints on core_crypto with and without experimental features
clippy_core: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE) \
-p $(TFHE_SPEC) -- --no-deps -D warnings
-p tfhe -- --no-deps -D warnings
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE),experimental \
-p $(TFHE_SPEC) -- --no-deps -D warnings
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE),nightly-avx512 \
-p $(TFHE_SPEC) -- --no-deps -D warnings
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE),experimental,nightly-avx512 \
-p $(TFHE_SPEC) -- --no-deps -D warnings
-p tfhe -- --no-deps -D warnings
.PHONY: clippy_boolean # Run clippy lints enabling the boolean features
clippy_boolean: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE),boolean \
-p $(TFHE_SPEC) -- --no-deps -D warnings
-p tfhe -- --no-deps -D warnings
.PHONY: clippy_shortint # Run clippy lints enabling the shortint features
clippy_shortint: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE),shortint \
-p $(TFHE_SPEC) -- --no-deps -D warnings
-p tfhe -- --no-deps -D warnings
.PHONY: clippy_integer # Run clippy lints enabling the integer features
clippy_integer: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE),integer \
-p $(TFHE_SPEC) -- --no-deps -D warnings
-p tfhe -- --no-deps -D warnings
.PHONY: clippy # Run clippy lints enabling the boolean, shortint, integer
clippy: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
-p $(TFHE_SPEC) -- --no-deps -D warnings
-p tfhe -- --no-deps -D warnings
.PHONY: clippy_c_api # Run clippy lints enabling the boolean, shortint and the C API
clippy_c_api: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api \
-p $(TFHE_SPEC) -- --no-deps -D warnings
-p tfhe -- --no-deps -D warnings
.PHONY: clippy_js_wasm_api # Run clippy lints enabling the boolean, shortint, integer and the js wasm API
clippy_js_wasm_api: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
--features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api \
-p $(TFHE_SPEC) -- --no-deps -D warnings
-p tfhe -- --no-deps -D warnings
.PHONY: clippy_tasks # Run clippy lints on helper tasks crate.
clippy_tasks:
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
-p tasks -- --no-deps -D warnings
.PHONY: clippy_trivium # Run clippy lints on Trivium app
clippy_trivium: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
-p tfhe-trivium -- --no-deps -D warnings
.PHONY: clippy_all_targets # Run clippy lints on all targets (benches, examples, etc.)
clippy_all_targets: install_rs_check_toolchain
clippy_all_targets:
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,safe-deserialization \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: clippy_all_targets_forward_compatibility # Run clippy lints on all targets (benches, examples, etc.)
clippy_all_targets_forward_compatibility: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,safe-deserialization,forward_compatibility \
-p $(TFHE_SPEC) -- --no-deps -D warnings
.PHONY: clippy_concrete_csprng # Run clippy lints on concrete-csprng
clippy_concrete_csprng:
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
--features=$(TARGET_ARCH_FEATURE) \
-p concrete-csprng -- --no-deps -D warnings
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
-p tfhe -- --no-deps -D warnings
.PHONY: clippy_all # Run all clippy targets
clippy_all: clippy clippy_boolean clippy_shortint clippy_integer clippy_all_targets clippy_c_api \
clippy_js_wasm_api clippy_tasks clippy_core clippy_concrete_csprng clippy_trivium \
clippy_all_targets_forward_compatibility
clippy_js_wasm_api clippy_tasks clippy_core
.PHONY: clippy_fast # Run main clippy targets
clippy_fast: clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core \
clippy_concrete_csprng
clippy_fast: clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core
.PHONY: gen_key_cache # Run the script to generate keys and cache them for shortint tests
gen_key_cache: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) run --profile $(CARGO_PROFILE) \
--example generates_test_keys \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache -- \
$(MULTI_BIT_ONLY) $(COVERAGE_ONLY)
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p tfhe -- \
$(MULTI_BIT_ONLY)
.PHONY: build_core # Build core_crypto without experimental features
build_core: install_rs_build_toolchain install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE) -p $(TFHE_SPEC)
--features=$(TARGET_ARCH_FEATURE) -p tfhe
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),$(AVX512_FEATURE) -p $(TFHE_SPEC); \
--features=$(TARGET_ARCH_FEATURE),$(AVX512_FEATURE) -p tfhe; \
fi
.PHONY: build_core_experimental # Build core_crypto with experimental features
build_core_experimental: install_rs_build_toolchain install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental -p $(TFHE_SPEC)
--features=$(TARGET_ARCH_FEATURE),experimental -p tfhe
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p $(TFHE_SPEC); \
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p tfhe; \
fi
.PHONY: build_boolean # Build with boolean enabled
build_boolean: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean -p $(TFHE_SPEC) --all-targets
--features=$(TARGET_ARCH_FEATURE),boolean -p tfhe --all-targets
.PHONY: build_shortint # Build with shortint enabled
build_shortint: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),shortint -p $(TFHE_SPEC) --all-targets
--features=$(TARGET_ARCH_FEATURE),shortint -p tfhe --all-targets
.PHONY: build_integer # Build with integer enabled
build_integer: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),integer -p $(TFHE_SPEC) --all-targets
--features=$(TARGET_ARCH_FEATURE),integer -p tfhe --all-targets
.PHONY: build_tfhe_full # Build with boolean, shortint and integer enabled
build_tfhe_full: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer -p $(TFHE_SPEC) --all-targets
.PHONY: symlink_c_libs_without_fingerprint # Link the .a and .so files without the changing hash part in target
symlink_c_libs_without_fingerprint:
@./scripts/symlink_c_libs_without_fingerprint.sh \
--cargo-profile "$(CARGO_PROFILE)" \
--lib-name tfhe-c-api-dynamic-buffer
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer -p tfhe --all-targets
.PHONY: build_c_api # Build the C API for boolean, shortint and integer
build_c_api: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization,$(FORWARD_COMPAT_FEATURE) \
-p $(TFHE_SPEC)
@"$(MAKE)" symlink_c_libs_without_fingerprint
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api, \
-p tfhe
.PHONY: build_c_api_experimental_deterministic_fft # Build the C API for boolean, shortint and integer with experimental deterministic FFT
build_c_api_experimental_deterministic_fft: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization,experimental-force_fft_algo_dif4,$(FORWARD_COMPAT_FEATURE) \
-p $(TFHE_SPEC)
@"$(MAKE)" symlink_c_libs_without_fingerprint
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,experimental-force_fft_algo_dif4 \
-p tfhe
.PHONY: build_web_js_api # Build the js API targeting the web browser
build_web_js_api: install_rs_build_toolchain install_wasm_pack
@@ -326,61 +231,30 @@ build_node_js_api: install_rs_build_toolchain install_wasm_pack
wasm-pack build --release --target=nodejs \
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api
.PHONY: build_concrete_csprng # Build concrete_csprng
build_concrete_csprng: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE) -p concrete-csprng --all-targets
.PHONY: test_core_crypto # Run the tests of the core_crypto module including experimental ones
test_core_crypto: install_rs_build_toolchain install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental -p $(TFHE_SPEC) -- core_crypto::
--features=$(TARGET_ARCH_FEATURE),experimental -p tfhe -- core_crypto::
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p $(TFHE_SPEC) -- core_crypto::; \
fi
.PHONY: test_ccs_2024_stair_ks # Run the tests of the core_crypto module including experimental ones
test_ccs_2024_stair_ks: install_rs_build_toolchain install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental -p $(TFHE_SPEC) -- core_crypto::algorithms::test::lwe_stair_keyswitch
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p $(TFHE_SPEC) -- core_crypto::algorithms::test::lwe_stair_keyswitch; \
fi
.PHONY: test_ccs_2024_fft_shrinking_ks # Run the tests of the core_crypto module including experimental ones
test_ccs_2024_fft_shrinking_ks: install_rs_build_toolchain install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental -p $(TFHE_SPEC) -- core_crypto::algorithms::test::lwe_fast_keyswitch
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p $(TFHE_SPEC) -- core_crypto::algorithms::test::lwe_fast_keyswitch; \
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p tfhe -- core_crypto::; \
fi
.PHONY: test_boolean # Run the tests of the boolean module
test_boolean: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean -p $(TFHE_SPEC) -- boolean::
.PHONY: test_boolean_cov # Run the tests of the boolean module with code coverage
test_boolean_cov: install_rs_check_toolchain install_tarpaulin
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) tarpaulin --profile $(CARGO_PROFILE) \
--out xml --output-dir coverage/boolean --line --engine llvm --timeout 500 \
$(COVERAGE_EXCLUDED_FILES) \
--features=$(TARGET_ARCH_FEATURE),boolean,internal-keycache,__coverage \
-p $(TFHE_SPEC) -- boolean::
--features=$(TARGET_ARCH_FEATURE),boolean -p tfhe -- boolean::
.PHONY: test_c_api_rs # Run the rust tests for the C API
test_c_api_rs: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization \
-p $(TFHE_SPEC) \
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api \
-p tfhe \
c_api
.PHONY: test_c_api_c # Run the C tests for the C API
test_c_api_c: build_c_api
./scripts/c_api_tests.sh --forward-compat "$(FORWARD_COMPAT)"
./scripts/c_api_tests.sh
.PHONY: test_c_api # Run all the tests for the C API
test_c_api: test_c_api_rs test_c_api_c
@@ -390,68 +264,49 @@ test_shortint_ci: install_rs_build_toolchain install_cargo_nextest
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
FAST_TESTS="$(FAST_TESTS)" \
./scripts/shortint-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
--cargo-profile "$(CARGO_PROFILE)" --tfhe-package "$(TFHE_SPEC)"
--cargo-profile "$(CARGO_PROFILE)"
.PHONY: test_shortint_multi_bit_ci # Run the tests for shortint ci running only multibit tests
test_shortint_multi_bit_ci: install_rs_build_toolchain install_cargo_nextest
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
FAST_TESTS="$(FAST_TESTS)" \
./scripts/shortint-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
--cargo-profile "$(CARGO_PROFILE)" --multi-bit --tfhe-package "$(TFHE_SPEC)"
--cargo-profile "$(CARGO_PROFILE)" --multi-bit
.PHONY: test_shortint # Run all the tests for shortint
test_shortint: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p $(TFHE_SPEC) -- shortint::
.PHONY: test_shortint_cov # Run the tests of the shortint module with code coverage
test_shortint_cov: install_rs_check_toolchain install_tarpaulin
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) tarpaulin --profile $(CARGO_PROFILE) \
--out xml --output-dir coverage/shortint --line --engine llvm --timeout 500 \
$(COVERAGE_EXCLUDED_FILES) \
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,__coverage \
-p $(TFHE_SPEC) -- shortint::
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p tfhe -- shortint::
.PHONY: test_integer_ci # Run the tests for integer ci
test_integer_ci: install_rs_build_toolchain install_cargo_nextest
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
FAST_TESTS="$(FAST_TESTS)" \
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
--cargo-profile "$(CARGO_PROFILE)" --tfhe-package "$(TFHE_SPEC)"
--cargo-profile "$(CARGO_PROFILE)"
.PHONY: test_integer_multi_bit_ci # Run the tests for integer ci running only multibit tests
test_integer_multi_bit_ci: install_rs_build_toolchain install_cargo_nextest
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
FAST_TESTS="$(FAST_TESTS)" \
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
--cargo-profile "$(CARGO_PROFILE)" --multi-bit --tfhe-package "$(TFHE_SPEC)"
.PHONY: test_safe_deserialization # Run the tests for safe deserialization
test_safe_deserialization: install_rs_build_toolchain install_cargo_nextest
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,safe-deserialization -p $(TFHE_SPEC) -- safe_deserialization::
--cargo-profile "$(CARGO_PROFILE)" --multi-bit
.PHONY: test_integer # Run all the tests for integer
test_integer: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache -p $(TFHE_SPEC) -- integer::
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache -p tfhe -- integer::
.PHONY: test_high_level_api # Run all the tests for high_level_api
test_high_level_api: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p $(TFHE_SPEC) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p tfhe \
-- high_level_api::
.PHONY: test_forward_compatibility # Run forward compatibility tests
test_forward_compatibility: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --tests --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,forward_compatibility,internal-keycache -p $(TFHE_SPEC) \
-- forward_compatibility::
.PHONY: test_user_doc # Run tests from the .md documentation
test_user_doc: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) --doc \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p $(TFHE_SPEC) \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p tfhe \
-- test_user_docs::
.PHONY: test_regex_engine # Run tests for regex_engine example
@@ -472,17 +327,14 @@ test_examples: test_sha256_bool test_regex_engine
.PHONY: test_trivium # Run tests for trivium
test_trivium: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
-p tfhe-trivium -- --test-threads=1 trivium::
trivium --features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
-- --test-threads=1
.PHONY: test_kreyvium # Run tests for kreyvium
test_kreyvium: install_rs_build_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
-p tfhe-trivium -- --test-threads=1 kreyvium::
.PHONY: test_concrete_csprng # Run concrete-csprng tests
test_concrete_csprng:
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
--features=$(TARGET_ARCH_FEATURE) -p concrete-csprng
kreyvium --features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
-- --test-threads=1
.PHONY: doc # Build rust doc
doc: install_rs_check_toolchain
@@ -514,20 +366,18 @@ format_doc_latex:
.PHONY: check_compile_tests # Build tests in debug without running them
check_compile_tests:
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --no-run \
--features=$(TARGET_ARCH_FEATURE),experimental,boolean,shortint,integer,internal-keycache,safe-deserialization \
-p $(TFHE_SPEC)
--features=$(TARGET_ARCH_FEATURE),experimental,boolean,shortint,integer,internal-keycache \
-p tfhe
@if [[ "$(OS)" == "Linux" || "$(OS)" == "Darwin" ]]; then \
"$(MAKE)" build_c_api && \
./scripts/c_api_tests.sh --build-only --forward-compat "$(FORWARD_COMPAT)" && \
FORWARD_COMPAT=ON "$(MAKE)" build_c_api && \
./scripts/c_api_tests.sh --build-only --forward-compat "$(FORWARD_COMPAT)"; \
"$(MAKE)" build_c_api; \
./scripts/c_api_tests.sh --build-only; \
fi
.PHONY: build_nodejs_test_docker # Build a docker image with tools to run nodejs tests for wasm API
build_nodejs_test_docker:
DOCKER_BUILDKIT=1 docker build --build-arg RUST_TOOLCHAIN="$(RS_BUILD_TOOLCHAIN)" \
-f docker/Dockerfile.wasm_tests --build-arg NODE_VERSION=$(NODE_VERSION) -t tfhe-wasm-tests .
-f docker/Dockerfile.wasm_tests -t tfhe-wasm-tests .
.PHONY: test_nodejs_wasm_api_in_docker # Run tests for the nodejs on wasm API in a docker container
test_nodejs_wasm_api_in_docker: build_nodejs_test_docker
@@ -551,8 +401,7 @@ test_web_js_api_parallel: build_web_js_api_parallel
.PHONY: ci_test_web_js_api_parallel # Run tests for the web wasm api
ci_test_web_js_api_parallel: build_web_js_api_parallel
source ~/.nvm/nvm.sh && \
nvm install $(NODE_VERSION) && \
nvm use $(NODE_VERSION) && \
nvm use node && \
$(MAKE) -C tfhe/web_wasm_parallel_tests test-ci
.PHONY: no_tfhe_typo # Check we did not invert the h and f in tfhe
@@ -563,10 +412,6 @@ no_tfhe_typo:
no_dbg_log:
@./scripts/no_dbg_calls.sh
.PHONY: dieharder_csprng # Run the dieharder test suite on our CSPRNG implementation
dieharder_csprng: install_dieharder build_concrete_csprng
./scripts/dieharder_test.sh
#
# Benchmarks
#
@@ -576,7 +421,7 @@ bench_integer: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC) --
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p tfhe --
.PHONY: bench_integer_multi_bit # Run benchmarks for integer using multi-bit parameters
bench_integer_multi_bit: install_rs_check_toolchain
@@ -584,14 +429,14 @@ bench_integer_multi_bit: install_rs_check_toolchain
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench integer-bench \
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC) --
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p tfhe --
.PHONY: bench_shortint # Run benchmarks for shortint
bench_shortint: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench shortint-bench \
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p tfhe
.PHONY: bench_shortint_multi_bit # Run benchmarks for shortint using multi-bit parameters
bench_shortint_multi_bit: install_rs_check_toolchain
@@ -599,20 +444,20 @@ bench_shortint_multi_bit: install_rs_check_toolchain
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) \
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench shortint-bench \
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC) --
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p tfhe --
.PHONY: bench_boolean # Run benchmarks for boolean
bench_boolean: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench boolean-bench \
--features=$(TARGET_ARCH_FEATURE),boolean,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
--features=$(TARGET_ARCH_FEATURE),boolean,internal-keycache,$(AVX512_FEATURE) -p tfhe
.PHONY: bench_pbs # Run benchmarks for PBS
bench_pbs: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench pbs-bench \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache,$(AVX512_FEATURE) -p tfhe
.PHONY: bench_web_js_api_parallel # Run benchmarks for the web wasm api
bench_web_js_api_parallel: build_web_js_api_parallel
@@ -624,24 +469,6 @@ ci_bench_web_js_api_parallel: build_web_js_api_parallel
nvm use node && \
$(MAKE) -C tfhe/web_wasm_parallel_tests bench-ci
.PHONY: bench_ccs_2024_cjp # Run benchmarks for PBS
bench_ccs_2024_cjp: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ccs-2024-cjp \
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
.PHONY: bench_ccs_2024_fft_shrinking_ks # Run benchmarks for PBS
bench_ccs_2024_fft_shrinking_ks: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ccs-2024-fft-shrinking-ks \
--features=$(TARGET_ARCH_FEATURE),internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
.PHONY: bench_ccs_2024_stair_ks # Run benchmarks for PBS
bench_ccs_2024_stair_ks: install_rs_check_toolchain
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
--bench ccs-2024-stair-ks \
--features=$(TARGET_ARCH_FEATURE),internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
#
# Utility tools
#
@@ -714,7 +541,7 @@ pcc: no_tfhe_typo no_dbg_log check_fmt lint_doc clippy_all check_compile_tests
fpcc: no_tfhe_typo no_dbg_log check_fmt lint_doc clippy_fast check_compile_tests
.PHONY: conformance # Automatically fix problems that can be fixed
conformance: fix_newline fmt
conformance: fmt
.PHONY: help # Generate list of targets with descriptions
help:

View File

@@ -47,7 +47,7 @@ tfhe = { version = "*", features = ["boolean", "shortint", "integer", "x86_64-un
```toml
tfhe = { version = "*", features = ["boolean", "shortint", "integer", "aarch64-unix"] }
```
Note: users with ARM devices must compile `TFHE-rs` using a stable toolchain with version >= 1.72.
Note: users with ARM devices must use `TFHE-rs` by compiling using the `nightly` toolchain.
+ For x86_64-based machines with the [`rdseed instruction`](https://en.wikipedia.org/wiki/RDRAND)
@@ -92,10 +92,10 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
// On the server side:
set_server_key(server_keys);
// Clear equivalent computations: 1344 * 5 = 6720
// Clear equivalent computations: 1344 * 8 = 10752
let encrypted_res_mul = &encrypted_a * &encrypted_b;
// Clear equivalent computations: 1344 >> 5 = 42
// Clear equivalent computations: 1344 >> 8 = 42
encrypted_a = &encrypted_res_mul >> &encrypted_b;
// Clear equivalent computations: let casted_a = a as u8;

View File

@@ -1,84 +0,0 @@
# Description
In what follows, we provide instructions on how to run benchmarks from the paper entitled "New Secret Keys for Enhanced Performance in (T)FHE". In particular, Table 2 and Figure 4 (in the Appendix) can be easily reproduced using this code.
The implementation of the techniques from the aforementioned paper has been integrated into the TFHE-rs library, version 0.4.
Modified or added source files are located in `tfhe/src/core_crypto/`:
- `algorithms/pseudo_ggsw_encryption.rs`
- `algorithms/pseudo_ggsw_conversion.rs`
- `algorithms/lwe_shrinking_keyswitch_key_generation.rs`
- `algorithms/lwe_shrinking_keyswitch.rs`
- `algorithms/lwe_secret_key_generation.rs`
- `algorithms/lwe_partial_secret_key_generation.rs`
- `algorithms/lwe_fast_keyswitch_key_generation.rs`
- `algorithms/lwe_fast_keyswitch.rs`
- `algorithms/glwe_secret_key_generation.rs`
- `algorithms/glwe_partial_secret_key_generation.rs`
- `algorithms/glwe_partial_sample_extraction.rs`
Test files are located in `tfhe/src/core_crypto/algorithms/test`:
- `lwe_stair_keyswitch.rs`
- `lwe_fast_keyswitch.rs`
Benchmarks are located in `tfhe/benches/core_crypto`:
- `ccs_2024_cjp.rs`
- `ccs_2024_fft_shrinking_ks.rs`
- `ccs_2024_stair_ks.rs`
# Dependencies
Tested on Linux and Mac OS with Rust >= 1.75 (see [here](https://www.rust-lang.org/tools/install) a guide to install Rust).
# How to run benchmarks
At the root of the project (i.e., in the TFHE-rs folder), enter the following commands to run the benchmarks:
- `make bench_ccs_2024_cjp`: Returns the timings associated with the CJP-based bootstrapping (Table 2, Line 1 + Figure 4, blue line);
- `make bench_ccs_2024_stair_ks`: Returns the timings associated with the "All+Stair-KS"-based bootstrapping (Table 2, Line 2 + Figure 4, red line);
- `make bench_ccs_2024_fft_shrinking_ks`: Returns the timings associated with the "All+FFT Shrinking-KS"-based bootstrapping (Table 2, Line 3 + Figure 4, green line);
This outputs the timings depending on the input precision.
Since large precision (>= 6 bits) might be long to execute, particularly on a laptop, these are disable by default. To choose which precision to launch, please uncomment lines associated to the parameter names into the `param_vec` variable, inside the `criterion_bench` function inside one of the benchmark files.
For instance, to launch only the precision 7 of the stair-KS benchmark, the correct `param_vec` variable (line 353 of `ccs_2024_stair_ks.rs`) looks like:
```rust
let param_vec = [
// PRECISION_1_STAIR,
// PRECISION_2_STAIR,
// PRECISION_3_STAIR,
// PRECISION_4_STAIR,
// PRECISION_5_STAIR,
// PRECISION_6_STAIR,
PRECISION_7_STAIR
// PRECISION_8_STAIR,
// PRECISION_9_STAIR,
// PRECISION_10_STAIR,
// PRECISION_11_STAIR,
];
```
Running the command `make bench_ccs_2024_stair_ks` will then run the correct benchmark.
# How to run the tests
At the root of the project (i.e., in the TFHE-rs folder), enter the following commands to run the tests:
- `make test_ccs_2024_stair_ks`: Returns the timings associated with the "All+Stair-KS"-based bootstrapping (Table 2, Line 2 + Figure 4, red line);
- `make test_ccs_2024_fft_shrinking_ks`: Returns the timings associated with the "All+FFT Shrinking-KS"-based bootstrapping (Table 2, Line 3 + Figure 4, green line);
As for the benchmarks, not all precisions are enabled by default. To add precisions to the tests, the associated parameters must be uncommented in the macro `create_parametrized_test!` (located at the end of each test file).
For instance, in the file `lwe_fast_keyswitch.rs`, testing only the selected precisions will look like this:
```rust
create_parametrized_test!(lwe_encrypt_fast_ks_decrypt_custom_mod {
PRECISION_1_FAST_KS,
PRECISION_2_FAST_KS,
PRECISION_3_FAST_KS,
PRECISION_4_FAST_KS,
PRECISION_5_FAST_KS,
PRECISION_6_FAST_KS,
PRECISION_7_FAST_KS,
PRECISION_8_FAST_KS,
// PRECISION_9_FAST_KS,
PRECISION_10_FAST_KS
// PRECISION_11_FAST_KS
});
```
Please note that the last parameter MUST NOT be followed by a comma `,` for the code to compile correctly.

View File

@@ -17,7 +17,7 @@ path = "../../tfhe"
features = [ "boolean", "shortint", "integer", "aarch64-unix" ]
[dev-dependencies]
criterion = { version = "0.5.1", features = [ "html_reports" ]}
criterion = { version = "0.4", features = [ "html_reports" ]}
[[bench]]
name = "trivium"

View File

@@ -120,7 +120,7 @@ fn main() {
# FHE byte Trivium implementation
The same objects have also been implemented to stream bytes instead of booleans. They can be constructed and used in the same way via the functions `TriviumStreamByte::<u8>::new` and
The same objects have also been implemented to stream bytes insead of booleans. They can be constructed and used in the same way via the functions `TriviumStreamByte::<u8>::new` and
`TriviumStreamByte::<FheUint8>::new` with the same arguments as before. The `FheUint8` version is significantly slower than the `FheBool` version, because not running
with the same cryptographic parameters. Its interest lie in its trans-ciphering capabilities: `TriviumStreamByte<FheUint8>` implements the trait `TransCiphering`,
meaning it implements the functions `trans_encrypt_64`. This function takes as input a `FheUint64` and outputs a `FheUint64`, the output being

View File

@@ -1,5 +1,5 @@
//! This module implements the Kreyvium stream cipher, using booleans or FheBool
//! for the representation of the inner bits.
//! for the representaion of the inner bits.
use crate::static_deque::StaticDeque;
@@ -35,7 +35,7 @@ pub struct KreyviumStream<T> {
}
impl KreyviumStream<bool> {
/// Constructor for `KreyviumStream<bool>`: arguments are the secret key and the input vector.
/// Contructor for `KreyviumStream<bool>`: arguments are the secret key and the input vector.
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(mut key: [bool; 128], mut iv: [bool; 128]) -> KreyviumStream<bool> {
@@ -118,7 +118,7 @@ where
T: KreyviumBoolInput<T> + std::marker::Send + std::marker::Sync,
for<'a> &'a T: KreyviumBoolInput<T>,
{
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
/// server key
fn new_from_registers(
a_register: [T; 93],

View File

@@ -1,5 +1,5 @@
//! This module implements the Kreyvium stream cipher, using u8 or FheUint8
//! for the representation of the inner bits.
//! for the representaion of the inner bits.
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
@@ -31,7 +31,7 @@ impl KreyviumByteInput<FheUint8> for &FheUint8 {}
/// representation of bits (u8 or FheUint8). To be able to compute FHE operations, it also owns
/// an Option for a ServerKey.
/// Since the original Kreyvium registers' sizes are not a multiple of 8, these registers (which
/// store byte-like objects) have a size that is the eighth of the closest multiple of 8 above the
/// store byte-like objects) have a size that is the eigth of the closest multiple of 8 above the
/// originals' sizes.
pub struct KreyviumStreamByte<T> {
a_byte: StaticByteDeque<12, T>,
@@ -43,7 +43,7 @@ pub struct KreyviumStreamByte<T> {
}
impl KreyviumStreamByte<u8> {
/// Constructor for `KreyviumStreamByte<u8>`: arguments are the secret key and the input vector.
/// Contructor for `KreyviumStreamByte<u8>`: arguments are the secret key and the input vector.
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(key_bytes: [u8; 16], iv_bytes: [u8; 16]) -> KreyviumStreamByte<u8> {
@@ -146,7 +146,7 @@ where
T: KreyviumByteInput<T> + Send,
for<'a> &'a T: KreyviumByteInput<T>,
{
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
/// server key
fn new_from_registers(
a_register: [T; 12],

View File

@@ -19,7 +19,7 @@ pub struct KreyviumStreamShortint {
}
impl KreyviumStreamShortint {
/// Constructor for KreyviumStreamShortint: arguments are the secret key and the input vector,
/// Contructor for KreyviumStreamShortint: arguments are the secret key and the input vector,
/// and a ServerKey reference. Outputs a KreyviumStream object already initialized (1152
/// steps have been run before returning)
pub fn new(

View File

@@ -1,6 +1,6 @@
//! This module implements the StaticByteDeque struct: a deque of bytes. The idea
//! is that this is a wrapper around StaticDeque, but StaticByteDeque has an additional
//! functionality: it can construct the "intermediate" bytes, made of parts of other bytes.
//! functionnality: it can construct the "intermediate" bytes, made of parts of other bytes.
//! This is pretending to store bits, and allows accessing bits in chunks of 8 consecutive.
use crate::static_deque::StaticDeque;

View File

@@ -5,7 +5,7 @@
use core::ops::{Index, IndexMut};
/// StaticDeque: a struct implementing a deque whose size is known at compile time.
/// It has 2 members: the static array containing the data (never empty), and a cursor
/// It has 2 members: the static array conatining the data (never empty), and a cursor
/// equal to the index of the oldest element (and the next one to be overwritten).
#[derive(Clone)]
pub struct StaticDeque<const N: usize, T> {

View File

@@ -1,5 +1,6 @@
mod trivium_bool;
pub use trivium_bool::TriviumStream;
#[allow(clippy::module_inception)]
mod trivium;
pub use trivium::TriviumStream;
mod trivium_byte;
pub use trivium_byte::TriviumStreamByte;

View File

@@ -1,5 +1,5 @@
//! This module implements the Trivium stream cipher, using booleans or FheBool
//! for the representation of the inner bits.
//! for the representaion of the inner bits.
use crate::static_deque::StaticDeque;
@@ -33,7 +33,7 @@ pub struct TriviumStream<T> {
}
impl TriviumStream<bool> {
/// Constructor for `TriviumStream<bool>`: arguments are the secret key and the input vector.
/// Contructor for `TriviumStream<bool>`: arguments are the secret key and the input vector.
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(key: [bool; 80], iv: [bool; 80]) -> TriviumStream<bool> {
@@ -94,7 +94,7 @@ where
T: TriviumBoolInput<T> + std::marker::Send + std::marker::Sync,
for<'a> &'a T: TriviumBoolInput<T>,
{
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
/// server key
fn new_from_registers(
a_register: [T; 93],

View File

@@ -1,5 +1,5 @@
//! This module implements the Trivium stream cipher, using u8 or FheUint8
//! for the representation of the inner bits.
//! for the representaion of the inner bits.
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
@@ -31,7 +31,7 @@ impl TriviumByteInput<FheUint8> for &FheUint8 {}
/// representation of bits (u8 or FheUint8). To be able to compute FHE operations, it also owns
/// an Option for a ServerKey.
/// Since the original Trivium registers' sizes are not a multiple of 8, these registers (which
/// store byte-like objects) have a size that is the eighth of the closest multiple of 8 above the
/// store byte-like objects) have a size that is the eigth of the closest multiple of 8 above the
/// originals' sizes.
pub struct TriviumStreamByte<T> {
a_byte: StaticByteDeque<12, T>,
@@ -41,7 +41,7 @@ pub struct TriviumStreamByte<T> {
}
impl TriviumStreamByte<u8> {
/// Constructor for `TriviumStreamByte<u8>`: arguments are the secret key and the input vector.
/// Contructor for `TriviumStreamByte<u8>`: arguments are the secret key and the input vector.
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
/// returning)
pub fn new(key: [u8; 10], iv: [u8; 10]) -> TriviumStreamByte<u8> {
@@ -111,7 +111,7 @@ where
T: TriviumByteInput<T> + Send,
for<'a> &'a T: TriviumByteInput<T>,
{
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
/// server key
fn new_from_registers(
a_register: [T; 12],

View File

@@ -17,9 +17,9 @@ pub struct TriviumStreamShortint {
}
impl TriviumStreamShortint {
/// Constructor for TriviumStreamShortint: arguments are the secret key and the input vector,
/// and a ServerKey reference. Outputs a TriviumStream object already initialized (1152
/// steps have been run before returning)
/// Contructor for TriviumStreamShortint: arguments are the secret key and the input vector, and
/// a ServerKey reference. Outputs a TriviumStream object already initialized (1152 steps
/// have been run before returning)
pub fn new(
key: [Ciphertext; 80],
iv: [u64; 80],

View File

@@ -36,24 +36,23 @@ def check_security(filename):
try:
# The lattice estimator is not able to manage such large dimension.
# If we have the security for smaller `n` then we have security for larger ones.
if param.n > 16384:
if param.n == 32768:
param = param.updated(n = 16384)
usvp_level = LWE.primal_usvp(param, red_cost_model = model)
dual_level = LWE.dual_hybrid(param, red_cost_model = model)
estimator_level = log(min(usvp_level["rop"], dual_level["rop"]),2 )
security_level = f"security level = {estimator_level} bits"
if estimator_level < 127:
print("FAIL\t({security_level})")
reason = f"attained {security_level} target is 128 bits"
print("FAIL")
reason = f"attained security level = {estimator_level} bits target is 128 bits"
to_update.append((param, reason))
continue
except Exception as err:
print("FAIL")
to_update.append((param, f"{repr(err)}"))
else:
print(f"OK\t({security_level})")
print("OK")
return to_update
@@ -73,4 +72,4 @@ if __name__ == "__main__":
print(f"[{param.tag}] reason: {reason} (param)")
sys.exit(int(1)) # Explicit conversion is needed to make this call work
else:
print("All parameters passed the security check")
print("All parameters passed the security check")

View File

@@ -1,16 +1,16 @@
[profile.cpu-big]
region = "eu-west-3"
image_id = "ami-051942e4055555752"
image_id = "ami-0ab73f5bd11708a85"
instance_type = "m6i.32xlarge"
[profile.cpu-small]
region = "eu-west-3"
image_id = "ami-051942e4055555752"
image_id = "ami-0ab73f5bd11708a85"
instance_type = "m6i.4xlarge"
[profile.bench]
region = "eu-west-3"
image_id = "ami-051942e4055555752"
image_id = "ami-0ab73f5bd11708a85"
instance_type = "m6i.metal"
[command.cpu_test]
@@ -77,13 +77,3 @@ check_run_name = "PBS CPU AWS Benchmarks"
workflow = "wasm_client_benchmark.yml"
profile = "cpu-small"
check_run_name = "WASM Client AWS Benchmarks"
[command.csprng_randomness_testing]
workflow = "csprng_randomness_testing.yml"
profile = "cpu-small"
check_run_name = "CSPRNG randomness testing"
[command.code_coverage]
workflow = "code_coverage.yml"
profile = "cpu-small"
check_run_name = "Code coverage"

View File

@@ -1,4 +0,0 @@
coverage:
status:
# Disable patch checks in GitHub until all tfhe-rs layers have coverage implemented.
patch: false

View File

@@ -1,53 +0,0 @@
[package]
name = "concrete-csprng"
version = "0.4.0"
edition = "2021"
license = "BSD-3-Clause-Clear"
description = "Cryptographically Secure PRNG used in the TFHE-rs library."
homepage = "https://zama.ai/"
documentation = "https://docs.zama.ai/tfhe-rs"
repository = "https://github.com/zama-ai/tfhe-rs"
readme = "README.md"
keywords = ["fully", "homomorphic", "encryption", "fhe", "cryptography"]
rust-version = "1.72"
[dependencies]
aes = "0.8.2"
rayon = { version = "1.5.0", optional = true }
[target.'cfg(target_os = "macos")'.dependencies]
libc = "0.2.133"
[dev-dependencies]
rand = "0.8.3"
criterion = "0.5.1"
clap = "=4.4.4"
[features]
parallel = ["rayon"]
seeder_x86_64_rdseed = []
seeder_unix = []
generator_x86_64_aesni = []
generator_fallback = []
generator_aarch64_aes = []
x86_64 = [
"parallel",
"seeder_x86_64_rdseed",
"generator_x86_64_aesni",
"generator_fallback",
]
x86_64-unix = ["x86_64", "seeder_unix"]
aarch64 = ["parallel", "generator_aarch64_aes", "generator_fallback"]
aarch64-unix = ["aarch64", "seeder_unix"]
[[bench]]
name = "benchmark"
path = "benches/benchmark.rs"
harness = false
required-features = ["seeder_x86_64_rdseed", "generator_x86_64_aesni"]
[[example]]
name = "generate"
path = "examples/generate.rs"
required-features = ["seeder_unix", "generator_fallback"]

View File

@@ -1,28 +0,0 @@
BSD 3-Clause Clear License
Copyright © 2023 ZAMA.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
3. Neither the name of ZAMA nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
THIS SOFTWARE IS PROVIDED BY THE ZAMA AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
ZAMA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,23 +0,0 @@
# Concrete CSPRNG
This crate contains a fast *Cryptographically Secure Pseudoramdon Number Generator*, used in the
['concrete-core'](https://crates.io/crates/concrete-core) library, you can find it [here](../concrete-core/) in this repo.
The implementation is based on the AES blockcipher used in CTR mode, as described in the ISO/IEC
18033-4 standard.
Two implementations are available, an accelerated one on x86_64 CPUs with the `aes` feature and the `sse2` feature, and a pure software one that can be used on other platforms.
The crate also makes two seeders available, one needing the x86_64 feature `rdseed` and another one based on the Unix random device `/dev/random` the latter requires the user to provide a secret.
## Running the benchmarks
To execute the benchmarks on an x86_64 platform:
```shell
RUSTFLAGS="-Ctarget-cpu=native" cargo bench --features=seeder_x86_64_rdseed,generator_x86_64_aesni
```
## License
This software is distributed under the BSD-3-Clause-Clear license. If you have any questions,
please contact us at `hello@zama.ai`.

View File

@@ -1,54 +0,0 @@
use concrete_csprng::generators::{
AesniRandomGenerator, BytesPerChild, ChildrenCount, RandomGenerator,
};
use concrete_csprng::seeders::{RdseedSeeder, Seeder};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
// The number of bytes to generate during one benchmark iteration.
const N_GEN: usize = 1_000_000;
fn parent_generate(c: &mut Criterion) {
let mut seeder = RdseedSeeder;
let mut generator = AesniRandomGenerator::new(seeder.seed());
c.bench_function("parent_generate", |b| {
b.iter(|| {
(0..N_GEN).for_each(|_| {
generator.next();
})
})
});
}
fn child_generate(c: &mut Criterion) {
let mut seeder = RdseedSeeder;
let mut generator = AesniRandomGenerator::new(seeder.seed());
let mut generator = generator
.try_fork(ChildrenCount(1), BytesPerChild(N_GEN * 10_000))
.unwrap()
.next()
.unwrap();
c.bench_function("child_generate", |b| {
b.iter(|| {
(0..N_GEN).for_each(|_| {
generator.next();
})
})
});
}
fn fork(c: &mut Criterion) {
let mut seeder = RdseedSeeder;
let mut generator = AesniRandomGenerator::new(seeder.seed());
c.bench_function("fork", |b| {
b.iter(|| {
black_box(
generator
.try_fork(ChildrenCount(2048), BytesPerChild(2048))
.unwrap(),
)
})
});
}
criterion_group!(benches, parent_generate, child_generate, fork);
criterion_main!(benches);

View File

@@ -1,112 +0,0 @@
// To have clear error messages during compilation about why some piece of code may not be available
// we decided to check the features compatibility with the target configuration in this script.
use std::collections::HashMap;
use std::env;
// See https://doc.rust-lang.org/reference/conditional-compilation.html#target_arch for various
// compilation configuration
// Can be easily extended if needed
pub struct FeatureRequirement {
pub feature_name: &'static str,
// target_arch requirement
pub feature_req_target_arch: Option<&'static str>,
// target_family requirement
pub feature_req_target_family: Option<&'static str>,
}
// We implement a version of default that is const which is not possible through the Default trait
impl FeatureRequirement {
// As we cannot use cfg!(feature = "feature_name") with something else than a literal, we need
// a reference to the HashMap we populate with the enabled features
fn is_activated(&self, build_activated_features: &HashMap<&'static str, bool>) -> bool {
*build_activated_features.get(self.feature_name).unwrap()
}
// panics if the requirements are not met
fn check_requirements(&self) {
let target_arch = get_target_arch_cfg();
if let Some(feature_req_target_arch) = self.feature_req_target_arch {
if feature_req_target_arch != target_arch {
panic!(
"Feature `{}` requires target_arch `{}`, current cfg: `{}`",
self.feature_name, feature_req_target_arch, target_arch
)
}
}
let target_family = get_target_family_cfg();
if let Some(feature_req_target_family) = self.feature_req_target_family {
if feature_req_target_family != target_family {
panic!(
"Feature `{}` requires target_family `{}`, current cfg: `{}`",
self.feature_name, feature_req_target_family, target_family
)
}
}
}
}
// const vecs are not yet a thing so use a fixed size array (update the array size when adding
// requirements)
static FEATURE_REQUIREMENTS: [FeatureRequirement; 4] = [
FeatureRequirement {
feature_name: "seeder_x86_64_rdseed",
feature_req_target_arch: Some("x86_64"),
feature_req_target_family: None,
},
FeatureRequirement {
feature_name: "generator_x86_64_aesni",
feature_req_target_arch: Some("x86_64"),
feature_req_target_family: None,
},
FeatureRequirement {
feature_name: "seeder_unix",
feature_req_target_arch: None,
feature_req_target_family: Some("unix"),
},
FeatureRequirement {
feature_name: "generator_aarch64_aes",
feature_req_target_arch: Some("aarch64"),
feature_req_target_family: None,
},
];
// For a "feature_name" feature_cfg!("feature_name") expands to
// ("feature_name", cfg!(feature = "feature_name"))
macro_rules! feature_cfg {
($feat_name:literal) => {
($feat_name, cfg!(feature = $feat_name))
};
}
// Static HashMap would require an additional crate (phf or lazy static e.g.), so we just write a
// function that returns the HashMap we are interested in
fn get_feature_enabled_status() -> HashMap<&'static str, bool> {
HashMap::from([
feature_cfg!("seeder_x86_64_rdseed"),
feature_cfg!("generator_x86_64_aesni"),
feature_cfg!("seeder_unix"),
feature_cfg!("generator_aarch64_aes"),
])
}
// See https://stackoverflow.com/a/43435335/18088947 for the inspiration of this code
fn get_target_arch_cfg() -> String {
env::var("CARGO_CFG_TARGET_ARCH").expect("CARGO_CFG_TARGET_ARCH is not set")
}
fn get_target_family_cfg() -> String {
env::var("CARGO_CFG_TARGET_FAMILY").expect("CARGO_CFG_TARGET_FAMILY is not set")
}
fn main() {
let feature_enabled_status = get_feature_enabled_status();
// This will panic if some requirements for a feature are not met
FEATURE_REQUIREMENTS
.iter()
.filter(|&req| FeatureRequirement::is_activated(req, &feature_enabled_status))
.for_each(FeatureRequirement::check_requirements);
}

View File

@@ -1,113 +0,0 @@
//! This program uses the concrete csprng to generate an infinite stream of random bytes on
//! the program stdout. It can also generate a fixed number of bytes by passing a value along the
//! optional argument `--bytes_total`. For testing purpose.
use clap::{value_parser, Arg, Command};
#[cfg(feature = "generator_x86_64_aesni")]
use concrete_csprng::generators::AesniRandomGenerator as ActivatedRandomGenerator;
#[cfg(feature = "generator_aarch64_aes")]
use concrete_csprng::generators::NeonAesRandomGenerator as ActivatedRandomGenerator;
#[cfg(all(
not(feature = "generator_x86_64_aesni"),
not(feature = "generator_aarch64_aes"),
feature = "generator_fallback"
))]
use concrete_csprng::generators::SoftwareRandomGenerator as ActivatedRandomGenerator;
use concrete_csprng::generators::RandomGenerator;
#[cfg(target_os = "macos")]
use concrete_csprng::seeders::AppleSecureEnclaveSeeder as ActivatedSeeder;
#[cfg(all(not(target_os = "macos"), feature = "seeder_x86_64_rdseed"))]
use concrete_csprng::seeders::RdseedSeeder as ActivatedSeeder;
#[cfg(all(
not(target_os = "macos"),
not(feature = "seeder_x86_64_rdseed"),
feature = "seeder_unix"
))]
use concrete_csprng::seeders::UnixSeeder as ActivatedSeeder;
use concrete_csprng::seeders::Seeder;
use std::io::prelude::*;
use std::io::{stdout, StdoutLock};
fn write_bytes(
buffer: &mut [u8],
generator: &mut ActivatedRandomGenerator,
stdout: &mut StdoutLock<'_>,
) -> std::io::Result<()> {
buffer.iter_mut().zip(generator).for_each(|(b, g)| *b = g);
stdout.write_all(buffer)
}
fn infinite_bytes_generation(
buffer: &mut [u8],
generator: &mut ActivatedRandomGenerator,
stdout: &mut StdoutLock<'_>,
) {
while write_bytes(buffer, generator, stdout).is_ok() {}
}
fn bytes_generation(
bytes_total: usize,
buffer: &mut [u8],
generator: &mut ActivatedRandomGenerator,
stdout: &mut StdoutLock<'_>,
) {
let quotient = bytes_total / buffer.len();
let remaining = bytes_total % buffer.len();
for _ in 0..quotient {
write_bytes(buffer, generator, stdout).unwrap();
}
write_bytes(&mut buffer[0..remaining], generator, stdout).unwrap()
}
pub fn main() {
let matches = Command::new(
"Generate a stream of random numbers, specify no flags for infinite generation",
)
.arg(
Arg::new("bytes_total")
.short('b')
.long("bytes_total")
.value_parser(value_parser!(usize))
.help("Total number of bytes that has to be generated"),
)
.get_matches();
// Ugly hack to be able to use UnixSeeder
#[cfg(all(
not(target_os = "macos"),
not(feature = "seeder_x86_64_rdseed"),
feature = "seeder_unix"
))]
let new_seeder = || ActivatedSeeder::new(0);
#[cfg(not(all(
not(target_os = "macos"),
not(feature = "seeder_x86_64_rdseed"),
feature = "seeder_unix"
)))]
let new_seeder = || ActivatedSeeder;
let mut seeder = new_seeder();
let seed = seeder.seed();
// Don't print on std out
eprintln!("seed={seed:?}");
let mut generator = ActivatedRandomGenerator::new(seed);
let stdout = stdout();
let mut buffer = [0u8; 16];
// lock stdout as there is a single thread running
let mut stdout = stdout.lock();
match matches.get_one::<usize>("bytes_total") {
Some(&total) => {
bytes_generation(total, &mut buffer, &mut generator, &mut stdout);
}
None => {
infinite_bytes_generation(&mut buffer, &mut generator, &mut stdout);
}
};
}

View File

@@ -1,20 +0,0 @@
use crate::generators::aes_ctr::index::AesIndex;
use crate::generators::aes_ctr::BYTES_PER_BATCH;
/// Represents a key used in the AES block cipher.
#[derive(Clone, Copy)]
pub struct AesKey(pub u128);
/// A trait for AES block ciphers.
///
/// Note:
/// -----
///
/// The block cipher is used in a batched manner (to reduce amortized cost on special hardware).
/// For this reason we only expose a `generate_batch` method.
pub trait AesBlockCipher: Clone + Send + Sync {
/// Instantiate a new generator from a secret key.
fn new(key: AesKey) -> Self;
/// Generates the batch corresponding to the given index.
fn generate_batch(&mut self, index: AesIndex) -> [u8; BYTES_PER_BATCH];
}

View File

@@ -1,379 +0,0 @@
use crate::generators::aes_ctr::block_cipher::{AesBlockCipher, AesKey};
use crate::generators::aes_ctr::index::TableIndex;
use crate::generators::aes_ctr::states::{BufferPointer, ShiftAction, State};
use crate::generators::aes_ctr::BYTES_PER_BATCH;
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError};
// Usually, to work with iterators and parallel iterators, we would use opaque types such as
// `impl Iterator<..>`. Unfortunately, it is not yet possible to return existential types in
// traits, which we would need for `RandomGenerator`. For this reason, we have to use the
// full type name where needed. Hence the following trait aliases definition:
/// A type alias for the children iterator closure type.
pub type ChildrenClosure<BlockCipher> =
fn((usize, (Box<BlockCipher>, TableIndex, BytesPerChild))) -> AesCtrGenerator<BlockCipher>;
/// A type alias for the children iterator type.
pub type ChildrenIterator<BlockCipher> = std::iter::Map<
std::iter::Zip<
std::ops::Range<usize>,
std::iter::Repeat<(Box<BlockCipher>, TableIndex, BytesPerChild)>,
>,
ChildrenClosure<BlockCipher>,
>;
/// A type implementing the `RandomGenerator` api using the AES block cipher in counter mode.
#[derive(Clone)]
pub struct AesCtrGenerator<BlockCipher: AesBlockCipher> {
// The block cipher used in the background
pub(crate) block_cipher: Box<BlockCipher>,
// The state corresponding to the latest outputted byte.
pub(crate) state: State,
// The last legal index. This makes bound check faster.
pub(crate) last: TableIndex,
// The buffer containing the current batch of aes calls.
pub(crate) buffer: [u8; BYTES_PER_BATCH],
}
#[allow(unused)] // to please clippy when tests are not activated
impl<BlockCipher: AesBlockCipher> AesCtrGenerator<BlockCipher> {
/// Generates a new csprng.
///
/// Note :
/// ------
///
/// The `start_index` given as input, points to the first byte that will be outputted by the
/// generator. If not given, this one is automatically set to the second table index. The
/// first table index is not used to prevent an edge case from happening: since `state` is
/// supposed to contain the index of the previous byte, the initial value must be decremented.
/// Using the second value prevents wrapping to the max index, which would make the bound
/// checking fail.
///
/// The `bound_index` given as input, points to the first byte that can __not__ be legally
/// outputted by the generator. If not given, the bound is automatically set to the last
/// table index.
pub fn new(
key: AesKey,
start_index: Option<TableIndex>,
bound_index: Option<TableIndex>,
) -> AesCtrGenerator<BlockCipher> {
AesCtrGenerator::from_block_cipher(
Box::new(BlockCipher::new(key)),
start_index.unwrap_or(TableIndex::SECOND),
bound_index.unwrap_or(TableIndex::LAST),
)
}
/// Generates a csprng from an existing block cipher.
pub fn from_block_cipher(
block_cipher: Box<BlockCipher>,
start_index: TableIndex,
bound_index: TableIndex,
) -> AesCtrGenerator<BlockCipher> {
assert!(start_index < bound_index);
let last = bound_index.decremented();
let buffer = [0u8; BYTES_PER_BATCH];
let state = State::new(start_index);
AesCtrGenerator {
block_cipher,
state,
last,
buffer,
}
}
/// Returns the table index related to the previous random byte.
pub fn table_index(&self) -> TableIndex {
self.state.table_index()
}
/// Returns the bound of the generator if any.
///
/// The bound is the table index of the first byte that can not be outputted by the generator.
pub fn get_bound(&self) -> TableIndex {
self.last.incremented()
}
/// Returns whether the generator is bounded or not.
pub fn is_bounded(&self) -> bool {
self.get_bound() != TableIndex::LAST
}
/// Computes the number of bytes that can still be outputted by the generator.
///
/// Note :
/// ------
///
/// Note that `ByteCount` uses the `u128` datatype to store the byte count. Unfortunately, the
/// number of remaining bytes is in ⟦0;2¹³² -1⟧. When the number is greater than 2¹²⁸ - 1,
/// we saturate the count at 2¹²⁸ - 1.
pub fn remaining_bytes(&self) -> ByteCount {
TableIndex::distance(&self.last, &self.state.table_index()).unwrap()
}
/// Outputs the next random byte.
pub fn generate_next(&mut self) -> u8 {
self.next()
.expect("Tried to generate a byte after the bound.")
}
/// Tries to fork the current generator into `n_child` generators each able to output
/// `child_bytes` random bytes.
pub fn try_fork(
&mut self,
n_children: ChildrenCount,
n_bytes: BytesPerChild,
) -> Result<ChildrenIterator<BlockCipher>, ForkError> {
if n_children.0 == 0 {
return Err(ForkError::ZeroChildrenCount);
}
if n_bytes.0 == 0 {
return Err(ForkError::ZeroBytesPerChild);
}
if !self.is_fork_in_bound(n_children, n_bytes) {
return Err(ForkError::ForkTooLarge);
}
// The state currently stored in the parent generator points to the table index of the last
// generated byte. The first index to be generated is the next one:
let first_index = self.state.table_index().incremented();
let output = (0..n_children.0)
.zip(std::iter::repeat((
self.block_cipher.clone(),
first_index,
n_bytes,
)))
.map(
// This map is a little weird because we need to cast the closure to a fn pointer
// that matches the signature of `ChildrenIterator<BlockCipher>`.
// Unfortunately, the compiler does not manage to coerce this one
// automatically.
(|(i, (block_cipher, first_index, n_bytes))| {
// The first index to be outputted by the child is the `first_index` shifted by
// the proper amount of `child_bytes`.
let child_first_index = first_index.increased(n_bytes.0 * i);
// The bound of the child is the first index of its next sibling.
let child_bound_index = first_index.increased(n_bytes.0 * (i + 1));
AesCtrGenerator::from_block_cipher(
block_cipher,
child_first_index,
child_bound_index,
)
}) as ChildrenClosure<BlockCipher>,
);
// The parent next index is the bound of the last child.
let next_index = first_index.increased(n_bytes.0 * n_children.0);
self.state = State::new(next_index);
Ok(output)
}
pub(crate) fn is_fork_in_bound(
&self,
n_child: ChildrenCount,
child_bytes: BytesPerChild,
) -> bool {
let mut end = self.state.table_index();
end.increase(n_child.0 * child_bytes.0);
end <= self.last
}
}
impl<BlockCipher: AesBlockCipher> Iterator for AesCtrGenerator<BlockCipher> {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
if self.state.table_index() >= self.last {
None
} else {
match self.state.increment() {
ShiftAction::OutputByte(BufferPointer(ptr)) => Some(self.buffer[ptr]),
ShiftAction::RefreshBatchAndOutputByte(aes_index, BufferPointer(ptr)) => {
self.buffer = self.block_cipher.generate_batch(aes_index);
Some(self.buffer[ptr])
}
}
}
}
}
/// Generic property tests for `AesCtrGenerator`, reusable with any `AesBlockCipher`
/// implementation.
#[cfg(test)]
pub mod aes_ctr_generic_test {
    #![allow(unused)] // to please clippy when tests are not activated
    use super::*;
    use crate::generators::aes_ctr::index::{AesIndex, ByteIndex};
    use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
    use rand::{thread_rng, Rng};
    /// Number of random samples drawn when checking a property.
    const REPEATS: usize = 1_000_000;
    /// Yields uniformly sampled table indices.
    pub fn any_table_index() -> impl Iterator<Item = TableIndex> {
        std::iter::repeat_with(|| {
            TableIndex::new(
                AesIndex(thread_rng().gen()),
                ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
            )
        })
    }
    /// Yields uniformly sampled `usize` values.
    pub fn any_usize() -> impl Iterator<Item = usize> {
        std::iter::repeat_with(|| thread_rng().gen())
    }
    /// Yields children counts in ⟦1;2048⟧.
    pub fn any_children_count() -> impl Iterator<Item = ChildrenCount> {
        std::iter::repeat_with(|| ChildrenCount(thread_rng().gen::<usize>() % 2048 + 1))
    }
    /// Yields per-child byte counts in ⟦1;2048⟧.
    pub fn any_bytes_per_child() -> impl Iterator<Item = BytesPerChild> {
        std::iter::repeat_with(|| BytesPerChild(thread_rng().gen::<usize>() % 2048 + 1))
    }
    /// Yields uniformly sampled AES keys.
    pub fn any_key() -> impl Iterator<Item = AesKey> {
        std::iter::repeat_with(|| AesKey(thread_rng().gen()))
    }
    /// Output a valid fork:
    /// a table index t,
    /// a number of children nc,
    /// a number of bytes per children nb
    /// and a positive integer i such that:
    /// increase(t, nc*nb+i) < MAX with MAX the largest table index.
    ///
    /// Put differently, if we initialize a parent generator at t and fork it with (nc, nb), our
    /// parent generator current index gets shifted to an index, distant of at least i bytes of
    /// the max index.
    pub fn any_valid_fork(
    ) -> impl Iterator<Item = (TableIndex, ChildrenCount, BytesPerChild, usize)> {
        any_table_index()
            .zip(any_children_count())
            .zip(any_bytes_per_child())
            .zip(any_usize())
            .map(|(((t, nc), nb), i)| (t, nc, nb, i))
            .filter(|(t, nc, nb, i)| {
                TableIndex::distance(&TableIndex::LAST, t).unwrap().0 > (nc.0 * nb.0 + i) as u128
            })
    }
    /// Check the property:
    ///     On a valid fork, the table index of the first child is the same as the table index of
    ///     the parent before the fork.
    pub fn prop_fork_first_state_table_index<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let original_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let mut forked_generator = original_generator.clone();
            let first_child = forked_generator.try_fork(nc, nb).unwrap().next().unwrap();
            assert_eq!(original_generator.table_index(), first_child.table_index());
        }
    }
    /// Check the property:
    ///     On a valid fork, the table index of the first byte outputted by the parent after the
    ///     fork, is the bound of the last child of the fork.
    pub fn prop_fork_last_bound_table_index<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let mut parent_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let last_child = parent_generator.try_fork(nc, nb).unwrap().last().unwrap();
            assert_eq!(
                parent_generator.table_index().incremented(),
                last_child.get_bound()
            );
        }
    }
    /// Check the property:
    ///     On a valid fork, the bound of the parent does not change.
    pub fn prop_fork_parent_bound_table_index<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let original_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let mut forked_generator = original_generator.clone();
            forked_generator.try_fork(nc, nb).unwrap().last().unwrap();
            assert_eq!(original_generator.get_bound(), forked_generator.get_bound());
        }
    }
    /// Check the property:
    ///     On a valid fork, the parent table index is increased of the number of children
    ///     multiplied by the number of bytes per child.
    pub fn prop_fork_parent_state_table_index<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let original_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let mut forked_generator = original_generator.clone();
            forked_generator.try_fork(nc, nb).unwrap().last().unwrap();
            assert_eq!(
                forked_generator.table_index(),
                // Decrement accounts for the fact that the table index stored is the previous one
                t.increased(nc.0 * nb.0).decremented()
            );
        }
    }
    /// Check the property:
    ///     On a valid fork, the bytes outputted by the children in the fork order form the same
    ///     sequence the parent would have yielded had no fork happened.
    pub fn prop_fork<G: AesBlockCipher>() {
        for _ in 0..1000 {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let bytes_to_go = nc.0 * nb.0;
            let original_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let mut forked_generator = original_generator.clone();
            let initial_output: Vec<u8> = original_generator.take(bytes_to_go).collect();
            let forked_output: Vec<u8> = forked_generator
                .try_fork(nc, nb)
                .unwrap()
                .flat_map(|child| child.collect::<Vec<_>>())
                .collect();
            assert_eq!(initial_output, forked_output);
        }
    }
    /// Check the property:
    ///     On a valid fork, all children got a number of remaining bytes equals to the number of
    ///     bytes per child given as fork input.
    pub fn prop_fork_children_remaining_bytes<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let mut generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            assert!(generator
                .try_fork(nc, nb)
                .unwrap()
                .all(|c| c.remaining_bytes().0 == nb.0 as u128));
        }
    }
    /// Check the property:
    ///     On a valid fork, the number of remaining bytes of the parent is reduced by the number
    ///     of children multiplied by the number of bytes per child.
    pub fn prop_fork_parent_remaining_bytes<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let bytes_to_go = nc.0 * nb.0;
            let mut generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let before_remaining_bytes = generator.remaining_bytes();
            let _ = generator.try_fork(nc, nb).unwrap();
            let after_remaining_bytes = generator.remaining_bytes();
            assert_eq!(
                before_remaining_bytes.0 - after_remaining_bytes.0,
                bytes_to_go as u128
            );
        }
    }
}

View File

@@ -1,389 +0,0 @@
use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
use crate::generators::ByteCount;
use std::cmp::Ordering;
/// A structure representing an [aes index](#coarse-grained-pseudo-random-table-lookup).
///
/// This is the counter value fed to the aes block cipher.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct AesIndex(pub u128);
/// A structure representing a [byte index](#fine-grained-pseudo-random-table-lookup).
///
/// Selects one of the `BYTES_PER_AES_CALL` bytes within a single aes output block.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct ByteIndex(pub usize);
/// A structure representing a [table index](#fine-grained-pseudo-random-table-lookup)
#[derive(Clone, Copy, Debug)]
pub struct TableIndex {
    // Which 128-bit aes output block the index points into.
    pub(crate) aes_index: AesIndex,
    // Which byte of that block the index points to (always < BYTES_PER_AES_CALL,
    // enforced by `TableIndex::new`).
    pub(crate) byte_index: ByteIndex,
}
impl TableIndex {
    /// The first table index.
    pub const FIRST: TableIndex = TableIndex {
        aes_index: AesIndex(0),
        byte_index: ByteIndex(0),
    };
    /// The second table index.
    pub const SECOND: TableIndex = TableIndex {
        aes_index: AesIndex(0),
        byte_index: ByteIndex(1),
    };
    /// The last table index.
    pub const LAST: TableIndex = TableIndex {
        aes_index: AesIndex(u128::MAX),
        byte_index: ByteIndex(BYTES_PER_AES_CALL - 1),
    };
    /// Creates a table index from an aes index and a byte index.
    ///
    /// Panics if `byte_index` is not smaller than `BYTES_PER_AES_CALL`.
    #[allow(unused)] // to please clippy when tests are not activated
    pub fn new(aes_index: AesIndex, byte_index: ByteIndex) -> Self {
        assert!(byte_index.0 < BYTES_PER_AES_CALL);
        TableIndex {
            aes_index,
            byte_index,
        }
    }
    /// Shifts the table index forward of `shift` bytes, wrapping around past the last aes
    /// index.
    pub fn increase(&mut self, shift: usize) {
        // Compute full shifts to avoid overflows
        let full_aes_shifts = shift / BYTES_PER_AES_CALL;
        let shift_remainder = shift % BYTES_PER_AES_CALL;
        // Get the additional shift if any
        let new_byte_index = self.byte_index.0 + shift_remainder;
        let full_aes_shifts = full_aes_shifts + new_byte_index / BYTES_PER_AES_CALL;
        // Store the remainder in the byte index
        self.byte_index.0 = new_byte_index % BYTES_PER_AES_CALL;
        self.aes_index.0 = self.aes_index.0.wrapping_add(full_aes_shifts as u128);
    }
    /// Shifts the table index backward of `shift` bytes, wrapping around before the first aes
    /// index.
    pub fn decrease(&mut self, shift: usize) {
        let remainder = shift % BYTES_PER_AES_CALL;
        if remainder <= self.byte_index.0 {
            // The byte index can absorb the sub-block part of the shift.
            self.aes_index.0 = self
                .aes_index
                .0
                .wrapping_sub((shift / BYTES_PER_AES_CALL) as u128);
            self.byte_index.0 -= remainder;
        } else {
            // The sub-block part of the shift borrows one extra aes index.
            self.aes_index.0 = self
                .aes_index
                .0
                .wrapping_sub((shift / BYTES_PER_AES_CALL) as u128 + 1);
            self.byte_index.0 += BYTES_PER_AES_CALL - remainder;
        }
    }
    /// Shifts the table index forward of one byte.
    pub fn increment(&mut self) {
        self.increase(1)
    }
    /// Shifts the table index backward of one byte.
    pub fn decrement(&mut self) {
        self.decrease(1)
    }
    /// Returns the table index shifted forward by `shift` bytes.
    pub fn increased(mut self, shift: usize) -> Self {
        self.increase(shift);
        self
    }
    /// Returns the table index shifted backward by `shift` bytes.
    #[allow(unused)] // to please clippy when tests are not activated
    pub fn decreased(mut self, shift: usize) -> Self {
        self.decrease(shift);
        self
    }
    /// Returns the table index to the next byte.
    pub fn incremented(mut self) -> Self {
        self.increment();
        self
    }
    /// Returns the table index to the previous byte.
    pub fn decremented(mut self) -> Self {
        self.decrement();
        self
    }
    /// Returns the distance between two table indices in bytes.
    ///
    /// Note:
    /// -----
    ///
    /// This method assumes that the `larger` input is, well, larger than the `smaller` input. If
    /// this is not the case, the method returns `None`. Also, note that `ByteCount` uses the
    /// `u128` datatype to store the byte count. Unfortunately, the number of bytes between two
    /// table indices is in ⟦0;2¹³² -1⟧. When the distance is greater than 2¹²⁸ - 1, we saturate
    /// the count at 2¹²⁸ - 1.
    pub fn distance(larger: &Self, smaller: &Self) -> Option<ByteCount> {
        match std::cmp::Ord::cmp(larger, smaller) {
            Ordering::Less => None,
            Ordering::Equal => Some(ByteCount(0)),
            Ordering::Greater => {
                // Saturating arithmetic caps the result at 2¹²⁸ - 1 (see the note above).
                let mut result = larger.aes_index.0 - smaller.aes_index.0;
                result = result.saturating_mul(BYTES_PER_AES_CALL as u128);
                result = result.saturating_add(larger.byte_index.0 as u128);
                result = result.saturating_sub(smaller.byte_index.0 as u128);
                Some(ByteCount(result))
            }
        }
    }
}
// `Eq` is sound: `partial_cmp` always returns `Some`, since it delegates to `Ord::cmp`.
impl Eq for TableIndex {}
impl PartialEq<Self> for TableIndex {
    /// Two table indices are equal when they compare as `Ordering::Equal`.
    fn eq(&self, other: &Self) -> bool {
        self.partial_cmp(other) == Some(Ordering::Equal)
    }
}
impl PartialOrd<Self> for TableIndex {
    /// Delegates to the total order defined by the `Ord` implementation.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}
impl Ord for TableIndex {
    /// Lexicographic order: first by aes index, then by byte index.
    fn cmp(&self, other: &Self) -> Ordering {
        self.aes_index
            .cmp(&other.aes_index)
            .then_with(|| self.byte_index.cmp(&other.byte_index))
    }
}
/// Unit and property tests for `TableIndex` arithmetic and ordering.
#[cfg(test)]
mod test {
    use super::*;
    use rand::{thread_rng, Rng};
    /// Number of random samples drawn when checking a property.
    const REPEATS: usize = 1_000_000;
    /// Yields uniformly sampled table indices.
    fn any_table_index() -> impl Iterator<Item = TableIndex> {
        std::iter::repeat_with(|| {
            TableIndex::new(
                AesIndex(thread_rng().gen()),
                ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
            )
        })
    }
    /// Yields uniformly sampled `usize` values.
    fn any_usize() -> impl Iterator<Item = usize> {
        std::iter::repeat_with(|| thread_rng().gen())
    }
    #[test]
    #[should_panic]
    /// Verifies that the constructor of `TableIndex` panics when the byte index is too large.
    fn test_table_index_new_panic() {
        TableIndex::new(AesIndex(12), ByteIndex(144));
    }
    #[test]
    /// Verifies that the `TableIndex` wraps nicely with predecessor
    fn test_table_index_predecessor_edge() {
        assert_eq!(TableIndex::FIRST.decremented(), TableIndex::LAST);
    }
    #[test]
    /// Verifies that the `TableIndex` wraps nicely with successor
    fn test_table_index_successor_edge() {
        assert_eq!(TableIndex::LAST.incremented(), TableIndex::FIRST);
    }
    #[test]
    /// Check that the table index distance saturates nicely.
    fn prop_table_index_distance_saturates() {
        assert_eq!(
            TableIndex::distance(&TableIndex::LAST, &TableIndex::FIRST)
                .unwrap()
                .0,
            u128::MAX
        )
    }
    #[test]
    /// Check the property:
    ///     For all table indices t,
    ///         distance(t, t) = Some(0).
    fn prop_table_index_distance_zero() {
        for _ in 0..REPEATS {
            let t = any_table_index().next().unwrap();
            assert_eq!(TableIndex::distance(&t, &t), Some(ByteCount(0)));
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t1, t2 such that t1 < t2,
    ///         distance(t1, t2) = None.
    fn prop_table_index_distance_wrong_order_none() {
        for _ in 0..REPEATS {
            let (t1, t2) = any_table_index()
                .zip(any_table_index())
                .find(|(t1, t2)| t1 < t2)
                .unwrap();
            assert_eq!(TableIndex::distance(&t1, &t2), None);
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t1, t2 such that t1 > t2,
    ///         distance(t1, t2) = Some(v) where v is strictly positive.
    fn prop_table_index_distance_some_positive() {
        for _ in 0..REPEATS {
            let (t1, t2) = any_table_index()
                .zip(any_table_index())
                .find(|(t1, t2)| t1 > t2)
                .unwrap();
            assert!(matches!(TableIndex::distance(&t1, &t2), Some(ByteCount(v)) if v > 0));
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t, positive i such that i < distance (MAX, t) with MAX the largest
    ///     table index,
    ///         distance(t.increased(i), t) = Some(i).
    fn prop_table_index_distance_increase() {
        for _ in 0..REPEATS {
            let (t, inc) = any_table_index()
                .zip(any_usize())
                .find(|(t, inc)| {
                    (*inc as u128) < TableIndex::distance(&TableIndex::LAST, t).unwrap().0
                })
                .unwrap();
            assert_eq!(
                TableIndex::distance(&t.increased(inc), &t).unwrap().0 as usize,
                inc
            );
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t, t =? t = true.
    fn prop_table_index_equality() {
        for _ in 0..REPEATS {
            let t = any_table_index().next().unwrap();
            assert_eq!(
                std::cmp::PartialOrd::partial_cmp(&t, &t),
                Some(std::cmp::Ordering::Equal)
            );
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t, positive i such that i < distance (MAX, t) with MAX the largest
    ///     table index,
    ///         t.increased(i) >? t = true.
    fn prop_table_index_greater() {
        for _ in 0..REPEATS {
            let (t, inc) = any_table_index()
                .zip(any_usize())
                .find(|(t, inc)| {
                    (*inc as u128) < TableIndex::distance(&TableIndex::LAST, t).unwrap().0
                })
                .unwrap();
            assert_eq!(
                std::cmp::PartialOrd::partial_cmp(&t.increased(inc), &t),
                Some(std::cmp::Ordering::Greater),
            );
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t, positive i such that i < distance(t, FIRST) with FIRST the
    ///     smallest table index,
    ///         t.decreased(i) <? t = true.
    fn prop_table_index_less() {
        for _ in 0..REPEATS {
            let (t, inc) = any_table_index()
                .zip(any_usize())
                .find(|(t, inc)| {
                    (*inc as u128) < TableIndex::distance(t, &TableIndex::FIRST).unwrap().0
                })
                .unwrap();
            assert_eq!(
                std::cmp::PartialOrd::partial_cmp(&t.decreased(inc), &t),
                Some(std::cmp::Ordering::Less)
            );
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t,
    ///         successor(predecessor(t)) = t.
    fn prop_table_index_decrement_increment() {
        for _ in 0..REPEATS {
            let t = any_table_index().next().unwrap();
            assert_eq!(t.decremented().incremented(), t);
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t,
    ///         predecessor(successor(t)) = t.
    fn prop_table_index_increment_decrement() {
        for _ in 0..REPEATS {
            let t = any_table_index().next().unwrap();
            assert_eq!(t.incremented().decremented(), t);
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t, positive integer i,
    ///         increase(decrease(t, i), i) = t.
    fn prop_table_index_increase_decrease() {
        for _ in 0..REPEATS {
            let (t, i) = any_table_index().zip(any_usize()).next().unwrap();
            assert_eq!(t.increased(i).decreased(i), t);
        }
    }
    #[test]
    /// Check the property:
    ///     For all table indices t, positive integer i,
    ///         decrease(increase(t, i), i) = t.
    fn prop_table_index_decrease_increase() {
        for _ in 0..REPEATS {
            let (t, i) = any_table_index().zip(any_usize()).next().unwrap();
            assert_eq!(t.decreased(i).increased(i), t);
        }
    }
    #[test]
    /// Check that a big increase does not overflow
    fn prop_table_increase_max_no_overflow() {
        let first = TableIndex::FIRST;
        // Increase so that ByteIndex is at 1usize
        let second = first.increased(1);
        // Now increase by usize::MAX, as the underlying byte index stores a usize this may overflow
        // depending on implementation, ensure it does not overflow
        let big_increase = second.increased(usize::MAX);
        let total_full_aes_shifts = (1u128 + usize::MAX as u128) / BYTES_PER_AES_CALL as u128;
        assert_eq!(
            big_increase,
            TableIndex::new(AesIndex(total_full_aes_shifts), ByteIndex(0))
        );
    }
}

View File

@@ -1,223 +0,0 @@
//! A module implementing the random generator api with batched aes calls.
//!
//! This module provides a generic [`AesCtrGenerator`] structure which implements the
//! [`super::RandomGenerator`] api using the AES block cipher in counter mode. That is, the
//! generator holds a state (i.e. counter) which is incremented iteratively, to produce the stream
//! of random values:
//! ```ascii
//! state=0 state=1 state=2
//! ╔══↧══╗ ╔══↧══╗ ╔══↧══╗
//! key ↦ AES ║ key ↦ AES ║ key ↦ AES ║ ...
//! ╚══↧══╝ ╚══↧══╝ ╚══↧══╝
//! output0 output1 output2
//!
//! t=0 t=1 t=2
//! ```
//!
//! The [`AesCtrGenerator`] structure is generic over the AES block ciphers, which are
//! represented by the [`AesBlockCipher`] trait. Consequently, implementers only need to implement
//! the `AesBlockCipher` trait, to benefit from the whole api of the `AesCtrGenerator` structure.
//!
//! In the following section, we give details on the implementation of this generic generator.
//!
//! Coarse-grained pseudo-random lookup table
//! =========================================
//!
//! To generate random values, we use the AES block cipher in counter mode. If we denote f the aes
//! encryption function, we have:
//! ```ascii
//! f: ⟦0;2¹²⁸ -1⟧ X ⟦0;2¹²⁸ -1⟧ ↦ ⟦0;2¹²⁸ -1⟧
//! f(secret_key, input) ↦ output
//! ```
//! If we fix the secret key to a value k, we have a function fₖ from ⟦0;2¹²⁸ -1⟧ to ⟦0;2¹²⁸-1⟧,
//! transforming the state of the counter into a pseudo random value. Essentially, this fₖ
//! function can be considered as the following lookup table, containing 2¹²⁸ pseudo-random
//! values:
//! ```ascii
//! ╭──────────────┬──────────────┬─────┬──────────────╮
//! │ 0 │ 1 │ │ 2¹²⁸ -1 │
//! ├──────────────┼──────────────┼─────┼──────────────┤
//! │ fₖ(0) │ fₖ(1) │ │ fₖ(2¹²⁸ -1) │
//! ╔═══════↧══════╦═══════↧══════╦═════╦═══════↧══════╗
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
//! ║┃ u128 ┃║┃ u128 ┃║ ... ║┃ u128 ┃║
//! ║┗━━━━━━━━━━━━┛║┗━━━━━━━━━━━━┛║ ║┗━━━━━━━━━━━━┛║
//! ╚══════════════╩══════════════╩═════╩══════════════╝
//! ```
//!
//! An input to the fₖ function is called an _aes index_ (also called state or counter in the
//! standards) of the pseudo-random table. The [`AesIndex`] structure defined in this module
//! represents such an index in the code.
//!
//! Fine-grained pseudo-random table lookup
//! =======================================
//!
//! Since we want to deliver the pseudo-random bytes one by one, we have to come with a finer
//! grained indexing. Fortunately, each `u128` value outputted by fₖ can be seen as a table of 16
//! `u8`:
//! ```ascii
//! ╭──────────────┬──────────────┬─────┬──────────────╮
//! │ 0 │ 1 │ │ 2¹²⁸ -1 │
//! ├──────────────┼──────────────┼─────┼──────────────┤
//! │ fₖ(0) │ fₖ(1) │ │ fₖ(2¹²⁸ -1) │
//! ╔═══════↧══════╦═══════↧══════╦═════╦═══════↧══════╗
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
//! ║┃ u128 ┃║┃ u128 ┃║ ║┃ u128 ┃║
//! ║┣━━┯━━┯━━━┯━━┫║┣━━┯━━┯━━━┯━━┫║ ... ║┣━━┯━━┯━━━┯━━┫║
//! ║┃u8│u8│...│u8┃║┃u8│u8│...│u8┃║ ║┃u8│u8│...│u8┃║
//! ║┗━━┷━━┷━━━┷━━┛║┗━━┷━━┷━━━┷━━┛║ ║┗━━┷━━┷━━━┷━━┛║
//! ╚══════════════╩══════════════╩═════╩══════════════╝
//! ```
//!
//! We introduce a second function to select a chunk of 8 bits:
//! ```ascii
//! g: ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ ↦ ⟦0;2⁸ -1⟧
//! g(big_int, index) ↦ byte
//! ```
//!
//! If we fix the `u128` value to a value e, we have a function gₑ from ⟦0;15⟧ to ⟦0;2⁸ -1⟧
//! transforming an index into a pseudo-random byte:
//! ```ascii
//! ┏━━━━━━━━┯━━━━━━━━┯━━━┯━━━━━━━━┓
//! ┃ u8 │ u8 │...│ u8 ┃
//! ┗━━━━━━━━┷━━━━━━━━┷━━━┷━━━━━━━━┛
//! │ gₑ(0) │ gₑ(1) │ │ gₑ(15) │
//! ╰────────┴─────-──┴───┴────────╯
//! ```
//!
//! We call this input to the gₑ function, a _byte index_ of the pseudo-random table. The
//! [`ByteIndex`] structure defined in this module represents such an index in the code.
//!
//! By using both the g and the fₖ functions, we can define a new function l which allows to index
//! any byte of the pseudo-random table:
//! ```ascii
//! l: ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ ↦ ⟦0;2⁸ -1⟧
//! l(aes_index, byte_index) ↦ g(fₖ(aes_index), byte_index)
//! ```
//!
//! In this sense, any member of ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ uniquely defines a byte in this pseudo-random
//! table:
//! ```ascii
//! e = fₖ(a)
//! ╔══════════════╦═══════↧══════╦═════╦══════════════╗
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
//! ║┃ u128 ┃║┃ u128 ┃║ ║┃ u128 ┃║
//! ║┣━━┯━━┯━━━┯━━┫║┣━━┯━━┯━━━┯━━┫║ ... ║┣━━┯━━┯━━━┯━━┫║
//! ║┃u8│u8│...│u8┃║┃u8│u8│...│u8┃║ ║┃u8│u8│...│u8┃║
//! ║┗━━┷━━┷━━━┷━━┛║┗━━┷↥━┷━━━┷━━┛║ ║┗━━┷━━┷━━━┷━━┛║
//! ║ ║│ gₑ(b) │║ ║ ║
//! ║ ║╰───-────────╯║ ║ ║
//! ╚══════════════╩══════════════╩═════╩══════════════╝
//! ```
//!
//! We call this input to the l function, a _table index_ of the pseudo-random table. The
//! [`TableIndex`] structure defined in this module represents such an index in the code.
//!
//! Prngs current table index
//! =========================
//!
//! When created, a prng is given an initial _table index_, denoted (a₀, b₀), which identifies the
//! first byte of the table to be outputted by the prng. Then, each time the prng is queried for a
//! new value, the byte corresponding to the current _table index_ is returned, and the current
//! _table index_ is incremented:
//! ```ascii
//! e = fₖ(a₀) e = fₖ(a₁)
//! ╔═════↧═════╦═══════════╦═════╦═══════════╗ ╔═══════════╦═════↧═════╦═════╦═══════════╗
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║ ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
//! ║┃ │ │...│ ┃║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║
//! ║┗━┷━┷━━━┷↥┛║┗━┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ → ║┗━┷━┷━━━┷━┛║┗↥┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║
//! ║│ gₑ(b₀) │║ ║ ║ ║ ║ ║│ gₑ(b₁) │║ ║ ║
//! ║╰─────────╯║ ║ ║ ║ ║ ║╰─────────╯║ ║ ║
//! ╚═══════════╩═══════════╩═════╩═══════════╝ ╚═══════════╩═══════════╩═════╩═══════════╝
//! ```
//!
//! Prng bound
//! ==========
//!
//! When created, a prng is also given a _bound_ (aₘ, bₘ) , that is a table index which it is not
//! allowed to exceed:
//! ```ascii
//! e = fₖ(a₀)
//! ╔═════↧═════╦═══════════╦═════╦═══════════╗
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
//! ║┃ │ │...│ ┃║┃ │╳│...│╳┃║ ║┃╳│╳│...│╳┃║
//! ║┗━┷━┷━━━┷↥┛║┗━┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ The current byte can be returned.
//! ║│ gₑ(b₀) │║ ║ ║ ║
//! ║╰─────────╯║ ║ ║ ║
//! ╚═══════════╩═══════════╩═════╩═══════════╝
//!
//! e = fₖ(aₘ)
//! ╔═══════════╦═════↧═════╦═════╦═══════════╗
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
//! ║┃ │ │...│ ┃║┃ │╳│...│╳┃║ ║┃╳│╳│...│╳┃║ The table index reached the bound,
//! ║┗━┷━┷━━━┷━┛║┗━┷↥┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ the current byte can not be
//! ║ ║│ gₑ(bₘ) │║ ║ ║ returned.
//! ║ ║╰─────────╯║ ║ ║
//! ╚═══════════╩═══════════╩═════╩═══════════╝
//! ```
//!
//! Buffering
//! =========
//!
//! Calling the aes function every time we need to output a single byte would be a huge waste of
//! resources. In practice, we call aes 8 times in a row, for 8 successive values of aes index, and
//! store the results in a buffer. For platforms which have a dedicated aes chip, this allows to
//! fill the unit pipeline and reduces the amortized cost of the aes function.
//!
//! Together with the current table index of the prng, we also store a pointer p (initialized at
//! p₀=b₀) to the current byte in the buffer. If we denote v the lookup function we have :
//! ```ascii
//! e = fₖ(a₀) Buffer(length=128)
//! ╔═════╦═══════════╦═════↧═════╦═══════════╦═════╗ ┏━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓
//! ║ ... ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║ ┃▓│▓│▓│▓│▓│▓│▓│▓│...│▓┃
//! ║ ║┃ │ │...│ ┃║┃▓│▓│...│▓┃║┃▓│▓│...│▓┃║ ║ ┗━┷↥┷━┷━┷━┷━┷━┷━┷━━━┷━┛
//! ║ ║┗━┷━┷━━━┷━┛║┗━┷↥┷━━━┷━┛║┗━┷━┷━━━┷━┛║ ║ │ v(p₀) │
//! ║ ║ ║│ gₑ(b₀) │║ ║ ║ ╰─────────────────────╯
//! ║ ║ ║╰─────────╯║ ║ ║
//! ╚═════╩═══════════╩═══════════╩═══════════╩═════╝
//! ```
//!
//! We call this input to the v function, a _buffer pointer_. The [`BufferPointer`] structure
//! defined in this module represents such a pointer in the code.
//!
//! When the table index is incremented, the buffer pointer is incremented alongside:
//! ```ascii
//! e = fₖ(a) Buffer(length=128)
//! ╔═════╦═══════════╦═════↧═════╦═══════════╦═════╗ ┏━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓
//! ║ ... ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║ ┃▓│▓│▓│▓│▓│▓│▓│▓│...│▓┃
//! ║ ║┃ │ │...│ ┃║┃▓│▓│...│▓┃║┃▓│▓│...│▓┃║ ║ ┗━┷━┷↥┷━┷━┷━┷━┷━┷━━━┷━┛
//! ║ ║┗━┷━┷━━━┷━┛║┗━┷━┷↥━━┷━┛║┗━┷━┷━━━┷━┛║ ║ │ v(p) │
//! ║ ║ ║│ gₑ(b) │║ ║ ║ ╰─────────────────────╯
//! ║ ║ ║╰─────────╯║ ║ ║
//! ╚═════╩═══════════╩═══════════╩═══════════╩═════╝
//! ```
//!
//! When the buffer pointer is incremented it is checked against the size of the buffer, and if
//! necessary, a new batch of aes index values is generated.
/// Number of aes calls performed in a row to refill the buffer.
pub const AES_CALLS_PER_BATCH: usize = 8;
/// Number of bytes outputted by a single aes call (one 128-bit block).
pub const BYTES_PER_AES_CALL: usize = 128 / 8;
/// Total number of bytes produced by one batch of aes calls.
pub const BYTES_PER_BATCH: usize = BYTES_PER_AES_CALL * AES_CALLS_PER_BATCH;
/// A module containing structures to manage table indices.
mod index;
pub use index::*;
/// A module containing structures to manage table indices and buffer pointers together properly.
mod states;
pub use states::*;
/// A module containing an abstraction for aes block ciphers.
mod block_cipher;
pub use block_cipher::*;
/// A module containing a generic implementation of a random generator.
mod generic;
pub use generic::*;
/// A module extending `generic` to the `rayon` paradigm.
#[cfg(feature = "parallel")]
mod parallel;
#[cfg(feature = "parallel")]
pub use parallel::*;

View File

@@ -1,222 +0,0 @@
use crate::generators::aes_ctr::{
AesBlockCipher, AesCtrGenerator, ChildrenClosure, State, TableIndex,
};
use crate::generators::{BytesPerChild, ChildrenCount, ForkError};
/// A type alias for the parallel children iterator type.
///
/// This is the `rayon` counterpart of the sequential children iterator: a parallel
/// range/zip/map pipeline yielding one `AesCtrGenerator` per child.
pub type ParallelChildrenIterator<BlockCipher> = rayon::iter::Map<
    rayon::iter::Zip<
        rayon::range::Iter<usize>,
        rayon::iter::RepeatN<(Box<BlockCipher>, TableIndex, BytesPerChild)>,
    >,
    fn((usize, (Box<BlockCipher>, TableIndex, BytesPerChild))) -> AesCtrGenerator<BlockCipher>,
>;
impl<BlockCipher: AesBlockCipher> AesCtrGenerator<BlockCipher> {
    /// Tries to fork the current generator into `n_children` generators each able to output
    /// `n_bytes` random bytes as a parallel iterator.
    ///
    /// # Notes
    ///
    /// This method requires the "parallel" feature.
    pub fn par_try_fork(
        &mut self,
        n_children: ChildrenCount,
        n_bytes: BytesPerChild,
    ) -> Result<ParallelChildrenIterator<BlockCipher>, ForkError>
    where
        BlockCipher: Send + Sync,
    {
        use rayon::prelude::*;
        // Reject degenerate forks, and forks that would run past the generator's bound.
        if n_children.0 == 0 {
            return Err(ForkError::ZeroChildrenCount);
        }
        if n_bytes.0 == 0 {
            return Err(ForkError::ZeroBytesPerChild);
        }
        if !self.is_fork_in_bound(n_children, n_bytes) {
            return Err(ForkError::ForkTooLarge);
        }
        // The state currently stored in the parent generator points to the table index of the last
        // generated byte. The first index to be generated is the next one:
        let first_index = self.state.table_index().incremented();
        let output = (0..n_children.0)
            .into_par_iter()
            .zip(rayon::iter::repeatn(
                (self.block_cipher.clone(), first_index, n_bytes),
                n_children.0,
            ))
            .map(
                // This map is a little weird because we need to cast the closure to a fn pointer
                // that matches the signature of `ChildrenIterator<BlockCipher>`. Unfortunately,
                // the compiler does not manage to coerce this one automatically.
                (|(i, (block_cipher, first_index, n_bytes))| {
                    // The first index to be outputted by the child is the `first_index` shifted by
                    // the proper amount of `child_bytes`.
                    let child_first_index = first_index.increased(n_bytes.0 * i);
                    // The bound of the child is the first index of its next sibling.
                    let child_bound_index = first_index.increased(n_bytes.0 * (i + 1));
                    AesCtrGenerator::from_block_cipher(
                        block_cipher,
                        child_first_index,
                        child_bound_index,
                    )
                }) as ChildrenClosure<BlockCipher>,
            );
        // The parent next index is the bound of the last child.
        let next_index = first_index.increased(n_bytes.0 * n_children.0);
        self.state = State::new(next_index);
        Ok(output)
    }
}
#[cfg(test)]
pub mod aes_ctr_parallel_generic_tests {
use super::*;
use crate::generators::aes_ctr::aes_ctr_generic_test::{any_key, any_valid_fork};
use rayon::prelude::*;
const REPEATS: usize = 1_000_000;
/// Check the property:
/// On a valid fork, the table index of the first child is the same as the table index of
/// the parent before the fork.
pub fn prop_fork_first_state_table_index<G: AesBlockCipher>() {
for _ in 0..REPEATS {
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
let k = any_key().next().unwrap();
let original_generator =
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
let mut forked_generator = original_generator.clone();
let first_child = forked_generator
.par_try_fork(nc, nb)
.unwrap()
.find_first(|_| true)
.unwrap();
assert_eq!(original_generator.table_index(), first_child.table_index());
}
}
/// Check the property:
/// On a valid fork, the table index of the first byte outputted by the parent after the
/// fork, is the bound of the last child of the fork.
pub fn prop_fork_last_bound_table_index<G: AesBlockCipher>() {
for _ in 0..REPEATS {
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
let k = any_key().next().unwrap();
let mut parent_generator =
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
let last_child = parent_generator
.par_try_fork(nc, nb)
.unwrap()
.find_last(|_| true)
.unwrap();
assert_eq!(
parent_generator.table_index().incremented(),
last_child.get_bound()
);
}
}
/// Check the property:
/// On a valid fork, the bound of the parent does not change.
pub fn prop_fork_parent_bound_table_index<G: AesBlockCipher>() {
for _ in 0..REPEATS {
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
let k = any_key().next().unwrap();
let original_generator =
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
let mut forked_generator = original_generator.clone();
forked_generator
.par_try_fork(nc, nb)
.unwrap()
.find_last(|_| true)
.unwrap();
assert_eq!(original_generator.get_bound(), forked_generator.get_bound());
}
}
/// Check the property:
/// On a valid fork, the parent table index is increased of the number of children
/// multiplied by the number of bytes per child.
pub fn prop_fork_parent_state_table_index<G: AesBlockCipher>() {
for _ in 0..REPEATS {
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
let k = any_key().next().unwrap();
let original_generator =
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
let mut forked_generator = original_generator.clone();
forked_generator
.par_try_fork(nc, nb)
.unwrap()
.find_last(|_| true)
.unwrap();
assert_eq!(
forked_generator.table_index(),
// Decrement accounts for the fact that the table index stored is the previous one
t.increased(nc.0 * nb.0).decremented()
);
}
}
/// Check the property:
/// On a valid fork, the bytes outputted by the children in the fork order form the same
/// sequence the parent would have had outputted no fork had happened.
pub fn prop_fork<G: AesBlockCipher>() {
for _ in 0..1000 {
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
let k = any_key().next().unwrap();
let bytes_to_go = nc.0 * nb.0;
let original_generator =
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
let mut forked_generator = original_generator.clone();
let initial_output: Vec<u8> = original_generator.take(bytes_to_go).collect();
let forked_output: Vec<u8> = forked_generator
.par_try_fork(nc, nb)
.unwrap()
.flat_map(|child| child.collect::<Vec<_>>())
.collect();
assert_eq!(initial_output, forked_output);
}
}
/// Check the property:
/// On a valid fork, all children got a number of remaining bytes equals to the number of
/// bytes per child given as fork input.
pub fn prop_fork_children_remaining_bytes<G: AesBlockCipher>() {
    for _ in 0..REPEATS {
        let (start, n_children, n_bytes, extra) = any_valid_fork().next().unwrap();
        let key = any_key().next().unwrap();
        let span = n_children.0 * n_bytes.0;
        let mut generator = AesCtrGenerator::<G>::new(
            key,
            Some(start),
            Some(start.increased(span + extra)),
        );
        // Every child must report exactly `n_bytes` bytes left to produce.
        let all_children_match = generator
            .par_try_fork(n_children, n_bytes)
            .unwrap()
            .all(|child| child.remaining_bytes().0 == n_bytes.0 as u128);
        assert!(all_children_match);
    }
}
/// Check the property:
/// On a valid fork, the number of remaining bytes of the parent is reduced by the
/// number of children multiplied by the number of bytes per child.
pub fn prop_fork_parent_remaining_bytes<G: AesBlockCipher>() {
    for _ in 0..REPEATS {
        let (t, nc, nb, i) = any_valid_fork().next().unwrap();
        let k = any_key().next().unwrap();
        // Total number of bytes handed over to the children.
        let bytes_to_go = nc.0 * nb.0;
        let mut generator =
            AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
        let before_remaining_bytes = generator.remaining_bytes();
        // The children iterator is discarded on purpose: forking alone must
        // already advance the parent past the byte range given to the children.
        let _ = generator.par_try_fork(nc, nb).unwrap();
        let after_remaining_bytes = generator.remaining_bytes();
        assert_eq!(
            before_remaining_bytes.0 - after_remaining_bytes.0,
            bytes_to_go as u128
        );
    }
}
}

View File

@@ -1,176 +0,0 @@
use crate::generators::aes_ctr::index::{AesIndex, TableIndex};
use crate::generators::aes_ctr::BYTES_PER_BATCH;
/// A pointer to the next byte to be outputted by the generator.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct BufferPointer(pub usize);
/// A structure representing the current state of generator using batched aes-ctr approach.
#[derive(Debug, Clone, Copy)]
pub struct State {
    // Table index of the most recently outputted byte, i.e. the predecessor of
    // the next byte's index (see the note on `State::new`).
    table_index: TableIndex,
    // Position, inside the currently buffered batch, of the most recently
    // outputted byte.
    buffer_pointer: BufferPointer,
}
/// A structure representing the action to be taken by the generator after shifting its state.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ShiftAction {
    /// Outputs the byte pointed to by the 0-th field.
    OutputByte(BufferPointer),
    /// Refresh the buffer starting from the 0-th field, and output the byte pointed to by the 0-th
    /// field.
    RefreshBatchAndOutputByte(AesIndex, BufferPointer),
}
impl State {
    /// Creates a new state from the initial table index.
    ///
    /// Note :
    /// ------
    ///
    /// The `table_index` input, is the __first__ table index that will be outputted on the next
    /// call to `increment`. Put differently, the current table index of the newly created state
    /// is the predecessor of this one.
    pub fn new(table_index: TableIndex) -> Self {
        // We ensure that the table index is not the first one, to prevent wrapping on `decrement`,
        // and outputting `RefreshBatchAndOutputByte(AesIndex::MAX, ...)` on the first increment
        // (which would lead to loading a non continuous batch).
        assert_ne!(table_index, TableIndex::FIRST);
        State {
            // To ensure that the first outputted table index is the proper one, we decrement the
            // table index.
            table_index: table_index.decremented(),
            // To ensure that the first `ShiftAction` will be a `RefreshBatchAndOutputByte`, we set
            // the buffer to the last allowed value.
            buffer_pointer: BufferPointer(BYTES_PER_BATCH - 1),
        }
    }
    /// Shifts the state forward of `shift` bytes, returning the action the
    /// generator must perform to honour the shift.
    pub fn increase(&mut self, shift: usize) -> ShiftAction {
        self.table_index.increase(shift);
        let total_batch_index = self.buffer_pointer.0 + shift;
        if total_batch_index > BYTES_PER_BATCH - 1 {
            // The shift walks off the end of the buffered batch: a fresh batch
            // must be generated starting at the current aes index, and the
            // pointer restarts at the byte offset within that new batch.
            self.buffer_pointer.0 = self.table_index.byte_index.0;
            ShiftAction::RefreshBatchAndOutputByte(self.table_index.aes_index, self.buffer_pointer)
        } else {
            // Still inside the buffered batch: just advance the pointer.
            self.buffer_pointer.0 = total_batch_index;
            ShiftAction::OutputByte(self.buffer_pointer)
        }
    }
    /// Shifts the state forward of one byte.
    pub fn increment(&mut self) -> ShiftAction {
        self.increase(1)
    }
    /// Returns the current table index.
    pub fn table_index(&self) -> TableIndex {
        self.table_index
    }
}
impl Default for State {
    fn default() -> Self {
        // NOTE(review): `State::new` asserts `table_index != TableIndex::FIRST`
        // (to avoid wrapping on its internal `decremented` call), so calling it
        // with `TableIndex::FIRST` here trips that assertion and panics.
        // Confirm whether the intended sentinel is the successor of
        // `TableIndex::FIRST`.
        State::new(TableIndex::FIRST)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::generators::aes_ctr::index::ByteIndex;
    use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
    use rand::{thread_rng, Rng};
    // Number of random samples drawn per property test.
    const REPEATS: usize = 1_000_000;
    // Yields random table indices whose byte index is valid for one AES call.
    // NOTE(review): this can (with negligible probability) yield
    // `TableIndex::FIRST`, which `State::new` rejects -- confirm acceptable.
    fn any_table_index() -> impl Iterator<Item = TableIndex> {
        std::iter::repeat_with(|| {
            TableIndex::new(
                AesIndex(thread_rng().gen()),
                ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
            )
        })
    }
    // Yields uniformly random usize values over the whole range.
    fn any_usize() -> impl Iterator<Item = usize> {
        std::iter::repeat_with(|| thread_rng().gen())
    }
    #[test]
    /// Check the property:
    /// For all table indices t,
    /// State::new(t).increment() = RefreshBatchAndOutputByte(t.aes_index, t.byte_index)
    fn prop_state_new_increment() {
        for _ in 0..REPEATS {
            let (t, mut s) = any_table_index()
                .map(|t| (t, State::new(t)))
                .next()
                .unwrap();
            assert!(matches!(
                s.increment(),
                ShiftAction::RefreshBatchAndOutputByte(t_, BufferPointer(p_)) if t_ == t.aes_index && p_ == t.byte_index.0
            ))
        }
    }
    #[test]
    /// Check the property:
    /// For all states s, table indices t, positive integer i
    /// if s = State::new(t), then t.increased(i) = s.increased(i-1).table_index().
    fn prop_state_increase_table_index() {
        for _ in 0..REPEATS {
            let (t, mut s, i) = any_table_index()
                .zip(any_usize())
                .map(|(t, i)| (t, State::new(t), i))
                .next()
                .unwrap();
            s.increase(i);
            // NOTE(review): `i - 1` underflows when the random `i` is 0 --
            // confirm whether `any_usize` should exclude zero here.
            assert_eq!(s.table_index(), t.increased(i - 1))
        }
    }
    #[test]
    /// Check the property:
    /// For all table indices t, positive integer i such as t.byte_index + i < 127,
    /// if s = State::new(t), and s.increment() was executed, then
    /// s.increase(i) = OutputByte(t.byte_index + i).
    fn prop_state_increase_small() {
        for _ in 0..REPEATS {
            let (t, mut s, i) = any_table_index()
                .zip(any_usize())
                .map(|(t, i)| (t, State::new(t), i % BYTES_PER_BATCH))
                .find(|(t, _, i)| t.byte_index.0 + i < BYTES_PER_BATCH - 1)
                .unwrap();
            // First increment loads the initial batch; the subsequent small
            // increase must stay within that batch.
            s.increment();
            assert!(matches!(
                s.increase(i),
                ShiftAction::OutputByte(BufferPointer(p_)) if p_ == t.byte_index.0 + i
            ));
        }
    }
    #[test]
    /// Check the property:
    /// For all table indices t, positive integer i such as t.byte_index + i >= 127,
    /// if s = State::new(t), and s.increment() was executed, then
    /// s.increase(i) = RefreshBatchAndOutputByte(
    /// t.increased(i).aes_index,
    /// t.increased(i).byte_index).
    fn prop_state_increase_large() {
        for _ in 0..REPEATS {
            let (t, mut s, i) = any_table_index()
                .zip(any_usize())
                .map(|(t, i)| (t, State::new(t), i))
                .find(|(t, _, i)| t.byte_index.0 + i >= BYTES_PER_BATCH - 1)
                .unwrap();
            // A shift past the end of the current batch must trigger a refresh.
            s.increment();
            assert!(matches!(
                s.increase(i),
                ShiftAction::RefreshBatchAndOutputByte(t_, BufferPointer(p_))
                    if t_ == t.increased(i).aes_index && p_ == t.increased(i).byte_index.0
            ));
        }
    }
}

View File

@@ -1,184 +0,0 @@
use crate::generators::aes_ctr::{AesBlockCipher, AesIndex, AesKey, BYTES_PER_BATCH};
use core::arch::aarch64::{
uint8x16_t, vaeseq_u8, vaesmcq_u8, vdupq_n_u32, vdupq_n_u8, veorq_u8, vgetq_lane_u32,
vreinterpretq_u32_u8, vreinterpretq_u8_u32,
};
use std::arch::is_aarch64_feature_detected;
use std::mem::transmute;
const RCONS: [u32; 10] = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36];
const NUM_WORDS_IN_KEY: usize = 4;
const NUM_ROUNDS: usize = 10;
const NUM_ROUND_KEYS: usize = NUM_ROUNDS + 1;
/// An aes block cipher implementation which uses `neon` and `aes` instructions.
#[derive(Clone)]
pub struct ArmAesBlockCipher {
    // The 11 expanded AES-128 round keys (key schedule).
    round_keys: [uint8x16_t; NUM_ROUND_KEYS],
}
impl AesBlockCipher for ArmAesBlockCipher {
    /// Builds the cipher, panicking if the CPU lacks the `aes` or `neon` features.
    fn new(key: AesKey) -> ArmAesBlockCipher {
        let aes_detected = is_aarch64_feature_detected!("aes");
        let neon_detected = is_aarch64_feature_detected!("neon");
        if !(aes_detected && neon_detected) {
            panic!(
                "The ArmAesBlockCipher requires both aes and neon aarch64 CPU features.\n\
                aes feature available: {}\nneon feature available: {}\n.",
                aes_detected, neon_detected
            )
        }
        // SAFETY: the feature check above guarantees the required instructions exist.
        let round_keys = unsafe { generate_round_keys(key) };
        ArmAesBlockCipher { round_keys }
    }
    /// Generates one batch of pseudorandom bytes by encrypting the consecutive
    /// counter values starting at `aes_ctr`, one 16-byte block per counter.
    fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
        #[target_feature(enable = "aes,neon")]
        unsafe fn implementation(
            this: &ArmAesBlockCipher,
            AesIndex(aes_ctr): AesIndex,
        ) -> [u8; BYTES_PER_BATCH] {
            let mut output = [0u8; BYTES_PER_BATCH];
            // We want 128 bytes of output, the ctr gives 128 bit message (16 bytes)
            for (i, out) in output.chunks_exact_mut(16).enumerate() {
                // Safe because we prevent the user from creating the Generator
                // on non-supported hardware
                let encrypted = encrypt(aes_ctr + (i as u128), &this.round_keys);
                out.copy_from_slice(&encrypted.to_ne_bytes());
            }
            output
        }
        // SAFETY: we checked for aes and neon availability in `Self::new`
        unsafe { implementation(self, AesIndex(aes_ctr)) }
    }
}
/// Does the AES SubWord operation for the Key Expansion step
///
/// # SAFETY
///
/// You must make sure the CPU's arch is`aarch64` and has
/// `neon` and `aes` features.
#[inline(always)]
unsafe fn sub_word(word: u32) -> u32 {
    // Broadcast the word into all four 32-bit lanes of a 128-bit vector.
    let data = vreinterpretq_u8_u32(vdupq_n_u32(word));
    let zero_key = vdupq_n_u8(0u8);
    let temp = vaeseq_u8(data, zero_key);
    // vaeseq_u8 does SubBytes(ShiftRow(XOR(data, key))
    // But because we used a zero aes key,the XOR did not alter data
    // We now have temp = SubBytes(ShiftRow(data))
    // Since in AES ShiftRow operation, the first row is not shifted
    // We can just get that one to have our SubWord(word) result
    vgetq_lane_u32::<0>(vreinterpretq_u32_u8(temp))
}
// Bit-for-bit reinterpretation of a neon vector as a native u128.
#[inline(always)]
fn uint8x16_t_to_u128(input: uint8x16_t) -> u128 {
    unsafe { transmute(input) }
}
// Bit-for-bit reinterpretation of a native u128 as a neon vector.
#[inline(always)]
fn u128_to_uint8x16_t(input: u128) -> uint8x16_t {
    unsafe { transmute(input) }
}
/// Expands a 128-bit AES key into the 11 round keys of the AES-128 key schedule.
#[target_feature(enable = "aes,neon")]
unsafe fn generate_round_keys(key: AesKey) -> [uint8x16_t; NUM_ROUND_KEYS] {
    let mut round_keys: [uint8x16_t; NUM_ROUND_KEYS] = std::mem::zeroed();
    round_keys[0] = u128_to_uint8x16_t(key.0);
    // View the round-key array as a flat slice of 32-bit words, since the key
    // schedule is defined word by word.
    let words = std::slice::from_raw_parts_mut(
        round_keys.as_mut_ptr() as *mut u32,
        NUM_ROUND_KEYS * NUM_WORDS_IN_KEY,
    );
    debug_assert_eq!(words.len(), 44);
    // Skip the words of the first key, its already done
    for i in NUM_WORDS_IN_KEY..words.len() {
        if (i % NUM_WORDS_IN_KEY) == 0 {
            // First word of each round key: SubWord + RotWord + round constant.
            words[i] = words[i - NUM_WORDS_IN_KEY]
                ^ sub_word(words[i - 1]).rotate_right(8)
                ^ RCONS[(i / NUM_WORDS_IN_KEY) - 1];
        } else {
            words[i] = words[i - NUM_WORDS_IN_KEY] ^ words[i - 1];
        }
        // Note: there is also a special thing to do when
        // i mod SElf::NUM_WORDS_IN_KEY == 4 but it cannot happen on 128 bits keys
    }
    round_keys
}
/// Encrypts a 128-bit message
///
/// # SAFETY
///
/// You must make sure the CPU's arch is`aarch64` and has
/// `neon` and `aes` features.
#[inline(always)]
unsafe fn encrypt(message: u128, keys: &[uint8x16_t; NUM_ROUND_KEYS]) -> u128 {
    // Notes:
    // According the [ARM Manual](https://developer.arm.com/documentation/ddi0487/gb/):
    // `vaeseq_u8` is the following AES operations:
    // 1. AddRoundKey (XOR)
    // 2. ShiftRows
    // 3. SubBytes
    // `vaesmcq_u8` is MixColumns
    let mut data: uint8x16_t = u128_to_uint8x16_t(message);
    // The first NUM_ROUNDS - 1 round keys drive full rounds (including MixColumns).
    for &key in keys.iter().take(NUM_ROUNDS - 1) {
        data = vaesmcq_u8(vaeseq_u8(data, key));
    }
    // Final round: no MixColumns; the last round key is then simply XORed in.
    data = vaeseq_u8(data, keys[NUM_ROUNDS - 1]);
    data = veorq_u8(data, keys[NUM_ROUND_KEYS - 1]);
    uint8x16_t_to_u128(data)
}
#[cfg(test)]
mod test {
use super::*;
// Test vector for aes128, from the FIPS publication 197
const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
const KEY_SCHEDULE: [u128; 11] = [
u128::from_be(0x000102030405060708090a0b0c0d0e0f),
u128::from_be(0xd6aa74fdd2af72fadaa678f1d6ab76fe),
u128::from_be(0xb692cf0b643dbdf1be9bc5006830b3fe),
u128::from_be(0xb6ff744ed2c2c9bf6c590cbf0469bf41),
u128::from_be(0x47f7f7bc95353e03f96c32bcfd058dfd),
u128::from_be(0x3caaa3e8a99f9deb50f3af57adf622aa),
u128::from_be(0x5e390f7df7a69296a7553dc10aa31f6b),
u128::from_be(0x14f9701ae35fe28c440adf4d4ea9c026),
u128::from_be(0x47438735a41c65b9e016baf4aebf7ad2),
u128::from_be(0x549932d1f08557681093ed9cbe2c974e),
u128::from_be(0x13111d7fe3944a17f307a78b4d2b30c5),
];
const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);
#[test]
fn test_generate_key_schedule() {
// Checks that the round keys are correctly generated from the sample key from FIPS
let key = AesKey(CIPHER_KEY);
let keys = unsafe { generate_round_keys(key) };
for (expected, actual) in KEY_SCHEDULE.iter().zip(keys.iter()) {
assert_eq!(*expected, uint8x16_t_to_u128(*actual));
}
}
#[test]
fn test_encrypt_message() {
// Checks that encrypting many plaintext at the same time gives the correct output.
let message = PLAINTEXT;
let key = AesKey(CIPHER_KEY);
let keys = unsafe { generate_round_keys(key) };
let ciphertext = unsafe { encrypt(message, &keys) };
assert_eq!(CIPHERTEXT, ciphertext);
}
}

View File

@@ -1,110 +0,0 @@
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
use crate::seeders::Seed;
/// A random number generator using the aarch64 `neon` and `aes` instructions
/// (the wrapped cipher is `ArmAesBlockCipher`).
pub struct NeonAesRandomGenerator(pub(super) AesCtrGenerator<ArmAesBlockCipher>);
/// The children iterator used by [`NeonAesRandomGenerator`].
///
/// Outputs children generators one by one.
pub struct ArmAesChildrenIterator(ChildrenIterator<ArmAesBlockCipher>);
impl Iterator for ArmAesChildrenIterator {
type Item = NeonAesRandomGenerator;
fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(NeonAesRandomGenerator)
}
}
impl RandomGenerator for NeonAesRandomGenerator {
    type ChildrenIter = ArmAesChildrenIterator;
    /// Seeds a fresh, unbounded generator (no lower or upper table-index bound).
    fn new(seed: Seed) -> Self {
        NeonAesRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
    }
    /// Returns how many bytes the generator may still output.
    fn remaining_bytes(&self) -> ByteCount {
        self.0.remaining_bytes()
    }
    /// Tries to split the generator into `n_children` children of `n_bytes` bytes
    /// each, forwarding any `ForkError` from the underlying aes-ctr generator.
    fn try_fork(
        &mut self,
        n_children: ChildrenCount,
        n_bytes: BytesPerChild,
    ) -> Result<Self::ChildrenIter, ForkError> {
        self.0
            .try_fork(n_children, n_bytes)
            .map(ArmAesChildrenIterator)
    }
}
impl Iterator for NeonAesRandomGenerator {
    type Item = u8;
    /// Pulls the next pseudorandom byte from the wrapped aes-ctr generator.
    fn next(&mut self) -> Option<Self::Item> {
        let inner = &mut self.0;
        inner.next()
    }
}
#[cfg(test)]
mod test {
use crate::generators::aes_ctr::aes_ctr_generic_test;
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
use crate::generators::{generator_generic_test, NeonAesRandomGenerator};
#[test]
fn prop_fork_first_state_table_index() {
aes_ctr_generic_test::prop_fork_first_state_table_index::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_last_bound_table_index() {
aes_ctr_generic_test::prop_fork_last_bound_table_index::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_parent_bound_table_index() {
aes_ctr_generic_test::prop_fork_parent_bound_table_index::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_parent_state_table_index() {
aes_ctr_generic_test::prop_fork_parent_state_table_index::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork() {
aes_ctr_generic_test::prop_fork::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_children_remaining_bytes() {
aes_ctr_generic_test::prop_fork_children_remaining_bytes::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_parent_remaining_bytes() {
aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<ArmAesBlockCipher>();
}
#[test]
fn test_roughly_uniform() {
generator_generic_test::test_roughly_uniform::<NeonAesRandomGenerator>();
}
#[test]
fn test_generator_determinism() {
generator_generic_test::test_generator_determinism::<NeonAesRandomGenerator>();
}
#[test]
fn test_fork() {
generator_generic_test::test_fork_children::<NeonAesRandomGenerator>();
}
#[test]
#[should_panic(expected = "expected test panic")]
fn test_bounded_panic() {
generator_generic_test::test_bounded_none_should_panic::<NeonAesRandomGenerator>();
}
}

View File

@@ -1,16 +0,0 @@
//! A module implementing a random number generator, using the aarch64 `neon` and `aes`
//! instructions.
//!
//! This module implements a cryptographically secure pseudorandom number generator
//! (CS-PRNG), using a fast block cipher. The implementation is based on the
//! [intel aesni white paper 323641-001 revision 3.0](https://www.intel.com/content/dam/doc/white-paper/advanced-encryption-standard-new-instructions-set-paper.pdf).
mod block_cipher;
mod generator;
pub use generator::*;
#[cfg(feature = "parallel")]
mod parallel;
#[cfg(feature = "parallel")]
pub use parallel::*;

View File

@@ -1,95 +0,0 @@
use super::*;
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
use rayon::prelude::*;
/// The parallel children iterator used by [`NeonAesRandomGenerator`].
///
/// Outputs the children generators one by one.
#[allow(clippy::type_complexity)]
pub struct ParallelArmAesChildrenIterator(
rayon::iter::Map<
ParallelChildrenIterator<ArmAesBlockCipher>,
fn(AesCtrGenerator<ArmAesBlockCipher>) -> NeonAesRandomGenerator,
>,
);
impl ParallelIterator for ParallelArmAesChildrenIterator {
type Item = NeonAesRandomGenerator;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.0.drive_unindexed(consumer)
}
}
impl IndexedParallelIterator for ParallelArmAesChildrenIterator {
fn len(&self) -> usize {
self.0.len()
}
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
self.0.drive(consumer)
}
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
self.0.with_producer(callback)
}
}
impl ParallelRandomGenerator for NeonAesRandomGenerator {
type ParChildrenIter = ParallelArmAesChildrenIterator;
fn par_try_fork(
&mut self,
n_children: ChildrenCount,
n_bytes: BytesPerChild,
) -> Result<Self::ParChildrenIter, ForkError> {
self.0
.par_try_fork(n_children, n_bytes)
.map(|iterator| ParallelArmAesChildrenIterator(iterator.map(NeonAesRandomGenerator)))
}
}
#[cfg(test)]
mod test {
use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
#[test]
fn prop_fork_first_state_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_last_bound_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_parent_bound_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_parent_state_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_ttt() {
aes_ctr_parallel_generic_tests::prop_fork::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_children_remaining_bytes() {
aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<ArmAesBlockCipher>();
}
#[test]
fn prop_fork_parent_remaining_bytes() {
aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<ArmAesBlockCipher>();
}
}

View File

@@ -1,231 +0,0 @@
use crate::generators::aes_ctr::{AesBlockCipher, AesIndex, AesKey, BYTES_PER_BATCH};
use std::arch::x86_64::{
__m128i, _mm_aesenc_si128, _mm_aesenclast_si128, _mm_aeskeygenassist_si128, _mm_shuffle_epi32,
_mm_slli_si128, _mm_store_si128, _mm_xor_si128,
};
use std::mem::transmute;
/// An aes block cipher implementation which uses `aesni` instructions.
#[derive(Clone)]
pub struct AesniBlockCipher {
    // The set of round keys used for the aes encryption
    round_keys: [__m128i; 11],
}
impl AesBlockCipher for AesniBlockCipher {
    /// Builds the cipher, panicking if the CPU lacks the `aes` or `sse2` features.
    fn new(key: AesKey) -> AesniBlockCipher {
        let aes_detected = is_x86_feature_detected!("aes");
        let sse2_detected = is_x86_feature_detected!("sse2");
        if !(aes_detected && sse2_detected) {
            panic!(
                "The AesniBlockCipher requires both aes and sse2 x86 CPU features.\n\
                aes feature available: {}\nsse2 feature available: {}\n.",
                aes_detected, sse2_detected
            )
        }
        // SAFETY: we checked for aes and sse2 availability
        let round_keys = unsafe { generate_round_keys(key) };
        AesniBlockCipher { round_keys }
    }
    /// Generates one batch of pseudorandom bytes by encrypting the 8 consecutive
    /// counter values starting at `aes_ctr`.
    fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
        #[target_feature(enable = "sse2,aes")]
        unsafe fn implementation(
            this: &AesniBlockCipher,
            AesIndex(aes_ctr): AesIndex,
        ) -> [u8; BYTES_PER_BATCH] {
            si128arr_to_u8arr(aes_encrypt_many(
                u128_to_si128(aes_ctr),
                u128_to_si128(aes_ctr + 1),
                u128_to_si128(aes_ctr + 2),
                u128_to_si128(aes_ctr + 3),
                u128_to_si128(aes_ctr + 4),
                u128_to_si128(aes_ctr + 5),
                u128_to_si128(aes_ctr + 6),
                u128_to_si128(aes_ctr + 7),
                &this.round_keys,
            ))
        }
        // SAFETY: we checked for aes and sse2 availability in `Self::new`
        unsafe { implementation(self, AesIndex(aes_ctr)) }
    }
}
/// Expands a 128-bit AES key into the 11 round keys of the AES-128 key schedule.
#[target_feature(enable = "sse2,aes")]
unsafe fn generate_round_keys(key: AesKey) -> [__m128i; 11] {
    let key = u128_to_si128(key.0);
    let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
    aes_128_key_expansion(key, &mut keys);
    keys
}
// Uses aes to encrypt many values at once. This allows a substantial speedup (around 30%)
// compared to the naive approach (presumably by interleaving the independent
// aesenc chains -- TODO confirm against the referenced white paper).
#[allow(clippy::too_many_arguments)]
#[inline(always)]
fn aes_encrypt_many(
    message_1: __m128i,
    message_2: __m128i,
    message_3: __m128i,
    message_4: __m128i,
    message_5: __m128i,
    message_6: __m128i,
    message_7: __m128i,
    message_8: __m128i,
    keys: &[__m128i; 11],
) -> [__m128i; 8] {
    unsafe {
        // Initial AddRoundKey on all 8 blocks.
        let mut tmp_1 = _mm_xor_si128(message_1, keys[0]);
        let mut tmp_2 = _mm_xor_si128(message_2, keys[0]);
        let mut tmp_3 = _mm_xor_si128(message_3, keys[0]);
        let mut tmp_4 = _mm_xor_si128(message_4, keys[0]);
        let mut tmp_5 = _mm_xor_si128(message_5, keys[0]);
        let mut tmp_6 = _mm_xor_si128(message_6, keys[0]);
        let mut tmp_7 = _mm_xor_si128(message_7, keys[0]);
        let mut tmp_8 = _mm_xor_si128(message_8, keys[0]);
        // Rounds with keys[1..=9], applied to all 8 blocks.
        for key in keys.iter().take(10).skip(1) {
            tmp_1 = _mm_aesenc_si128(tmp_1, *key);
            tmp_2 = _mm_aesenc_si128(tmp_2, *key);
            tmp_3 = _mm_aesenc_si128(tmp_3, *key);
            tmp_4 = _mm_aesenc_si128(tmp_4, *key);
            tmp_5 = _mm_aesenc_si128(tmp_5, *key);
            tmp_6 = _mm_aesenc_si128(tmp_6, *key);
            tmp_7 = _mm_aesenc_si128(tmp_7, *key);
            tmp_8 = _mm_aesenc_si128(tmp_8, *key);
        }
        // Final round with the last key (aesenclast skips MixColumns).
        tmp_1 = _mm_aesenclast_si128(tmp_1, keys[10]);
        tmp_2 = _mm_aesenclast_si128(tmp_2, keys[10]);
        tmp_3 = _mm_aesenclast_si128(tmp_3, keys[10]);
        tmp_4 = _mm_aesenclast_si128(tmp_4, keys[10]);
        tmp_5 = _mm_aesenclast_si128(tmp_5, keys[10]);
        tmp_6 = _mm_aesenclast_si128(tmp_6, keys[10]);
        tmp_7 = _mm_aesenclast_si128(tmp_7, keys[10]);
        tmp_8 = _mm_aesenclast_si128(tmp_8, keys[10]);
        [tmp_1, tmp_2, tmp_3, tmp_4, tmp_5, tmp_6, tmp_7, tmp_8]
    }
}
// Key-expansion helper: folds the `_mm_aeskeygenassist_si128` output (`temp2`)
// into the previous round key (`temp1`) to produce the next round key.
fn aes_128_assist(temp1: __m128i, temp2: __m128i) -> __m128i {
    let mut temp3: __m128i;
    let mut temp2 = temp2;
    let mut temp1 = temp1;
    unsafe {
        // Broadcast the relevant word of the keygen-assist result to all lanes.
        temp2 = _mm_shuffle_epi32(temp2, 0xff);
        // XOR-accumulate the previous key with itself shifted left by 4 bytes,
        // three times, so each word picks up all preceding words.
        temp3 = _mm_slli_si128(temp1, 0x4);
        temp1 = _mm_xor_si128(temp1, temp3);
        temp3 = _mm_slli_si128(temp3, 0x4);
        temp1 = _mm_xor_si128(temp1, temp3);
        temp3 = _mm_slli_si128(temp3, 0x4);
        temp1 = _mm_xor_si128(temp1, temp3);
        temp1 = _mm_xor_si128(temp1, temp2);
    }
    temp1
}
#[inline(always)]
fn aes_128_key_expansion(key: __m128i, keys: &mut [__m128i; 11]) {
    let (mut temp1, mut temp2): (__m128i, __m128i);
    temp1 = key;
    unsafe {
        // keys[0] is the cipher key itself; each subsequent round key is
        // derived from the previous one via `_mm_aeskeygenassist_si128` (the
        // immediate is the round constant) folded in by `aes_128_assist`.
        _mm_store_si128(keys.as_mut_ptr(), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x01);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(1), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x02);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(2), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x04);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(3), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x08);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(4), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x10);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(5), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x20);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(6), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x40);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(7), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x80);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(8), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x1b);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(9), temp1);
        temp2 = _mm_aeskeygenassist_si128(temp1, 0x36);
        temp1 = aes_128_assist(temp1, temp2);
        _mm_store_si128(keys.as_mut_ptr().offset(10), temp1);
    }
}
#[inline(always)]
fn u128_to_si128(input: u128) -> __m128i {
unsafe { transmute(input) }
}
#[allow(unused)] // to please clippy when tests are not activated
fn si128_to_u128(input: __m128i) -> u128 {
unsafe { transmute(input) }
}
#[inline(always)]
fn si128arr_to_u8arr(input: [__m128i; 8]) -> [u8; BYTES_PER_BATCH] {
unsafe { transmute(input) }
}
#[cfg(test)]
mod test {
use super::*;
// Test vector for aes128, from the FIPS publication 197
const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
const KEY_SCHEDULE: [u128; 11] = [
u128::from_be(0x000102030405060708090a0b0c0d0e0f),
u128::from_be(0xd6aa74fdd2af72fadaa678f1d6ab76fe),
u128::from_be(0xb692cf0b643dbdf1be9bc5006830b3fe),
u128::from_be(0xb6ff744ed2c2c9bf6c590cbf0469bf41),
u128::from_be(0x47f7f7bc95353e03f96c32bcfd058dfd),
u128::from_be(0x3caaa3e8a99f9deb50f3af57adf622aa),
u128::from_be(0x5e390f7df7a69296a7553dc10aa31f6b),
u128::from_be(0x14f9701ae35fe28c440adf4d4ea9c026),
u128::from_be(0x47438735a41c65b9e016baf4aebf7ad2),
u128::from_be(0x549932d1f08557681093ed9cbe2c974e),
u128::from_be(0x13111d7fe3944a17f307a78b4d2b30c5),
];
const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);
#[test]
fn test_generate_key_schedule() {
// Checks that the round keys are correctly generated from the sample key from FIPS
let key = u128_to_si128(CIPHER_KEY);
let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
aes_128_key_expansion(key, &mut keys);
for (expected, actual) in KEY_SCHEDULE.iter().zip(keys.iter()) {
assert_eq!(*expected, si128_to_u128(*actual));
}
}
#[test]
fn test_encrypt_many_messages() {
// Checks that encrypting many plaintext at the same time gives the correct output.
let message = u128_to_si128(PLAINTEXT);
let key = u128_to_si128(CIPHER_KEY);
let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
aes_128_key_expansion(key, &mut keys);
let ciphertexts = aes_encrypt_many(
message, message, message, message, message, message, message, message, &keys,
);
for ct in &ciphertexts {
assert_eq!(CIPHERTEXT, si128_to_u128(*ct));
}
}
}

View File

@@ -1,110 +0,0 @@
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
use crate::seeders::Seed;
/// A random number generator using the `aesni` instructions.
pub struct AesniRandomGenerator(pub(super) AesCtrGenerator<AesniBlockCipher>);
/// The children iterator used by [`AesniRandomGenerator`].
///
/// Outputs children generators one by one.
pub struct AesniChildrenIterator(ChildrenIterator<AesniBlockCipher>);
impl Iterator for AesniChildrenIterator {
    type Item = AesniRandomGenerator;
    // Wrap each child of the inner aes-ctr iterator in the public type.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next().map(AesniRandomGenerator)
    }
}
impl RandomGenerator for AesniRandomGenerator {
    type ChildrenIter = AesniChildrenIterator;
    /// Seeds a fresh, unbounded generator (no lower or upper table-index bound).
    fn new(seed: Seed) -> Self {
        AesniRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
    }
    /// Returns how many bytes the generator may still output.
    fn remaining_bytes(&self) -> ByteCount {
        self.0.remaining_bytes()
    }
    /// Tries to split the generator into `n_children` children of `n_bytes` bytes
    /// each, forwarding any `ForkError` from the underlying aes-ctr generator.
    fn try_fork(
        &mut self,
        n_children: ChildrenCount,
        n_bytes: BytesPerChild,
    ) -> Result<Self::ChildrenIter, ForkError> {
        self.0
            .try_fork(n_children, n_bytes)
            .map(AesniChildrenIterator)
    }
}
impl Iterator for AesniRandomGenerator {
    type Item = u8;
    // Forward byte production to the wrapped aes-ctr generator.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}
#[cfg(test)]
mod test {
use crate::generators::aes_ctr::aes_ctr_generic_test;
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
use crate::generators::{generator_generic_test, AesniRandomGenerator};
#[test]
fn prop_fork_first_state_table_index() {
aes_ctr_generic_test::prop_fork_first_state_table_index::<AesniBlockCipher>();
}
#[test]
fn prop_fork_last_bound_table_index() {
aes_ctr_generic_test::prop_fork_last_bound_table_index::<AesniBlockCipher>();
}
#[test]
fn prop_fork_parent_bound_table_index() {
aes_ctr_generic_test::prop_fork_parent_bound_table_index::<AesniBlockCipher>();
}
#[test]
fn prop_fork_parent_state_table_index() {
aes_ctr_generic_test::prop_fork_parent_state_table_index::<AesniBlockCipher>();
}
#[test]
fn prop_fork() {
aes_ctr_generic_test::prop_fork::<AesniBlockCipher>();
}
#[test]
fn prop_fork_children_remaining_bytes() {
aes_ctr_generic_test::prop_fork_children_remaining_bytes::<AesniBlockCipher>();
}
#[test]
fn prop_fork_parent_remaining_bytes() {
aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<AesniBlockCipher>();
}
#[test]
fn test_roughly_uniform() {
generator_generic_test::test_roughly_uniform::<AesniRandomGenerator>();
}
#[test]
fn test_generator_determinism() {
generator_generic_test::test_generator_determinism::<AesniRandomGenerator>();
}
#[test]
fn test_fork() {
generator_generic_test::test_fork_children::<AesniRandomGenerator>();
}
#[test]
#[should_panic(expected = "expected test panic")]
fn test_bounded_panic() {
generator_generic_test::test_bounded_none_should_panic::<AesniRandomGenerator>();
}
}

View File

@@ -1,15 +0,0 @@
//! A module implementing a random number generator, using the x86_64 `aesni` instructions.
//!
//! This module implements a cryptographically secure pseudorandom number generator
//! (CS-PRNG), using a fast block cipher. The implementation is based on the
//! [intel aesni white paper 323641-001 revision 3.0](https://www.intel.com/content/dam/doc/white-paper/advanced-encryption-standard-new-instructions-set-paper.pdf).
mod block_cipher;
mod generator;
pub use generator::*;
#[cfg(feature = "parallel")]
mod parallel;
#[cfg(feature = "parallel")]
pub use parallel::*;

View File

@@ -1,95 +0,0 @@
use super::*;
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
use rayon::prelude::*;
/// The parallel children iterator used by [`AesniRandomGenerator`].
///
/// Outputs the children generators one by one.
#[allow(clippy::type_complexity)]
pub struct ParallelAesniChildrenIterator(
rayon::iter::Map<
ParallelChildrenIterator<AesniBlockCipher>,
fn(AesCtrGenerator<AesniBlockCipher>) -> AesniRandomGenerator,
>,
);
impl ParallelIterator for ParallelAesniChildrenIterator {
type Item = AesniRandomGenerator;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.0.drive_unindexed(consumer)
}
}
impl IndexedParallelIterator for ParallelAesniChildrenIterator {
fn len(&self) -> usize {
self.0.len()
}
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
self.0.drive(consumer)
}
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
self.0.with_producer(callback)
}
}
impl ParallelRandomGenerator for AesniRandomGenerator {
type ParChildrenIter = ParallelAesniChildrenIterator;
fn par_try_fork(
&mut self,
n_children: ChildrenCount,
n_bytes: BytesPerChild,
) -> Result<Self::ParChildrenIter, ForkError> {
self.0
.par_try_fork(n_children, n_bytes)
.map(|iterator| ParallelAesniChildrenIterator(iterator.map(AesniRandomGenerator)))
}
}
#[cfg(test)]
mod test {
use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
#[test]
fn prop_fork_first_state_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<AesniBlockCipher>();
}
#[test]
fn prop_fork_last_bound_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<AesniBlockCipher>();
}
#[test]
fn prop_fork_parent_bound_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<AesniBlockCipher>();
}
#[test]
fn prop_fork_parent_state_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<AesniBlockCipher>();
}
#[test]
fn prop_fork_ttt() {
aes_ctr_parallel_generic_tests::prop_fork::<AesniBlockCipher>();
}
#[test]
fn prop_fork_children_remaining_bytes() {
aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<AesniBlockCipher>();
}
#[test]
fn prop_fork_parent_remaining_bytes() {
aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<AesniBlockCipher>();
}
}

View File

@@ -1,14 +0,0 @@
#[cfg(feature = "generator_x86_64_aesni")]
mod aesni;
#[cfg(feature = "generator_x86_64_aesni")]
pub use aesni::*;
#[cfg(feature = "generator_aarch64_aes")]
mod aarch64;
#[cfg(feature = "generator_aarch64_aes")]
pub use aarch64::*;
#[cfg(feature = "generator_fallback")]
mod soft;
#[cfg(feature = "generator_fallback")]
pub use soft::*;

View File

@@ -1,114 +0,0 @@
use crate::generators::aes_ctr::{
AesBlockCipher, AesIndex, AesKey, AES_CALLS_PER_BATCH, BYTES_PER_AES_CALL, BYTES_PER_BATCH,
};
use aes::cipher::generic_array::GenericArray;
use aes::cipher::{BlockEncrypt, KeyInit};
use aes::Aes128;
#[derive(Clone)]
pub struct SoftwareBlockCipher {
// Aes structure
aes: Aes128,
}
impl AesBlockCipher for SoftwareBlockCipher {
fn new(key: AesKey) -> SoftwareBlockCipher {
let key: [u8; BYTES_PER_AES_CALL] = key.0.to_ne_bytes();
let key = GenericArray::clone_from_slice(&key[..]);
let aes = Aes128::new(&key);
SoftwareBlockCipher { aes }
}
fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
aes_encrypt_many(
aes_ctr,
aes_ctr + 1,
aes_ctr + 2,
aes_ctr + 3,
aes_ctr + 4,
aes_ctr + 5,
aes_ctr + 6,
aes_ctr + 7,
&self.aes,
)
}
}
// Uses aes to encrypt many values at once. This allows a substantial speedup (around 30%)
// compared to the naive approach.
#[allow(clippy::too_many_arguments)]
fn aes_encrypt_many(
message_1: u128,
message_2: u128,
message_3: u128,
message_4: u128,
message_5: u128,
message_6: u128,
message_7: u128,
message_8: u128,
cipher: &Aes128,
) -> [u8; BYTES_PER_BATCH] {
let mut b1 = GenericArray::clone_from_slice(&message_1.to_ne_bytes()[..]);
let mut b2 = GenericArray::clone_from_slice(&message_2.to_ne_bytes()[..]);
let mut b3 = GenericArray::clone_from_slice(&message_3.to_ne_bytes()[..]);
let mut b4 = GenericArray::clone_from_slice(&message_4.to_ne_bytes()[..]);
let mut b5 = GenericArray::clone_from_slice(&message_5.to_ne_bytes()[..]);
let mut b6 = GenericArray::clone_from_slice(&message_6.to_ne_bytes()[..]);
let mut b7 = GenericArray::clone_from_slice(&message_7.to_ne_bytes()[..]);
let mut b8 = GenericArray::clone_from_slice(&message_8.to_ne_bytes()[..]);
cipher.encrypt_block(&mut b1);
cipher.encrypt_block(&mut b2);
cipher.encrypt_block(&mut b3);
cipher.encrypt_block(&mut b4);
cipher.encrypt_block(&mut b5);
cipher.encrypt_block(&mut b6);
cipher.encrypt_block(&mut b7);
cipher.encrypt_block(&mut b8);
let output_array: [[u8; BYTES_PER_AES_CALL]; AES_CALLS_PER_BATCH] = [
b1.into(),
b2.into(),
b3.into(),
b4.into(),
b5.into(),
b6.into(),
b7.into(),
b8.into(),
];
unsafe { *{ output_array.as_ptr() as *const [u8; BYTES_PER_BATCH] } }
}
#[cfg(test)]
mod test {
use super::*;
use std::convert::TryInto;
// Test vector for aes128, from the FIPS publication 197
const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);
#[test]
fn test_encrypt_many_messages() {
// Checks that encrypting many plaintext at the same time gives the correct output.
let key: [u8; BYTES_PER_AES_CALL] = CIPHER_KEY.to_ne_bytes();
let aes = Aes128::new(&GenericArray::from(key));
let ciphertexts = aes_encrypt_many(
PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT,
&aes,
);
let ciphertexts: [u8; BYTES_PER_BATCH] = ciphertexts[..].try_into().unwrap();
for i in 0..8 {
assert_eq!(
u128::from_ne_bytes(
ciphertexts[BYTES_PER_AES_CALL * i..BYTES_PER_AES_CALL * (i + 1)]
.try_into()
.unwrap()
),
CIPHERTEXT
);
}
}
}

View File

@@ -1,110 +0,0 @@
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
use crate::generators::implem::soft::block_cipher::SoftwareBlockCipher;
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
use crate::seeders::Seed;
/// A random number generator using a software implementation.
pub struct SoftwareRandomGenerator(pub(super) AesCtrGenerator<SoftwareBlockCipher>);
/// The children iterator used by [`SoftwareRandomGenerator`].
///
/// Outputs children generators one by one.
pub struct SoftwareChildrenIterator(ChildrenIterator<SoftwareBlockCipher>);
impl Iterator for SoftwareChildrenIterator {
type Item = SoftwareRandomGenerator;
fn next(&mut self) -> Option<Self::Item> {
self.0.next().map(SoftwareRandomGenerator)
}
}
impl RandomGenerator for SoftwareRandomGenerator {
type ChildrenIter = SoftwareChildrenIterator;
fn new(seed: Seed) -> Self {
SoftwareRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
}
fn remaining_bytes(&self) -> ByteCount {
self.0.remaining_bytes()
}
fn try_fork(
&mut self,
n_children: ChildrenCount,
n_bytes: BytesPerChild,
) -> Result<Self::ChildrenIter, ForkError> {
self.0
.try_fork(n_children, n_bytes)
.map(SoftwareChildrenIterator)
}
}
impl Iterator for SoftwareRandomGenerator {
type Item = u8;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::generators::aes_ctr::aes_ctr_generic_test;
use crate::generators::generator_generic_test;
#[test]
fn prop_fork_first_state_table_index() {
aes_ctr_generic_test::prop_fork_first_state_table_index::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_last_bound_table_index() {
aes_ctr_generic_test::prop_fork_last_bound_table_index::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_parent_bound_table_index() {
aes_ctr_generic_test::prop_fork_parent_bound_table_index::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_parent_state_table_index() {
aes_ctr_generic_test::prop_fork_parent_state_table_index::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork() {
aes_ctr_generic_test::prop_fork::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_children_remaining_bytes() {
aes_ctr_generic_test::prop_fork_children_remaining_bytes::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_parent_remaining_bytes() {
aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<SoftwareBlockCipher>();
}
#[test]
fn test_roughly_uniform() {
generator_generic_test::test_roughly_uniform::<SoftwareRandomGenerator>();
}
#[test]
fn test_fork() {
generator_generic_test::test_fork_children::<SoftwareRandomGenerator>();
}
#[test]
fn test_generator_determinism() {
generator_generic_test::test_generator_determinism::<SoftwareRandomGenerator>();
}
#[test]
#[should_panic(expected = "expected test panic")]
fn test_bounded_panic() {
generator_generic_test::test_bounded_none_should_panic::<SoftwareRandomGenerator>();
}
}

View File

@@ -1,11 +0,0 @@
//! A module using a software fallback implementation of random number generator.
mod block_cipher;
mod generator;
pub use generator::*;
#[cfg(feature = "parallel")]
mod parallel;
#[cfg(feature = "parallel")]
pub use parallel::*;

View File

@@ -1,94 +0,0 @@
use super::*;
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
use crate::generators::implem::soft::block_cipher::SoftwareBlockCipher;
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
use rayon::prelude::*;
/// The parallel children iterator used by [`SoftwareRandomGenerator`].
///
/// Outputs the children generators one by one.
#[allow(clippy::type_complexity)]
pub struct ParallelSoftwareChildrenIterator(
rayon::iter::Map<
ParallelChildrenIterator<SoftwareBlockCipher>,
fn(AesCtrGenerator<SoftwareBlockCipher>) -> SoftwareRandomGenerator,
>,
);
impl ParallelIterator for ParallelSoftwareChildrenIterator {
type Item = SoftwareRandomGenerator;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.0.drive_unindexed(consumer)
}
}
impl IndexedParallelIterator for ParallelSoftwareChildrenIterator {
fn len(&self) -> usize {
self.0.len()
}
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
self.0.drive(consumer)
}
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
self.0.with_producer(callback)
}
}
impl ParallelRandomGenerator for SoftwareRandomGenerator {
type ParChildrenIter = ParallelSoftwareChildrenIterator;
fn par_try_fork(
&mut self,
n_children: ChildrenCount,
n_bytes: BytesPerChild,
) -> Result<Self::ParChildrenIter, ForkError> {
self.0
.par_try_fork(n_children, n_bytes)
.map(|iterator| ParallelSoftwareChildrenIterator(iterator.map(SoftwareRandomGenerator)))
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
#[test]
fn prop_fork_first_state_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_last_bound_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_parent_bound_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_parent_state_table_index() {
aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork() {
aes_ctr_parallel_generic_tests::prop_fork::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_children_remaining_bytes() {
aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<SoftwareBlockCipher>();
}
#[test]
fn prop_fork_parent_remaining_bytes() {
aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<SoftwareBlockCipher>();
}
}

View File

@@ -1,235 +0,0 @@
//! A module containing random generators objects.
//!
//! See [crate-level](`crate`) explanations.
use crate::seeders::Seed;
use std::error::Error;
use std::fmt::{Display, Formatter};
/// The number of children created when a generator is forked.
#[derive(Debug, Copy, Clone)]
pub struct ChildrenCount(pub usize);
/// The number of bytes each child can generate, when a generator is forked.
#[derive(Debug, Copy, Clone)]
pub struct BytesPerChild(pub usize);
/// A structure representing the number of bytes between two table indices.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct ByteCount(pub u128);
/// An error occurring during a generator fork.
#[derive(Debug)]
pub enum ForkError {
ForkTooLarge,
ZeroChildrenCount,
ZeroBytesPerChild,
}
impl Display for ForkError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ForkError::ForkTooLarge => {
write!(
f,
"The children generators would output bytes after the parent bound. "
)
}
ForkError::ZeroChildrenCount => {
write!(
f,
"The number of children in the fork must be greater than zero."
)
}
ForkError::ZeroBytesPerChild => {
write!(
f,
"The number of bytes per child must be greater than zero."
)
}
}
}
}
impl Error for ForkError {}
/// A trait for cryptographically secure pseudo-random generators.
///
/// See the [crate-level](#crate) documentation for details.
pub trait RandomGenerator: Iterator<Item = u8> {
/// The iterator over children generators, returned by `try_fork` in case of success.
type ChildrenIter: Iterator<Item = Self>;
/// Creates a new generator from a seed.
///
/// This operation is usually costly to perform, as the aes round keys need to be generated from
/// the seed.
fn new(seed: Seed) -> Self;
/// Returns the number of bytes that can still be outputted by the generator before reaching its
/// bound.
///
/// Note:
/// -----
///
/// A fresh generator can generate 2¹³² bytes. Unfortunately, no rust integer type in is able
/// to encode such a large number. Consequently [`ByteCount`] uses the largest integer type
/// available to encode this value: the `u128` type. For this reason, this method does not
/// effectively return the number of remaining bytes, but instead
/// `min(2¹²⁸-1, remaining_bytes)`.
fn remaining_bytes(&self) -> ByteCount;
/// Returns the next byte of the stream, if the generator did not yet reach its bound.
fn next_byte(&mut self) -> Option<u8> {
self.next()
}
/// Tries to fork the generator into an iterator of `n_children` new generators, each able to
/// output `n_bytes` bytes.
///
/// Note:
/// -----
///
/// To be successful, the number of remaining bytes for the parent generator must be larger than
/// `n_children*n_bytes`.
fn try_fork(
&mut self,
n_children: ChildrenCount,
n_bytes: BytesPerChild,
) -> Result<Self::ChildrenIter, ForkError>;
}
/// A trait extending [`RandomGenerator`] to the parallel iterators of `rayon`.
#[cfg(feature = "parallel")]
pub trait ParallelRandomGenerator: RandomGenerator + Send {
/// The iterator over children generators, returned by `par_try_fork` in case of success.
type ParChildrenIter: rayon::prelude::IndexedParallelIterator<Item = Self>;
/// Tries to fork the generator into a parallel iterator of `n_children` new generators, each
/// able to output `n_bytes` bytes.
///
/// Note:
/// -----
///
/// To be successful, the number of remaining bytes for the parent generator must be larger than
/// `n_children*n_bytes`.
fn par_try_fork(
&mut self,
n_children: ChildrenCount,
n_bytes: BytesPerChild,
) -> Result<Self::ParChildrenIter, ForkError>;
}
mod aes_ctr;
mod implem;
pub use implem::*;
#[cfg(test)]
pub mod generator_generic_test {
#![allow(unused)] // to please clippy when tests are not activated
use super::*;
use rand::Rng;
const REPEATS: usize = 1_000;
fn any_seed() -> impl Iterator<Item = Seed> {
std::iter::repeat_with(|| Seed(rand::thread_rng().gen()))
}
fn some_children_count() -> impl Iterator<Item = ChildrenCount> {
std::iter::repeat_with(|| ChildrenCount(rand::thread_rng().gen::<usize>() % 16 + 1))
}
fn some_bytes_per_child() -> impl Iterator<Item = BytesPerChild> {
std::iter::repeat_with(|| BytesPerChild(rand::thread_rng().gen::<usize>() % 128 + 1))
}
/// Checks that the PRNG roughly generates uniform numbers.
///
/// To do that, we perform an histogram of the occurrences of each byte value, over a fixed
/// number of samples and check that the empirical probabilities of the bins are close to
/// the theoretical probabilities.
pub fn test_roughly_uniform<G: RandomGenerator>() {
// Number of bins to use for the histogram.
const N_BINS: usize = u8::MAX as usize + 1;
// Number of samples to use for the histogram.
let n_samples = 10_000_000_usize;
// Theoretical probability of a each bins.
let expected_prob: f64 = 1. / N_BINS as f64;
// Absolute error allowed on the empirical probabilities.
// This value was tuned to make the test pass on an arguably correct state of
// implementation. 10^-4 precision is arguably pretty fine for this rough test, but it would
// be interesting to improve this test.
let precision = 10f64.powi(-3);
for _ in 0..REPEATS {
// We instantiate a new generator.
let seed = any_seed().next().unwrap();
let mut generator = G::new(seed);
// We create a new histogram
let mut counts = [0usize; N_BINS];
// We fill the histogram.
for _ in 0..n_samples {
counts[generator.next_byte().unwrap() as usize] += 1;
}
// We check that the empirical probabilities are close enough to the theoretical one.
counts
.iter()
.map(|a| (*a as f64) / (n_samples as f64))
.for_each(|a| assert!((a - expected_prob).abs() < precision))
}
}
/// Checks that given a state and a key, the PRNG is determinist.
pub fn test_generator_determinism<G: RandomGenerator>() {
for _ in 0..REPEATS {
let seed = any_seed().next().unwrap();
let mut first_generator = G::new(seed);
let mut second_generator = G::new(seed);
for _ in 0..1024 {
assert_eq!(first_generator.next(), second_generator.next());
}
}
}
/// Checks that forks returns a bounded child, and that the proper number of bytes can be
/// generated.
pub fn test_fork_children<G: RandomGenerator>() {
for _ in 0..REPEATS {
let ((seed, n_children), n_bytes) = any_seed()
.zip(some_children_count())
.zip(some_bytes_per_child())
.next()
.unwrap();
let mut gen = G::new(seed);
let mut bounded = gen.try_fork(n_children, n_bytes).unwrap().next().unwrap();
assert_eq!(bounded.remaining_bytes(), ByteCount(n_bytes.0 as u128));
for _ in 0..n_bytes.0 {
bounded.next().unwrap();
}
// Assert we are at the bound
assert!(bounded.next().is_none());
}
}
/// Checks that a bounded prng returns none when exceeding the allowed number of bytes.
///
/// To properly check for panic use `#[should_panic(expected = "expected test panic")]` as an
/// attribute on the test function.
pub fn test_bounded_none_should_panic<G: RandomGenerator>() {
let ((seed, n_children), n_bytes) = any_seed()
.zip(some_children_count())
.zip(some_bytes_per_child())
.next()
.unwrap();
let mut gen = G::new(seed);
let mut bounded = gen.try_fork(n_children, n_bytes).unwrap().next().unwrap();
assert_eq!(bounded.remaining_bytes(), ByteCount(n_bytes.0 as u128));
for _ in 0..n_bytes.0 {
assert!(bounded.next().is_some());
}
// One call too many, should panic
bounded.next().ok_or("expected test panic").unwrap();
}
}

View File

@@ -1,114 +0,0 @@
#![deny(rustdoc::broken_intra_doc_links)]
//! Cryptographically secure pseudo random number generator.
//!
//! Welcome to the `concrete-csprng` documentation.
//!
//! This crate provides a fast cryptographically secure pseudo-random number generator, suited to
//! work in a multithreaded setting.
//!
//! Random Generators
//! =================
//!
//! The central abstraction of this crate is the [`RandomGenerator`](generators::RandomGenerator)
//! trait, which is implemented by different types, each supporting a different platform. In
//! essence, a type implementing [`RandomGenerator`](generators::RandomGenerator) is a type that
//! outputs a new pseudo-random byte at each call to
//! [`next_byte`](generators::RandomGenerator::next_byte). Such a generator `g` can be seen as
//! enclosing a growing index into an imaginary array of pseudo-random bytes:
//! ```ascii
//! 0 1 2 3 4 5 6 7 8 9 M-1 │
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
//! ┃ │ │ │ │ │ │ │ │ │ │...│ ┃ │
//! ┗↥┷━┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
//! g │
//! │
//! g.next_byte() │
//! │
//! 0 1 2 3 4 5 6 7 8 9 M-1 │
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
//! ┃╳│ │ │ │ │ │ │ │ │ │...│ ┃ │
//! ┗━┷↥┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
//! g │
//! │
//! g.next_byte() │ legend:
//! │ -------
//! 0 1 2 3 4 5 6 7 8 9 M-1 │ ↥ : next byte to be outputted by g
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │ │: byte not yet outputted by g
//! ┃╳│╳│ │ │ │ │ │ │ │ │...│ ┃ │ │╳│: byte already outputted by g
//! ┗━┷━┷↥┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
//! g 🭭
//! ```
//!
//! While being large, this imaginary array is still bounded to M = 2¹³² bytes. Consequently, a
//! generator is always bounded to a maximal index. That is, there is always a max amount of
//! elements of this array that can be outputted by the generator. By default, generators created
//! via [`new`](generators::RandomGenerator::new) are always bounded to M-1.
//!
//! Tree partition of the pseudo-random stream
//! ==========================================
//!
//! One particularity of this implementation is that you can use the
//! [`try_fork`](generators::RandomGenerator::try_fork) method to create an arbitrary partition tree
//! of a region of this array. Indeed, calling `try_fork(nc, nb)` outputs `nc` new generators, each
//! able to output `nb` bytes. The `try_fork` method ensures that the states and bounds of the
//! parent and children generators are set so as to prevent the same substream to be outputted
//! twice:
//! ```ascii
//! 0 1 2 3 4 5 6 7 8 9 M │
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
//! ┃P│P│P│P│P│P│P│P│P│P│...│P┃ │
//! ┗↥┷━┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
//! p │
//! │
//! (a,b) = p.fork(2,4) │
//! │
//! 0 1 2 3 4 5 6 7 8 9 M │
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
//! ┃A│A│A│A│B│B│B│B│P│P│...│P┃ │
//! ┗↥┷━┷━┷━┷↥┷━┷━┷━┷↥┷━┷━━━┷━┛ │
//! a b p │
//! │ legend:
//! (c,d) = b.fork(2, 1) │ -------
//! │ ↥ : next byte to be outputted by p
//! 0 1 2 3 4 5 6 7 8 9 M │ │P│: byte to be outputted by p
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │╳│: byte already outputted
//! ┃A│A│A│A│C│D│B│B│P│P│...│P┃ │
//! ┗↥┷━┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
//! a c d b p 🭭
//! ```
//!
//! This makes it possible to consume the stream at different places. This is particularly useful in
//! a multithreaded setting, in which we want to use the same generator from different independent
//! threads:
//!
//! ```ascii
//! 0 1 2 3 4 5 6 7 8 9 M │
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
//! ┃A│A│A│A│C│D│B│B│P│P│...│P┃ │
//! ┗↥┷━┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
//! a c d b p │
//! │
//! a.next_byte() │
//! │
//! 0 1 2 3 4 5 6 7 8 9 M │
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
//! ┃│A│A│A│C│D│B│B│P│P│...│P┃ │
//! ┗━┷↥┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
//! a c d b p │
//! │ legend:
//! b.next_byte() │ -------
//! │ ↥ : next byte to be outputted by p
//! 0 1 2 3 4 5 6 7 8 9 M │ │P│: byte to be outputted by p
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │╳│: byte already outputted
//! ┃│A│A│A│C│D││B│P│P│...│P┃ │
//! ┗━┷↥┷━┷━┷↥┷↥┷━┷↥┷↥┷━┷━━━┷━┛ │
//! a c d b p 🭭
//! ```
//!
//! Implementation
//! ==============
//!
//! The implementation is based on the AES blockcipher used in counter (CTR) mode, as presented
//! in the ISO/IEC 18033-4 document.
pub mod generators;
pub mod seeders;

View File

@@ -1,141 +0,0 @@
use crate::seeders::{Seed, Seeder};
use libc;
use std::cmp::Ordering;
/// There is no `rseed` equivalent in the ARM specification until `ARMv8.5-A`.
/// However it seems that these instructions are not exposed in `core::arch::aarch64`.
///
/// Our primary interest for supporting aarch64 targets is AppleSilicon support
/// which for the M1 macs available, they are based on the `ARMv8.4-A` set.
///
/// So we fall back to using a function from Apple's API which
/// uses the [Secure Enclave] to generate cryptographically secure random bytes.
///
/// [Secure Enclave]: https://support.apple.com/fr-fr/guide/security/sec59b0b31ff/web
mod secure_enclave {
pub enum __SecRandom {}
pub type SecRandomRef = *const __SecRandom;
use libc::{c_int, c_void};
#[link(name = "Security", kind = "framework")]
extern "C" {
pub static kSecRandomDefault: SecRandomRef;
pub fn SecRandomCopyBytes(rnd: SecRandomRef, count: usize, bytes: *mut c_void) -> c_int;
}
pub fn generate_random_bytes(bytes: &mut [u8]) -> std::io::Result<()> {
// As per Apple's documentation:
// - https://developer.apple.com/documentation/security/randomization_services?language=objc
// - https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc
//
// The `SecRandomCopyBytes` "Generate cryptographically secure random numbers"
unsafe {
let res = SecRandomCopyBytes(
kSecRandomDefault,
bytes.len(),
bytes.as_mut_ptr() as *mut c_void,
);
if res != 0 {
Err(std::io::Error::last_os_error())
} else {
Ok(())
}
}
}
}
/// A seeder which uses the `SecRandomCopyBytes` function from Apple's `Security` framework.
///
/// <https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc>
pub struct AppleSecureEnclaveSeeder;
impl Seeder for AppleSecureEnclaveSeeder {
fn seed(&mut self) -> Seed {
// 16 bytes == 128 bits
let mut bytes = [0u8; 16];
secure_enclave::generate_random_bytes(&mut bytes)
.expect("Failure while using Apple secure enclave: {err:?}");
Seed(u128::from_le_bytes(bytes))
}
fn is_available() -> bool {
let os_version_sysctl_name = match std::ffi::CString::new("kern.osproductversion") {
Ok(c_str) => c_str,
_ => return false,
};
// Big enough buffer to get a version output as an ASCII string
const OUTPUT_BUFFER_SIZE: usize = 64;
let mut output_buffer_size = OUTPUT_BUFFER_SIZE;
let mut output_buffer = [0u8; OUTPUT_BUFFER_SIZE];
let res = unsafe {
libc::sysctlbyname(
os_version_sysctl_name.as_ptr() as *const _ as *const _,
&mut output_buffer as *mut _ as *mut _,
&mut output_buffer_size as *mut _ as *mut _,
std::ptr::null_mut(),
0,
)
};
if res != 0 {
return false;
}
let result_c_str =
match std::ffi::CStr::from_bytes_with_nul(&output_buffer[..output_buffer_size]) {
Ok(c_str) => c_str,
_ => return false,
};
let result_string = match result_c_str.to_str() {
Ok(str) => str,
_ => return false,
};
// Normally we get a major version and minor version
let split_string: Vec<&str> = result_string.split('.').collect();
let mut major = -1;
let mut minor = -1;
// Major part of the version string
if !split_string.is_empty() {
major = match split_string[0].parse() {
Ok(major_from_str) => major_from_str,
_ => return false,
};
}
// SecRandomCopyBytes is available starting with mac OS 10.7
// https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc
// This match pattern is recommended by clippy, so we oblige here
match major.cmp(&10) {
Ordering::Greater => true,
Ordering::Equal => {
// Minor part of the version string
if split_string.len() >= 2 {
minor = match split_string[1].parse() {
Ok(minor_from_str) => minor_from_str,
_ => return false,
};
}
minor >= 7
}
Ordering::Less => false,
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
#[test]
fn check_bounded_sequence_difference() {
check_seeder_fixed_sequences_different(|_| AppleSecureEnclaveSeeder);
}
}

View File

@@ -1,14 +0,0 @@
#[cfg(target_os = "macos")]
mod apple_secure_enclave_seeder;
#[cfg(target_os = "macos")]
pub use apple_secure_enclave_seeder::AppleSecureEnclaveSeeder;
#[cfg(feature = "seeder_x86_64_rdseed")]
mod rdseed;
#[cfg(feature = "seeder_x86_64_rdseed")]
pub use rdseed::RdseedSeeder;
#[cfg(feature = "seeder_unix")]
mod unix;
#[cfg(feature = "seeder_unix")]
pub use unix::UnixSeeder;

View File

@@ -1,51 +0,0 @@
use crate::seeders::{Seed, Seeder};
/// A seeder which uses the `rdseed` x86_64 instruction.
///
/// The `rdseed` instruction allows to deliver seeds from a hardware source of entropy see
/// <https://www.felixcloutier.com/x86/rdseed> .
pub struct RdseedSeeder;
impl Seeder for RdseedSeeder {
fn seed(&mut self) -> Seed {
Seed(unsafe { rdseed_random_m128() })
}
fn is_available() -> bool {
is_x86_feature_detected!("rdseed")
}
}
// Generates a random 128 bits value from rdseed
#[target_feature(enable = "rdseed")]
unsafe fn rdseed_random_m128() -> u128 {
let mut rand1: u64 = 0;
let mut rand2: u64 = 0;
let mut output_bytes = [0u8; 16];
unsafe {
loop {
if core::arch::x86_64::_rdseed64_step(&mut rand1) == 1 {
break;
}
}
loop {
if core::arch::x86_64::_rdseed64_step(&mut rand2) == 1 {
break;
}
}
}
output_bytes[0..8].copy_from_slice(&rand1.to_ne_bytes());
output_bytes[8..16].copy_from_slice(&rand2.to_ne_bytes());
u128::from_ne_bytes(output_bytes)
}
#[cfg(test)]
mod test {
use super::*;
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
#[test]
fn check_bounded_sequence_difference() {
check_seeder_fixed_sequences_different(|_| RdseedSeeder);
}
}

View File

@@ -1,72 +0,0 @@
use crate::seeders::{Seed, Seeder};
use std::fs::File;
use std::io::Read;
/// A seeder which uses the `/dev/random` source on unix-like systems.
pub struct UnixSeeder {
counter: u128,
secret: u128,
file: File,
}
impl UnixSeeder {
/// Creates a new seeder from a user defined secret.
///
/// Important:
/// ----------
///
/// This secret is used to ensure the quality of the seed in scenarios where `/dev/random` may
/// be compromised.
///
/// The attack hypotheses are as follow:
/// - `/dev/random` output can be predicted by a process running on the machine by just
/// observing various states of the machine
/// - The attacker cannot read data from the process where `concrete-csprng` is running
///
/// Using a secret in `concrete-csprng` allows to generate values that the attacker cannot
/// predict, making this seeder secure on systems were `/dev/random` outputs can be
/// predicted.
pub fn new(secret: u128) -> UnixSeeder {
let file = std::fs::File::open("/dev/random").expect("Failed to open /dev/random .");
let counter = std::time::UNIX_EPOCH
.elapsed()
.expect("Failed to initialize unix seeder.")
.as_nanos();
UnixSeeder {
secret,
counter,
file,
}
}
}
impl Seeder for UnixSeeder {
fn seed(&mut self) -> Seed {
let output = self.secret ^ self.counter ^ dev_random(&mut self.file);
self.counter = self.counter.wrapping_add(1);
Seed(output)
}
fn is_available() -> bool {
cfg!(target_family = "unix")
}
}
fn dev_random(random: &mut File) -> u128 {
let mut buf = [0u8; 16];
random
.read_exact(&mut buf[..])
.expect("Failed to read from /dev/random .");
u128::from_ne_bytes(buf)
}
#[cfg(test)]
mod test {
use super::*;
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
#[test]
fn check_bounded_sequence_difference() {
check_seeder_fixed_sequences_different(UnixSeeder::new);
}
}

View File

@@ -1,47 +0,0 @@
//! A module containing seeders objects.
//!
//! When initializing a generator, one needs to provide a [`Seed`], which is then used as key to the
//! AES blockcipher. As a consequence, the quality of the outputs of the generator is directly
//! conditioned by the quality of this seed. This module proposes different mechanisms to deliver
//! seeds that can accommodate varying scenarios.
/// A seed value, used to initialize a generator.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct Seed(pub u128);
/// A trait representing a seeding strategy.
pub trait Seeder {
/// Generates a new seed.
fn seed(&mut self) -> Seed;
/// Check whether the seeder can be used on the current machine. This function may check if some
/// required CPU features are available or if some OS features are available for example.
fn is_available() -> bool
where
Self: Sized;
}
mod implem;
pub use implem::*;
#[cfg(test)]
mod generic_tests {
use crate::seeders::Seeder;
/// Naively verifies that two fixed-size sequences generated by repeatedly calling the seeder
/// are different.
#[allow(unused)] // to please clippy when tests are not activated
pub fn check_seeder_fixed_sequences_different<S: Seeder, F: Fn(u128) -> S>(
construct_seeder: F,
) {
const SEQUENCE_SIZE: usize = 500;
const REPEATS: usize = 10_000;
for i in 0..REPEATS {
let mut seeder = construct_seeder(i as u128);
let orig_seed = seeder.seed();
for _ in 0..SEQUENCE_SIZE {
assert_ne!(seeder.seed(), orig_seed);
}
}
}
}

View File

@@ -11,7 +11,6 @@ RUN sed -i 's|^deb http://archive.ubuntu.com/ubuntu/|deb http://mirror.ubuntu.ik
ENV CARGO_TARGET_DIR=/root/tfhe-rs-target
ARG RUST_TOOLCHAIN="stable"
ARG NODE_VERSION
WORKDIR /tfhe-wasm-tests
@@ -35,6 +34,6 @@ RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs > install-rustup.s
chmod +x install-node.sh && \
./install-node.sh && \
. "$HOME/.nvm/nvm.sh" && \
bash -i -c 'nvm install ${NODE_VERSION} && nvm use ${NODE_VERSION}'
bash -i -c 'nvm install node && nvm use node'
WORKDIR /tfhe-wasm-tests/tfhe-rs/

View File

@@ -7,12 +7,10 @@ function usage() {
echo
echo "--help Print this message"
echo "--build-only Pass to only build the tests without running them"
echo "--forward-compat Indicate if we have forward compatibility enabled"
echo
}
BUILD_ONLY=0
forward_compat="OFF"
while [ -n "$1" ]
do
@@ -26,11 +24,6 @@ do
BUILD_ONLY=1
;;
"--forward-compat" )
shift
forward_compat="$1"
;;
*)
echo "Unknown param : $1"
exit 1
@@ -42,18 +35,12 @@ done
CURR_DIR="$(dirname "$0")"
REPO_ROOT="${CURR_DIR}/.."
TFHE_BUILD_DIR="${REPO_ROOT}/tfhe/build/"
DEFINE_FORWARD_COMPAT="OFF"
if [[ "${forward_compat}" == "ON" ]]; then
DEFINE_FORWARD_COMPAT="ON"
fi
mkdir -p "${TFHE_BUILD_DIR}"
cd "${TFHE_BUILD_DIR}"
cmake .. -DCMAKE_BUILD_TYPE=RELEASE -DCARGO_PROFILE="${CARGO_PROFILE}" \
"-DWITH_FORWARD_COMPATIBILITY=${DEFINE_FORWARD_COMPAT}"
cmake .. -DCMAKE_BUILD_TYPE=RELEASE -DCARGO_PROFILE="${CARGO_PROFILE}"
make -j

View File

@@ -1,77 +0,0 @@
#!/usr/bin/env bash
# dieharder does not support running a subset of its tests, so we'll check which ones are not good
# and ignore the output from those tests in the final log
set -e
DIEHARDER_RUN_LOG_FILE="dieharder_run.log"
# Collect the numeric ids of the tests dieharder itself does not rate as
# 'good' in its test listing (`dieharder -l`).
bad_tests="$(dieharder -l | \
# select lines with the -d
grep -w '\-d' | \
# forget about the good tests
grep -v -i 'good' | \
# get the test id
cut -d ' ' -f 4 | \
# nice formatting
xargs)"
# Turn the ids into an extended-regex alternation of human-readable test
# names; it is used below to filter those tests out of the failure report.
bad_test_filter=""
for bad_test in ${bad_tests}; do
    bad_test_filter="${bad_test_filter:+${bad_test_filter}|}$(dieharder -d "${bad_test}" -t 1 -p 1 -D test_name | xargs)"
done
echo "The following tests will be ignored as they are marked as either 'suspect' or 'do not use': "
echo ""
echo "${bad_test_filter}"
echo ""
# by default we may have no pv just forward the input
pv="cat"
if which pv > /dev/null; then
    pv="pv -t -a -b"
fi
rm -f "${DIEHARDER_RUN_LOG_FILE}"
# ignore potential errors and parse the log afterwards
set +e
# Stream the generator's output through (optional) pv into dieharder; the
# generator's stderr and dieharder's results both land in the log file,
# which decides the final exit status below.
# We are writing in both cases
# shellcheck disable=SC2094
./target/release/examples/generate 2>"${DIEHARDER_RUN_LOG_FILE}" | \
$pv | \
# -a: all tests
# -g 200: get random bytes from input
# -Y 1: disambiguate results, i.e. if a weak result appear check if it's a random failure/weakness
# -k 2: better maths formulas to determine some test statistics
dieharder -a -g 200 -Y 1 -k 2 | \
tee -a "${DIEHARDER_RUN_LOG_FILE}"
set -e
printf "\n\n"
cat "${DIEHARDER_RUN_LOG_FILE}"
# No 'failed' marker anywhere in the log means a clean run.
if ! grep -q -i "failed" < "${DIEHARDER_RUN_LOG_FILE}"; then
    echo "All tests passed!"
    exit 0
fi
printf "\n\n"
failed_tests="$(grep -i "failed" < "${DIEHARDER_RUN_LOG_FILE}")"
# Keep only failures NOT produced by the flagged tests; the trailing sed
# strips the final newline so the emptiness test below behaves.
true_failed_test="$(grep -i "failed" < "${DIEHARDER_RUN_LOG_FILE}" | { grep -v -E "${bad_test_filter}" || true; } | sed -z '$ s/\n$//')"
if [[ "${true_failed_test}" == "" ]]; then
    echo "There were test failures, but the tests were either marked as 'suspect' or 'do not use'"
    echo "${failed_tests}"
    exit 0
fi
echo "The following tests failed:"
echo "${true_failed_test}"
exit 1

View File

@@ -3,13 +3,12 @@
set -e
function usage() {
echo "$0: integer test runner"
echo "$0: shortint test runner"
echo
echo "--help Print this message"
echo "--rust-toolchain The toolchain to run the tests with default: stable"
echo "--multi-bit Run multi-bit tests only: default off"
echo "--cargo-profile The cargo profile used to build tests"
echo "--tfhe-package The package spec like tfhe@0.4.2, default=tfhe"
echo
}
@@ -17,7 +16,6 @@ RUST_TOOLCHAIN="+stable"
multi_bit=""
not_multi_bit="_multi_bit"
cargo_profile="release"
tfhe_package="tfhe"
while [ -n "$1" ]
do
@@ -42,11 +40,6 @@ do
cargo_profile="$1"
;;
"--tfhe-package" )
shift
tfhe_package="$1"
;;
*)
echo "Unknown param : $1"
exit 1
@@ -109,7 +102,7 @@ and not test(/.*default_add_sequence_multi_thread_param_message_3_carry_3_ks_pbs
cargo "${RUST_TOOLCHAIN}" nextest run \
--tests \
--cargo-profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--profile ci \
--features="${ARCH_FEATURE}",integer,internal-keycache \
--test-threads "${n_threads}" \
@@ -118,7 +111,7 @@ and not test(/.*default_add_sequence_multi_thread_param_message_3_carry_3_ks_pbs
if [[ "${multi_bit}" == "" ]]; then
cargo "${RUST_TOOLCHAIN}" test \
--profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--features="${ARCH_FEATURE}",integer,internal-keycache \
--doc \
-- integer::
@@ -153,7 +146,7 @@ and not test(/.*default_add_sequence_multi_thread_param_message_3_carry_3_ks_pbs
cargo "${RUST_TOOLCHAIN}" nextest run \
--tests \
--cargo-profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--profile ci \
--features="${ARCH_FEATURE}",integer,internal-keycache \
--test-threads $num_threads \
@@ -162,7 +155,7 @@ and not test(/.*default_add_sequence_multi_thread_param_message_3_carry_3_ks_pbs
if [[ "${multi_bit}" == "" ]]; then
cargo "${RUST_TOOLCHAIN}" test \
--profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--features="${ARCH_FEATURE}",integer,internal-keycache \
--doc \
-- --test-threads="$(${nproc_bin})" integer::

View File

@@ -9,14 +9,12 @@ function usage() {
echo "--rust-toolchain The toolchain to run the tests with default: stable"
echo "--multi-bit Run multi-bit tests only: default off"
echo "--cargo-profile The cargo profile used to build tests"
echo "--tfhe-package The package spec like tfhe@0.4.2, default=tfhe"
echo
}
RUST_TOOLCHAIN="+stable"
multi_bit=""
cargo_profile="release"
tfhe_package="tfhe"
while [ -n "$1" ]
do
@@ -40,11 +38,6 @@ do
cargo_profile="$1"
;;
"--tfhe-package" )
shift
tfhe_package="$1"
;;
*)
echo "Unknown param : $1"
exit 1
@@ -117,7 +110,7 @@ and not test(~smart_add_and_mul)""" # This test is too slow
cargo "${RUST_TOOLCHAIN}" nextest run \
--tests \
--cargo-profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--profile ci \
--features="${ARCH_FEATURE}",shortint,internal-keycache \
--test-threads "${n_threads_small}" \
@@ -134,7 +127,7 @@ and not test(~smart_add_and_mul)"""
cargo "${RUST_TOOLCHAIN}" nextest run \
--tests \
--cargo-profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--profile ci \
--features="${ARCH_FEATURE}",shortint,internal-keycache \
--test-threads "${n_threads_big}" \
@@ -143,7 +136,7 @@ and not test(~smart_add_and_mul)"""
if [[ "${multi_bit}" == "" ]]; then
cargo "${RUST_TOOLCHAIN}" test \
--profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--features="${ARCH_FEATURE}",shortint,internal-keycache \
--doc \
-- shortint::
@@ -182,7 +175,7 @@ and not test(~smart_add_and_mul)""" # This test is too slow
cargo "${RUST_TOOLCHAIN}" nextest run \
--tests \
--cargo-profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--profile ci \
--features="${ARCH_FEATURE}",shortint,internal-keycache \
--test-threads "$(${nproc_bin})" \
@@ -191,7 +184,7 @@ and not test(~smart_add_and_mul)""" # This test is too slow
if [[ "${multi_bit}" == "" ]]; then
cargo "${RUST_TOOLCHAIN}" test \
--profile "${cargo_profile}" \
--package "${tfhe_package}" \
--package tfhe \
--features="${ARCH_FEATURE}",shortint,internal-keycache \
--doc \
-- --test-threads="$(${nproc_bin})" shortint::

View File

@@ -1,90 +0,0 @@
#!/usr/bin/env bash
# Symlinks the freshest cargo-built static/dynamic C libraries to stable
# names without the per-build fingerprint suffix.
set -e
# Print the accepted command line flags.
function usage() {
    echo "$0: symlink C libs to names without the variable fingerprint part"
    echo
    echo "--help Print this message"
    echo "--cargo-profile The cargo profile used"
    echo "--lib-name The lib name without the lib prefix, '-' will be converted to '_'"
    echo
}
CARGO_PROFILE=""
LIBNAME=""
# Parse command line arguments.
while [ -n "$1" ]
do
    case "$1" in
        "--help" | "-h" )
            usage
            exit 0
            ;;
        "--cargo-profile" )
            shift
            CARGO_PROFILE="$1"
            ;;
        "--lib-name" )
            shift
            LIBNAME="$1"
            ;;
        *)
            echo "Unknown param : $1"
            exit 1
            ;;
    esac
    shift
done
if [[ "${CARGO_PROFILE}" == "" ]]; then
    echo "CARGO_PROFILE is not set, aborting."
    exit 1
fi
UNAME="$(uname)"
if [[ "${UNAME}" != "Linux" && "${UNAME}" != "Darwin" ]]; then
    echo "This script is compatible with Linux and macOS and may not work for your system"
fi
# Add the lib prefix
LIB_OF_INTEREST="lib${LIBNAME}"
# Artifact file names use '_' where the crate name has '-'.
LIB_OF_INTEREST_UNDERSCORE="${LIB_OF_INTEREST//-/_}"
CURR_DIR="$(dirname "$0")"
REPO_DIR="${CURR_DIR}/.."
OUTPUT_TARGET_DIR="${REPO_DIR}/target/${CARGO_PROFILE}"
OUTPUT_DEPS_DIR="${OUTPUT_TARGET_DIR}/deps"
cd "${OUTPUT_DEPS_DIR}"
echo "In ${PWD}"
# Find most recent file with similar name
MAYBE_STATIC_LIB="$(find . -maxdepth 1 -type f -name "${LIB_OF_INTEREST_UNDERSCORE}*.a")"
if [[ "${MAYBE_STATIC_LIB}" != "" ]]; then
    # Pick the most recently modified candidate and point a stable name at it.
    STATIC_LIB="$(find . -maxdepth 1 -type f -name "${LIB_OF_INTEREST_UNDERSCORE}*.a" -print0 \
        | xargs -0 ls -t | head -n 1)"
    echo "Symlinking ${STATIC_LIB} to ${LIB_OF_INTEREST_UNDERSCORE}.a"
    ln -snf "${STATIC_LIB}" "${LIB_OF_INTEREST_UNDERSCORE}.a"
else
    echo "Could not find static lib that might correspond to $1, is there a typo in the lib name?"
fi
# Dynamic libraries use a platform dependent extension.
DYNAMIC_LIB_EXT="so"
if [[ "${UNAME}" == "Darwin" ]]; then
    DYNAMIC_LIB_EXT="dylib"
fi
DYNAMIC_LIB_PATTERN="${LIB_OF_INTEREST_UNDERSCORE}*.${DYNAMIC_LIB_EXT}"
MAYBE_DYNAMIC_LIB="$(find . -maxdepth 1 -type f -name "${DYNAMIC_LIB_PATTERN}")"
if [[ "${MAYBE_DYNAMIC_LIB}" != "" ]]; then
    # Same most-recent-first selection as for the static lib.
    DYNAMIC_LIB="$(find . -maxdepth 1 -type f -name "${DYNAMIC_LIB_PATTERN}" -print0 \
        | xargs -0 ls -t | head -n 1)"
    echo "Symlinking ${DYNAMIC_LIB} to ${LIB_OF_INTEREST_UNDERSCORE}.${DYNAMIC_LIB_EXT}"
    ln -snf "${DYNAMIC_LIB}" "${LIB_OF_INTEREST_UNDERSCORE}.${DYNAMIC_LIB_EXT}"
else
    echo "Could not find dynamic lib that might correspond to $1, is there a typo in the lib name?"
fi

View File

@@ -6,7 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = "=4.4.4"
clap = "3.1"
lazy_static = "1.4"
log = "0.4"
simplelog = "0.12"

View File

@@ -196,7 +196,7 @@ fn find_contiguous_doc_test<'a>(
fn find_contiguous_part_in_doc_test_or_comment(
part_is_code_block: bool,
full_doc_comment_content: &[CommentContent],
full_doc_comment_content: &Vec<CommentContent>,
part_start_idx: usize,
) -> (usize, usize) {
let mut next_line_idx = part_start_idx + 1;
@@ -348,7 +348,7 @@ fn process_doc_lines_until_impossible<'a>(
}
fn process_non_doc_lines_until_impossible(
lines: &[&str],
lines: &Vec<&str>,
rewritten_content: &mut String,
mut line_idx: usize,
) -> usize {

2
tfhe/.gitignore vendored
View File

@@ -1 +1 @@
build/
build/

View File

@@ -1,6 +1,6 @@
[package]
name = "tfhe"
version = "0.4.4"
version = "0.4.0"
edition = "2021"
readme = "../README.md"
keywords = ["fully", "homomorphic", "encryption", "fhe", "cryptography"]
@@ -17,7 +17,7 @@ exclude = [
"/js_on_wasm_tests/",
"/web_wasm_parallel_tests/",
]
rust-version = "1.72"
rust-version = "1.67"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -26,30 +26,29 @@ rand = "0.8.5"
rand_distr = "0.4.3"
paste = "1.0.7"
lazy_static = { version = "1.4.0" }
criterion = "0.5.1"
criterion = "0.4.0"
doc-comment = "0.3.3"
serde_json = "1.0.94"
# clap has to be pinned as its minimum supported rust version
# changes often between minor releases, which breaks our CI
clap = { version = "=4.4.4", features = ["derive"] }
clap = { version = "=4.2.7", features = ["derive"] }
# Used in user documentation
bincode = "1.3.3"
fs2 = { version = "0.4.3" }
itertools = "0.11.0"
itertools = "0.10.5"
num_cpus = "1.15"
# For erf and normality test
libm = "0.2.6"
# Begin regex-engine deps
test-case = "3.1.0"
combine = "4.6.6"
env_logger = "0.10.0"
log = "0.4.19"
# End regex-engine deps
[build-dependencies]
cbindgen = { package = "tfhe-c-api-bindgen", version = "0.26.1", optional = true }
cbindgen = { version = "0.24.3", optional = true }
[dependencies]
concrete-csprng = { version = "0.4.0", path = "../concrete-csprng", features = [
concrete-csprng = { version = "0.3.0", features = [
"generator_fallback",
"parallel",
] }
@@ -57,18 +56,15 @@ lazy_static = { version = "1.4.0", optional = true }
serde = { version = "1.0", features = ["derive"] }
rayon = { version = "1.5.0" }
bincode = { version = "1.3.3", optional = true }
concrete-fft = { version = "0.4.0", features = ["serde", "fft128"] }
pulp = "0.18.8"
concrete-fft = { version = "0.2.1", features = ["serde", "fft128"] }
pulp = "0.11"
aligned-vec = { version = "0.5", features = ["serde"] }
dyn-stack = { version = "0.9" }
paste = { version = "1.0.7", optional = true }
once_cell = "1.13"
paste = "1.0.7"
fs2 = { version = "0.4.3", optional = true }
# While we wait for repeat_n in rust standard library
itertools = "0.11.0"
tfhe-c-api-dynamic-buffer = { version = "0.1.0", optional = true, features = [
"c_api",
] }
next_tfhe = { package = "tfhe", version = "0.5.0", optional = true }
itertools = "0.10.5"
# wasm deps
wasm-bindgen = { version = "0.2.86", features = [
@@ -77,103 +73,66 @@ wasm-bindgen = { version = "0.2.86", features = [
wasm-bindgen-rayon = { version = "1.0", optional = true }
js-sys = { version = "0.3", optional = true }
console_error_panic_hook = { version = "0.1.7", optional = true }
serde-wasm-bindgen = { version = "0.6.0", optional = true }
serde-wasm-bindgen = { version = "0.4", optional = true }
getrandom = { version = "0.2.8", optional = true }
bytemuck = "1.14.3"
bytemuck = "1.13.1"
[features]
forward_compatibility = [
"dep:bincode",
"dep:tfhe-c-api-dynamic-buffer",
"dep:next_tfhe",
"dep:paste",
]
# paste is used by the HL API
boolean = ["dep:paste", "next_tfhe?/boolean"]
shortint = ["dep:paste", "next_tfhe?/shortint"]
integer = ["shortint", "dep:paste", "next_tfhe?/integer"]
internal-keycache = [
"dep:lazy_static",
"dep:fs2",
"dep:bincode",
"dep:paste",
"next_tfhe?/internal-keycache",
]
safe-deserialization = ["dep:bincode"]
boolean = []
shortint = []
integer = ["shortint"]
internal-keycache = ["lazy_static", "fs2", "bincode"]
# Experimental section
experimental = ["next_tfhe?/experimental"]
experimental-force_fft_algo_dif4 = [
"next_tfhe?/experimental-force_fft_algo_dif4",
]
experimental = []
experimental-force_fft_algo_dif4 = []
# End experimental section
__c_api = [
"dep:cbindgen",
"dep:bincode",
"dep:paste",
"next_tfhe?/__force_skip_cbindgen",
]
__c_api = ["cbindgen", "bincode"]
boolean-c-api = ["boolean", "__c_api"]
shortint-c-api = ["shortint", "__c_api"]
high-level-c-api = ["boolean-c-api", "shortint-c-api", "integer", "__c_api"]
__wasm_api = [
"dep:wasm-bindgen",
"dep:js-sys",
"dep:console_error_panic_hook",
"dep:serde-wasm-bindgen",
"dep:getrandom",
"wasm-bindgen",
"js-sys",
"console_error_panic_hook",
"serde-wasm-bindgen",
"getrandom",
"getrandom/js",
"dep:bincode",
"safe-deserialization",
"bincode",
]
boolean-client-js-wasm-api = ["boolean", "__wasm_api"]
shortint-client-js-wasm-api = ["shortint", "__wasm_api"]
integer-client-js-wasm-api = ["integer", "__wasm_api"]
high-level-client-js-wasm-api = ["boolean", "shortint", "integer", "__wasm_api"]
parallel-wasm-api = ["dep:wasm-bindgen-rayon"]
parallel-wasm-api = ["wasm-bindgen-rayon"]
nightly-avx512 = [
"concrete-fft/nightly",
"pulp/nightly",
"next_tfhe?/nightly-avx512",
]
nightly-avx512 = ["concrete-fft/nightly", "pulp/nightly"]
# Enable the x86_64 specific accelerated implementation of the random generator for the default
# backend
generator_x86_64_aesni = [
"concrete-csprng/generator_x86_64_aesni",
"next_tfhe?/generator_x86_64_aesni",
]
generator_x86_64_aesni = ["concrete-csprng/generator_x86_64_aesni"]
# Enable the aarch64 specific accelerated implementation of the random generator for the default
# backend
generator_aarch64_aes = [
"concrete-csprng/generator_aarch64_aes",
"next_tfhe?/generator_aarch64_aes",
]
generator_aarch64_aes = ["concrete-csprng/generator_aarch64_aes"]
# Private features
__profiling = []
__coverage = []
seeder_unix = ["concrete-csprng/seeder_unix", "next_tfhe?/seeder_unix"]
seeder_x86_64_rdseed = [
"concrete-csprng/seeder_x86_64_rdseed",
"next_tfhe?/seeder_x86_64_rdseed",
]
seeder_unix = ["concrete-csprng/seeder_unix"]
seeder_x86_64_rdseed = ["concrete-csprng/seeder_x86_64_rdseed"]
# These target_arch features enable a set of public features for tfhe if users want a known
# good/working configuration for tfhe.
# For a target_arch that does not yet have such a feature, one can still enable features manually or
# create a feature for said target_arch to make its use simpler.
x86_64 = ["generator_x86_64_aesni", "seeder_x86_64_rdseed", "next_tfhe?/x86_64"]
x86_64-unix = ["x86_64", "seeder_unix", "next_tfhe?/x86_64-unix"]
x86_64 = ["generator_x86_64_aesni", "seeder_x86_64_rdseed"]
x86_64-unix = ["x86_64", "seeder_unix"]
aarch64 = ["generator_aarch64_aes", "next_tfhe?/aarch64"]
aarch64-unix = ["aarch64", "seeder_unix", "next_tfhe?/aarch64-unix"]
aarch64 = ["generator_aarch64_aes"]
aarch64-unix = ["aarch64", "seeder_unix"]
[package.metadata.docs.rs]
# TODO: manage builds for docs.rs based on their documentation https://docs.rs/about
@@ -185,23 +144,6 @@ rustdoc-args = ["--html-in-header", "katex-header.html"]
# Benches #
# #
###########
[[bench]]
name = "ccs-2024-fft-shrinking-ks"
path = "benches/core_crypto/ccs_2024_fft_shrinking_ks.rs"
harness = false
required-features = ["internal-keycache"]
[[bench]]
name = "ccs-2024-cjp"
path = "benches/core_crypto/ccs_2024_cjp.rs"
harness = false
required-features = ["internal-keycache"]
[[bench]]
name = "ccs-2024-stair-ks"
path = "benches/core_crypto/ccs_2024_stair_ks.rs"
harness = false
required-features = ["internal-keycache"]
[[bench]]
name = "pbs-bench"
@@ -238,12 +180,6 @@ path = "benches/integer/bench.rs"
harness = false
required-features = ["integer", "internal-keycache"]
[[bench]]
name = "integer-signed-bench"
path = "benches/integer/signed_bench.rs"
harness = false
required-features = ["integer", "internal-keycache"]
[[bench]]
name = "keygen"
path = "benches/keygen/bench.rs"

View File

@@ -43,7 +43,7 @@ pub fn write_to_json_boolean<T: Into<CryptoParametersRecord<u32>>>(
// Put all `bench_function` in one place
// so the keygen is only run once per parameters saving time.
fn benches(c: &mut Criterion, params: BooleanParameters, parameter_name: &str) {
fn benchs(c: &mut Criterion, params: BooleanParameters, parameter_name: &str) {
let mut bench_group = c.benchmark_group("gates_benches");
let cks = ClientKey::new(&params);
@@ -83,15 +83,15 @@ fn benches(c: &mut Criterion, params: BooleanParameters, parameter_name: &str) {
}
fn bench_default_parameters(c: &mut Criterion) {
benches(c, DEFAULT_PARAMETERS, "DEFAULT_PARAMETERS");
benchs(c, DEFAULT_PARAMETERS, "DEFAULT_PARAMETERS");
}
fn bench_default_parameters_ks_pbs(c: &mut Criterion) {
benches(c, DEFAULT_PARAMETERS_KS_PBS, "DEFAULT_PARAMETERS_KS_PBS");
benchs(c, DEFAULT_PARAMETERS_KS_PBS, "DEFAULT_PARAMETERS_KS_PBS");
}
fn bench_low_prob_parameters(c: &mut Criterion) {
benches(
benchs(
c,
PARAMETERS_ERROR_PROB_2_POW_MINUS_165,
"PARAMETERS_ERROR_PROB_2_POW_MINUS_165",
@@ -99,7 +99,7 @@ fn bench_low_prob_parameters(c: &mut Criterion) {
}
fn bench_low_prob_parameters_ks_pbs(c: &mut Criterion) {
benches(
benchs(
c,
PARAMETERS_ERROR_PROB_2_POW_MINUS_165_KS_PBS,
"PARAMETERS_ERROR_PROB_2_POW_MINUS_165_KS_PBS",
@@ -107,5 +107,5 @@ fn bench_low_prob_parameters_ks_pbs(c: &mut Criterion) {
}
fn bench_tfhe_lib_parameters(c: &mut Criterion) {
benches(c, TFHE_LIB_PARAMETERS, " TFHE_LIB_PARAMETERS");
benchs(c, TFHE_LIB_PARAMETERS, " TFHE_LIB_PARAMETERS");
}

View File

@@ -1,445 +0,0 @@
use criterion::{criterion_group, criterion_main, Criterion};
use tfhe::core_crypto::prelude::*;
use tfhe::keycache::NamedParam;
use tfhe::named_params_impl;
/// Builds a trivially encrypted GLWE "accumulator" encoding the lookup table
/// `f` over `message_modulus` input values, each result scaled by `delta`,
/// laid out with per-value redundancy boxes for use as a PBS test vector.
///
/// NOTE(review): assumes `message_modulus` divides `polynomial_size.0` —
/// confirm; the division below truncates otherwise.
fn generate_accumulator<F, Scalar: UnsignedTorus + CastFrom<usize>>(
    polynomial_size: PolynomialSize,
    glwe_size: GlweSize,
    message_modulus: usize,
    ciphertext_modulus: CiphertextModulus<Scalar>,
    delta: Scalar,
    f: F,
) -> GlweCiphertextOwned<Scalar>
where
    F: Fn(Scalar) -> Scalar,
{
    // N/(p/2) = size of each block, to correct noise from the input we introduce the
    // notion of box, which manages redundancy to yield a denoised value
    // for several noisy values around a true input value.
    let box_size = polynomial_size.0 / message_modulus;
    // Create the accumulator
    let mut accumulator_scalar = vec![Scalar::ZERO; polynomial_size.0];
    // Fill each box with the encoded denoised value
    for i in 0..message_modulus {
        let index = i * box_size;
        accumulator_scalar[index..index + box_size]
            .iter_mut()
            .for_each(|a| *a = f(Scalar::cast_from(i)) * delta);
    }
    let half_box_size = box_size / 2;
    // Negate the first half_box_size coefficients to manage negacyclicity and rotate
    for a_i in accumulator_scalar[0..half_box_size].iter_mut() {
        *a_i = (*a_i).wrapping_neg();
    }
    // Rotate the accumulator
    accumulator_scalar.rotate_left(half_box_size);
    let accumulator_plaintext = PlaintextList::from_container(accumulator_scalar);
    // The LUT contains no secret data, so a trivial encryption suffices.
    allocate_and_trivially_encrypt_new_glwe_ciphertext(
        glwe_size,
        &accumulator_plaintext,
        ciphertext_modulus,
    )
}
named_params_impl!(
CJPParam<u64> =>
PRECISION_1_CJP,
PRECISION_2_CJP,
PRECISION_3_CJP,
PRECISION_4_CJP,
PRECISION_5_CJP,
PRECISION_6_CJP,
PRECISION_7_CJP,
PRECISION_8_CJP,
PRECISION_9_CJP,
PRECISION_10_CJP,
PRECISION_11_CJP,
);
/// Parameter set for the CJP (keyswitch-then-PBS) benchmark; the values come
/// from the cost-model table in the comment below the struct.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct CJPParam<Scalar: UnsignedTorus> {
    // log2 of the plaintext precision being benchmarked
    pub log_precision: usize,
    // log2 of mu; kept for reference, not used by the bench (hence the underscore)
    pub _log_mu: usize,
    pub glwe_dimension: GlweDimension,
    pub polynomial_size: PolynomialSize,
    // noise of the bootstrap key (also used when encrypting the bench input)
    pub std_dev_bsk: StandardDev,
    pub lwe_dimension: LweDimension,
    // noise of the large-to-small keyswitch key
    pub ks1_lwe_modular_std_dev: StandardDev,
    // PBS decomposition parameters
    pub pbs_level: DecompositionLevelCount,
    pub pbs_base_log: DecompositionBaseLog,
    // keyswitch decomposition parameters
    pub ks1_level: DecompositionLevelCount,
    pub ks1_base_log: DecompositionBaseLog,
    pub ciphertext_modulus: CiphertextModulus<Scalar>,
}
// CJP
// p,log(nu), k, N, stddev, n, stddev, br_l,br_b, ks_l,ks_b, cost
// 1, 1, 5, 8, -31.07, 588, -12.66, 1, 15, 3, 3, 31445684
// 2, 2, 6, 8, -37.88, 668, -14.79, 1, 18, 3, 4, 42387300
// 3, 3, 4, 9, -51.49, 720, -16.17, 1, 21, 3, 4, 63333680
// 4, 4, 2, 10, -51.49, 788, -17.98, 1, 23, 3, 4, 78607596
// 5, 5, 1, 11, -51.49, 840, -19.36, 1, 23, 6, 3, 118525112
// 6, 6, 1, 12, -62.00, 840, -19.36, 2, 14, 5, 3, 347139256
// 7, 7, 1, 13, -62.00, 896, -20.85, 2, 15, 6, 3, 797064320
// 8, 8, 1, 14, -62.00, 968, -22.77, 3, 11, 6, 3, 2351201336
// 9, 9, 1, 15, -62.00, 1024, -24.26, 4, 9, 7, 3, 6510246912
// 10, 10, 1, 16, -62.00, 1096, -26.17, 6, 6, 12, 2, 20813446072
// 11, 11, 1, 17, -62.00, 1132, -27.13, 20, 2, 13, 2, 128439942036
pub const PRECISION_1_CJP: CJPParam<u64> = CJPParam {
log_precision: 1,
_log_mu: 1,
glwe_dimension: GlweDimension(5),
polynomial_size: PolynomialSize(256),
std_dev_bsk: StandardDev(4.43606636507407e-10),
lwe_dimension: LweDimension(588),
ks1_lwe_modular_std_dev: StandardDev(0.000154511302974888),
pbs_level: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(15),
ks1_level: DecompositionLevelCount(3),
ks1_base_log: DecompositionBaseLog(3),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 2, 2, 6, 8, -37.88, 668, -14.79, 1, 18, 3, 4, 42387300
pub const PRECISION_2_CJP: CJPParam<u64> = CJPParam {
log_precision: 2,
_log_mu: 2,
glwe_dimension: GlweDimension(6),
polynomial_size: PolynomialSize(256),
std_dev_bsk: StandardDev(3.95351839879752e-12),
lwe_dimension: LweDimension(668),
ks1_lwe_modular_std_dev: StandardDev(0.0000352993220185940),
pbs_level: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(18),
ks1_level: DecompositionLevelCount(3),
ks1_base_log: DecompositionBaseLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 3, 3, 4, 9, -51.49, 720, -16.17, 1, 21, 3, 4, 63333680
pub const PRECISION_3_CJP: CJPParam<u64> = CJPParam {
log_precision: 3,
_log_mu: 3,
glwe_dimension: GlweDimension(4),
polynomial_size: PolynomialSize(512),
std_dev_bsk: StandardDev(3.16202663074765e-16),
lwe_dimension: LweDimension(720),
ks1_lwe_modular_std_dev: StandardDev(0.0000135626629816676),
pbs_level: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(21),
ks1_level: DecompositionLevelCount(3),
ks1_base_log: DecompositionBaseLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 4, 4, 2, 10, -51.49, 788, -17.98, 1, 23, 3, 4, 78607596
pub const PRECISION_4_CJP: CJPParam<u64> = CJPParam {
log_precision: 4,
_log_mu: 4,
glwe_dimension: GlweDimension(2),
polynomial_size: PolynomialSize(1024),
std_dev_bsk: StandardDev(3.16202663074765e-16),
lwe_dimension: LweDimension(788),
ks1_lwe_modular_std_dev: StandardDev(3.86794845500957e-6),
pbs_level: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(23),
ks1_level: DecompositionLevelCount(3),
ks1_base_log: DecompositionBaseLog(4),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 5, 5, 1, 11, -51.49, 840, -19.36, 1, 23, 6, 3, 118525112
pub const PRECISION_5_CJP: CJPParam<u64> = CJPParam {
log_precision: 5,
_log_mu: 5,
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(2048),
std_dev_bsk: StandardDev(3.16202663074765e-16),
lwe_dimension: LweDimension(840),
ks1_lwe_modular_std_dev: StandardDev(1.48613849575138e-6),
pbs_level: DecompositionLevelCount(1),
pbs_base_log: DecompositionBaseLog(23),
ks1_level: DecompositionLevelCount(6),
ks1_base_log: DecompositionBaseLog(3),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 6, 6, 1, 12, -62.00, 840, -19.36, 2, 14, 5, 3, 347139256
pub const PRECISION_6_CJP: CJPParam<u64> = CJPParam {
log_precision: 6,
_log_mu: 6,
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(4096),
std_dev_bsk: StandardDev(2.16840434497101e-19),
lwe_dimension: LweDimension(840),
ks1_lwe_modular_std_dev: StandardDev(1.48613849575138e-6),
pbs_level: DecompositionLevelCount(2),
pbs_base_log: DecompositionBaseLog(14),
ks1_level: DecompositionLevelCount(5),
ks1_base_log: DecompositionBaseLog(3),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 7, 7, 1, 13, -62.00, 896, -20.85, 2, 15, 6, 3, 797064320
pub const PRECISION_7_CJP: CJPParam<u64> = CJPParam {
log_precision: 7,
_log_mu: 7,
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(8192),
std_dev_bsk: StandardDev(2.16840434497101e-19),
lwe_dimension: LweDimension(896),
ks1_lwe_modular_std_dev: StandardDev(5.29083953889772e-7),
pbs_level: DecompositionLevelCount(2),
pbs_base_log: DecompositionBaseLog(15),
ks1_level: DecompositionLevelCount(6),
ks1_base_log: DecompositionBaseLog(3),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 8, 8, 1, 14, -62.00, 968, -22.77, 3, 11, 6, 3, 2351201336
pub const PRECISION_8_CJP: CJPParam<u64> = CJPParam {
log_precision: 8,
_log_mu: 8,
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(16384),
std_dev_bsk: StandardDev(2.16840434497101e-19),
lwe_dimension: LweDimension(968),
ks1_lwe_modular_std_dev: StandardDev(1.39812821058259e-7),
pbs_level: DecompositionLevelCount(3),
pbs_base_log: DecompositionBaseLog(11),
ks1_level: DecompositionLevelCount(6),
ks1_base_log: DecompositionBaseLog(3),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 9, 9, 1, 15, -62.00, 1024, -24.26, 4, 9, 7, 3, 6510246912
pub const PRECISION_9_CJP: CJPParam<u64> = CJPParam {
log_precision: 9,
_log_mu: 9,
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(32768),
std_dev_bsk: StandardDev(2.16840434497101e-19),
lwe_dimension: LweDimension(1024),
ks1_lwe_modular_std_dev: StandardDev(4.97751187937479e-8),
pbs_level: DecompositionLevelCount(4),
pbs_base_log: DecompositionBaseLog(9),
ks1_level: DecompositionLevelCount(7),
ks1_base_log: DecompositionBaseLog(3),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 10, 10, 1, 16, -62.00, 1096, -26.17, 6, 6, 12, 2, 20813446072
pub const PRECISION_10_CJP: CJPParam<u64> = CJPParam {
log_precision: 10,
_log_mu: 10,
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(65536),
std_dev_bsk: StandardDev(2.16840434497101e-19),
lwe_dimension: LweDimension(1096),
ks1_lwe_modular_std_dev: StandardDev(1.32447880680348e-8),
pbs_level: DecompositionLevelCount(6),
pbs_base_log: DecompositionBaseLog(6),
ks1_level: DecompositionLevelCount(12),
ks1_base_log: DecompositionBaseLog(2),
ciphertext_modulus: CiphertextModulus::new_native(),
};
// 11, 11, 1, 17, -62.00, 1132, -27.13, 20, 2, 13, 2, 128439942036
pub const PRECISION_11_CJP: CJPParam<u64> = CJPParam {
log_precision: 11,
_log_mu: 11,
glwe_dimension: GlweDimension(1),
polynomial_size: PolynomialSize(131072),
std_dev_bsk: StandardDev(2.16840434497101e-19),
lwe_dimension: LweDimension(1132),
ks1_lwe_modular_std_dev: StandardDev(6.80857487193794e-9),
pbs_level: DecompositionLevelCount(20),
pbs_base_log: DecompositionBaseLog(2),
ks1_level: DecompositionLevelCount(13),
ks1_base_log: DecompositionBaseLog(2),
ciphertext_modulus: CiphertextModulus::new_native(),
};
/// For every `CJPParam` set: generates fresh key material, encrypts a fixed
/// message, then benchmarks the atomic pattern
/// keyswitch (large -> small LWE) followed by a programmable bootstrap.
fn criterion_bench(c: &mut Criterion) {
    let param_vec = [
        PRECISION_1_CJP,
        PRECISION_2_CJP,
        PRECISION_3_CJP,
        PRECISION_4_CJP,
        PRECISION_5_CJP,
        PRECISION_6_CJP,
        PRECISION_7_CJP,
        PRECISION_8_CJP,
        PRECISION_9_CJP,
        PRECISION_10_CJP,
        PRECISION_11_CJP,
    ];
    for params in param_vec {
        // Destructure the parameter set into locals for readability.
        let log_precision = params.log_precision;
        let _log_mu = params._log_mu;
        let glwe_dimension = params.glwe_dimension;
        let polynomial_size = params.polynomial_size;
        let std_dev_bsk = params.std_dev_bsk;
        let lwe_dimension = params.lwe_dimension;
        let ks1_lwe_modular_std_dev = params.ks1_lwe_modular_std_dev;
        let pbs_level = params.pbs_level;
        let pbs_base_mpg = params.pbs_base_log;
        let ks1_level = params.ks1_level;
        let ks1_base_log = params.ks1_base_log;
        let ciphertext_modulus = params.ciphertext_modulus;
        let precision = 1 << (log_precision);
        // let mu = 1<< _log_mu;
        // let carry = (mu*(precision -1) +1)/precision;
        // let log_carry = ((carry as f32).log2().ceil()) as usize;
        // let delta_log = 63 - (log_precision + log_carry);
        //println!("Delta log = {:?}", delta_log);
        let delta_log = 63 - log_precision;
        //TODO: to randomize
        let msg = 1;
        let pt = Plaintext(msg << delta_log);
        // Create the PRNG
        let mut seeder = new_seeder();
        let seeder = seeder.as_mut();
        let mut encryption_generator =
            EncryptionRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed(), seeder);
        let mut secret_generator =
            SecretRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed());
        // Secret keys: the small LWE key is the PBS input key, the GLWE key
        // (viewed as a large LWE key) is the PBS output / keyswitch input key.
        let small_lwe_secret_key =
            allocate_and_generate_new_binary_lwe_secret_key(lwe_dimension, &mut secret_generator);
        let large_glwe_secret_key = allocate_and_generate_new_binary_glwe_secret_key(
            glwe_dimension,
            polynomial_size,
            &mut secret_generator,
        );
        let large_lwe_secret_key = large_glwe_secret_key.clone().into_lwe_secret_key();
        //KSK Generation
        let ksk_large_to_small = allocate_and_generate_new_lwe_keyswitch_key(
            &large_lwe_secret_key,
            &small_lwe_secret_key,
            ks1_base_log,
            ks1_level,
            ks1_lwe_modular_std_dev,
            ciphertext_modulus,
            &mut encryption_generator,
        );
        //Encryption
        let mut large_lwe = LweCiphertext::new(
            0u64,
            large_lwe_secret_key.lwe_dimension().to_lwe_size(),
            ciphertext_modulus,
        );
        encrypt_lwe_ciphertext(
            &large_lwe_secret_key,
            &mut large_lwe,
            pt,
            std_dev_bsk,
            &mut encryption_generator,
        );
        //KS
        let mut small_lwe =
            LweCiphertext::new(0u64, lwe_dimension.to_lwe_size(), ciphertext_modulus);
        //PBS PART
        let mut bsk = LweBootstrapKey::new(
            0,
            glwe_dimension.to_glwe_size(),
            polynomial_size,
            pbs_base_mpg,
            pbs_level,
            lwe_dimension,
            ciphertext_modulus,
        );
        par_generate_lwe_bootstrap_key(
            &small_lwe_secret_key,
            &large_glwe_secret_key,
            &mut bsk,
            std_dev_bsk,
            &mut encryption_generator,
        );
        let mut fbsk = FourierLweBootstrapKey::new(
            small_lwe_secret_key.lwe_dimension(),
            glwe_dimension.to_glwe_size(),
            polynomial_size,
            pbs_base_mpg,
            pbs_level,
        );
        // Only the Fourier-domain BSK is needed at runtime; free the standard one.
        convert_standard_lwe_bootstrap_key_to_fourier(&bsk, &mut fbsk);
        drop(bsk);
        // LUT computing x & 1 over `precision` values.
        let accumulator = generate_accumulator(
            polynomial_size,
            glwe_dimension.to_glwe_size(),
            precision,
            ciphertext_modulus,
            1 << delta_log,
            |x| x & 1,
        );
        let mut out_pbs_ct = LweCiphertext::new(
            0u64,
            large_lwe_secret_key.lwe_dimension().to_lwe_size(),
            ciphertext_modulus,
        );
        let bench_id = format!("CJP::{}", params.name());
        // Measured section: one keyswitch followed by one PBS.
        c.bench_function(&bench_id, |b| {
            b.iter(|| {
                keyswitch_lwe_ciphertext(&ksk_large_to_small, &large_lwe, &mut small_lwe);
                programmable_bootstrap_lwe_ciphertext(
                    &small_lwe,
                    &mut out_pbs_ct,
                    &accumulator,
                    &fbsk,
                );
            })
        });
    }
}
criterion_group!(benches, criterion_bench);
criterion_main!(benches);
// #[test]
// fn test_CJP() {
// let param_vec = [
// PRECISION_1_CJP,
// PRECISION_2_CJP,
// PRECISION_3_CJP,
// PRECISION_4_CJP,
// PRECISION_5_CJP,
// // PRECISION_6_CJP,
// // PRECISION_7_CJP,
// // PRECISION_8_CJP,
// // PRECISION_9_CJP,
// // PRECISION_10_CJP,
// // PRECISION_11_CJP,
// ];
// for params in param_vec {
// to_gen_test_CJP(params);
// }
// }

View File

@@ -1,581 +0,0 @@
#![allow(clippy::excessive_precision)]
use criterion::{criterion_group, criterion_main, Criterion};
use tfhe::core_crypto::prelude::*;
use tfhe::keycache::NamedParam;
use tfhe::named_params_impl;
/// Builds a trivially encrypted GLWE "accumulator" encoding the lookup table
/// `f` over `message_modulus` input values, each result scaled by `delta`,
/// laid out with per-value redundancy boxes for use as a PBS test vector.
///
/// NOTE(review): assumes `message_modulus` divides `polynomial_size.0` —
/// confirm; the division below truncates otherwise.
fn generate_accumulator<F, Scalar: UnsignedTorus + CastFrom<usize>>(
    polynomial_size: PolynomialSize,
    glwe_size: GlweSize,
    message_modulus: usize,
    ciphertext_modulus: CiphertextModulus<Scalar>,
    delta: Scalar,
    f: F,
) -> GlweCiphertextOwned<Scalar>
where
    F: Fn(Scalar) -> Scalar,
{
    // N/(p/2) = size of each block, to correct noise from the input we introduce the
    // notion of box, which manages redundancy to yield a denoised value
    // for several noisy values around a true input value.
    let box_size = polynomial_size.0 / message_modulus;
    // Create the accumulator
    let mut accumulator_scalar = vec![Scalar::ZERO; polynomial_size.0];
    // Fill each box with the encoded denoised value
    for i in 0..message_modulus {
        let index = i * box_size;
        accumulator_scalar[index..index + box_size]
            .iter_mut()
            .for_each(|a| *a = f(Scalar::cast_from(i)) * delta);
    }
    let half_box_size = box_size / 2;
    // Negate the first half_box_size coefficients to manage negacyclicity and rotate
    for a_i in accumulator_scalar[0..half_box_size].iter_mut() {
        *a_i = (*a_i).wrapping_neg();
    }
    // Rotate the accumulator
    accumulator_scalar.rotate_left(half_box_size);
    let accumulator_plaintext = PlaintextList::from_container(accumulator_scalar);
    // The LUT contains no secret data, so a trivial encryption suffices.
    allocate_and_trivially_encrypt_new_glwe_ciphertext(
        glwe_size,
        &accumulator_plaintext,
        ciphertext_modulus,
    )
}
// Implements `NamedParam` for `FastKSParam<u64>` so each parameter set can
// report the name of its constant (used below to build the Criterion bench
// IDs via `params.name()`).
named_params_impl!(
    FastKSParam<u64> =>
    PRECISION_1_FAST_KS,
    PRECISION_2_FAST_KS,
    PRECISION_3_FAST_KS,
    PRECISION_4_FAST_KS,
    PRECISION_5_FAST_KS,
    PRECISION_6_FAST_KS,
    PRECISION_7_FAST_KS,
    PRECISION_8_FAST_KS,
    PRECISION_9_FAST_KS,
    PRECISION_10_FAST_KS,
    PRECISION_11_FAST_KS
);
/// Parameter set for the "fast keyswitch" (FAST_KS) benchmark, one per
/// message precision.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct FastKSParam<Scalar: UnsignedTorus> {
    /// log2 of the message precision p (the bench uses `1 << log_precision`).
    pub log_precision: usize,
    /// Unused by the benchmark (bound and discarded); kept for parity with
    /// the parameter-search output.
    pub _log_mu: usize,
    /// GLWE dimension of the bootstrapping key.
    pub glwe_dimension: GlweDimension,
    /// Polynomial size of the bootstrapping key / accumulator.
    pub polynomial_size: PolynomialSize,
    /// Number of random coefficients in the partial GLWE secret key
    /// (the remaining coefficients are zero).
    pub phi_bsk: usize,
    /// Noise for BSK generation (also used to encrypt the input LWE here).
    pub std_dev_bsk: StandardDev,
    /// Small LWE dimension (PBS input, shared prefix of the large key).
    pub lwe_dimension: LweDimension,
    /// Noise for the pseudo-GGSW keyswitching key.
    pub ks1_lwe_modular_std_dev: StandardDev,
    pub pbs_level: DecompositionLevelCount,
    pub pbs_base_log: DecompositionBaseLog,
    pub ks1_level: DecompositionLevelCount,
    pub ks1_base_log: DecompositionBaseLog,
    /// Polynomial size used for the GLWE-side keyswitch.
    pub ks1_polynomial_size: PolynomialSize,
    /// GLWE dimension on the input side of the fast keyswitch.
    pub ks_in_glwe_dimension: GlweDimension,
    /// Coefficient count used by the partial LWE-to-GLWE conversion
    /// (presumably phi_bsk - lwe_dimension — TODO confirm).
    pub phi_in: usize,
    /// GLWE dimension on the output side of the fast keyswitch.
    pub ks_out_glwe_dimension: GlweDimension,
    pub ciphertext_modulus: CiphertextModulus<Scalar>,
}
// FAST_KS parameter sets for message precisions of 1..=11 bits. The noise
// values are not round numbers — presumably the output of a parameter-search
// tool; verify against the generator rather than editing by hand.
pub const PRECISION_1_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 1,
    _log_mu: 1,
    glwe_dimension: GlweDimension(5),
    polynomial_size: PolynomialSize(256),
    phi_bsk: 1280,
    std_dev_bsk: StandardDev(4.436066365074074e-10),
    lwe_dimension: LweDimension(534),
    ks1_lwe_modular_std_dev: StandardDev(0.0004192214045106218),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(15),
    ks1_level: DecompositionLevelCount(9),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(256),
    ks_in_glwe_dimension: GlweDimension(3),
    phi_in: 746,
    ks_out_glwe_dimension: GlweDimension(3),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_2_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 2,
    _log_mu: 2,
    glwe_dimension: GlweDimension(6),
    polynomial_size: PolynomialSize(256),
    phi_bsk: 1536,
    std_dev_bsk: StandardDev(3.953518398797519e-12),
    lwe_dimension: LweDimension(590),
    ks1_lwe_modular_std_dev: StandardDev(0.0001492480807729575),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(18),
    ks1_level: DecompositionLevelCount(11),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(1024),
    ks_in_glwe_dimension: GlweDimension(1),
    phi_in: 946,
    ks_out_glwe_dimension: GlweDimension(1),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_3_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 3,
    _log_mu: 3,
    glwe_dimension: GlweDimension(3),
    polynomial_size: PolynomialSize(512),
    phi_bsk: 1536,
    std_dev_bsk: StandardDev(3.953518398797519e-12),
    lwe_dimension: LweDimension(686),
    ks1_lwe_modular_std_dev: StandardDev(2.5308824029981747e-05),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(18),
    ks1_level: DecompositionLevelCount(13),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(1024),
    ks_in_glwe_dimension: GlweDimension(1),
    phi_in: 850,
    ks_out_glwe_dimension: GlweDimension(1),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_4_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 4,
    _log_mu: 4,
    glwe_dimension: GlweDimension(2),
    polynomial_size: PolynomialSize(1024),
    phi_bsk: 2048,
    std_dev_bsk: StandardDev(3.162026630747649e-16),
    lwe_dimension: LweDimension(682),
    ks1_lwe_modular_std_dev: StandardDev(2.7313997525878062e-05),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(23),
    ks1_level: DecompositionLevelCount(14),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(512),
    ks_in_glwe_dimension: GlweDimension(3),
    phi_in: 1366,
    ks_out_glwe_dimension: GlweDimension(3),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_5_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 5,
    _log_mu: 5,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(2048),
    phi_bsk: 2048,
    std_dev_bsk: StandardDev(3.162026630747649e-16),
    lwe_dimension: LweDimension(766),
    ks1_lwe_modular_std_dev: StandardDev(5.822216831056818e-06),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(23),
    ks1_level: DecompositionLevelCount(15),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(512),
    ks_in_glwe_dimension: GlweDimension(3),
    phi_in: 1282,
    ks_out_glwe_dimension: GlweDimension(3),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_6_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 6,
    _log_mu: 6,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(4096),
    phi_bsk: 2443,
    std_dev_bsk: StandardDev(2.168404344971009e-19),
    lwe_dimension: LweDimension(774),
    ks1_lwe_modular_std_dev: StandardDev(4.998754134591537e-06),
    pbs_level: DecompositionLevelCount(2),
    pbs_base_log: DecompositionBaseLog(14),
    ks1_level: DecompositionLevelCount(15),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(2048),
    ks_in_glwe_dimension: GlweDimension(1),
    phi_in: 1669,
    ks_out_glwe_dimension: GlweDimension(1),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_7_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 7,
    _log_mu: 7,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(8192),
    phi_bsk: 2443,
    std_dev_bsk: StandardDev(2.168404344971009e-19),
    lwe_dimension: LweDimension(818),
    ks1_lwe_modular_std_dev: StandardDev(2.2215530137414073e-06),
    pbs_level: DecompositionLevelCount(2),
    pbs_base_log: DecompositionBaseLog(14),
    ks1_level: DecompositionLevelCount(16),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(2048),
    ks_in_glwe_dimension: GlweDimension(1),
    phi_in: 1625,
    ks_out_glwe_dimension: GlweDimension(1),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_8_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 8,
    _log_mu: 8,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(16384),
    phi_bsk: 2443,
    std_dev_bsk: StandardDev(2.168404344971009e-19),
    lwe_dimension: LweDimension(854),
    ks1_lwe_modular_std_dev: StandardDev(1.1499479557902908e-06),
    pbs_level: DecompositionLevelCount(3),
    pbs_base_log: DecompositionBaseLog(11),
    ks1_level: DecompositionLevelCount(18),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(2048),
    ks_in_glwe_dimension: GlweDimension(1),
    phi_in: 1589,
    ks_out_glwe_dimension: GlweDimension(1),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_9_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 9,
    _log_mu: 9,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(32768),
    phi_bsk: 2443,
    std_dev_bsk: StandardDev(2.168404344971009e-19),
    lwe_dimension: LweDimension(902),
    ks1_lwe_modular_std_dev: StandardDev(4.7354340335704556e-07),
    pbs_level: DecompositionLevelCount(4),
    pbs_base_log: DecompositionBaseLog(8),
    ks1_level: DecompositionLevelCount(18),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(2048),
    ks_in_glwe_dimension: GlweDimension(1),
    phi_in: 1541,
    ks_out_glwe_dimension: GlweDimension(1),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_10_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 10,
    _log_mu: 10,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(65536),
    phi_bsk: 2443,
    std_dev_bsk: StandardDev(2.168404344971009e-19),
    lwe_dimension: LweDimension(938),
    ks1_lwe_modular_std_dev: StandardDev(2.434282602565751e-07),
    pbs_level: DecompositionLevelCount(6),
    pbs_base_log: DecompositionBaseLog(6),
    ks1_level: DecompositionLevelCount(20),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(512),
    ks_in_glwe_dimension: GlweDimension(3),
    phi_in: 1505,
    ks_out_glwe_dimension: GlweDimension(3),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
pub const PRECISION_11_FAST_KS: FastKSParam<u64> = FastKSParam {
    log_precision: 11,
    _log_mu: 11,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(131072),
    phi_bsk: 2443,
    std_dev_bsk: StandardDev(2.168404344971009e-19),
    lwe_dimension: LweDimension(1018),
    ks1_lwe_modular_std_dev: StandardDev(5.56131000242714e-08),
    pbs_level: DecompositionLevelCount(13),
    pbs_base_log: DecompositionBaseLog(3),
    ks1_level: DecompositionLevelCount(22),
    ks1_base_log: DecompositionBaseLog(1),
    ks1_polynomial_size: PolynomialSize(512),
    ks_in_glwe_dimension: GlweDimension(3),
    phi_in: 1425,
    ks_out_glwe_dimension: GlweDimension(3),
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// Benchmarks the "fast keyswitch" pipeline for each parameter set:
// partial LWE-to-GLWE conversion, external-product keyswitch with a pseudo
// GGSW, partial sample extraction, recombination with the shared part of the
// input ciphertext, then a programmable bootstrap.
fn criterion_bench(c: &mut Criterion) {
    let param_vec = [
        PRECISION_1_FAST_KS,
        PRECISION_2_FAST_KS,
        PRECISION_3_FAST_KS,
        PRECISION_4_FAST_KS,
        PRECISION_5_FAST_KS,
        PRECISION_6_FAST_KS,
        PRECISION_7_FAST_KS,
        PRECISION_8_FAST_KS,
        PRECISION_9_FAST_KS,
        PRECISION_10_FAST_KS,
        PRECISION_11_FAST_KS,
    ];
    for params in param_vec {
        // Unpack the parameter set into locals for readability below.
        let log_precision = params.log_precision;
        let _log_mu = params._log_mu;
        let glwe_dimension = params.glwe_dimension;
        let polynomial_size = params.polynomial_size;
        let phi_bsk = params.phi_bsk;
        let std_dev_bsk = params.std_dev_bsk;
        let lwe_dimension = params.lwe_dimension;
        let ks1_lwe_modular_std_dev = params.ks1_lwe_modular_std_dev;
        let pbs_level = params.pbs_level;
        let pbs_base_mpg = params.pbs_base_log;
        let ks1_level = params.ks1_level;
        let ks1_base_log = params.ks1_base_log;
        let ks1_polynomial_size = params.ks1_polynomial_size;
        let ks_in_glwe_dimension = params.ks_in_glwe_dimension;
        let phi_in = params.phi_in;
        let ks_out_glwe_dimension = params.ks_out_glwe_dimension;
        let ciphertext_modulus = params.ciphertext_modulus;
        let precision = 1 << (log_precision);
        // Message encoded in the top bits of the 64-bit torus.
        let delta_log = 63 - log_precision;
        //TODO: to randomize
        let msg = 1;
        let pt = Plaintext(msg << delta_log);
        // Create the PRNG
        let mut seeder = new_seeder();
        let seeder = seeder.as_mut();
        let mut encryption_generator =
            EncryptionRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed(), seeder);
        let mut secret_generator =
            SecretRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed());
        //Shared and partial key generations
        // Only the first `phi_bsk` coefficients of the GLWE secret key are
        // random; the remaining ones are zero (partial key).
        let glwe_secret_key = allocate_and_generate_new_binary_partial_glwe_secret_key(
            glwe_dimension,
            polynomial_size,
            PartialGlweSecretKeyRandomCoefCount(phi_bsk),
            &mut secret_generator,
        );
        let large_lwe_secret_key = glwe_secret_key.clone().into_lwe_secret_key();
        // The coefficients in [lwe_dimension, phi_bsk): the part of the large
        // key NOT shared with the small key, repacked as a GLWE key.
        let mut large_glwe_secret_key_without_shared =
            GlweSecretKey::new_empty_key(0u64, ks_in_glwe_dimension, ks1_polynomial_size);
        large_glwe_secret_key_without_shared.as_mut()[0..phi_bsk - lwe_dimension.0]
            .copy_from_slice(&glwe_secret_key.as_ref()[lwe_dimension.0..phi_bsk]);
        let small_glwe_secret_key =
            allocate_and_generate_new_shared_glwe_secret_key_from_glwe_secret_key(
                // large_glwe_secret_key_without_shared.clone(),
                &glwe_secret_key,
                ks_out_glwe_dimension,
                lwe_dimension.0,
                ks1_polynomial_size,
            );
        let mut ggsw = PseudoGgswCiphertext::new(
            0u64,
            ks_in_glwe_dimension.to_glwe_size(),
            ks_out_glwe_dimension.to_glwe_size(),
            ks1_polynomial_size,
            ks1_base_log,
            ks1_level,
            ciphertext_modulus,
        );
        //Encryption
        let mut large_lwe = LweCiphertext::new(
            0u64,
            large_lwe_secret_key.lwe_dimension().to_lwe_size(),
            ciphertext_modulus,
        );
        encrypt_lwe_ciphertext(
            &large_lwe_secret_key,
            &mut large_lwe,
            pt,
            std_dev_bsk,
            &mut encryption_generator,
        );
        // Split the ciphertext: the first `lwe_dimension` mask coefficients
        // (plus the body) correspond to the shared small key...
        let mut split_large_lwe =
            LweCiphertext::new(0u64, lwe_dimension.to_lwe_size(), ciphertext_modulus);
        split_large_lwe.as_mut()[0..lwe_dimension.0]
            .iter_mut()
            .zip(large_lwe.as_ref()[0..lwe_dimension.0].iter())
            .for_each(|(dst, &src)| *dst = src);
        let body = split_large_lwe.get_mut_body().data;
        *body = *large_lwe.get_body().data;
        // ...and the coefficients in [lwe_dimension, phi_bsk) are the part
        // that must go through the fast keyswitch.
        let mut split_large_lwe_to_ks = LweCiphertext::new(
            0u64,
            LweSize(phi_bsk - lwe_dimension.0 + 1),
            ciphertext_modulus,
        );
        split_large_lwe_to_ks.as_mut()[0..phi_bsk - lwe_dimension.0]
            .iter_mut()
            .zip(large_lwe.as_ref()[lwe_dimension.0..phi_bsk].iter())
            .for_each(|(dst, &src)| *dst = src);
        let mut nks_kin_glwe = GlweCiphertext::new(
            0u64,
            ks_in_glwe_dimension.to_glwe_size(),
            ks1_polynomial_size,
            ciphertext_modulus,
        );
        encrypt_pseudo_ggsw_ciphertext(
            &small_glwe_secret_key,
            &large_glwe_secret_key_without_shared,
            &mut ggsw,
            ks1_lwe_modular_std_dev,
            &mut encryption_generator,
        );
        //Memory stuff
        let fft = Fft::new(ks1_polynomial_size);
        let fft = fft.as_view();
        let mut buffers = ComputationBuffers::new();
        let buffer_size_req =
            add_external_product_fast_keyswitch_assign_mem_optimized_requirement::<u64>(
                small_glwe_secret_key.glwe_dimension(),
                ks1_polynomial_size,
                fft,
            )
            .unwrap()
            .unaligned_bytes_required();
        let buffer_size_req = buffer_size_req.max(
            convert_standard_ggsw_ciphertext_to_fourier_mem_optimized_requirement(fft)
                .unwrap()
                .unaligned_bytes_required(),
        );
        // NOTE(review): the 10x head-room over the computed requirement looks
        // arbitrary — confirm why the computed size alone is not enough.
        buffers.resize(10 * buffer_size_req);
        //To Fourier
        let mut fourier_ggsw = PseudoFourierGgswCiphertext::new(
            ks_in_glwe_dimension.to_glwe_size(),
            ks_out_glwe_dimension.to_glwe_size(),
            ks1_polynomial_size,
            ks1_base_log,
            ks1_level,
        );
        convert_standard_pseudo_ggsw_ciphertext_to_fourier_mem_optimized(
            &ggsw,
            &mut fourier_ggsw,
            fft,
            buffers.stack(),
        );
        let mut glwe_after_ks = GlweCiphertext::new(
            0u64,
            ks_out_glwe_dimension.to_glwe_size(),
            ks1_polynomial_size,
            ciphertext_modulus,
        );
        // Small LWE key = first `lwe_dimension` coefficients of the large key.
        let mut small_lwe_secret_key = LweSecretKey::new_empty_key(0u64, lwe_dimension);
        small_lwe_secret_key.as_mut()[0..lwe_dimension.0].copy_from_slice(
            &glwe_secret_key.clone().into_lwe_secret_key().as_ref()[0..lwe_dimension.0],
        );
        let mut bsk = LweBootstrapKey::new(
            0,
            glwe_dimension.to_glwe_size(),
            polynomial_size,
            pbs_base_mpg,
            pbs_level,
            lwe_dimension,
            ciphertext_modulus,
        );
        par_generate_lwe_bootstrap_key(
            &small_lwe_secret_key,
            &glwe_secret_key,
            &mut bsk,
            std_dev_bsk,
            &mut encryption_generator,
        );
        let mut fbsk = FourierLweBootstrapKey::new(
            small_lwe_secret_key.lwe_dimension(),
            glwe_dimension.to_glwe_size(),
            polynomial_size,
            pbs_base_mpg,
            pbs_level,
        );
        convert_standard_lwe_bootstrap_key_to_fourier(&bsk, &mut fbsk);
        // Standard-domain key no longer needed once converted to Fourier.
        drop(bsk);
        // Identity LUT over the full precision.
        let accumulator = generate_accumulator(
            polynomial_size,
            glwe_dimension.to_glwe_size(),
            precision,
            ciphertext_modulus,
            1 << delta_log,
            |x| x,
        );
        let mut out_pbs_ct = LweCiphertext::new(
            0u64,
            large_lwe_secret_key.lwe_dimension().to_lwe_size(),
            ciphertext_modulus,
        );
        let bench_id = format!("FAST_KS::{}", params.name());
        c.bench_function(&bench_id, |b| {
            b.iter(|| {
                // 1) Lift the to-be-keyswitched LWE part into a constant GLWE.
                partial_convert_lwe_ciphertext_into_constant_glwe_ciphertext(
                    &split_large_lwe_to_ks,
                    &mut nks_kin_glwe,
                    phi_in,
                );
                let large_glwe_without_shared = nks_kin_glwe.clone();
                // 2) Fast keyswitch via external product with the pseudo GGSW.
                // NOTE(review): `glwe_after_ks` is never reset between
                // iterations and the `add_*_assign` routine appears to
                // accumulate into it — confirm this is acceptable for a
                // timing-only benchmark.
                add_external_product_fast_keyswitch_assign_mem_optimized(
                    &mut glwe_after_ks,
                    &fourier_ggsw,
                    &large_glwe_without_shared,
                    fft,
                    buffers.stack(),
                );
                //Encryption
                let mut lwe_after_partial_sample_extract =
                    LweCiphertext::new(0u64, lwe_dimension.to_lwe_size(), ciphertext_modulus);
                // 3) Extract the constant coefficient back to a small LWE.
                partial_extract_lwe_sample_from_glwe_ciphertext(
                    &glwe_after_ks,
                    &mut lwe_after_partial_sample_extract,
                    MonomialDegree(0),
                    lwe_dimension.0,
                );
                //Sum with initial ct
                lwe_after_partial_sample_extract.as_mut()[0..lwe_dimension.0]
                    .iter_mut()
                    .zip(split_large_lwe.as_ref()[0..lwe_dimension.0].iter())
                    .for_each(|(dst, &src)| *dst = dst.wrapping_add(src));
                let body = lwe_after_partial_sample_extract.get_mut_body().data;
                *body = body.wrapping_add(*split_large_lwe.get_body().data);
                // 4) PBS back up to the large key.
                programmable_bootstrap_lwe_ciphertext(
                    &lwe_after_partial_sample_extract,
                    &mut out_pbs_ct,
                    &accumulator,
                    &fbsk,
                );
            })
        });
    }
}
criterion_group!(benches, criterion_bench);
criterion_main!(benches);

View File

@@ -1,575 +0,0 @@
#![allow(clippy::excessive_precision)]
use criterion::{criterion_group, criterion_main, Criterion};
use tfhe::core_crypto::prelude::*;
use tfhe::keycache::NamedParam;
use tfhe::named_params_impl;
/// Builds a trivially-encrypted GLWE accumulator (lookup table) for a PBS.
///
/// Each of the `message_modulus` possible inputs owns a contiguous "box" of
/// `polynomial_size / message_modulus` coefficients, all holding the encoded
/// value `f(i) * delta`; this redundancy lets noisy inputs around a nominal
/// value decode to the same output. The first half-box is negated and the
/// table rotated left by a half-box to account for negacyclicity.
fn generate_accumulator<F, Scalar: UnsignedTorus + CastFrom<usize>>(
    polynomial_size: PolynomialSize,
    glwe_size: GlweSize,
    message_modulus: usize,
    ciphertext_modulus: CiphertextModulus<Scalar>,
    delta: Scalar,
    f: F,
) -> GlweCiphertextOwned<Scalar>
where
    F: Fn(Scalar) -> Scalar,
{
    let box_len = polynomial_size.0 / message_modulus;
    let half_box = box_len / 2;

    // Fill box `msg` with the encoded value `f(msg) * delta`.
    let mut coeffs = vec![Scalar::ZERO; polynomial_size.0];
    for (msg, chunk) in coeffs
        .chunks_mut(box_len)
        .take(message_modulus)
        .enumerate()
    {
        let input = Scalar::cast_from(msg);
        for slot in chunk.iter_mut() {
            *slot = f(input) * delta;
        }
    }

    // Negacyclic correction: negate the first half-box, then rotate so that
    // box 0 is centred on the zero input.
    for coeff in coeffs[..half_box].iter_mut() {
        *coeff = coeff.wrapping_neg();
    }
    coeffs.rotate_left(half_box);

    allocate_and_trivially_encrypt_new_glwe_ciphertext(
        glwe_size,
        &PlaintextList::from_container(coeffs),
        ciphertext_modulus,
    )
}
// Implements `NamedParam` for `StairKSParam<u64>` so each parameter set can
// report the name of its constant (used to build the Criterion bench IDs).
named_params_impl!(
    StairKSParam<u64> =>
    PRECISION_1_STAIR,
    PRECISION_2_STAIR,
    PRECISION_3_STAIR,
    PRECISION_4_STAIR,
    PRECISION_5_STAIR,
    PRECISION_6_STAIR,
    PRECISION_7_STAIR,
    PRECISION_8_STAIR,
    PRECISION_9_STAIR,
    PRECISION_10_STAIR,
    PRECISION_11_STAIR,
);
/// Parameter set for the "staircase keyswitch" benchmark: the large LWE is
/// shrunk to an intermediate key, then to the small key, in two steps.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct StairKSParam<Scalar: UnsignedTorus> {
    /// log2 of the message precision p (the bench uses `1 << log_precision`).
    pub log_precision: usize,
    /// Unused by the benchmark (bound and discarded); kept for parity with
    /// the parameter-search output.
    pub _log_mu: usize,
    /// GLWE dimension of the bootstrapping key.
    pub glwe_dimension: GlweDimension,
    /// Polynomial size of the bootstrapping key / accumulator.
    pub polynomial_size: PolynomialSize,
    /// Number of random coefficients in the partial GLWE secret key
    /// (lwe_dimension + size1 + size2).
    pub phi: usize,
    /// Noise for BSK generation (also used to encrypt the input LWE here).
    pub std_dev_bsk: StandardDev,
    /// Small LWE dimension (PBS input).
    pub lwe_dimension: LweDimension,
    /// Noise of the first (large -> intermediate) shrinking keyswitch key.
    pub ks1_lwe_modular_std_dev: StandardDev,
    /// Noise of the second (intermediate -> small) shrinking keyswitch key.
    pub ks2_lwe_modular_std_dev: StandardDev,
    pub pbs_level: DecompositionLevelCount,
    pub pbs_base_log: DecompositionBaseLog,
    pub ks1_level: DecompositionLevelCount,
    pub ks1_base_log: DecompositionBaseLog,
    pub ks2_level: DecompositionLevelCount,
    pub ks2_base_log: DecompositionBaseLog,
    /// Dimension removed by the first shrinking step.
    pub size1: usize,
    /// Dimension removed by the second shrinking step.
    pub size2: usize,
    pub ciphertext_modulus: CiphertextModulus<Scalar>,
}
// STAIR parameter sets for message precisions of 1..=11 bits. Each const is
// preceded by the row of the parameter-search output it was transcribed from;
// the table columns are:
// p, log(nu), k, N, phi, stddev(bsk), n, stddev(ks1), stddev(ks2),
// br_l, br_b, ksl1, ksb1, ksl2, ksb2, size1, size2, cost
// (stddev columns are log2 values). Verify against the generator rather than
// editing by hand.
//
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 1, 1, 5, 8, 1280, -31.07, 532, -17.82, -11.17, 1, 15, 1, 9, 4,
// 2, 498, 250, 26700580
pub const PRECISION_1_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 1,
    _log_mu: 1,
    glwe_dimension: GlweDimension(5),
    polynomial_size: PolynomialSize(256),
    phi: 1280,
    std_dev_bsk: StandardDev(4.43606636507407e-10),
    lwe_dimension: LweDimension(532),
    ks1_lwe_modular_std_dev: StandardDev(4.32160905950851e-6),
    ks2_lwe_modular_std_dev: StandardDev(0.000434005215413364),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(15),
    ks1_level: DecompositionLevelCount(1),
    ks1_base_log: DecompositionBaseLog(9),
    ks2_level: DecompositionLevelCount(4),
    ks2_base_log: DecompositionBaseLog(2),
    size1: 498,
    size2: 250,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 2, 2, 6, 8, 1536, -37.88, 576, -20.85, -12.34, 1, 18, 1, 10, 5,
// 2, 640, 320, 35091584
pub const PRECISION_2_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 2,
    _log_mu: 2,
    glwe_dimension: GlweDimension(6),
    polynomial_size: PolynomialSize(256),
    phi: 1536,
    std_dev_bsk: StandardDev(3.953518398797519e-12),
    lwe_dimension: LweDimension(576),
    ks1_lwe_modular_std_dev: StandardDev(5.290839538897724e-07),
    ks2_lwe_modular_std_dev: StandardDev(0.00019288117965414483),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(18),
    ks1_level: DecompositionLevelCount(1),
    ks1_base_log: DecompositionBaseLog(10),
    ks2_level: DecompositionLevelCount(5),
    ks2_base_log: DecompositionBaseLog(2),
    size1: 640,
    size2: 320,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 3, 3, 3, 9, 1536, -37.88, 648, -22.13, -14.25, 1, 18, 2, 7, 6,
// 2, 592, 296, 42686328
pub const PRECISION_3_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 3,
    _log_mu: 3,
    glwe_dimension: GlweDimension(3),
    polynomial_size: PolynomialSize(512),
    phi: 1536,
    std_dev_bsk: StandardDev(3.95351839879752e-12),
    lwe_dimension: LweDimension(648),
    ks1_lwe_modular_std_dev: StandardDev(2.17874395902014e-7),
    ks2_lwe_modular_std_dev: StandardDev(5.132424409507535e-05),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(18),
    ks1_level: DecompositionLevelCount(2),
    ks1_base_log: DecompositionBaseLog(7),
    ks2_level: DecompositionLevelCount(6),
    ks2_base_log: DecompositionBaseLog(2),
    size1: 592,
    size2: 296,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 4, 4, 2, 10, 2048, -51.49, 664, -26.97, -14.68, 1, 22, 1, 13, 6,
// 2, 922, 462, 65150660
pub const PRECISION_4_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 4,
    _log_mu: 4,
    glwe_dimension: GlweDimension(2),
    polynomial_size: PolynomialSize(1024),
    phi: 2048,
    std_dev_bsk: StandardDev(3.16202663074765e-16),
    lwe_dimension: LweDimension(664),
    ks1_lwe_modular_std_dev: StandardDev(7.60713313301797e-9),
    ks2_lwe_modular_std_dev: StandardDev(0.0000380960250519291),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(22),
    ks1_level: DecompositionLevelCount(1),
    ks1_base_log: DecompositionBaseLog(13),
    ks2_level: DecompositionLevelCount(6),
    ks2_base_log: DecompositionBaseLog(2),
    size1: 922,
    size2: 462,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 5, 5, 1, 11, 2048, -51.49, 732, -28.17, -16.49, 1, 23, 2, 9, 7,
// 2, 877, 439, 94962998
pub const PRECISION_5_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 5,
    _log_mu: 5,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(2048),
    phi: 2048,
    std_dev_bsk: StandardDev(3.16202663074765e-16),
    lwe_dimension: LweDimension(732),
    ks1_lwe_modular_std_dev: StandardDev(3.31119701700870e-9),
    ks2_lwe_modular_std_dev: StandardDev(0.0000108646407745138),
    pbs_level: DecompositionLevelCount(1),
    pbs_base_log: DecompositionBaseLog(23),
    ks1_level: DecompositionLevelCount(2),
    ks1_base_log: DecompositionBaseLog(9),
    ks2_level: DecompositionLevelCount(7),
    ks2_base_log: DecompositionBaseLog(2),
    size1: 877,
    size2: 439,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 6, 6, 1, 12, 2443, -62.00, 748, -31.94, -16.91, 2, 14, 1, 16, 8,
// 2, 1130, 565, 283238205
pub const PRECISION_6_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 6,
    _log_mu: 6,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(4096),
    phi: 2443,
    std_dev_bsk: StandardDev(2.16840434497101e-19),
    lwe_dimension: LweDimension(748),
    ks1_lwe_modular_std_dev: StandardDev(2.42717974083759e-10),
    ks2_lwe_modular_std_dev: StandardDev(8.12050004923523e-6),
    pbs_level: DecompositionLevelCount(2),
    pbs_base_log: DecompositionBaseLog(14),
    ks1_level: DecompositionLevelCount(1),
    ks1_base_log: DecompositionBaseLog(16),
    ks2_level: DecompositionLevelCount(8),
    ks2_base_log: DecompositionBaseLog(2),
    size1: 1130,
    size2: 565,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 7, 7, 1, 13, 2443, -62.00, 776, -32.45, -17.66, 2, 15, 2, 10, 16,
// 1, 1111, 556, 614607038
pub const PRECISION_7_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 7,
    _log_mu: 7,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(8192),
    phi: 2443,
    std_dev_bsk: StandardDev(2.16840434497101e-19),
    lwe_dimension: LweDimension(776),
    ks1_lwe_modular_std_dev: StandardDev(1.70442007475721e-10),
    ks2_lwe_modular_std_dev: StandardDev(4.82847821796524e-6),
    pbs_level: DecompositionLevelCount(2),
    pbs_base_log: DecompositionBaseLog(15),
    ks1_level: DecompositionLevelCount(2),
    ks1_base_log: DecompositionBaseLog(10),
    ks2_level: DecompositionLevelCount(16),
    ks2_base_log: DecompositionBaseLog(1),
    size1: 1111,
    size2: 556,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 8, 8, 1, 14, 2443, -62.00, 816, -33.17, -18.72, 3, 11, 2, 9, 17,
// 1, 1084, 543, 1795749574
pub const PRECISION_8_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 8,
    _log_mu: 8,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(16384),
    phi: 2443,
    std_dev_bsk: StandardDev(2.16840434497101e-19),
    lwe_dimension: LweDimension(816),
    ks1_lwe_modular_std_dev: StandardDev(1.03474906781522e-10),
    ks2_lwe_modular_std_dev: StandardDev(2.31589295271883e-6),
    pbs_level: DecompositionLevelCount(3),
    pbs_base_log: DecompositionBaseLog(11),
    ks1_level: DecompositionLevelCount(2),
    ks1_base_log: DecompositionBaseLog(9),
    ks2_level: DecompositionLevelCount(17),
    ks2_base_log: DecompositionBaseLog(1),
    size1: 1084,
    size2: 543,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 9, 9, 1, 15, 2443, -62.00, 860, -33.94, -19.89, 4, 8, 2, 10, 18,
// 1, 1055, 528, 4992007842
pub const PRECISION_9_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 9,
    _log_mu: 9,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(32768),
    phi: 2443,
    std_dev_bsk: StandardDev(2.16840434497101e-19),
    lwe_dimension: LweDimension(860),
    ks1_lwe_modular_std_dev: StandardDev(6.06794935209399e-11),
    ks2_lwe_modular_std_dev: StandardDev(1.02923225069468e-6),
    pbs_level: DecompositionLevelCount(4),
    pbs_base_log: DecompositionBaseLog(8),
    ks1_level: DecompositionLevelCount(2),
    ks1_base_log: DecompositionBaseLog(10),
    ks2_level: DecompositionLevelCount(18),
    ks2_base_log: DecompositionBaseLog(1),
    size1: 1055,
    size2: 528,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 10, 10, 1, 16, 2443, -62.00, 904, -34.71, -21.06, 6, 6, 2, 11, 19,
// 1, 1026, 513, 15555548076
pub const PRECISION_10_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 10,
    _log_mu: 10,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(65536),
    phi: 2443,
    std_dev_bsk: StandardDev(2.16840434497101e-19),
    lwe_dimension: LweDimension(904),
    ks1_lwe_modular_std_dev: StandardDev(3.55835153515238e-11),
    // NOTE(review): 4.78e-14 (~2^-44) does not match the -21.06 (~4.6e-7)
    // listed in the table row above — possible transcription error, confirm
    // against the parameter-search output.
    ks2_lwe_modular_std_dev: StandardDev(4.77994271508188e-14),
    pbs_level: DecompositionLevelCount(6),
    pbs_base_log: DecompositionBaseLog(6),
    ks1_level: DecompositionLevelCount(2),
    ks1_base_log: DecompositionBaseLog(11),
    ks2_level: DecompositionLevelCount(19),
    ks2_base_log: DecompositionBaseLog(1),
    size1: 1026,
    size2: 513,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
// p,log(nu), k, N, phi, stddev, n, stddev, stddev, br_l,br_b, ksl1,ksb1, ksl2,ksb2, size1,
// size2, cost 11, 11, 1, 17, 2443, -62.00, 984, -36.15, -23.19, 12, 3, 2, 11, 21,
// 1, 972, 487, 66586908138
pub const PRECISION_11_STAIR: StairKSParam<u64> = StairKSParam {
    log_precision: 11,
    _log_mu: 11,
    glwe_dimension: GlweDimension(1),
    polynomial_size: PolynomialSize(131072),
    phi: 2443,
    std_dev_bsk: StandardDev(2.16840434497101e-19),
    lwe_dimension: LweDimension(984),
    ks1_lwe_modular_std_dev: StandardDev(1.31149203314392e-11),
    ks2_lwe_modular_std_dev: StandardDev(1.04499545254235e-7),
    pbs_level: DecompositionLevelCount(12),
    pbs_base_log: DecompositionBaseLog(3),
    ks1_level: DecompositionLevelCount(2),
    ks1_base_log: DecompositionBaseLog(11),
    ks2_level: DecompositionLevelCount(21),
    ks2_base_log: DecompositionBaseLog(1),
    size1: 972,
    size2: 487,
    ciphertext_modulus: CiphertextModulus::new_native(),
};
fn criterion_bench(c: &mut Criterion) {
let param_vec = [
PRECISION_1_STAIR,
PRECISION_2_STAIR,
PRECISION_3_STAIR,
PRECISION_4_STAIR,
PRECISION_5_STAIR,
PRECISION_6_STAIR,
PRECISION_7_STAIR,
PRECISION_8_STAIR,
PRECISION_9_STAIR,
PRECISION_10_STAIR,
PRECISION_11_STAIR,
];
for params in param_vec {
let log_precision = params.log_precision;
let _log_mu = params._log_mu;
let glwe_dimension = params.glwe_dimension;
let polynomial_size = params.polynomial_size;
let std_dev_bsk = params.std_dev_bsk;
let lwe_dimension = params.lwe_dimension;
let ks1_lwe_modular_std_dev = params.ks1_lwe_modular_std_dev;
let ks2_lwe_modular_std_dev = params.ks2_lwe_modular_std_dev;
let pbs_level = params.pbs_level;
let pbs_base_mpg = params.pbs_base_log;
let ks1_level = params.ks1_level;
let ks1_base_log = params.ks1_base_log;
let ks2_level = params.ks2_level;
let ks2_base_log = params.ks2_base_log;
let size1 = params.size1;
let size2 = params.size2;
let ciphertext_modulus = params.ciphertext_modulus;
let precision = 1 << (log_precision);
// let mu = 1<< _log_mu;
// let carry = (mu*(precision -1) +1)/precision;
// let log_carry = ((carry as f32).log2().ceil()) as usize;
// let delta_log = 63 - (log_precision + log_carry);
let delta_log = 63 - log_precision;
//TODO: to randomize
let msg = 1;
let pt = Plaintext(msg << delta_log);
// Create the PRNG
let mut seeder = new_seeder();
let seeder = seeder.as_mut();
let mut encryption_generator =
EncryptionRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed(), seeder);
let mut secret_generator =
SecretRandomGenerator::<ActivatedRandomGenerator>::new(seeder.seed());
//Shared and partial key generations
let large_lwe_dimension_phi = LweDimension(lwe_dimension.0 + size1 + size2);
let large_lwe_dimension = LweDimension(glwe_dimension.0 * polynomial_size.0);
let glwe_secret_key = allocate_and_generate_new_binary_partial_glwe_secret_key(
glwe_dimension,
polynomial_size,
PartialGlweSecretKeyRandomCoefCount(large_lwe_dimension_phi.0),
&mut secret_generator,
);
let large_lwe_secret_key = glwe_secret_key.clone().into_lwe_secret_key();
// let large_lwe_secret_key = allocate_and_generate_new_binary_lwe_partial_secret_key
// (large_lwe_dimension, &mut secret_generator, large_lwe_dimension_phi);
// large_lwe_dimension_phi - size1 == lwe_dimension.0 + size2
let inter_phi = lwe_dimension.0 + size2;
let inter_lwe_secret_key =
allocate_and_generate_new_shared_lwe_secret_key_from_lwe_secret_key(
&large_lwe_secret_key,
SharedLweSecretKeyCommonCoefCount(inter_phi),
);
let small_lwe_secret_key =
allocate_and_generate_new_shared_lwe_secret_key_from_lwe_secret_key(
&inter_lwe_secret_key,
SharedLweSecretKeyCommonCoefCount(lwe_dimension.0),
);
// println!("Large Key Dimension = {:?}",large_lwe_secret_key.lwe_dimension().0);
// println!("Inter Key Dimension = {:?}",inter_lwe_secret_key.lwe_dimension().0);
// println!("Small Key Dimension = {:?}",small_lwe_secret_key.lwe_dimension().0);
//
//
//
// println!("Large phi = {:?}",large_lwe_dimension_phi);
// println!("Inter phi = {:?}",inter_phi);
//Shrinking KSK generations
let ksk_large_to_inter = allocate_and_generate_new_lwe_shrinking_keyswitch_key(
&large_lwe_secret_key,
&inter_lwe_secret_key,
SharedLweSecretKeyCommonCoefCount(inter_phi),
ks1_base_log,
ks1_level,
ks1_lwe_modular_std_dev,
ciphertext_modulus,
&mut encryption_generator,
);
let ksk_inter_to_small = allocate_and_generate_new_lwe_shrinking_keyswitch_key(
&inter_lwe_secret_key,
&small_lwe_secret_key,
SharedLweSecretKeyCommonCoefCount(lwe_dimension.0),
ks2_base_log,
ks2_level,
ks2_lwe_modular_std_dev,
ciphertext_modulus,
&mut encryption_generator,
);
//Encryption
let mut large_lwe =
LweCiphertext::new(0u64, large_lwe_dimension.to_lwe_size(), ciphertext_modulus);
encrypt_lwe_ciphertext(
&large_lwe_secret_key,
&mut large_lwe,
pt,
std_dev_bsk,
&mut encryption_generator,
);
//Shrinking KS
let mut inter_lwe =
LweCiphertext::new(0, LweDimension(inter_phi).to_lwe_size(), ciphertext_modulus);
let mut small_lwe = LweCiphertext::new(0, lwe_dimension.to_lwe_size(), ciphertext_modulus);
//PBS PART
let mut bsk = LweBootstrapKey::new(
0,
glwe_dimension.to_glwe_size(),
polynomial_size,
pbs_base_mpg,
pbs_level,
lwe_dimension,
ciphertext_modulus,
);
par_generate_lwe_bootstrap_key(
&small_lwe_secret_key,
&glwe_secret_key,
&mut bsk,
std_dev_bsk,
&mut encryption_generator,
);
let mut fbsk = FourierLweBootstrapKey::new(
small_lwe_secret_key.lwe_dimension(),
glwe_dimension.to_glwe_size(),
polynomial_size,
pbs_base_mpg,
pbs_level,
);
convert_standard_lwe_bootstrap_key_to_fourier(&bsk, &mut fbsk);
drop(bsk);
let accumulator = generate_accumulator(
polynomial_size,
glwe_dimension.to_glwe_size(),
precision,
ciphertext_modulus,
1 << delta_log,
|x| x,
);
let mut out_pbs_ct = LweCiphertext::new(
0u64,
large_lwe_secret_key.lwe_dimension().to_lwe_size(),
ciphertext_modulus,
);
let decomposer = SignedDecomposer::new(
DecompositionBaseLog(log_precision + 1),
DecompositionLevelCount(1),
);
shrinking_keyswitch_lwe_ciphertext(&ksk_large_to_inter, &large_lwe, &mut inter_lwe);
let dec_inter = decrypt_lwe_ciphertext(&inter_lwe_secret_key, &inter_lwe);
let decoded = decomposer.closest_representable(dec_inter.0) >> delta_log;
assert_eq!(decoded, msg, "Err after first shrinking KS");
shrinking_keyswitch_lwe_ciphertext(&ksk_inter_to_small, &inter_lwe, &mut small_lwe);
let dec_small = decrypt_lwe_ciphertext(&small_lwe_secret_key, &small_lwe);
let decoded = decomposer.closest_representable(dec_small.0) >> delta_log;
assert_eq!(decoded, msg, "Err after second shrinking KS");
programmable_bootstrap_lwe_ciphertext(&small_lwe, &mut out_pbs_ct, &accumulator, &fbsk);
let dec_large = decrypt_lwe_ciphertext(&large_lwe_secret_key, &out_pbs_ct);
let decoded = decomposer.closest_representable(dec_large.0) >> delta_log;
assert_eq!(decoded, msg, "Err after PBS");
let bench_id = format!("stairKS::{}", params.name());
c.bench_function(&bench_id, |b| {
b.iter(|| {
shrinking_keyswitch_lwe_ciphertext(&ksk_large_to_inter, &large_lwe, &mut inter_lwe);
shrinking_keyswitch_lwe_ciphertext(&ksk_inter_to_small, &inter_lwe, &mut small_lwe);
programmable_bootstrap_lwe_ciphertext(
&small_lwe,
&mut out_pbs_ct,
&accumulator,
&fbsk,
);
})
});
}
}
// Register `criterion_bench` as the single benchmark of the `benches` group and
// generate the binary's `main` entry point for it.
criterion_group!(benches, criterion_bench);
criterion_main!(benches);

View File

@@ -66,8 +66,8 @@ criterion_group!(
criterion_main!(pbs_group, multi_bit_pbs_group, pbs_throughput_group);
fn benchmark_parameters<Scalar: UnsignedInteger>() -> Vec<(String, CryptoParametersRecord<Scalar>)>
{
fn benchmark_parameters<Scalar: UnsignedInteger>(
) -> Vec<(&'static str, CryptoParametersRecord<Scalar>)> {
if Scalar::BITS == 64 {
SHORTINT_BENCH_PARAMS
.iter()
@@ -83,7 +83,7 @@ fn benchmark_parameters<Scalar: UnsignedInteger>() -> Vec<(String, CryptoParamet
} else if Scalar::BITS == 32 {
BOOLEAN_BENCH_PARAMS
.iter()
.map(|(name, params)| (name.to_string(), params.to_owned().into()))
.map(|(name, params)| (*name, params.to_owned().into()))
.collect()
} else {
vec![]
@@ -91,7 +91,7 @@ fn benchmark_parameters<Scalar: UnsignedInteger>() -> Vec<(String, CryptoParamet
}
fn throughput_benchmark_parameters<Scalar: UnsignedInteger>(
) -> Vec<(String, CryptoParametersRecord<Scalar>)> {
) -> Vec<(&'static str, CryptoParametersRecord<Scalar>)> {
if Scalar::BITS == 64 {
vec![
PARAM_MESSAGE_1_CARRY_1_KS_PBS,
@@ -111,15 +111,18 @@ fn throughput_benchmark_parameters<Scalar: UnsignedInteger>(
} else if Scalar::BITS == 32 {
BOOLEAN_BENCH_PARAMS
.iter()
.map(|(name, params)| (name.to_string(), params.to_owned().into()))
.map(|(name, params)| (*name, params.to_owned().into()))
.collect()
} else {
vec![]
}
}
fn multi_bit_benchmark_parameters<Scalar: UnsignedInteger + Default>(
) -> Vec<(String, CryptoParametersRecord<Scalar>, LweBskGroupingFactor)> {
fn multi_bit_benchmark_parameters<Scalar: UnsignedInteger + Default>() -> Vec<(
&'static str,
CryptoParametersRecord<Scalar>,
LweBskGroupingFactor,
)> {
if Scalar::BITS == 64 {
vec![
PARAM_MULTI_BIT_MESSAGE_1_CARRY_1_GROUP_2_KS_PBS,
@@ -240,7 +243,7 @@ fn mem_optimized_pbs<Scalar: UnsignedTorus + CastInto<usize> + Serialize>(c: &mu
write_to_json(
&id,
*params,
name,
*name,
"pbs",
&OperatorType::Atomic,
bit_size,
@@ -329,7 +332,7 @@ fn multi_bit_pbs<
write_to_json(
&id,
*params,
name,
*name,
"pbs",
&OperatorType::Atomic,
bit_size,
@@ -418,7 +421,7 @@ fn multi_bit_deterministic_pbs<
write_to_json(
&id,
*params,
name,
*name,
"pbs",
&OperatorType::Atomic,
bit_size,
@@ -538,7 +541,7 @@ fn pbs_throughput<Scalar: UnsignedTorus + CastInto<usize> + Sync + Send + Serial
write_to_json(
&id,
*params,
name,
*name,
"pbs",
&OperatorType::Atomic,
bit_size,

View File

@@ -650,23 +650,15 @@ define_server_key_bench_fn!(method_name: smart_bitxor, display_name: bitxor);
define_server_key_bench_fn!(method_name: smart_add_parallelized, display_name: add);
define_server_key_bench_fn!(method_name: smart_sub_parallelized, display_name: sub);
define_server_key_bench_fn!(method_name: smart_mul_parallelized, display_name: mul);
define_server_key_bench_fn!(method_name: smart_div_parallelized, display_name: div);
define_server_key_bench_fn!(method_name: smart_div_rem_parallelized, display_name: div_mod);
define_server_key_bench_fn!(method_name: smart_rem_parallelized, display_name: rem);
define_server_key_bench_fn!(method_name: smart_bitand_parallelized, display_name: bitand);
define_server_key_bench_fn!(method_name: smart_bitxor_parallelized, display_name: bitxor);
define_server_key_bench_fn!(method_name: smart_bitor_parallelized, display_name: bitor);
define_server_key_bench_fn!(method_name: smart_rotate_right_parallelized, display_name: rotate_right);
define_server_key_bench_fn!(method_name: smart_rotate_left_parallelized, display_name: rotate_left);
define_server_key_bench_fn!(method_name: smart_right_shift_parallelized, display_name: right_shift);
define_server_key_bench_fn!(method_name: smart_left_shift_parallelized, display_name: left_shift);
define_server_key_bench_default_fn!(method_name: add_parallelized, display_name: add);
define_server_key_bench_default_fn!(method_name: sub_parallelized, display_name: sub);
define_server_key_bench_default_fn!(method_name: mul_parallelized, display_name: mul);
define_server_key_bench_default_fn!(method_name: div_parallelized, display_name: div);
define_server_key_bench_default_fn!(method_name: rem_parallelized, display_name: modulo);
define_server_key_bench_default_fn!(method_name: div_rem_parallelized, display_name: div_mod);
define_server_key_bench_default_fn!(method_name: bitand_parallelized, display_name: bitand);
define_server_key_bench_default_fn!(method_name: bitxor_parallelized, display_name: bitxor);
define_server_key_bench_default_fn!(method_name: bitor_parallelized, display_name: bitor);
@@ -679,11 +671,7 @@ define_server_key_bench_default_fn!(method_name: unchecked_bitand, display_name:
define_server_key_bench_default_fn!(method_name: unchecked_bitor, display_name: bitor);
define_server_key_bench_default_fn!(method_name: unchecked_bitxor, display_name: bitxor);
define_server_key_bench_default_fn!(method_name: unchecked_add_parallelized, display_name: add);
define_server_key_bench_default_fn!(method_name: unchecked_mul_parallelized, display_name: mul);
define_server_key_bench_default_fn!(method_name: unchecked_div_parallelized, display_name: div);
define_server_key_bench_default_fn!(method_name: unchecked_rem_parallelized, display_name: modulo);
define_server_key_bench_default_fn!(method_name: unchecked_div_rem_parallelized, display_name: div_mod);
define_server_key_bench_default_fn!(
method_name: unchecked_bitand_parallelized,
display_name: bitand
@@ -696,38 +684,6 @@ define_server_key_bench_default_fn!(
method_name: unchecked_bitxor_parallelized,
display_name: bitxor
);
define_server_key_bench_default_fn!(
method_name: unchecked_rotate_right_parallelized,
display_name: rotate_right
);
define_server_key_bench_default_fn!(
method_name: unchecked_rotate_left_parallelized,
display_name: rotate_left
);
define_server_key_bench_default_fn!(
method_name: unchecked_right_shift_parallelized,
display_name: right_shift
);
define_server_key_bench_default_fn!(
method_name: unchecked_left_shift_parallelized,
display_name: left_shift
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_bitand_parallelized,
display_name: bitand,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_bitor_parallelized,
display_name: bitor,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_bitxor_parallelized,
display_name: bitxor,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_add,
@@ -760,46 +716,6 @@ define_server_key_bench_scalar_fn!(
display_name: mul,
rng_func: mul_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_div_parallelized,
display_name: div,
rng_func: div_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_rem_parallelized,
display_name: modulo,
rng_func: div_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_div_rem_parallelized,
display_name: div_mod,
rng_func: div_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_bitand_parallelized,
display_name: bitand,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_bitor_parallelized,
display_name: bitor,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_bitxor_parallelized,
display_name: bitxor,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_rotate_left_parallelized,
display_name: rotate_left,
rng_func: shift_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_rotate_right_parallelized,
display_name: rotate_right,
rng_func: shift_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: scalar_add_parallelized,
@@ -826,11 +742,6 @@ define_server_key_bench_scalar_default_fn!(
display_name: modulo,
rng_func: div_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: scalar_div_rem_parallelized,
display_name: div_mod,
rng_func: div_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: scalar_left_shift_parallelized,
display_name: left_shift,
@@ -851,22 +762,6 @@ define_server_key_bench_scalar_default_fn!(
display_name: rotate_right,
rng_func: shift_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: scalar_bitand_parallelized,
display_name: bitand,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: scalar_bitor_parallelized,
display_name: bitor,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: scalar_bitxor_parallelized,
display_name: bitxor,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: scalar_eq_parallelized,
display_name: equal,
@@ -908,47 +803,6 @@ define_server_key_bench_scalar_default_fn!(
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_eq_parallelized,
display_name: equal,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_ne_parallelized,
display_name: not_equal,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_le_parallelized,
display_name: less_or_equal,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_lt_parallelized,
display_name: less_than,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_ge_parallelized,
display_name: greater_or_equal,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_gt_parallelized,
display_name: greater_than,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_max_parallelized,
display_name: max,
rng_func: default_scalar
);
define_server_key_bench_scalar_fn!(
method_name: smart_scalar_min_parallelized,
display_name: min,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_add,
display_name: add,
@@ -964,50 +818,10 @@ define_server_key_bench_scalar_default_fn!(
display_name: mul,
rng_func: mul_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_div_parallelized,
display_name: div,
rng_func: div_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_rem_parallelized,
display_name: modulo,
rng_func: div_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_div_rem_parallelized,
display_name: div_mod,
rng_func: div_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_rotate_right_parallelized,
display_name: rotate_right,
rng_func: shift_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_rotate_left_parallelized,
display_name: rotate_left,
rng_func: shift_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_right_shift_parallelized,
display_name: right_shift,
rng_func: shift_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_left_shift_parallelized,
display_name: left_shift,
rng_func: shift_scalar
);
define_server_key_bench_unary_fn!(method_name: smart_neg, display_name: negation);
define_server_key_bench_unary_fn!(method_name: smart_neg_parallelized, display_name: negation);
define_server_key_bench_unary_fn!(method_name: smart_abs_parallelized, display_name: abs);
define_server_key_bench_unary_default_fn!(method_name: neg_parallelized, display_name: negation);
define_server_key_bench_unary_default_fn!(method_name: abs_parallelized, display_name: abs);
define_server_key_bench_unary_default_fn!(method_name: unchecked_abs_parallelized, display_name: abs);
define_server_key_bench_unary_fn!(method_name: full_propagate, display_name: carry_propagation);
define_server_key_bench_unary_fn!(
@@ -1018,7 +832,6 @@ define_server_key_bench_unary_fn!(
define_server_key_bench_default_fn!(method_name: unchecked_max, display_name: max);
define_server_key_bench_default_fn!(method_name: unchecked_min, display_name: min);
define_server_key_bench_default_fn!(method_name: unchecked_eq, display_name: equal);
define_server_key_bench_default_fn!(method_name: unchecked_ne, display_name: not_equal);
define_server_key_bench_default_fn!(method_name: unchecked_lt, display_name: less_than);
define_server_key_bench_default_fn!(method_name: unchecked_le, display_name: less_or_equal);
define_server_key_bench_default_fn!(method_name: unchecked_gt, display_name: greater_than);
@@ -1027,7 +840,6 @@ define_server_key_bench_default_fn!(method_name: unchecked_ge, display_name: gre
define_server_key_bench_default_fn!(method_name: unchecked_max_parallelized, display_name: max);
define_server_key_bench_default_fn!(method_name: unchecked_min_parallelized, display_name: min);
define_server_key_bench_default_fn!(method_name: unchecked_eq_parallelized, display_name: equal);
define_server_key_bench_default_fn!(method_name: unchecked_ne_parallelized, display_name: not_equal);
define_server_key_bench_default_fn!(
method_name: unchecked_lt_parallelized,
display_name: less_than
@@ -1045,35 +857,9 @@ define_server_key_bench_default_fn!(
display_name: greater_or_equal
);
define_server_key_bench_scalar_default_fn!(method_name: unchecked_scalar_max_parallelized, display_name: max,rng_func: default_scalar);
define_server_key_bench_scalar_default_fn!(method_name: unchecked_scalar_min_parallelized, display_name: min,rng_func: default_scalar);
define_server_key_bench_scalar_default_fn!(method_name: unchecked_scalar_eq_parallelized, display_name: equal,rng_func: default_scalar);
define_server_key_bench_scalar_default_fn!(method_name: unchecked_scalar_ne_parallelized, display_name: not_equal,rng_func: default_scalar);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_lt_parallelized,
display_name: less_than,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_le_parallelized,
display_name: less_or_equal,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_gt_parallelized,
display_name: greater_than,
rng_func: default_scalar
);
define_server_key_bench_scalar_default_fn!(
method_name: unchecked_scalar_ge_parallelized,
display_name: greater_or_equal,
rng_func: default_scalar
);
define_server_key_bench_fn!(method_name: smart_max, display_name: max);
define_server_key_bench_fn!(method_name: smart_min, display_name: min);
define_server_key_bench_fn!(method_name: smart_eq, display_name: equal);
define_server_key_bench_fn!(method_name: smart_ne, display_name: not_equal);
define_server_key_bench_fn!(method_name: smart_lt, display_name: less_than);
define_server_key_bench_fn!(method_name: smart_le, display_name: less_or_equal);
define_server_key_bench_fn!(method_name: smart_gt, display_name: greater_than);
@@ -1082,7 +868,6 @@ define_server_key_bench_fn!(method_name: smart_ge, display_name: greater_or_equa
define_server_key_bench_fn!(method_name: smart_max_parallelized, display_name: max);
define_server_key_bench_fn!(method_name: smart_min_parallelized, display_name: min);
define_server_key_bench_fn!(method_name: smart_eq_parallelized, display_name: equal);
define_server_key_bench_fn!(method_name: smart_ne_parallelized, display_name: not_equal);
define_server_key_bench_fn!(method_name: smart_lt_parallelized, display_name: less_than);
define_server_key_bench_fn!(
method_name: smart_le_parallelized,
@@ -1138,7 +923,6 @@ criterion_group!(
smart_max,
smart_min,
smart_eq,
smart_ne,
smart_lt,
smart_le,
smart_gt,
@@ -1147,21 +931,12 @@ criterion_group!(
criterion_group!(
smart_parallelized_ops,
smart_neg_parallelized,
smart_abs_parallelized,
smart_add_parallelized,
smart_sub_parallelized,
smart_mul_parallelized,
// smart_div_parallelized,
// smart_rem_parallelized,
smart_div_rem_parallelized, // For ciphertext div == rem == div_rem
smart_bitand_parallelized,
smart_bitor_parallelized,
smart_bitxor_parallelized,
smart_rotate_right_parallelized,
smart_rotate_left_parallelized,
smart_right_shift_parallelized,
smart_left_shift_parallelized,
);
criterion_group!(
@@ -1169,7 +944,6 @@ criterion_group!(
smart_max_parallelized,
smart_min_parallelized,
smart_eq_parallelized,
smart_ne_parallelized,
smart_lt_parallelized,
smart_le_parallelized,
smart_gt_parallelized,
@@ -1178,14 +952,12 @@ criterion_group!(
criterion_group!(
default_parallelized_ops,
neg_parallelized,
abs_parallelized,
add_parallelized,
sub_parallelized,
mul_parallelized,
// div_parallelized,
// rem_parallelized,
div_rem_parallelized,
div_parallelized,
rem_parallelized,
neg_parallelized,
bitand_parallelized,
bitnot_parallelized,
bitor_parallelized,
@@ -1221,26 +993,6 @@ criterion_group!(
smart_scalar_add_parallelized,
smart_scalar_sub_parallelized,
smart_scalar_mul_parallelized,
smart_scalar_div_parallelized,
smart_scalar_rem_parallelized, // For scalar rem == div_rem
// smart_scalar_div_rem_parallelized,
smart_scalar_bitand_parallelized,
smart_scalar_bitor_parallelized,
smart_scalar_bitxor_parallelized,
smart_scalar_rotate_right_parallelized,
smart_scalar_rotate_left_parallelized,
);
criterion_group!(
smart_scalar_parallelized_ops_comp,
smart_scalar_max_parallelized,
smart_scalar_min_parallelized,
smart_scalar_eq_parallelized,
smart_scalar_ne_parallelized,
smart_scalar_lt_parallelized,
smart_scalar_le_parallelized,
smart_scalar_gt_parallelized,
smart_scalar_ge_parallelized,
);
criterion_group!(
@@ -1250,14 +1002,10 @@ criterion_group!(
scalar_mul_parallelized,
scalar_div_parallelized,
scalar_rem_parallelized,
// scalar_div_rem_parallelized,
scalar_left_shift_parallelized,
scalar_right_shift_parallelized,
scalar_rotate_left_parallelized,
scalar_rotate_right_parallelized,
scalar_bitand_parallelized,
scalar_bitor_parallelized,
scalar_bitxor_parallelized,
);
criterion_group!(
@@ -1282,40 +1030,11 @@ criterion_group!(
unchecked_bitxor,
);
criterion_group!(
unchecked_parallelized_ops,
unchecked_abs_parallelized,
unchecked_add_parallelized,
unchecked_mul_parallelized,
// unchecked_div_parallelized,
// unchecked_rem_parallelized,
unchecked_div_rem_parallelized,
unchecked_bitand_parallelized,
unchecked_bitor_parallelized,
unchecked_bitxor_parallelized,
unchecked_rotate_right_parallelized,
unchecked_rotate_left_parallelized,
unchecked_right_shift_parallelized,
unchecked_left_shift_parallelized,
);
criterion_group!(
unchecked_parallelized_ops_comp,
unchecked_eq_parallelized,
unchecked_ne_parallelized,
unchecked_gt_parallelized,
unchecked_ge_parallelized,
unchecked_lt_parallelized,
unchecked_max_parallelized,
unchecked_min_parallelized,
);
criterion_group!(
unchecked_ops_comp,
unchecked_max,
unchecked_min,
unchecked_eq,
unchecked_ne,
unchecked_lt,
unchecked_le,
unchecked_gt,
@@ -1327,28 +1046,20 @@ criterion_group!(
unchecked_scalar_add,
unchecked_scalar_sub,
unchecked_scalar_mul_parallelized,
unchecked_scalar_div_parallelized,
unchecked_scalar_rem_parallelized,
// unchecked_scalar_div_rem_parallelized,
unchecked_scalar_bitand_parallelized,
unchecked_scalar_bitor_parallelized,
unchecked_scalar_bitxor_parallelized,
unchecked_scalar_rotate_right_parallelized,
unchecked_scalar_rotate_left_parallelized,
unchecked_scalar_right_shift_parallelized,
unchecked_scalar_left_shift_parallelized,
unchecked_bitand_parallelized,
unchecked_bitor_parallelized,
unchecked_bitxor_parallelized,
);
criterion_group!(
unchecked_scalar_ops_comp,
unchecked_scalar_max_parallelized,
unchecked_scalar_min_parallelized,
unchecked_scalar_eq_parallelized,
unchecked_scalar_ne_parallelized,
unchecked_scalar_lt_parallelized,
unchecked_scalar_le_parallelized,
unchecked_scalar_gt_parallelized,
unchecked_scalar_ge_parallelized,
unchecked_max_parallelized,
unchecked_min_parallelized,
unchecked_eq_parallelized,
unchecked_lt_parallelized,
unchecked_le_parallelized,
unchecked_gt_parallelized,
unchecked_ge_parallelized,
);
criterion_group!(misc, full_propagate, full_propagate_parallelized);
@@ -1367,9 +1078,7 @@ fn main() {
"smart_parallelized" => smart_parallelized_ops(),
"smart_parallelized_comp" => smart_parallelized_ops_comp(),
"smart_scalar_parallelized" => smart_scalar_parallelized_ops(),
"smart_scalar_parallelized_comp" => smart_scalar_parallelized_ops_comp(),
"unchecked" => unchecked_ops(),
"unchecked_parallelized" => unchecked_parallelized_ops(),
"unchecked_comp" => unchecked_ops_comp(),
"unchecked_scalar" => unchecked_scalar_ops(),
"unchecked_scalar_comp" => unchecked_scalar_ops_comp(),

View File

@@ -1,811 +0,0 @@
#[path = "../utilities.rs"]
mod utilities;
use crate::utilities::{write_to_json, OperatorType};
use std::env;
use criterion::{criterion_group, Criterion};
use itertools::iproduct;
use rand::prelude::*;
use rand::Rng;
use std::vec::IntoIter;
use tfhe::integer::keycache::KEY_CACHE;
use tfhe::integer::{RadixCiphertext, ServerKey, SignedRadixCiphertext, I256};
use tfhe::keycache::NamedParam;
use tfhe::shortint::parameters::{
PARAM_MESSAGE_2_CARRY_2_KS_PBS, PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_2_KS_PBS,
};
fn gen_random_i256(rng: &mut ThreadRng) -> I256 {
let clearlow = rng.gen::<u128>();
let clearhigh = rng.gen::<u128>();
tfhe::integer::I256::from((clearlow, clearhigh))
}
/// An iterator that yields a succession of combinations
/// of parameters and a num_block to achieve a certain bit_size ciphertext
/// in radix decomposition
struct ParamsAndNumBlocksIter {
    // Cartesian product of (PBS parameter set, requested ciphertext bit size);
    // the matching number of radix blocks is derived lazily in `Iterator::next`.
    params_and_bit_sizes:
        itertools::Product<IntoIter<tfhe::shortint::PBSParameters>, IntoIter<usize>>,
}
impl Default for ParamsAndNumBlocksIter {
fn default() -> Self {
let is_multi_bit = match env::var("__TFHE_RS_BENCH_TYPE") {
Ok(val) => val.to_lowercase() == "multi_bit",
Err(_) => false,
};
let is_fast_bench = match env::var("__TFHE_RS_FAST_BENCH") {
Ok(val) => val.to_lowercase() == "true",
Err(_) => false,
};
if is_multi_bit {
let params = vec![PARAM_MULTI_BIT_MESSAGE_2_CARRY_2_GROUP_2_KS_PBS.into()];
let bit_sizes = if is_fast_bench {
vec![32]
} else {
vec![8, 16, 32, 40, 64]
};
let params_and_bit_sizes = iproduct!(params, bit_sizes);
Self {
params_and_bit_sizes,
}
} else {
// FIXME One set of parameter is tested since we want to benchmark only quickest
// operations.
let params = vec![
PARAM_MESSAGE_2_CARRY_2_KS_PBS.into(),
// PARAM_MESSAGE_3_CARRY_3_KS_PBS.into(),
// PARAM_MESSAGE_4_CARRY_4_KS_PBS.into(),
];
let bit_sizes = if is_fast_bench {
vec![32]
} else {
vec![8, 16, 32, 40, 64, 128, 256]
};
let params_and_bit_sizes = iproduct!(params, bit_sizes);
Self {
params_and_bit_sizes,
}
}
}
}
impl Iterator for ParamsAndNumBlocksIter {
    type Item = (tfhe::shortint::PBSParameters, usize, usize);

    /// Yields `(parameters, number_of_radix_blocks, bit_size)` tuples until the
    /// underlying product of parameters and bit sizes is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        let (param, bit_size) = self.params_and_bit_sizes.next()?;
        // Each radix block carries log2(message_modulus) bits; round the block
        // count up so the requested bit size always fits.
        let bits_per_block = param.message_modulus().0.ilog2() as f64;
        let num_block = (bit_size as f64 / bits_per_block).ceil() as usize;
        Some((param, num_block, bit_size))
    }
}
/// Base function to bench a server key function that is a binary operation, input ciphertext will
/// contain only zero carries.
///
/// * `bench_name` / `display_name` - identifiers used for the criterion group id and the
///   JSON result record respectively.
/// * `binary_op` - the server-key operation under test, called on two freshly
///   encrypted signed radix ciphertexts.
/// * `sample_size` - criterion sample count (some ops, e.g. division, use a smaller one).
fn bench_server_key_signed_binary_function_clean_inputs<F>(
    c: &mut Criterion,
    bench_name: &str,
    display_name: &str,
    binary_op: F,
    sample_size: usize,
) where
    F: Fn(&ServerKey, &SignedRadixCiphertext, &SignedRadixCiphertext),
{
    let mut bench_group = c.benchmark_group(bench_name);
    bench_group
        .sample_size(sample_size)
        .measurement_time(std::time::Duration::from_secs(60));
    let mut rng = rand::thread_rng();
    // One benchmark per (parameter set, bit size) combination.
    for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
        let param_name = param.name();
        let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits");
        bench_group.bench_function(&bench_id, |b| {
            let (cks, sks) = KEY_CACHE.get_from_params(param);
            // Fresh encryptions per iteration so inputs have zero carries;
            // encryption cost is excluded from the measurement by iter_batched.
            let encrypt_two_values = || {
                let ct_0 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
                let ct_1 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
                (ct_0, ct_1)
            };
            b.iter_batched(
                encrypt_two_values,
                |(ct_0, ct_1)| {
                    binary_op(&sks, &ct_0, &ct_1);
                },
                criterion::BatchSize::SmallInput,
            )
        });
        // Record the bench metadata (presumably consumed by result post-processing).
        write_to_json::<u64, _>(
            &bench_id,
            param,
            param.name(),
            display_name,
            &OperatorType::Atomic,
            bit_size as u32,
            vec![param.message_modulus().0.ilog2(); num_block],
        );
    }
    bench_group.finish()
}
/// Shifts and rotations require a special function as the rhs,
/// i.e. the shift amount has to be a positive radix type.
///
/// The lhs is a random signed radix ciphertext; the rhs is an (unsigned)
/// radix encryption of a shift amount drawn from `0..bit_size`.
fn bench_server_key_signed_shift_function_clean_inputs<F>(
    c: &mut Criterion,
    bench_name: &str,
    display_name: &str,
    binary_op: F,
) where
    F: Fn(&ServerKey, &SignedRadixCiphertext, &RadixCiphertext),
{
    let mut bench_group = c.benchmark_group(bench_name);
    bench_group
        .sample_size(15)
        .measurement_time(std::time::Duration::from_secs(60));
    let mut rng = rand::thread_rng();
    for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
        let param_name = param.name();
        let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits");
        bench_group.bench_function(&bench_id, |b| {
            let (cks, sks) = KEY_CACHE.get_from_params(param);
            // Fresh inputs per iteration; iter_batched keeps encryption out of
            // the measured section.
            let encrypt_two_values = || {
                // Shift amount stays within the ciphertext's bit width.
                let clear_1 = rng.gen_range(0u128..bit_size as u128);
                let ct_0 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
                let ct_1 = cks.encrypt_radix(clear_1, num_block);
                (ct_0, ct_1)
            };
            b.iter_batched(
                encrypt_two_values,
                |(ct_0, ct_1)| {
                    binary_op(&sks, &ct_0, &ct_1);
                },
                criterion::BatchSize::SmallInput,
            )
        });
        // Record the bench metadata (presumably consumed by result post-processing).
        write_to_json::<u64, _>(
            &bench_id,
            param,
            param.name(),
            display_name,
            &OperatorType::Atomic,
            bit_size as u32,
            vec![param.message_modulus().0.ilog2(); num_block],
        );
    }
    bench_group.finish()
}
/// Base function to bench a server key function that is a unary operation, input ciphertext will
/// contain only zero carries.
///
/// * `unary_fn` - the server-key operation under test, called on a freshly
///   encrypted signed radix ciphertext.
fn bench_server_key_unary_function_clean_inputs<F>(
    c: &mut Criterion,
    bench_name: &str,
    display_name: &str,
    unary_fn: F,
) where
    F: Fn(&ServerKey, &SignedRadixCiphertext),
{
    let mut bench_group = c.benchmark_group(bench_name);
    bench_group
        .sample_size(15)
        .measurement_time(std::time::Duration::from_secs(60));
    let mut rng = rand::thread_rng();
    for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
        let param_name = param.name();
        let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits");
        bench_group.bench_function(&bench_id, |b| {
            let (cks, sks) = KEY_CACHE.get_from_params(param);
            // Fresh encryption per iteration, excluded from the measurement.
            let encrypt_one_value =
                || cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
            b.iter_batched(
                encrypt_one_value,
                |ct_0| {
                    unary_fn(&sks, &ct_0);
                },
                criterion::BatchSize::SmallInput,
            )
        });
        // Record the bench metadata (presumably consumed by result post-processing).
        write_to_json::<u64, _>(
            &bench_id,
            param,
            param.name(),
            display_name,
            &OperatorType::Atomic,
            bit_size as u32,
            vec![param.message_modulus().0.ilog2(); num_block],
        );
    }
    bench_group.finish()
}
/// Benchmarks `ServerKey::if_then_else_parallelized` on signed radix ciphertexts:
/// a ternary select between two random encrypted values driven by a condition
/// ciphertext.
fn signed_if_then_else_parallelized(c: &mut Criterion) {
    let bench_name = "integer::signed::if_then_else_parallelized";
    let display_name = "if_then_else";
    let mut bench_group = c.benchmark_group(bench_name);
    bench_group
        .sample_size(15)
        .measurement_time(std::time::Duration::from_secs(60));
    let mut rng = rand::thread_rng();
    for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
        let param_name = param.name();
        let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits");
        bench_group.bench_function(&bench_id, |b| {
            let (cks, sks) = KEY_CACHE.get_from_params(param);
            // Build (condition, true_branch, false_branch) fresh per iteration.
            // NOTE(review): the condition is a *trivial* encryption of a random
            // bool — cheaper to create; confirm this matches the op's real cost.
            let encrypt_tree_values = || {
                let ct_0 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
                let ct_1 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
                let cond = sks.create_trivial_radix(rng.gen_bool(0.5) as u64, num_block);
                (cond, ct_0, ct_1)
            };
            b.iter_batched(
                encrypt_tree_values,
                |(condition, true_ct, false_ct)| {
                    sks.if_then_else_parallelized(&condition, &true_ct, &false_ct)
                },
                criterion::BatchSize::SmallInput,
            )
        });
        // Record the bench metadata (presumably consumed by result post-processing).
        write_to_json::<u64, _>(
            &bench_id,
            param,
            param.name(),
            display_name,
            &OperatorType::Atomic,
            bit_size as u32,
            vec![param.message_modulus().0.ilog2(); num_block],
        );
    }
    bench_group.finish()
}
// Generates a criterion bench function named after `method_name` that drives
// `bench_server_key_signed_binary_function_clean_inputs` with that server-key
// method. `display_name` is the identifier written into the JSON results.
macro_rules! define_server_key_bench_binary_signed_clean_inputs_fn (
    // Arm without an explicit sample size: defaults to 15 samples.
    (method_name: $server_key_method:ident, display_name:$name:ident $(,)?) => {
        fn $server_key_method(c: &mut Criterion) {
            bench_server_key_signed_binary_function_clean_inputs(
                c,
                concat!("integer::signed::", stringify!($server_key_method)),
                stringify!($name),
                |server_key, lhs, rhs| {
                    server_key.$server_key_method(lhs, rhs);
                },
                15 /* sample_size */
            )
        }
    };
    // Arm with an explicit sample size (used for slow ops such as division).
    (
        method_name: $server_key_method:ident,
        display_name:$name:ident,
        sample_size: $sample_size:expr $(,)?
    ) => {
        fn $server_key_method(c: &mut Criterion) {
            bench_server_key_signed_binary_function_clean_inputs(
                c,
                concat!("integer::signed::", stringify!($server_key_method)),
                stringify!($name),
                |server_key, lhs, rhs| {
                    server_key.$server_key_method(lhs, rhs);
                },
                $sample_size
            )
        }
    }
);
// Generates a criterion bench function named after `method_name` that drives
// `bench_server_key_unary_function_clean_inputs` with that unary server-key
// method. `display_name` is the identifier written into the JSON results.
macro_rules! define_server_key_bench_unary_signed_clean_input_fn (
    (method_name: $server_key_method:ident, display_name:$name:ident $(,)?) => {
        fn $server_key_method(c: &mut Criterion) {
            bench_server_key_unary_function_clean_inputs(
                c,
                concat!("integer::signed::", stringify!($server_key_method)),
                stringify!($name),
                |server_key, lhs| {
                    server_key.$server_key_method(lhs);
                },
            )
        }
    };
);
// Binary signed op bench definitions. `display_name` feeds `write_to_json`
// and therefore the results dashboard; it must match the operation.
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_mul_parallelized,
    display_name: mul
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_bitand_parallelized,
    display_name: bitand
);
// Fixed copy-paste defect: bitor and bitxor were both previously reported as
// `bitand`, making their dashboard entries indistinguishable from bitand's.
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_bitor_parallelized,
    display_name: bitor
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_bitxor_parallelized,
    display_name: bitxor
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_eq_parallelized,
    display_name: eq
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_ne_parallelized,
    display_name: ne
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_le_parallelized,
    display_name: le
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_lt_parallelized,
    display_name: lt
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_ge_parallelized,
    display_name: ge
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_gt_parallelized,
    display_name: gt
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_max_parallelized,
    display_name: max
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_min_parallelized,
    display_name: min
);
// Division is much slower than the other ops; cap the sample count.
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_div_rem_parallelized,
    display_name: div_rem,
    sample_size: 10,
);
define_server_key_bench_binary_signed_clean_inputs_fn!(
    method_name: unchecked_div_rem_floor_parallelized,
    display_name: div_rem_floor,
    sample_size: 10,
);
fn unchecked_left_shift_parallelized(c: &mut Criterion) {
bench_server_key_signed_shift_function_clean_inputs(
c,
concat!("integer::signed::", "unchecked_left_shift_parallelized"),
"left_shift",
|server_key, lhs, rhs| {
server_key.unchecked_left_shift_parallelized(lhs, rhs);
},
)
}
fn unchecked_right_shift_parallelized(c: &mut Criterion) {
bench_server_key_signed_shift_function_clean_inputs(
c,
concat!("integer::signed::", "unchecked_right_shift_parallelized"),
"right_shift",
|server_key, lhs, rhs| {
server_key.unchecked_right_shift_parallelized(lhs, rhs);
},
)
}
fn unchecked_rotate_left_parallelized(c: &mut Criterion) {
bench_server_key_signed_shift_function_clean_inputs(
c,
concat!("integer::signed::", "unchecked_rotate_left_parallelized"),
"rotate_left",
|server_key, lhs, rhs| {
server_key.unchecked_rotate_left_parallelized(lhs, rhs);
},
)
}
fn unchecked_rotate_right_parallelized(c: &mut Criterion) {
bench_server_key_signed_shift_function_clean_inputs(
c,
concat!("integer::signed::", "unchecked_rotate_right_parallelized"),
"rotate_right",
|server_key, lhs, rhs| {
server_key.unchecked_rotate_right_parallelized(lhs, rhs);
},
)
}
// `abs` is the only unary signed op benchmarked here.
define_server_key_bench_unary_signed_clean_input_fn!(
    method_name: unchecked_abs_parallelized,
    display_name: abs,
);
// Arithmetic / bitwise / shift / division benches
// (selected via __TFHE_RS_BENCH_OP_FLAVOR=unchecked, see `main`).
criterion_group!(
    unchecked_ops,
    unchecked_mul_parallelized,
    unchecked_left_shift_parallelized,
    unchecked_right_shift_parallelized,
    unchecked_rotate_left_parallelized,
    unchecked_rotate_right_parallelized,
    unchecked_bitand_parallelized,
    unchecked_bitor_parallelized,
    unchecked_bitxor_parallelized,
    unchecked_abs_parallelized,
    unchecked_div_rem_parallelized,
    unchecked_div_rem_floor_parallelized,
);
// Comparison / min-max benches, kept in a separate group so they can be run
// independently (__TFHE_RS_BENCH_OP_FLAVOR=unchecked_comp).
criterion_group!(
    unchecked_ops_comp,
    unchecked_eq_parallelized,
    unchecked_ne_parallelized,
    unchecked_ge_parallelized,
    unchecked_gt_parallelized,
    unchecked_le_parallelized,
    unchecked_lt_parallelized,
    unchecked_max_parallelized,
    unchecked_min_parallelized,
);
//================================================================================
// Scalar Benches
//================================================================================
// Clear (plaintext) scalar operand type used by every scalar bench below:
// signed 256-bit, wide enough for the largest benched bit size.
type ScalarType = I256;
/// Benches a (ciphertext, clear scalar) server-key operation with freshly
/// encrypted inputs for each measurement batch.
///
/// * `binary_op` - the scalar operation under test; it receives a
///   `&mut SignedRadixCiphertext` so in-place (`_assign`-style) ops also fit.
/// * `rng_func` - draws a clear scalar for a given bit size (e.g. always `1`
///   for shifts, never `0` for divisions); the drawn value is asserted to be
///   within the signed range for that bit size.
fn bench_server_key_binary_scalar_function_clean_inputs<F, G>(
    c: &mut Criterion,
    bench_name: &str,
    display_name: &str,
    binary_op: F,
    rng_func: G,
) where
    F: Fn(&ServerKey, &mut SignedRadixCiphertext, ScalarType),
    G: Fn(&mut ThreadRng, usize) -> ScalarType,
{
    let mut bench_group = c.benchmark_group(bench_name);
    bench_group
        .sample_size(15)
        .measurement_time(std::time::Duration::from_secs(60));
    let mut rng = rand::thread_rng();
    for (param, num_block, bit_size) in ParamsAndNumBlocksIter::default() {
        if bit_size > ScalarType::BITS as usize {
            // NOTE(review): `break` (not `continue`) — this assumes the
            // iterator yields bit sizes in non-decreasing order; confirm if
            // the iterator's ordering ever changes.
            break;
        }
        let param_name = param.name();
        let range = range_for_signed_bit_size(bit_size);
        let bench_id = format!("{bench_name}::{param_name}::{bit_size}_bits_scalar_{bit_size}");
        bench_group.bench_function(&bench_id, |b| {
            let (cks, sks) = KEY_CACHE.get_from_params(param);
            // Setup closure run per batch: fresh encryption + fresh scalar,
            // kept out of the measured section via iter_batched.
            let encrypt_one_value = || {
                let ct_0 = cks.encrypt_signed_radix(gen_random_i256(&mut rng), num_block);
                let clear_1 = rng_func(&mut rng, bit_size);
                // Guard against rng_func producing an out-of-range scalar.
                assert!(
                    range.contains(&clear_1),
                    "{:?} is not within the range {:?}",
                    clear_1,
                    range
                );
                (ct_0, clear_1)
            };
            b.iter_batched(
                encrypt_one_value,
                |(mut ct_0, clear_1)| {
                    binary_op(&sks, &mut ct_0, clear_1);
                },
                criterion::BatchSize::SmallInput,
            )
        });
        // Record bench metadata for the external results tooling.
        write_to_json::<u64, _>(
            &bench_id,
            param,
            param.name(),
            display_name,
            &OperatorType::Atomic,
            bit_size as u32,
            vec![param.message_modulus().0.ilog2(); num_block],
        );
    }
    bench_group.finish()
}
/// Inclusive range of values representable by a signed two's-complement
/// integer of `bit_size` bits: `[-2^(bit_size-1), 2^(bit_size-1) - 1]`.
fn range_for_signed_bit_size(bit_size: usize) -> std::ops::RangeInclusive<ScalarType> {
    assert!(bit_size <= ScalarType::BITS as usize);
    assert!(bit_size > 0);
    // 2^(bit_size - 1): the magnitude of the most negative representable value.
    let half_modulus = ScalarType::ONE << (bit_size - 1);
    // Wrapping ops keep the edge case bit_size == ScalarType::BITS correct:
    // half_modulus is then T::MIN, whose two's-complement negation stays T::MIN
    // (the correct lower bound) and whose decrement wraps to T::MAX.
    half_modulus.wrapping_neg()..=half_modulus.wrapping_sub(ScalarType::ONE)
}
/// Creates a bitmask covering the value bits of a *positive* signed number of
/// `bit_size` bits: the low `bit_size - 1` bits are 1s, everything above
/// (including the sign position) is 0.
/// Only works if ScalarType is signed.
fn positive_bit_mask_for_bit_size(bit_size: usize) -> ScalarType {
    // NOTE(review): if bit_size == ScalarType::BITS (allowed by the assert),
    // the `- 1` below underflows the usize subtraction — confirm the params
    // iterator never reaches that size.
    assert!(bit_size <= ScalarType::BITS as usize);
    assert!(bit_size > 0);
    let minus_one = -ScalarType::ONE; // (In two's complement this is full of 1s)
    // The sign bit of a bit_size-bit value must stay 0 for positive values,
    // hence the mask keeps only bit_size - 1 low bits.
    let bitmask = (minus_one) >> (ScalarType::BITS as usize - bit_size - 1);
    // flip the msbs, as they would still be one due to '>>' being an arithmetic shift
    bitmask ^ ((minus_one) << (bit_size - 1))
}
/// Creates a bitmask keeping the low `bit_size` bits (1s in the low `bit_size`
/// positions, 0s above), used to truncate *negative* values.
/// Only works if ScalarType is signed.
fn negative_bit_mask_for_bit_size(bit_size: usize) -> ScalarType {
    assert!(bit_size <= ScalarType::BITS as usize);
    assert!(bit_size > 0);
    let minus_one = -ScalarType::ONE; // (In two's complement this is full of 1s)
    let bitmask = (minus_one) >> (ScalarType::BITS as usize - bit_size);
    // flip the msbs, as they would still be one due to '>>' being an arithmetic shift
    bitmask ^ ((minus_one) << bit_size)
}
// We have to do this complex stuff because we cannot impl
// rand::distributions::Distribution<I256> because benches are considered out of the crate
// so neither I256 nor rand::distributions::Distribution belong to the benches.
//
// rand::distributions::Distribution can't be implemented in tfhe sources
// in a way that it becomes available to the benches, because rand is a dev dependency
// Draws a random I256, then masks it down so it fits within `bit_size`
// signed bits (sign of the original draw decides the branch).
fn gen_random_i256_in_range(rng: &mut ThreadRng, bit_size: usize) -> I256 {
    let value = gen_random_i256(rng);
    if value >= I256::ZERO {
        // Positive: clear everything above the low bit_size - 1 value bits.
        value & positive_bit_mask_for_bit_size(bit_size)
    } else {
        // NOTE(review): `x | -I256::ONE` is always -1 (OR with all-ones), so
        // the masking on this branch is dead and every negative draw collapses
        // to -1. If a spread of negative values was intended, this probably
        // wanted a sign extension such as `| ((-I256::ONE) << (bit_size - 1))`
        // — confirm before changing, as fixing it alters benchmark inputs.
        (value & negative_bit_mask_for_bit_size(bit_size)) | -I256::ONE
    }
}
// Functions used to apply different ways of selecting a scalar based on the context.
// Unconstrained scalar: any value within the signed `clear_bit_size` range.
fn default_scalar(rng: &mut ThreadRng, clear_bit_size: usize) -> ScalarType {
    gen_random_i256_in_range(rng, clear_bit_size)
}
// Fixed scalar for shift/rotate benches; both parameters are ignored.
fn shift_scalar(_rng: &mut ThreadRng, _clear_bit_size: usize) -> ScalarType {
    // Shifting by one is the worst case scenario.
    ScalarType::ONE
}
/// Divisor scalar: rejection-samples until a non-zero value is drawn,
/// since zero is not a valid bench input for division.
fn div_scalar(rng: &mut ThreadRng, clear_bit_size: usize) -> ScalarType {
    std::iter::repeat_with(|| gen_random_i256_in_range(rng, clear_bit_size))
        .find(|candidate| *candidate != ScalarType::ZERO)
        .expect("repeat_with is an infinite iterator")
}
// Generates a Criterion bench function for a (ciphertext, clear scalar)
// server-key operation; `rng_func` selects how the clear scalar is drawn
// (default / shift-by-one / non-zero divisor).
// NOTE(review): the bench name prefix is "integer::" while every other bench
// in this signed-integer file uses "integer::signed::" — possible copy-paste
// from the unsigned bench file, and a potential bench-id collision with it.
// Confirm before changing, as it alters recorded bench identifiers.
macro_rules! define_server_key_bench_binary_scalar_clean_inputs_fn (
    (method_name: $server_key_method:ident, display_name:$name:ident, rng_func:$($rng_fn:tt)*) => {
        fn $server_key_method(c: &mut Criterion) {
            bench_server_key_binary_scalar_function_clean_inputs(
                c,
                concat!("integer::", stringify!($server_key_method)),
                stringify!($name),
                |server_key, lhs, rhs| {
                    server_key.$server_key_method(lhs, rhs);
                }, $($rng_fn)*)
        }
    }
);
// Shift/rotate scalar benches: the scalar is always 1 (worst case, see
// `shift_scalar`).
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_left_shift_parallelized,
    display_name: scalar_left_shift,
    rng_func: shift_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_right_shift_parallelized,
    display_name: scalar_right_shift,
    rng_func: shift_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_rotate_right_parallelized,
    display_name: scalar_rotate_right,
    rng_func: shift_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_rotate_left_parallelized,
    display_name: scalar_rotate_left,
    rng_func: shift_scalar
);
// Arithmetic / bitwise / comparison scalar benches: unconstrained scalar.
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_mul_parallelized,
    display_name: scalar_mul,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_bitand_parallelized,
    display_name: scalar_bitand,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_bitor_parallelized,
    display_name: scalar_bitor,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_bitxor_parallelized,
    display_name: scalar_bitxor,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_eq_parallelized,
    display_name: scalar_eq,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_ne_parallelized,
    display_name: scalar_ne,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_le_parallelized,
    display_name: scalar_le,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_lt_parallelized,
    display_name: scalar_lt,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_ge_parallelized,
    display_name: scalar_ge,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_gt_parallelized,
    display_name: scalar_gt,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_max_parallelized,
    display_name: scalar_max,
    rng_func: default_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_scalar_min_parallelized,
    display_name: scalar_min,
    rng_func: default_scalar
);
// Division scalar benches: the scalar must be non-zero (see `div_scalar`).
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_signed_scalar_div_rem_parallelized,
    display_name: scalar_div_rem,
    rng_func: div_scalar
);
define_server_key_bench_binary_scalar_clean_inputs_fn!(
    method_name: unchecked_signed_scalar_div_parallelized,
    display_name: scalar_div,
    rng_func: div_scalar
);
// Scalar arithmetic / bitwise / shift / division benches
// (__TFHE_RS_BENCH_OP_FLAVOR=unchecked_scalar).
criterion_group!(
    unchecked_scalar_ops,
    unchecked_scalar_left_shift_parallelized,
    unchecked_scalar_right_shift_parallelized,
    unchecked_scalar_rotate_right_parallelized,
    unchecked_scalar_rotate_left_parallelized,
    unchecked_scalar_bitand_parallelized,
    unchecked_scalar_bitor_parallelized,
    unchecked_scalar_bitxor_parallelized,
    unchecked_scalar_mul_parallelized,
    unchecked_signed_scalar_div_rem_parallelized,
    unchecked_signed_scalar_div_parallelized,
    // NOTE(review): this is not a scalar op and is already part of
    // `unchecked_ops` above, so it runs twice when both flavors are selected;
    // confirm whether its inclusion here is intentional or a copy-paste leftover.
    unchecked_div_rem_floor_parallelized,
);
// Scalar comparison / min-max benches
// (__TFHE_RS_BENCH_OP_FLAVOR=unchecked_scalar_comp).
criterion_group!(
    unchecked_scalar_ops_comp,
    unchecked_scalar_eq_parallelized,
    unchecked_scalar_ne_parallelized,
    unchecked_scalar_le_parallelized,
    unchecked_scalar_lt_parallelized,
    unchecked_scalar_ge_parallelized,
    unchecked_scalar_gt_parallelized,
    unchecked_scalar_max_parallelized,
    unchecked_scalar_min_parallelized,
);
// Default (non-unchecked) op benches; currently only if_then_else.
criterion_group!(default_ops, signed_if_then_else_parallelized,);
fn main() {
match env::var("__TFHE_RS_BENCH_OP_FLAVOR") {
Ok(val) => {
match val.to_lowercase().as_str() {
"unchecked" => unchecked_ops(),
"unchecked_comp" => unchecked_ops_comp(),
"unchecked_scalar" => unchecked_scalar_ops(),
"unchecked_scalar_comp" => unchecked_scalar_ops_comp(),
_ => panic!("unknown benchmark operations flavor"),
};
}
Err(_) => {
unchecked_ops();
unchecked_ops_comp();
unchecked_scalar_ops();
unchecked_scalar_ops_comp();
}
};
Criterion::default().configure_from_args().final_summary();
}

View File

@@ -393,7 +393,7 @@ fn _bench_wopbs_param_message_8_norm2_5(c: &mut Criterion) {
let mut bench_group = c.benchmark_group("programmable_bootstrap");
let param = WOPBS_PARAM_MESSAGE_4_NORM2_6_KS_PBS;
let param_set: ShortintParameterSet = param.into();
let param_set: ShortintParameterSet = param.try_into().unwrap();
let pbs_params = param_set.pbs_parameters().unwrap();
let keys = KEY_CACHE_WOPBS.get_from_param((pbs_params, param));

View File

@@ -32,9 +32,12 @@ fn gen_c_api() {
}
extern crate cbindgen;
let crate_dir: PathBuf = env::var("CARGO_MANIFEST_DIR").unwrap().into();
let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
let package_name = env::var("CARGO_PKG_NAME").unwrap();
let output_file = target_dir().join(format!("{package_name}.h"));
let output_file = target_dir()
.join(format!("{package_name}.h"))
.display()
.to_string();
let parse_expand_features_vec = vec![
#[cfg(feature = "__c_api")]
@@ -51,8 +54,6 @@ fn gen_c_api() {
"shortint",
#[cfg(feature = "integer")]
"integer",
#[cfg(feature = "forward_compatibility")]
"forward_compatibility",
];
let parse_expand_vec = if parse_expand_features_vec.is_empty() {
@@ -61,16 +62,14 @@ fn gen_c_api() {
vec![package_name.as_str()]
};
let builder = cbindgen::Builder::new()
.with_crate(crate_dir.as_path())
.with_config(cbindgen::Config::from_file(crate_dir.join("cbindgen.toml")).unwrap())
cbindgen::Builder::new()
.with_crate(crate_dir.clone())
.with_config(cbindgen::Config::from_root_or_default(crate_dir))
.with_parse_expand(&parse_expand_vec)
.with_parse_expand_features(&parse_expand_features_vec);
#[cfg(feature = "forward_compatibility")]
let builder = builder.with_include("tfhe-c-api-dynamic-buffer.h");
builder.generate().unwrap().write_to_file(output_file);
.with_parse_expand_features(&parse_expand_features_vec)
.generate()
.unwrap()
.write_to_file(output_file);
}
fn main() {

View File

@@ -8,14 +8,8 @@ endif()
set(TFHE_C_API_RELEASE "${CMAKE_CURRENT_SOURCE_DIR}/../../target/${CARGO_PROFILE}")
include_directories(${TFHE_C_API_RELEASE})
include_directories(${TFHE_C_API_RELEASE}/deps)
add_library(Tfhe STATIC IMPORTED)
set_target_properties(Tfhe PROPERTIES IMPORTED_LOCATION ${TFHE_C_API_RELEASE}/libtfhe.a)
if("${WITH_FORWARD_COMPATIBILITY}" STREQUAL "ON")
add_definitions(-DWITH_FORWARD_COMPATIBILITY)
add_library(TfheDynamicBuffer STATIC IMPORTED)
set_target_properties(TfheDynamicBuffer PROPERTIES IMPORTED_LOCATION ${TFHE_C_API_RELEASE}/deps/libtfhe_c_api_dynamic_buffer.a)
endif()
if(APPLE)
find_library(SECURITY_FRAMEWORK Security)
@@ -36,9 +30,6 @@ foreach (testsourcefile ${TEST_CASES})
)
target_include_directories(${testname} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_link_libraries(${testname} LINK_PUBLIC Tfhe m pthread dl)
if("${WITH_FORWARD_COMPATIBILITY}" STREQUAL "ON")
target_link_libraries(${testname} LINK_PUBLIC TfheDynamicBuffer)
endif()
if(APPLE)
target_link_libraries(${testname} LINK_PUBLIC ${SECURITY_FRAMEWORK})
endif()
@@ -46,3 +37,4 @@ foreach (testsourcefile ${TEST_CASES})
# Enabled asserts even in release mode
add_definitions(-UNDEBUG)
endforeach (testsourcefile ${TEST_CASES})

View File

@@ -1,68 +0,0 @@
// If this test break the c_api doc needs to be updated
#include <tfhe.h>
#include <assert.h>
#include <stdio.h>
int main(void)
{
int ok = 0;
// Prepare the config builder for the high level API and choose which types to enable
ConfigBuilder *builder;
Config *config;
// Put the builder in a default state without any types enabled
config_builder_all_disabled(&builder);
// Enable the uint128 type using the small LWE key for encryption
config_builder_enable_default_integers_small(&builder);
// Populate the config
config_builder_build(builder, &config);
ClientKey *client_key = NULL;
ServerKey *server_key = NULL;
// Generate the keys using the config
generate_keys(config, &client_key, &server_key);
// Set the server key for the current thread
set_server_key(server_key);
FheUint128 *lhs = NULL;
FheUint128 *rhs = NULL;
FheUint128 *result = NULL;
// A 128-bit unsigned integer containing value: 20 << 64 | 10
U128 clear_lhs = { .w0 = 10, .w1 = 20 };
// A 128-bit unsigned integer containing value: 2 << 64 | 1
U128 clear_rhs = { .w0 = 1, .w1 = 2 };
ok = fhe_uint128_try_encrypt_with_client_key_u128(clear_lhs, client_key, &lhs);
assert(ok == 0);
ok = fhe_uint128_try_encrypt_with_client_key_u128(clear_rhs, client_key, &rhs);
assert(ok == 0);
// Compute the subtraction
ok = fhe_uint128_sub(lhs, rhs, &result);
assert(ok == 0);
U128 clear_result;
// Decrypt
ok = fhe_uint128_decrypt(result, client_key, &clear_result);
assert(ok == 0);
// Here the subtraction allows us to compare each word
assert(clear_result.w0 == 9);
assert(clear_result.w1 == 18);
// Destroy the ciphertexts
fhe_uint128_destroy(lhs);
fhe_uint128_destroy(rhs);
fhe_uint128_destroy(result);
// Destroy the keys
client_key_destroy(client_key);
server_key_destroy(server_key);
printf("FHE computation successful!\n");
return EXIT_SUCCESS;
}

View File

@@ -1,120 +0,0 @@
#include <tfhe.h>
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#ifdef WITH_FORWARD_COMPATIBILITY
int uint8_format_update(const ClientKey *client_key, const ServerKey *server_key) {
int ok;
FheUint8 *lhs = NULL;
FheUint8 *deserialized_lhs = NULL;
FheUint8 *result = NULL;
Buffer value_buffer = {.pointer = NULL, .length = 0};
Buffer conformant_value_buffer = {.pointer = NULL, .length = 0};
Buffer cks_buffer = {.pointer = NULL, .length = 0};
BufferView deser_view = {.pointer = NULL, .length = 0};
ClientKey *deserialized_client_key = NULL;
DynamicBuffer out_buffer = {.pointer = NULL, .length = 0};
const uint64_t max_serialization_size = UINT64_C(1) << UINT64_C(20);
uint8_t lhs_clear = 123;
ok = client_key_serialize(client_key, &cks_buffer);
assert(ok == 0);
deser_view.pointer = cks_buffer.pointer;
deser_view.length = cks_buffer.length;
ok = client_key_update_serialization_from_0_4_to_0_5(deser_view, &out_buffer);
assert(ok == 0);
destroy_dynamic_buffer(&out_buffer);
deser_view.pointer = cks_buffer.pointer;
deser_view.length = cks_buffer.length;
ok = client_key_deserialize(deser_view, &deserialized_client_key);
assert(ok == 0);
ok = fhe_uint8_try_encrypt_with_client_key_u8(lhs_clear, deserialized_client_key, &lhs);
assert(ok == 0);
ok = fhe_uint8_serialize(lhs, &value_buffer);
assert(ok == 0);
deser_view.pointer = value_buffer.pointer;
deser_view.length = value_buffer.length;
ok = fhe_uint8_update_serialization_from_0_4_to_0_5(deser_view, &out_buffer);
assert(ok == 0);
destroy_dynamic_buffer(&out_buffer);
ok = fhe_uint8_safe_serialize(lhs, &conformant_value_buffer, max_serialization_size);
assert(ok == 0);
deser_view.pointer = conformant_value_buffer.pointer;
deser_view.length = conformant_value_buffer.length;
ok = fhe_uint8_safe_update_serialization_conformant_from_0_4_to_0_5(
deser_view, max_serialization_size, server_key, &out_buffer);
assert(ok == 0);
destroy_dynamic_buffer(&out_buffer);
deser_view.pointer = value_buffer.pointer;
deser_view.length = value_buffer.length;
ok = fhe_uint8_deserialize(deser_view, &deserialized_lhs);
assert(ok == 0);
uint8_t clear;
ok = fhe_uint8_decrypt(deserialized_lhs, deserialized_client_key, &clear);
assert(ok == 0);
assert(clear == lhs_clear);
if (value_buffer.pointer != NULL) {
destroy_buffer(&value_buffer);
}
if (conformant_value_buffer.pointer != NULL) {
destroy_buffer(&conformant_value_buffer);
}
fhe_uint8_destroy(lhs);
fhe_uint8_destroy(deserialized_lhs);
fhe_uint8_destroy(result);
return ok;
}
#endif
int main(void) {
int ok = 0;
#ifdef WITH_FORWARD_COMPATIBILITY
{
ConfigBuilder *builder;
Config *config;
ok = config_builder_all_disabled(&builder);
assert(ok == 0);
ok = config_builder_enable_default_integers(&builder);
assert(ok == 0);
ok = config_builder_build(builder, &config);
assert(ok == 0);
ClientKey *client_key = NULL;
ServerKey *server_key = NULL;
PublicKey *public_key = NULL;
ok = generate_keys(config, &client_key, &server_key);
assert(ok == 0);
ok = uint8_format_update(client_key, server_key);
client_key_destroy(client_key);
public_key_destroy(public_key);
server_key_destroy(server_key);
}
#endif
return ok;
}

View File

@@ -110,134 +110,6 @@ int uint256_public_key(const ClientKey *client_key, const PublicKey *public_key)
return ok;
}
int int256_client_key(const ClientKey *client_key) {
int ok;
FheInt256 *lhs = NULL;
FheInt256 *rhs = NULL;
FheInt256 *result = NULL;
FheInt64 *cast_result = NULL;
// This is +1
I256 lhs_clear = {1, 0, 0, 0};
// This is -1
I256 rhs_clear = {UINT64_MAX, UINT64_MAX, UINT64_MAX, UINT64_MAX};
I256 result_clear = {0};
ok = fhe_int256_try_encrypt_with_client_key_i256(lhs_clear, client_key, &lhs);
assert(ok == 0);
ok = fhe_int256_try_encrypt_with_client_key_i256(rhs_clear, client_key, &rhs);
assert(ok == 0);
ok = fhe_int256_add(lhs, rhs, &result);
assert(ok == 0);
ok = fhe_int256_decrypt(result, client_key, &result_clear);
assert(ok == 0);
// We did 1 + (-1), so we expect 0
assert(result_clear.w0 == 0);
assert(result_clear.w1 == 0);
assert(result_clear.w2 == 0);
assert(result_clear.w3 == 0);
fhe_int256_destroy(result);
ok = fhe_int256_sub(lhs, rhs, &result);
assert(ok == 0);
ok = fhe_int256_decrypt(result, client_key, &result_clear);
assert(ok == 0);
// We did 1 - (-1), so we expect 2
assert(result_clear.w0 == 2);
assert(result_clear.w1 == 0);
assert(result_clear.w2 == 0);
assert(result_clear.w3 == 0);
// try some casting
ok = fhe_int256_cast_into_fhe_int64(result, &cast_result);
assert(ok == 0);
int64_t u64_clear;
ok = fhe_int64_decrypt(cast_result, client_key, &u64_clear);
assert(ok == 0);
assert(u64_clear == 2);
fhe_int256_destroy(lhs);
fhe_int256_destroy(rhs);
fhe_int256_destroy(result);
fhe_int64_destroy(cast_result);
return ok;
}
int int256_encrypt_trivial(const ClientKey *client_key) {
int ok;
FheInt256 *lhs = NULL;
FheInt256 *rhs = NULL;
FheInt256 *result = NULL;
I256 lhs_clear = {1, 2, 3, 4};
I256 rhs_clear = {5, 6, 7, 8};
I256 result_clear = {0};
ok = fhe_int256_try_encrypt_trivial_i256(lhs_clear, &lhs);
assert(ok == 0);
ok = fhe_int256_try_encrypt_trivial_i256(rhs_clear, &rhs);
assert(ok == 0);
ok = fhe_int256_add(lhs, rhs, &result);
assert(ok == 0);
ok = fhe_int256_decrypt(result, client_key, &result_clear);
assert(ok == 0);
assert(result_clear.w0 == 6);
assert(result_clear.w1 == 8);
assert(result_clear.w2 == 10);
assert(result_clear.w3 == 12);
fhe_int256_destroy(lhs);
fhe_int256_destroy(rhs);
fhe_int256_destroy(result);
return ok;
}
int int256_public_key(const ClientKey *client_key, const PublicKey *public_key) {
int ok;
FheInt256 *lhs = NULL;
FheInt256 *rhs = NULL;
FheInt256 *result = NULL;
// This is +1
I256 lhs_clear = {1, 0, 0, 0};
// This is -1
I256 rhs_clear = {UINT64_MAX, UINT64_MAX, UINT64_MAX, UINT64_MAX};
I256 result_clear = {0};
ok = fhe_int256_try_encrypt_with_public_key_i256(lhs_clear, public_key, &lhs);
assert(ok == 0);
ok = fhe_int256_try_encrypt_with_public_key_i256(rhs_clear, public_key, &rhs);
assert(ok == 0);
ok = fhe_int256_sub(lhs, rhs, &result);
assert(ok == 0);
ok = fhe_int256_decrypt(result, client_key, &result_clear);
assert(ok == 0);
// We did 1 - (-1), so we expect 2
assert(result_clear.w0 == 2);
assert(result_clear.w1 == 0);
assert(result_clear.w2 == 0);
assert(result_clear.w3 == 0);
fhe_int256_destroy(lhs);
fhe_int256_destroy(rhs);
fhe_int256_destroy(result);
return ok;
}
int main(void) {
int ok = 0;
ConfigBuilder *builder;
@@ -260,10 +132,6 @@ int main(void) {
uint256_encrypt_trivial(client_key);
uint256_public_key(client_key, public_key);
int256_client_key(client_key);
int256_encrypt_trivial(client_key);
int256_public_key(client_key, public_key);
client_key_destroy(client_key);
public_key_destroy(public_key);
server_key_destroy(server_key);

View File

@@ -78,7 +78,7 @@ int uint256_encrypt_trivial(const ClientKey *client_key) {
return ok;
}
int uint256_compact_public_key(const ClientKey *client_key,
int uint256_public_key(const ClientKey *client_key,
const CompressedCompactPublicKey *compressed_public_key) {
int ok;
CompactPublicKey *public_key = NULL;
@@ -112,7 +112,7 @@ int uint256_compact_public_key(const ClientKey *client_key,
lhs = expand_output[0];
rhs = expand_output[1];
// We can destroy the compact list
// The expanded ciphertext are independent from it
// The expanded ciphertext are independant from it
compact_fhe_uint256_list_destroy(list);
ok = fhe_uint256_sub(lhs, rhs, &result);
@@ -158,80 +158,6 @@ int uint256_compact_public_key(const ClientKey *client_key,
return ok;
}
int int32_compact_public_key(const ClientKey *client_key,
const CompressedCompactPublicKey *compressed_public_key) {
int ok;
CompactPublicKey *public_key = NULL;
FheInt32 *lhs = NULL;
FheInt32 *rhs = NULL;
FheInt32 *result = NULL;
CompactFheInt32List *list = NULL;
int32_t result_clear = 0;
int32_t clears[2] = {-9482394, 98712234};
ok = compressed_compact_public_key_decompress(compressed_public_key, &public_key);
assert(ok == 0);
// Compact list example
{
ok = compact_fhe_int32_list_try_encrypt_with_compact_public_key_i32(&clears[0], 2,
public_key, &list);
assert(ok == 0);
size_t len = 0;
ok = compact_fhe_int32_list_len(list, &len);
assert(ok == 0);
assert(len == 2);
FheInt32 *expand_output[2] = {NULL};
ok = compact_fhe_int32_list_expand(list, &expand_output[0], 2);
assert(ok == 0);
// transfer ownership
lhs = expand_output[0];
rhs = expand_output[1];
// We can destroy the compact list
// The expanded ciphertext are independent from it
compact_fhe_int32_list_destroy(list);
ok = fhe_int32_sub(lhs, rhs, &result);
assert(ok == 0);
ok = fhe_int32_decrypt(result, client_key, &result_clear);
assert(ok == 0);
assert(result_clear == clears[0] - clears[1]);
fhe_int32_destroy(lhs);
fhe_int32_destroy(rhs);
fhe_int32_destroy(result);
}
{
ok = fhe_int32_try_encrypt_with_compact_public_key_i32(clears[0], public_key, &lhs);
assert(ok == 0);
ok = fhe_int32_try_encrypt_with_compact_public_key_i32(clears[1], public_key, &rhs);
assert(ok == 0);
ok = fhe_int32_add(lhs, rhs, &result);
assert(ok == 0);
ok = fhe_int32_decrypt(result, client_key, &result_clear);
assert(ok == 0);
assert(result_clear == clears[0] + clears[1]);
fhe_int32_destroy(lhs);
fhe_int32_destroy(rhs);
fhe_int32_destroy(result);
}
compact_public_key_destroy(public_key);
return ok;
}
int main(void) {
int ok = 0;
{
@@ -254,8 +180,7 @@ int main(void) {
uint256_client_key(client_key);
uint256_encrypt_trivial(client_key);
uint256_compact_public_key(client_key, compressed_public_key);
int32_compact_public_key(client_key, compressed_public_key);
uint256_public_key(client_key, compressed_public_key);
client_key_destroy(client_key);
compressed_compact_public_key_destroy(compressed_public_key);
@@ -282,8 +207,7 @@ int main(void) {
uint256_client_key(client_key);
uint256_encrypt_trivial(client_key);
uint256_compact_public_key(client_key, compressed_public_key);
int32_compact_public_key(client_key, compressed_public_key);
uint256_public_key(client_key, compressed_public_key);
client_key_destroy(client_key);
compressed_compact_public_key_destroy(compressed_public_key);

Some files were not shown because too many files have changed in this diff Show More