mirror of
https://github.com/zama-ai/tfhe-rs.git
synced 2026-01-13 08:38:03 -05:00
Compare commits
12 Commits
cm
...
clean_smar
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ce625df1f6 | ||
|
|
5f16704fa8 | ||
|
|
d686cd86b5 | ||
|
|
a2a3f10341 | ||
|
|
d7ac3b6d15 | ||
|
|
44b9241c83 | ||
|
|
9df1725efc | ||
|
|
55d97ed794 | ||
|
|
d10c11720e | ||
|
|
f1e0742b9f | ||
|
|
e6136ca418 | ||
|
|
0805cd69e0 |
10
.github/workflows/aws_tfhe_fast_tests.yml
vendored
10
.github/workflows/aws_tfhe_fast_tests.yml
vendored
@@ -51,7 +51,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -66,10 +66,6 @@ jobs:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Run concrete-csprng tests
|
||||
run: |
|
||||
make test_concrete_csprng
|
||||
|
||||
- name: Run core tests
|
||||
run: |
|
||||
AVX512_SUPPORT=ON make test_core_crypto
|
||||
@@ -110,10 +106,6 @@ jobs:
|
||||
run: |
|
||||
make test_high_level_api
|
||||
|
||||
- name: Run safe deserialization tests
|
||||
run: |
|
||||
make test_safe_deserialization
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
continue-on-error: true
|
||||
|
||||
22
.github/workflows/aws_tfhe_integer_tests.yml
vendored
22
.github/workflows/aws_tfhe_integer_tests.yml
vendored
@@ -1,4 +1,4 @@
|
||||
name: AWS Unsigned Integer Tests on CPU
|
||||
name: AWS Integer Tests on CPU
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -23,13 +23,13 @@ on:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
description: 'Slab request ID'
|
||||
type: string
|
||||
fork_repo:
|
||||
description: "Name of forked repo as user/repo"
|
||||
description: 'Name of forked repo as user/repo'
|
||||
type: string
|
||||
fork_git_sha:
|
||||
description: "Git SHA to checkout from fork"
|
||||
description: 'Git SHA to checkout from fork'
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -65,21 +65,13 @@ jobs:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Gen Keys if required
|
||||
run: |
|
||||
make GEN_KEY_CACHE_MULTI_BIT_ONLY=TRUE gen_key_cache
|
||||
|
||||
- name: Run unsigned integer multi-bit tests
|
||||
run: |
|
||||
AVX512_SUPPORT=ON make test_unsigned_integer_multi_bit_ci
|
||||
|
||||
- name: Gen Keys if required
|
||||
run: |
|
||||
make gen_key_cache
|
||||
|
||||
- name: Run unsigned integer tests
|
||||
- name: Run integer tests
|
||||
run: |
|
||||
AVX512_SUPPORT=ON BIG_TESTS_INSTANCE=TRUE make test_unsigned_integer_ci
|
||||
BIG_TESTS_INSTANCE=TRUE make test_integer_ci
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
name: AWS Signed Integer Tests on CPU
|
||||
name: AWS Multi Bit Tests on CPU
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -23,13 +23,13 @@ on:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
description: 'Slab request ID'
|
||||
type: string
|
||||
fork_repo:
|
||||
description: "Name of forked repo as user/repo"
|
||||
description: 'Name of forked repo as user/repo'
|
||||
type: string
|
||||
fork_git_sha:
|
||||
description: "Git SHA to checkout from fork"
|
||||
description: 'Git SHA to checkout from fork'
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
@@ -50,7 +50,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -73,17 +73,9 @@ jobs:
|
||||
run: |
|
||||
make test_shortint_multi_bit_ci
|
||||
|
||||
- name: Run signed integer multi-bit tests
|
||||
- name: Run integer multi-bit tests
|
||||
run: |
|
||||
AVX512_SUPPORT=ON make test_signed_integer_multi_bit_ci
|
||||
|
||||
- name: Gen Keys if required
|
||||
run: |
|
||||
make gen_key_cache
|
||||
|
||||
- name: Run signed integer tests
|
||||
run: |
|
||||
AVX512_SUPPORT=ON BIG_TESTS_INSTANCE=TRUE make test_signed_integer_ci
|
||||
make test_integer_multi_bit_ci
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
12
.github/workflows/aws_tfhe_tests.yml
vendored
12
.github/workflows/aws_tfhe_tests.yml
vendored
@@ -50,7 +50,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
@@ -65,10 +65,6 @@ jobs:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Run concrete-csprng tests
|
||||
run: |
|
||||
make test_concrete_csprng
|
||||
|
||||
- name: Run core tests
|
||||
run: |
|
||||
AVX512_SUPPORT=ON make test_core_crypto
|
||||
@@ -100,12 +96,6 @@ jobs:
|
||||
- name: Run example tests
|
||||
run: |
|
||||
make test_examples
|
||||
make dark_market
|
||||
|
||||
- name: Run apps tests
|
||||
run: |
|
||||
make test_trivium
|
||||
make test_kreyvium
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ always() }}
|
||||
|
||||
2
.github/workflows/aws_tfhe_wasm_tests.yml
vendored
2
.github/workflows/aws_tfhe_wasm_tests.yml
vendored
@@ -50,7 +50,7 @@ jobs:
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
|
||||
14
.github/workflows/boolean_benchmark.yml
vendored
14
.github/workflows/boolean_benchmark.yml
vendored
@@ -19,14 +19,6 @@ on:
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
# This input is not used in this workflow but still mandatory since a calling workflow could
|
||||
# use it. If a triggering command include a user_inputs field, then the triggered workflow
|
||||
# must include this very input, otherwise the workflow won't be called.
|
||||
# See start_full_benchmarks.yml as example.
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -51,7 +43,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -96,13 +88,13 @@ jobs:
|
||||
--append-results
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_boolean
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
|
||||
16
.github/workflows/cargo_build.yml
vendored
16
.github/workflows/cargo_build.yml
vendored
@@ -21,26 +21,12 @@ jobs:
|
||||
fail-fast: false
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
|
||||
- name: Install and run newline linter checks
|
||||
if: matrix.os == 'ubuntu-latest'
|
||||
run: |
|
||||
wget https://github.com/fernandrone/linelint/releases/download/0.0.6/linelint-linux-amd64
|
||||
echo "16b70fb7b471d6f95cbdc0b4e5dc2b0ac9e84ba9ecdc488f7bdf13df823aca4b linelint-linux-amd64" > checksum
|
||||
sha256sum -c checksum || exit 1
|
||||
chmod +x linelint-linux-amd64
|
||||
mv linelint-linux-amd64 /usr/local/bin/linelint
|
||||
make check_newline
|
||||
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
|
||||
- name: Run pcc checks
|
||||
run: |
|
||||
make pcc
|
||||
|
||||
- name: Build concrete-csprng
|
||||
run: |
|
||||
make build_concrete_csprng
|
||||
|
||||
- name: Build Release core
|
||||
run: |
|
||||
make build_core AVX512_SUPPORT=ON
|
||||
|
||||
119
.github/workflows/code_coverage.yml
vendored
119
.github/workflows/code_coverage.yml
vendored
@@ -1,119 +0,0 @@
|
||||
name: Code Coverage
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
RUSTFLAGS: "-C target-cpu=native"
|
||||
|
||||
on:
|
||||
# Allows you to run this workflow manually from the Actions tab as an alternative.
|
||||
workflow_dispatch:
|
||||
# All the inputs are provided by Slab
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "AWS instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "AWS instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "AWS instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: 'Slab request ID'
|
||||
type: string
|
||||
fork_repo:
|
||||
description: 'Name of forked repo as user/repo'
|
||||
type: string
|
||||
fork_git_sha:
|
||||
description: 'Git SHA to checkout from fork'
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
code-coverage:
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}_${{ github.ref }}_${{ inputs.instance_image_id }}_${{ inputs.instance_type }}
|
||||
cancel-in-progress: true
|
||||
runs-on: ${{ inputs.runner_name }}
|
||||
timeout-minutes: 1080
|
||||
steps:
|
||||
# Step used for log purpose.
|
||||
- name: Instance configuration used
|
||||
run: |
|
||||
echo "ID: ${{ inputs.instance_id }}"
|
||||
echo "AMI: ${{ inputs.instance_image_id }}"
|
||||
echo "Type: ${{ inputs.instance_type }}"
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
echo "Fork repo: ${{ inputs.fork_repo }}"
|
||||
echo "Fork git sha: ${{ inputs.fork_git_sha }}"
|
||||
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
|
||||
- name: Set up home
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install latest stable
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Check for file changes
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@1c938490c880156b746568a518594309cfb3f66b
|
||||
with:
|
||||
files_yaml: |
|
||||
tfhe:
|
||||
- tfhe/src/**
|
||||
concrete_csprng:
|
||||
- concrete-csprng/src/**
|
||||
|
||||
- name: Generate Keys
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
run: |
|
||||
make GEN_KEY_CACHE_COVERAGE_ONLY=TRUE gen_key_cache
|
||||
make gen_key_cache_core_crypto
|
||||
|
||||
- name: Run coverage for core_crypto
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
run: |
|
||||
make test_core_crypto_cov AVX512_SUPPORT=ON
|
||||
|
||||
- name: Run coverage for boolean
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
run: |
|
||||
make test_boolean_cov
|
||||
|
||||
- name: Run coverage for shortint
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
run: |
|
||||
make test_shortint_cov
|
||||
|
||||
- name: Upload tfhe coverage to Codecov
|
||||
uses: codecov/codecov-action@eaaf4bedf32dbdc6b720b63067d99c4d77d6047d
|
||||
if: steps.changed-files.outputs.tfhe_any_changed == 'true'
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
directory: ./coverage/
|
||||
fail_ci_if_error: true
|
||||
files: shortint/cobertura.xml,boolean/cobertura.xml,core_crypto/cobertura.xml,core_crypto_avx512/cobertura.xml
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Code coverage finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
74
.github/workflows/csprng_randomness_testing.yml
vendored
74
.github/workflows/csprng_randomness_testing.yml
vendored
@@ -1,74 +0,0 @@
|
||||
name: CSPRNG randomness testing Workflow
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
RUSTFLAGS: "-C target-cpu=native"
|
||||
|
||||
on:
|
||||
# Allows you to run this workflow manually from the Actions tab as an alternative.
|
||||
workflow_dispatch:
|
||||
# All the inputs are provided by Slab
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "AWS instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "AWS instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "AWS instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: 'Slab request ID'
|
||||
type: string
|
||||
fork_repo:
|
||||
description: 'Name of forked repo as user/repo'
|
||||
type: string
|
||||
fork_git_sha:
|
||||
description: 'Git SHA to checkout from fork'
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
csprng-randomness-teting:
|
||||
name: CSPRNG randomness testing
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}_${{ github.ref }}_${{ inputs.instance_image_id }}_${{ inputs.instance_type }}
|
||||
cancel-in-progress: true
|
||||
runs-on: ${{ inputs.runner_name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
repository: ${{ inputs.fork_repo }}
|
||||
ref: ${{ inputs.fork_git_sha }}
|
||||
|
||||
- name: Set up home
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install latest stable
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: stable
|
||||
default: true
|
||||
|
||||
- name: Dieharder randomness test suite
|
||||
run: |
|
||||
make dieharder_csprng
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "concrete-csprng randomness check finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
8
.github/workflows/integer_benchmark.yml
vendored
8
.github/workflows/integer_benchmark.yml
vendored
@@ -44,7 +44,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -69,7 +69,7 @@ jobs:
|
||||
parse_integer_benches
|
||||
|
||||
- name: Upload csv results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_csv_integer
|
||||
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
|
||||
@@ -90,13 +90,13 @@ jobs:
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_integer
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
|
||||
37
.github/workflows/integer_full_benchmark.yml
vendored
37
.github/workflows/integer_full_benchmark.yml
vendored
@@ -19,10 +19,6 @@ on:
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -30,33 +26,8 @@ env:
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
prepare-matrix:
|
||||
name: Prepare operations matrix
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
op_flavor: ${{ steps.set_op_flavor.outputs.op_flavor }}
|
||||
steps:
|
||||
- name: Weekly benchmarks
|
||||
if: ${{ github.event.inputs.user_inputs == 'weekly_benchmarks' }}
|
||||
run: |
|
||||
echo "OP_FLAVOR=[\"default\", \"default_comp\", \"default_scalar\", \"default_scalar_comp\"]" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Quarterly benchmarks
|
||||
if: ${{ github.event.inputs.user_inputs == 'quarterly_benchmarks' }}
|
||||
run: |
|
||||
echo "OP_FLAVOR=[\"default\", \"default_comp\", \"default_scalar\", \"default_scalar_comp\", \
|
||||
\"smart\", \"smart_comp\", \"smart_scalar\", \"smart_parallelized\", \"smart_parallelized_comp\", \"smart_scalar_parallelized\", \"smart_scalar_parallelized_comp\", \
|
||||
\"unchecked\", \"unchecked_comp\", \"unchecked_scalar\", \"unchecked_scalar_comp\", \
|
||||
\"misc\"]" >> ${GITHUB_ENV}
|
||||
|
||||
- name: Set operation flavor output
|
||||
id: set_op_flavor
|
||||
run: |
|
||||
echo "op_flavor=${{ toJSON(env.OP_FLAVOR) }}" >> ${GITHUB_OUTPUT}
|
||||
|
||||
integer-benchmarks:
|
||||
name: Execute integer benchmarks for all operations flavor
|
||||
needs: prepare-matrix
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ !cancelled() }}
|
||||
continue-on-error: true
|
||||
@@ -64,7 +35,7 @@ jobs:
|
||||
max-parallel: 1
|
||||
matrix:
|
||||
command: [ integer, integer_multi_bit]
|
||||
op_flavor: ${{ fromJson(needs.prepare-matrix.outputs.op_flavor) }}
|
||||
op_flavor: [ default, default_comp, default_scalar, default_scalar_comp, smart, smart_comp, smart_scalar, smart_parallelized, smart_parallelized_comp, smart_scalar_parallelized, unchecked, unchecked_comp, unchecked_scalar, unchecked_scalar_comp, misc ]
|
||||
steps:
|
||||
- name: Instance configuration used
|
||||
run: |
|
||||
@@ -74,7 +45,7 @@ jobs:
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -96,7 +67,7 @@ jobs:
|
||||
override: true
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
@@ -120,7 +91,7 @@ jobs:
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
@@ -44,7 +44,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -69,7 +69,7 @@ jobs:
|
||||
parse_integer_benches
|
||||
|
||||
- name: Upload csv results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_csv_integer
|
||||
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
|
||||
@@ -90,13 +90,13 @@ jobs:
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_integer
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
|
||||
18
.github/workflows/m1_tests.yml
vendored
18
.github/workflows/m1_tests.yml
vendored
@@ -15,6 +15,7 @@ env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUSTFLAGS: "-C target-cpu=native"
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
CARGO_PROFILE: release_lto_off
|
||||
FAST_TESTS: "TRUE"
|
||||
|
||||
concurrency:
|
||||
@@ -27,7 +28,7 @@ jobs:
|
||||
runs-on: ["self-hosted", "m1mac"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
- uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
|
||||
- name: Install latest stable
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
@@ -39,10 +40,6 @@ jobs:
|
||||
run: |
|
||||
make pcc
|
||||
|
||||
- name: Build concrete-csprng
|
||||
run: |
|
||||
make build_concrete_csprng
|
||||
|
||||
- name: Build Release core
|
||||
run: |
|
||||
make build_core
|
||||
@@ -67,10 +64,6 @@ jobs:
|
||||
run: |
|
||||
make build_c_api
|
||||
|
||||
- name: Run concrete-csprng tests
|
||||
run: |
|
||||
make test_concrete_csprng
|
||||
|
||||
- name: Run core tests
|
||||
run: |
|
||||
make test_core_crypto
|
||||
@@ -110,9 +103,10 @@ jobs:
|
||||
run: |
|
||||
make test_shortint_multi_bit_ci
|
||||
|
||||
- name: Run integer multi bit tests
|
||||
run: |
|
||||
make test_integer_multi_bit_ci
|
||||
# # These multi bit integer tests are too slow on M1 with low core count and low RAM
|
||||
# - name: Run integer multi bit tests
|
||||
# run: |
|
||||
# make test_integer_multi_bit_ci
|
||||
|
||||
remove_label:
|
||||
name: Remove m1_test label
|
||||
|
||||
8
.github/workflows/make_release.yml
vendored
8
.github/workflows/make_release.yml
vendored
@@ -30,7 +30,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -49,7 +49,7 @@ jobs:
|
||||
|
||||
- name: Publish web package
|
||||
if: ${{ inputs.push_web_package }}
|
||||
uses: JS-DevTools/npm-publish@4b07b26a2f6e0a51846e1870223e545bae91c552
|
||||
uses: JS-DevTools/npm-publish@5a85faf05d2ade2d5b6682bfe5359915d5159c6c
|
||||
with:
|
||||
token: ${{ secrets.NPM_TOKEN }}
|
||||
package: tfhe/pkg/package.json
|
||||
@@ -65,7 +65,7 @@ jobs:
|
||||
|
||||
- name: Publish Node package
|
||||
if: ${{ inputs.push_node_package }}
|
||||
uses: JS-DevTools/npm-publish@4b07b26a2f6e0a51846e1870223e545bae91c552
|
||||
uses: JS-DevTools/npm-publish@5a85faf05d2ade2d5b6682bfe5359915d5159c6c
|
||||
with:
|
||||
token: ${{ secrets.NPM_TOKEN }}
|
||||
package: tfhe/pkg/package.json
|
||||
@@ -79,6 +79,6 @@ jobs:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "tfhe release failed: (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_MESSAGE: "Integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
# Publish new release of tfhe-rs on various platform.
|
||||
name: Publish concrete-csprng release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
dry_run:
|
||||
description: "Dry-run"
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
env:
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
publish_release:
|
||||
name: Publish concrete-csprng Release
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Publish crate.io package
|
||||
env:
|
||||
CRATES_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
|
||||
DRY_RUN: ${{ inputs.dry_run && '--dry-run' || '' }}
|
||||
run: |
|
||||
cargo publish -p concrete-csprng --token ${{ env.CRATES_TOKEN }} ${{ env.DRY_RUN }}
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "concrete-csprng release failed: (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
6
.github/workflows/parameters_check.yml
vendored
6
.github/workflows/parameters_check.yml
vendored
@@ -17,10 +17,10 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
|
||||
- name: Checkout lattice-estimator
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: malb/lattice-estimator
|
||||
path: lattice_estimator
|
||||
@@ -32,7 +32,7 @@ jobs:
|
||||
|
||||
- name: Collect parameters
|
||||
run: |
|
||||
CARGO_PROFILE=devo make write_params_to_file
|
||||
make write_params_to_file
|
||||
|
||||
- name: Perform security check
|
||||
run: |
|
||||
|
||||
14
.github/workflows/pbs_benchmark.yml
vendored
14
.github/workflows/pbs_benchmark.yml
vendored
@@ -19,14 +19,6 @@ on:
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
# This input is not used in this workflow but still mandatory since a calling workflow could
|
||||
# use it. If a triggering command include a user_inputs field, then the triggered workflow
|
||||
# must include this very input, otherwise the workflow won't be called.
|
||||
# See start_full_benchmarks.yml as example.
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -51,7 +43,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -86,13 +78,13 @@ jobs:
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_pbs
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
|
||||
6
.github/workflows/shortint_benchmark.yml
vendored
6
.github/workflows/shortint_benchmark.yml
vendored
@@ -43,7 +43,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -88,13 +88,13 @@ jobs:
|
||||
--append-results
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_shortint
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
|
||||
14
.github/workflows/shortint_full_benchmark.yml
vendored
14
.github/workflows/shortint_full_benchmark.yml
vendored
@@ -19,14 +19,6 @@ on:
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
# This input is not used in this workflow but still mandatory since a calling workflow could
|
||||
# use it. If a triggering command include a user_inputs field, then the triggered workflow
|
||||
# must include this very input, otherwise the workflow won't be called.
|
||||
# See start_full_benchmarks.yml as example.
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -51,7 +43,7 @@ jobs:
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -73,7 +65,7 @@ jobs:
|
||||
override: true
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
@@ -112,7 +104,7 @@ jobs:
|
||||
--append-results
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_shortint_${{ matrix.op_flavor }}
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
129
.github/workflows/signed_integer_benchmark.yml
vendored
129
.github/workflows/signed_integer_benchmark.yml
vendored
@@ -1,129 +0,0 @@
|
||||
# Run signed integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
|
||||
name: Signed Integer benchmarks
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "Instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "Instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "Instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
|
||||
PARSE_INTEGER_BENCH_CSV_FILE: tfhe_rs_integer_benches_${{ github.sha }}.csv
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
run-integer-benchmarks:
|
||||
name: Execute signed integer benchmarks in EC2
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ !cancelled() }}
|
||||
steps:
|
||||
- name: Instance configuration used
|
||||
run: |
|
||||
echo "IDs: ${{ inputs.instance_id }}"
|
||||
echo "AMI: ${{ inputs.instance_image_id }}"
|
||||
echo "Type: ${{ inputs.instance_type }}"
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
|
||||
- name: Get benchmark date
|
||||
run: |
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up home
|
||||
# "Install rust" step require root user to have a HOME directory which is not set.
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install rust
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
|
||||
- name: Run benchmarks with AVX512
|
||||
run: |
|
||||
make AVX512_SUPPORT=ON FAST_BENCH=TRUE bench_signed_integer
|
||||
|
||||
- name: Parse benchmarks to csv
|
||||
run: |
|
||||
make PARSE_INTEGER_BENCH_CSV_FILE=${{ env.PARSE_INTEGER_BENCH_CSV_FILE }} \
|
||||
parse_integer_benches
|
||||
|
||||
- name: Upload csv results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_csv_integer
|
||||
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
|
||||
|
||||
- name: Parse results
|
||||
run: |
|
||||
COMMIT_DATE="$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})"
|
||||
COMMIT_HASH="$(git describe --tags --dirty)"
|
||||
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
|
||||
--database tfhe_rs \
|
||||
--hardware ${{ inputs.instance_type }} \
|
||||
--project-version "${COMMIT_HASH}" \
|
||||
--branch ${{ github.ref_name }} \
|
||||
--commit-date "${COMMIT_DATE}" \
|
||||
--bench-date "${{ env.BENCH_DATE }}" \
|
||||
--walk-subdirs \
|
||||
--name-suffix avx512 \
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_integer
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
|
||||
|
||||
- name: Send data to Slab
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Computing HMac on results file"
|
||||
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
|
||||
echo "Sending results to Slab..."
|
||||
curl -v -k \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Slab-Repository: ${{ github.repository }}" \
|
||||
-H "X-Slab-Command: store_data_v2" \
|
||||
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
|
||||
-d @${{ env.RESULTS_FILENAME }} \
|
||||
${{ secrets.SLAB_URL }}
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Signed integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
133
.github/workflows/signed_integer_full_benchmark.yml
vendored
133
.github/workflows/signed_integer_full_benchmark.yml
vendored
@@ -1,133 +0,0 @@
|
||||
# Run all signed integer benchmarks on an AWS instance and return parsed results to Slab CI bot.
|
||||
name: Signed Integer full benchmarks
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "Instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "Instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "Instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
integer-benchmarks:
|
||||
name: Execute signed integer benchmarks for all operations flavor
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ !cancelled() }}
|
||||
continue-on-error: true
|
||||
strategy:
|
||||
max-parallel: 1
|
||||
matrix:
|
||||
command: [ integer, integer_multi_bit ]
|
||||
op_flavor: [ default, default_comp, default_scalar, default_scalar_comp,
|
||||
unchecked, unchecked_comp, unchecked_scalar, unchecked_scalar_comp ]
|
||||
steps:
|
||||
- name: Instance configuration used
|
||||
run: |
|
||||
echo "IDs: ${{ inputs.instance_id }}"
|
||||
echo "AMI: ${{ inputs.instance_image_id }}"
|
||||
echo "Type: ${{ inputs.instance_type }}"
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Get benchmark details
|
||||
run: |
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
echo "COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})" >> "${GITHUB_ENV}"
|
||||
echo "COMMIT_HASH=$(git describe --tags --dirty)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Set up home
|
||||
# "Install rust" step require root user to have a HOME directory which is not set.
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install rust
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
|
||||
|
||||
- name: Run benchmarks with AVX512
|
||||
run: |
|
||||
make AVX512_SUPPORT=ON BENCH_OP_FLAVOR=${{ matrix.op_flavor }} bench_signed_${{ matrix.command }}
|
||||
|
||||
- name: Parse results
|
||||
run: |
|
||||
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
|
||||
--database tfhe_rs \
|
||||
--hardware ${{ inputs.instance_type }} \
|
||||
--project-version "${{ env.COMMIT_HASH }}" \
|
||||
--branch ${{ github.ref_name }} \
|
||||
--commit-date "${{ env.COMMIT_DATE }}" \
|
||||
--bench-date "${{ env.BENCH_DATE }}" \
|
||||
--walk-subdirs \
|
||||
--name-suffix avx512 \
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_${{ matrix.command }}_${{ matrix.op_flavor }}
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Send data to Slab
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Computing HMac on results file"
|
||||
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
|
||||
echo "Sending results to Slab..."
|
||||
curl -v -k \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Slab-Repository: ${{ github.repository }}" \
|
||||
-H "X-Slab-Command: store_data_v2" \
|
||||
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
|
||||
-d @${{ env.RESULTS_FILENAME }} \
|
||||
${{ secrets.SLAB_URL }}
|
||||
|
||||
slack-notification:
|
||||
name: Slack Notification
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ failure() }}
|
||||
needs: integer-benchmarks
|
||||
steps:
|
||||
- name: Notify
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Signed integer full benchmarks failed. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
@@ -1,129 +0,0 @@
|
||||
# Run signed integer benchmarks with multi-bit cryptographic parameters on an AWS instance and return parsed results to Slab CI bot.
|
||||
name: Signed Integer Multi-bit benchmarks
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
instance_id:
|
||||
description: "Instance ID"
|
||||
type: string
|
||||
instance_image_id:
|
||||
description: "Instance AMI ID"
|
||||
type: string
|
||||
instance_type:
|
||||
description: "Instance product type"
|
||||
type: string
|
||||
runner_name:
|
||||
description: "Action runner name"
|
||||
type: string
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
|
||||
PARSE_INTEGER_BENCH_CSV_FILE: tfhe_rs_integer_benches_${{ github.sha }}.csv
|
||||
ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
|
||||
jobs:
|
||||
run-integer-benchmarks:
|
||||
name: Execute signed integer multi-bit benchmarks in EC2
|
||||
runs-on: ${{ github.event.inputs.runner_name }}
|
||||
if: ${{ !cancelled() }}
|
||||
steps:
|
||||
- name: Instance configuration used
|
||||
run: |
|
||||
echo "IDs: ${{ inputs.instance_id }}"
|
||||
echo "AMI: ${{ inputs.instance_image_id }}"
|
||||
echo "Type: ${{ inputs.instance_type }}"
|
||||
echo "Request ID: ${{ inputs.request_id }}"
|
||||
|
||||
- name: Get benchmark date
|
||||
run: |
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up home
|
||||
# "Install rust" step require root user to have a HOME directory which is not set.
|
||||
run: |
|
||||
echo "HOME=/home/ubuntu" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Install rust
|
||||
uses: actions-rs/toolchain@16499b5e05bf2e26879000db0c1d13f7e13fa3af
|
||||
with:
|
||||
toolchain: nightly
|
||||
override: true
|
||||
|
||||
- name: Run multi-bit benchmarks with AVX512
|
||||
run: |
|
||||
make AVX512_SUPPORT=ON FAST_BENCH=TRUE bench_signed_integer_multi_bit
|
||||
|
||||
- name: Parse benchmarks to csv
|
||||
run: |
|
||||
make PARSE_INTEGER_BENCH_CSV_FILE=${{ env.PARSE_INTEGER_BENCH_CSV_FILE }} \
|
||||
parse_integer_benches
|
||||
|
||||
- name: Upload csv results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_csv_integer
|
||||
path: ${{ env.PARSE_INTEGER_BENCH_CSV_FILE }}
|
||||
|
||||
- name: Parse results
|
||||
run: |
|
||||
COMMIT_DATE="$(git --no-pager show -s --format=%cd --date=iso8601-strict ${{ github.sha }})"
|
||||
COMMIT_HASH="$(git describe --tags --dirty)"
|
||||
python3 ./ci/benchmark_parser.py target/criterion ${{ env.RESULTS_FILENAME }} \
|
||||
--database tfhe_rs \
|
||||
--hardware ${{ inputs.instance_type }} \
|
||||
--project-version "${COMMIT_HASH}" \
|
||||
--branch ${{ github.ref_name }} \
|
||||
--commit-date "${COMMIT_DATE}" \
|
||||
--bench-date "${{ env.BENCH_DATE }}" \
|
||||
--walk-subdirs \
|
||||
--name-suffix avx512 \
|
||||
--throughput
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
with:
|
||||
name: ${{ github.sha }}_integer
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
|
||||
|
||||
- name: Send data to Slab
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Computing HMac on results file"
|
||||
SIGNATURE="$(slab/scripts/hmac_calculator.sh ${{ env.RESULTS_FILENAME }} '${{ secrets.JOB_SECRET }}')"
|
||||
echo "Sending results to Slab..."
|
||||
curl -v -k \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Slab-Repository: ${{ github.repository }}" \
|
||||
-H "X-Slab-Command: store_data_v2" \
|
||||
-H "X-Hub-Signature-256: sha256=${SIGNATURE}" \
|
||||
-d @${{ env.RESULTS_FILENAME }} \
|
||||
${{ secrets.SLAB_URL }}
|
||||
|
||||
- name: Slack Notification
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
|
||||
env:
|
||||
SLACK_COLOR: ${{ job.status }}
|
||||
SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
|
||||
SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
|
||||
SLACK_MESSAGE: "Signed integer benchmarks failed. (${{ env.ACTION_RUN_URL }})"
|
||||
SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
35
.github/workflows/start_benchmarks.yml
vendored
35
.github/workflows/start_benchmarks.yml
vendored
@@ -20,18 +20,10 @@ on:
|
||||
description: "Run integer benches"
|
||||
type: boolean
|
||||
default: true
|
||||
signed_integer_bench:
|
||||
description: "Run signed integer benches"
|
||||
type: boolean
|
||||
default: true
|
||||
integer_multi_bit_bench:
|
||||
description: "Run integer multi bit benches"
|
||||
type: boolean
|
||||
default: true
|
||||
signed_integer_multi_bit_bench:
|
||||
description: "Run signed integer multi bit benches"
|
||||
type: boolean
|
||||
default: true
|
||||
pbs_bench:
|
||||
description: "Run PBS benches"
|
||||
type: boolean
|
||||
@@ -46,20 +38,17 @@ jobs:
|
||||
if: ${{ (github.event_name == 'push' && github.repository == 'zama-ai/tfhe-rs') || github.event_name == 'workflow_dispatch' }}
|
||||
strategy:
|
||||
matrix:
|
||||
command: [ boolean_bench, shortint_bench,
|
||||
integer_bench, integer_multi_bit_bench,
|
||||
signed_integer_bench, signed_integer_multi_bit_bench,
|
||||
pbs_bench, wasm_client_bench ]
|
||||
command: [boolean_bench, shortint_bench, integer_bench, integer_multi_bit_bench, pbs_bench, wasm_client_bench]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Check for file changes
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@1c938490c880156b746568a518594309cfb3f66b
|
||||
uses: tj-actions/changed-files@2f7246cb26e8bb6709b6cbfc1fec7febfe82e96a
|
||||
with:
|
||||
files_yaml: |
|
||||
common_benches:
|
||||
@@ -80,23 +69,13 @@ jobs:
|
||||
integer_bench:
|
||||
- tfhe/src/shortint/**
|
||||
- tfhe/src/integer/**
|
||||
- tfhe/benches/integer/bench.rs
|
||||
- tfhe/benches/integer/**
|
||||
- .github/workflows/integer_benchmark.yml
|
||||
integer_multi_bit_bench:
|
||||
- tfhe/src/shortint/**
|
||||
- tfhe/src/integer/**
|
||||
- tfhe/benches/integer/bench.rs
|
||||
- .github/workflows/integer_multi_bit_benchmark.yml
|
||||
signed_integer_bench:
|
||||
- tfhe/src/shortint/**
|
||||
- tfhe/src/integer/**
|
||||
- tfhe/benches/integer/signed_bench.rs
|
||||
- .github/workflows/signed_integer_benchmark.yml
|
||||
signed_integer_multi_bit_bench:
|
||||
- tfhe/src/shortint/**
|
||||
- tfhe/src/integer/**
|
||||
- tfhe/benches/integer/signed_bench.rs
|
||||
- .github/workflows/signed_integer_multi_bit_benchmark.yml
|
||||
- tfhe/benches/integer/**
|
||||
- .github/workflows/integer_benchmark.yml
|
||||
pbs_bench:
|
||||
- tfhe/src/core_crypto/**
|
||||
- tfhe/benches/core_crypto/**
|
||||
@@ -106,7 +85,7 @@ jobs:
|
||||
- .github/workflows/wasm_client_benchmark.yml
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
|
||||
33
.github/workflows/start_full_benchmarks.yml
vendored
33
.github/workflows/start_full_benchmarks.yml
vendored
@@ -3,57 +3,34 @@ name: Start full suite benchmarks
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Weekly benchmarks will be triggered each Saturday at 1a.m.
|
||||
# Job will be triggered each Saturday at 1a.m.
|
||||
- cron: '0 1 * * 6'
|
||||
# Quarterly benchmarks will be triggered right before end of quarter, the 25th of the current month at 4a.m.
|
||||
# These benchmarks are far longer to execute hence the reason to run them only four time a year.
|
||||
- cron: '0 4 25 MAR,JUN,SEP,DEC *'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
benchmark_type:
|
||||
description: 'Benchmark type'
|
||||
required: true
|
||||
default: 'weekly'
|
||||
type: choice
|
||||
options:
|
||||
- weekly
|
||||
- quarterly
|
||||
|
||||
jobs:
|
||||
start-benchmarks:
|
||||
if: ${{ (github.event_name == 'schedule' && github.repository == 'zama-ai/tfhe-rs') || github.event_name == 'workflow_dispatch' }}
|
||||
strategy:
|
||||
matrix:
|
||||
command: [ boolean_bench, shortint_full_bench, integer_full_bench,
|
||||
signed_integer_full_bench, pbs_bench, wasm_client_bench ]
|
||||
command: [ boolean_bench, shortint_full_bench, integer_full_bench, pbs_bench, wasm_client_bench ]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout tfhe-rs
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
token: ${{ secrets.CONCRETE_ACTIONS_TOKEN }}
|
||||
|
||||
- name: Set benchmarks type as weekly
|
||||
if: (github.event_name == 'workflow_dispatch' && inputs.benchmark_type == 'weekly') || github.event.schedule == '0 1 * * 6'
|
||||
run: |
|
||||
echo "BENCH_TYPE=weekly_benchmarks" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Set benchmarks type as quarterly
|
||||
if: (github.event_name == 'workflow_dispatch' && inputs.benchmark_type == 'quarterly') || github.event.schedule == '0 4 25 MAR,JUN,SEP,DEC *'
|
||||
run: |
|
||||
echo "BENCH_TYPE=quarterly_benchmarks" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Start AWS job in Slab
|
||||
shell: bash
|
||||
run: |
|
||||
echo -n '{"command": "${{ matrix.command }}", "git_ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "user_inputs": "${{ env.BENCH_TYPE }}"}' > command.json
|
||||
echo -n '{"command": "${{ matrix.command }}", "git_ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' > command.json
|
||||
SIGNATURE="$(slab/scripts/hmac_calculator.sh command.json '${{ secrets.JOB_SECRET }}')"
|
||||
curl -v -k \
|
||||
--fail-with-body \
|
||||
|
||||
4
.github/workflows/sync_on_push.yml
vendored
4
.github/workflows/sync_on_push.yml
vendored
@@ -13,11 +13,11 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Save repo
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: repo-archive
|
||||
path: '.'
|
||||
|
||||
26
.github/workflows/trigger_aws_tests_on_pr.yml
vendored
26
.github/workflows/trigger_aws_tests_on_pr.yml
vendored
@@ -12,16 +12,6 @@ jobs:
|
||||
permissions:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Get current labels
|
||||
uses: snnaplab/get-labels-action@f426df40304808ace3b5282d4f036515f7609576
|
||||
|
||||
- name: Remove approved label
|
||||
if: ${{ github.event_name == 'pull_request' && contains(fromJSON(env.LABELS), 'approved') }}
|
||||
uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
labels: approved
|
||||
|
||||
- name: Launch fast tests
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
uses: mshick/add-pr-comment@a65df5f64fc741e91c59b8359a4bc56e57aaf5b1
|
||||
@@ -30,17 +20,8 @@ jobs:
|
||||
message: |
|
||||
@slab-ci cpu_fast_test
|
||||
|
||||
- name: Add approved label
|
||||
uses: actions-ecosystem/action-add-labels@18f1af5e3544586314bbe15c0273249c770b2daf
|
||||
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && !contains(fromJSON(env.LABELS), 'approved') }}
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
labels: approved
|
||||
|
||||
# PR label 'approved' presence is checked to avoid running the full test suite several times
|
||||
# in case of multiple approvals without new commits in between.
|
||||
- name: Launch full tests suite
|
||||
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' && !contains(fromJSON(env.LABELS), 'approved') }}
|
||||
if: ${{ github.event_name == 'pull_request_review' && github.event.review.state == 'approved' }}
|
||||
uses: mshick/add-pr-comment@a65df5f64fc741e91c59b8359a4bc56e57aaf5b1
|
||||
with:
|
||||
allow-repeats: true
|
||||
@@ -48,7 +29,6 @@ jobs:
|
||||
Pull Request has been approved :tada:
|
||||
Launching full test suite...
|
||||
@slab-ci cpu_test
|
||||
@slab-ci cpu_unsigned_integer_test
|
||||
@slab-ci cpu_signed_integer_test
|
||||
@slab-ci cpu_integer_test
|
||||
@slab-ci cpu_multi_bit_test
|
||||
@slab-ci cpu_wasm_test
|
||||
@slab-ci csprng_randomness_testing
|
||||
|
||||
14
.github/workflows/wasm_client_benchmark.yml
vendored
14
.github/workflows/wasm_client_benchmark.yml
vendored
@@ -19,14 +19,6 @@ on:
|
||||
request_id:
|
||||
description: "Slab request ID"
|
||||
type: string
|
||||
# This input is not used in this workflow but still mandatory since a calling workflow could
|
||||
# use it. If a triggering command include a user_inputs field, then the triggered workflow
|
||||
# must include this very input, otherwise the workflow won't be called.
|
||||
# See start_full_benchmarks.yml as example.
|
||||
user_inputs:
|
||||
description: "Type of benchmarks to run"
|
||||
type: string
|
||||
default: "weekly_benchmarks"
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
@@ -51,7 +43,7 @@ jobs:
|
||||
echo "BENCH_DATE=$(date --iso-8601=seconds)" >> "${GITHUB_ENV}"
|
||||
|
||||
- name: Checkout tfhe-rs repo with tags
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -97,13 +89,13 @@ jobs:
|
||||
--append-results
|
||||
|
||||
- name: Upload parsed results artifact
|
||||
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32
|
||||
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce
|
||||
with:
|
||||
name: ${{ github.sha }}_wasm
|
||||
path: ${{ env.RESULTS_FILENAME }}
|
||||
|
||||
- name: Checkout Slab repo
|
||||
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
|
||||
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
|
||||
with:
|
||||
repository: zama-ai/slab
|
||||
path: slab
|
||||
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@@ -3,9 +3,9 @@ target/
|
||||
.vscode/
|
||||
|
||||
# Path we use for internal-keycache during tests
|
||||
/keys/
|
||||
./keys/
|
||||
# In case of symlinked keys
|
||||
/keys
|
||||
./keys
|
||||
|
||||
**/Cargo.lock
|
||||
**/*.bin
|
||||
@@ -13,9 +13,3 @@ target/
|
||||
# Some of our bench outputs
|
||||
/tfhe/benchmarks_parameters
|
||||
**/*.csv
|
||||
|
||||
# dieharder run log
|
||||
dieharder_run.log
|
||||
|
||||
# Coverage reports
|
||||
/coverage/
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
ignore:
|
||||
- .git
|
||||
- target
|
||||
- tfhe/benchmarks_parameters
|
||||
- tfhe/web_wasm_parallel_tests/node_modules
|
||||
- tfhe/web_wasm_parallel_tests/dist
|
||||
- keys
|
||||
- coverage
|
||||
|
||||
rules:
|
||||
# checks if file ends in a newline character
|
||||
end-of-file:
|
||||
enable: true
|
||||
single-new-line: true
|
||||
@@ -1,6 +1,6 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = ["tfhe", "tasks", "apps/trivium", "concrete-csprng", "concrete-float"]
|
||||
members = ["tfhe", "tasks", "apps/trivium"]
|
||||
|
||||
[profile.bench]
|
||||
lto = "fat"
|
||||
|
||||
402
Makefile
402
Makefile
@@ -3,30 +3,19 @@ OS:=$(shell uname)
|
||||
RS_CHECK_TOOLCHAIN:=$(shell cat toolchain.txt | tr -d '\n')
|
||||
CARGO_RS_CHECK_TOOLCHAIN:=+$(RS_CHECK_TOOLCHAIN)
|
||||
TARGET_ARCH_FEATURE:=$(shell ./scripts/get_arch_feature.sh)
|
||||
RS_BUILD_TOOLCHAIN:=stable
|
||||
RS_BUILD_TOOLCHAIN:=$(shell \
|
||||
( (echo $(TARGET_ARCH_FEATURE) | grep -q x86) && echo stable) || echo $(RS_CHECK_TOOLCHAIN))
|
||||
CARGO_RS_BUILD_TOOLCHAIN:=+$(RS_BUILD_TOOLCHAIN)
|
||||
CARGO_PROFILE?=release
|
||||
MIN_RUST_VERSION:=$(shell grep '^rust-version[[:space:]]*=' tfhe/Cargo.toml | cut -d '=' -f 2 | xargs)
|
||||
MIN_RUST_VERSION:=$(shell grep rust-version tfhe/Cargo.toml | cut -d '=' -f 2 | xargs)
|
||||
AVX512_SUPPORT?=OFF
|
||||
WASM_RUSTFLAGS:=
|
||||
BIG_TESTS_INSTANCE?=FALSE
|
||||
GEN_KEY_CACHE_MULTI_BIT_ONLY?=FALSE
|
||||
GEN_KEY_CACHE_COVERAGE_ONLY?=FALSE
|
||||
PARSE_INTEGER_BENCH_CSV_FILE?=tfhe_rs_integer_benches.csv
|
||||
FAST_TESTS?=FALSE
|
||||
FAST_BENCH?=FALSE
|
||||
BENCH_OP_FLAVOR?=DEFAULT
|
||||
NODE_VERSION=20
|
||||
# sed: -n, do not print input stream, -e means a script/expression
|
||||
# 1,/version/ indicates from the first line, to the line matching version at the start of the line
|
||||
# p indicates to print, so we keep only the start of the Cargo.toml until we hit the first version
|
||||
# entry which should be the version of tfhe
|
||||
TFHE_CURRENT_VERSION:=\
|
||||
$(shell sed -n -e '1,/^version/p' tfhe/Cargo.toml | \
|
||||
grep '^version[[:space:]]*=' | cut -d '=' -f 2 | xargs)
|
||||
# Cargo has a hard time distinguishing between our package from the workspace and a package that
|
||||
# could be a dependency, so we build an unambiguous spec here
|
||||
TFHE_SPEC:=tfhe@$(TFHE_CURRENT_VERSION)
|
||||
# This is done to avoid forgetting it, we still precise the RUSTFLAGS in the commands to be able to
|
||||
# copy paste the command in the terminal and change them if required without forgetting the flags
|
||||
export RUSTFLAGS?=-C target-cpu=native
|
||||
@@ -43,32 +32,10 @@ else
|
||||
MULTI_BIT_ONLY=
|
||||
endif
|
||||
|
||||
ifeq ($(GEN_KEY_CACHE_COVERAGE_ONLY),TRUE)
|
||||
COVERAGE_ONLY=--coverage-only
|
||||
else
|
||||
COVERAGE_ONLY=
|
||||
endif
|
||||
|
||||
# Variables used only for regex_engine example
|
||||
REGEX_STRING?=''
|
||||
REGEX_PATTERN?=''
|
||||
|
||||
# Exclude these files from coverage reports
|
||||
define COVERAGE_EXCLUDED_FILES
|
||||
--exclude-files apps/trivium/src/trivium/* \
|
||||
--exclude-files apps/trivium/src/kreyvium/* \
|
||||
--exclude-files apps/trivium/src/static_deque/* \
|
||||
--exclude-files apps/trivium/src/trans_ciphering/* \
|
||||
--exclude-files tasks/src/* \
|
||||
--exclude-files tfhe/benches/boolean/* \
|
||||
--exclude-files tfhe/benches/core_crypto/* \
|
||||
--exclude-files tfhe/benches/shortint/* \
|
||||
--exclude-files tfhe/benches/integer/* \
|
||||
--exclude-files tfhe/benches/* \
|
||||
--exclude-files tfhe/examples/regex_engine/* \
|
||||
--exclude-files tfhe/examples/utilities/*
|
||||
endef
|
||||
|
||||
.PHONY: rs_check_toolchain # Echo the rust toolchain used for checks
|
||||
rs_check_toolchain:
|
||||
@echo $(RS_CHECK_TOOLCHAIN)
|
||||
@@ -110,174 +77,136 @@ install_wasm_pack: install_rs_build_toolchain
|
||||
install_node:
|
||||
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.3/install.sh | $(SHELL)
|
||||
source ~/.bashrc
|
||||
$(SHELL) -i -c 'nvm install $(NODE_VERSION)' || \
|
||||
$(SHELL) -i -c 'nvm install node' || \
|
||||
( echo "Unable to install node, unknown error." && exit 1 )
|
||||
|
||||
.PHONY: install_dieharder # Install dieharder for apt distributions or macOS
|
||||
install_dieharder:
|
||||
@dieharder -h > /dev/null 2>&1 || \
|
||||
if [[ "$(OS)" == "Linux" ]]; then \
|
||||
sudo apt update && sudo apt install -y dieharder; \
|
||||
elif [[ "$(OS)" == "Darwin" ]]; then\
|
||||
brew install dieharder; \
|
||||
fi || ( echo "Unable to install dieharder, unknown error." && exit 1 )
|
||||
|
||||
.PHONY: install_tarpaulin # Install tarpaulin to perform code coverage
|
||||
install_tarpaulin: install_rs_build_toolchain
|
||||
@cargo tarpaulin --version > /dev/null 2>&1 || \
|
||||
cargo $(CARGO_RS_BUILD_TOOLCHAIN) install cargo-tarpaulin --locked || \
|
||||
( echo "Unable to install cargo tarpaulin, unknown error." && exit 1 )
|
||||
|
||||
.PHONY: check_linelint_installed # Check if linelint newline linter is installed
|
||||
check_linelint_installed:
|
||||
@printf "\n" | linelint - > /dev/null 2>&1 || \
|
||||
( echo "Unable to locate linelint. Try installing it: https://github.com/fernandrone/linelint/releases" && exit 1 )
|
||||
|
||||
.PHONY: fmt # Format rust code
|
||||
fmt: install_rs_check_toolchain
|
||||
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt
|
||||
|
||||
.PHONY: check_fmt # Check rust code format
|
||||
.PHONT: check_fmt # Check rust code format
|
||||
check_fmt: install_rs_check_toolchain
|
||||
cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" fmt --check
|
||||
|
||||
.PHONY: fix_newline # Fix newline at end of file issues to be UNIX compliant
|
||||
fix_newline: check_linelint_installed
|
||||
linelint -a .
|
||||
|
||||
.PHONY: check_newline # Check for newline at end of file to be UNIX compliant
|
||||
check_newline: check_linelint_installed
|
||||
linelint .
|
||||
|
||||
.PHONY: clippy_float # Run clippy lints on core_crypto with and without experimental features
|
||||
clippy_float: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
-p concrete-float -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_core # Run clippy lints on core_crypto with and without experimental features
|
||||
clippy_core: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
--features=$(TARGET_ARCH_FEATURE) \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_boolean # Run clippy lints enabling the boolean features
|
||||
clippy_boolean: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_shortint # Run clippy lints enabling the shortint features
|
||||
clippy_shortint: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_integer # Run clippy lints enabling the integer features
|
||||
clippy_integer: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy # Run clippy lints enabling the boolean, shortint, integer
|
||||
clippy: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_c_api # Run clippy lints enabling the boolean, shortint and the C API
|
||||
clippy_c_api: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_js_wasm_api # Run clippy lints enabling the boolean, shortint, integer and the js wasm API
|
||||
clippy_js_wasm_api: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
--features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_tasks # Run clippy lints on helper tasks crate.
|
||||
clippy_tasks:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
-p tasks -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_trivium # Run clippy lints on Trivium app
|
||||
clippy_trivium: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy \
|
||||
-p tfhe-trivium -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_all_targets # Run clippy lints on all targets (benches, examples, etc.)
|
||||
clippy_all_targets:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,safe-deserialization \
|
||||
-p $(TFHE_SPEC) -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_concrete_csprng # Run clippy lints on concrete-csprng
|
||||
clippy_concrete_csprng:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo "$(CARGO_RS_CHECK_TOOLCHAIN)" clippy --all-targets \
|
||||
--features=$(TARGET_ARCH_FEATURE) \
|
||||
-p concrete-csprng -- --no-deps -D warnings
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
|
||||
-p tfhe -- --no-deps -D warnings
|
||||
|
||||
.PHONY: clippy_all # Run all clippy targets
|
||||
clippy_all: clippy clippy_boolean clippy_shortint clippy_integer clippy_all_targets clippy_c_api \
|
||||
clippy_js_wasm_api clippy_tasks clippy_core clippy_concrete_csprng clippy_trivium
|
||||
clippy_js_wasm_api clippy_tasks clippy_core
|
||||
|
||||
.PHONY: clippy_fast # Run main clippy targets
|
||||
clippy_fast: clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core \
|
||||
clippy_concrete_csprng
|
||||
clippy_fast: clippy clippy_all_targets clippy_c_api clippy_js_wasm_api clippy_tasks clippy_core
|
||||
|
||||
.PHONY: gen_key_cache # Run the script to generate keys and cache them for shortint tests
|
||||
gen_key_cache: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) run --profile $(CARGO_PROFILE) \
|
||||
--example generates_test_keys \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p tfhe -- \
|
||||
$(MULTI_BIT_ONLY)
|
||||
|
||||
.PHONY: build_core # Build core_crypto without experimental features
|
||||
build_core: install_rs_build_toolchain install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p $(TFHE_SPEC)
|
||||
--features=$(TARGET_ARCH_FEATURE) -p tfhe
|
||||
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),$(AVX512_FEATURE) -p $(TFHE_SPEC); \
|
||||
--features=$(TARGET_ARCH_FEATURE),$(AVX512_FEATURE) -p tfhe; \
|
||||
fi
|
||||
|
||||
.PHONY: build_core_experimental # Build core_crypto with experimental features
|
||||
build_core_experimental: install_rs_build_toolchain install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental -p $(TFHE_SPEC)
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental -p tfhe
|
||||
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p $(TFHE_SPEC); \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p tfhe; \
|
||||
fi
|
||||
|
||||
.PHONY: build_boolean # Build with boolean enabled
|
||||
build_boolean: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean -p $(TFHE_SPEC) --all-targets
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean -p tfhe --all-targets
|
||||
|
||||
.PHONY: build_shortint # Build with shortint enabled
|
||||
build_shortint: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint -p $(TFHE_SPEC) --all-targets
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint -p tfhe --all-targets
|
||||
|
||||
.PHONY: build_integer # Build with integer enabled
|
||||
build_integer: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer -p $(TFHE_SPEC) --all-targets
|
||||
--features=$(TARGET_ARCH_FEATURE),integer -p tfhe --all-targets
|
||||
|
||||
.PHONY: build_tfhe_full # Build with boolean, shortint and integer enabled
|
||||
build_tfhe_full: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer -p $(TFHE_SPEC) --all-targets
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer -p tfhe --all-targets
|
||||
|
||||
.PHONY: build_c_api # Build the C API for boolean, shortint and integer
|
||||
build_c_api: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization \
|
||||
-p $(TFHE_SPEC)
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api, \
|
||||
-p tfhe
|
||||
|
||||
.PHONY: build_c_api_experimental_deterministic_fft # Build the C API for boolean, shortint and integer with experimental deterministic FFT
|
||||
build_c_api_experimental_deterministic_fft: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization,experimental-force_fft_algo_dif4 \
|
||||
-p $(TFHE_SPEC)
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,experimental-force_fft_algo_dif4 \
|
||||
-p tfhe
|
||||
|
||||
.PHONY: build_web_js_api # Build the js API targeting the web browser
|
||||
build_web_js_api: install_rs_build_toolchain install_wasm_pack
|
||||
@@ -302,53 +231,25 @@ build_node_js_api: install_rs_build_toolchain install_wasm_pack
|
||||
wasm-pack build --release --target=nodejs \
|
||||
-- --features=boolean-client-js-wasm-api,shortint-client-js-wasm-api,integer-client-js-wasm-api
|
||||
|
||||
.PHONY: build_concrete_csprng # Build concrete_csprng
|
||||
build_concrete_csprng: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) build --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-csprng --all-targets
|
||||
|
||||
.PHONY: test_core_crypto # Run the tests of the core_crypto module including experimental ones
|
||||
test_core_crypto: install_rs_build_toolchain install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental -p $(TFHE_SPEC) -- core_crypto::
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental -p tfhe -- core_crypto::
|
||||
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p $(TFHE_SPEC) -- core_crypto::; \
|
||||
fi
|
||||
|
||||
.PHONY: test_core_crypto_cov # Run the tests of the core_crypto module with code coverage
|
||||
test_core_crypto_cov: install_rs_build_toolchain install_rs_check_toolchain install_tarpaulin
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) tarpaulin --profile $(CARGO_PROFILE) \
|
||||
--out xml --output-dir coverage/core_crypto --line --engine llvm --timeout 500 \
|
||||
--implicit-test-threads $(COVERAGE_EXCLUDED_FILES) \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,internal-keycache,__coverage \
|
||||
-p $(TFHE_SPEC) -- core_crypto::
|
||||
@if [[ "$(AVX512_SUPPORT)" == "ON" ]]; then \
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) tarpaulin --profile $(CARGO_PROFILE) \
|
||||
--out xml --output-dir coverage/core_crypto_avx512 --line --engine llvm --timeout 500 \
|
||||
--implicit-test-threads $(COVERAGE_EXCLUDED_FILES) \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,internal-keycache,__coverage,$(AVX512_FEATURE) \
|
||||
-p $(TFHE_SPEC) -- core_crypto::; \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,$(AVX512_FEATURE) -p tfhe -- core_crypto::; \
|
||||
fi
|
||||
|
||||
.PHONY: test_boolean # Run the tests of the boolean module
|
||||
test_boolean: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean -p $(TFHE_SPEC) -- boolean::
|
||||
|
||||
.PHONY: test_boolean_cov # Run the tests of the boolean module with code coverage
|
||||
test_boolean_cov: install_rs_check_toolchain install_tarpaulin
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) tarpaulin --profile $(CARGO_PROFILE) \
|
||||
--out xml --output-dir coverage/boolean --line --engine llvm --timeout 500 \
|
||||
$(COVERAGE_EXCLUDED_FILES) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,internal-keycache,__coverage \
|
||||
-p $(TFHE_SPEC) -- boolean::
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean -p tfhe -- boolean::
|
||||
|
||||
.PHONY: test_c_api_rs # Run the rust tests for the C API
|
||||
test_c_api_rs: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api,safe-deserialization \
|
||||
-p $(TFHE_SPEC) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean-c-api,shortint-c-api,high-level-c-api \
|
||||
-p tfhe \
|
||||
c_api
|
||||
|
||||
.PHONY: test_c_api_c # Run the C tests for the C API
|
||||
@@ -375,82 +276,37 @@ test_shortint_multi_bit_ci: install_rs_build_toolchain install_cargo_nextest
|
||||
.PHONY: test_shortint # Run all the tests for shortint
|
||||
test_shortint: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p $(TFHE_SPEC) -- shortint::
|
||||
|
||||
.PHONY: test_shortint_cov # Run the tests of the shortint module with code coverage
|
||||
test_shortint_cov: install_rs_check_toolchain install_tarpaulin
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) tarpaulin --profile $(CARGO_PROFILE) \
|
||||
--out xml --output-dir coverage/shortint --line --engine llvm --timeout 500 \
|
||||
$(COVERAGE_EXCLUDED_FILES) \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,__coverage \
|
||||
-p $(TFHE_SPEC) -- shortint::
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache -p tfhe -- shortint::
|
||||
|
||||
.PHONY: test_integer_ci # Run the tests for integer ci
|
||||
test_integer_ci: install_rs_check_toolchain install_cargo_nextest
|
||||
test_integer_ci: install_rs_build_toolchain install_cargo_nextest
|
||||
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
|
||||
FAST_TESTS="$(FAST_TESTS)" \
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --avx512-support "$(AVX512_SUPPORT)"
|
||||
|
||||
.PHONY: test_unsigned_integer_ci # Run the tests for unsigned integer ci
|
||||
test_unsigned_integer_ci: install_rs_check_toolchain install_cargo_nextest
|
||||
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
|
||||
FAST_TESTS="$(FAST_TESTS)" \
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --avx512-support "$(AVX512_SUPPORT)" \
|
||||
--unsigned-only
|
||||
|
||||
.PHONY: test_signed_integer_ci # Run the tests for signed integer ci
|
||||
test_signed_integer_ci: install_rs_check_toolchain install_cargo_nextest
|
||||
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
|
||||
FAST_TESTS="$(FAST_TESTS)" \
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --avx512-support "$(AVX512_SUPPORT)" \
|
||||
--signed-only
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)"
|
||||
|
||||
.PHONY: test_integer_multi_bit_ci # Run the tests for integer ci running only multibit tests
|
||||
test_integer_multi_bit_ci: install_rs_check_toolchain install_cargo_nextest
|
||||
test_integer_multi_bit_ci: install_rs_build_toolchain install_cargo_nextest
|
||||
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
|
||||
FAST_TESTS="$(FAST_TESTS)" \
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --multi-bit --avx512-support "$(AVX512_SUPPORT)"
|
||||
|
||||
.PHONY: test_unsigned_integer_multi_bit_ci # Run the tests for nsigned integer ci running only multibit tests
|
||||
test_unsigned_integer_multi_bit_ci: install_rs_check_toolchain install_cargo_nextest
|
||||
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
|
||||
FAST_TESTS="$(FAST_TESTS)" \
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --multi-bit --avx512-support "$(AVX512_SUPPORT)" \
|
||||
--unsigned-only
|
||||
|
||||
.PHONY: test_signed_integer_multi_bit_ci # Run the tests for nsigned integer ci running only multibit tests
|
||||
test_signed_integer_multi_bit_ci: install_rs_check_toolchain install_cargo_nextest
|
||||
BIG_TESTS_INSTANCE="$(BIG_TESTS_INSTANCE)" \
|
||||
FAST_TESTS="$(FAST_TESTS)" \
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_CHECK_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --multi-bit --avx512-support "$(AVX512_SUPPORT)" \
|
||||
--signed-only
|
||||
|
||||
.PHONY: test_safe_deserialization # Run the tests for safe deserialization
|
||||
test_safe_deserialization: install_rs_build_toolchain install_cargo_nextest
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache,safe-deserialization -p $(TFHE_SPEC) -- safe_deserialization::
|
||||
./scripts/integer-tests.sh --rust-toolchain $(CARGO_RS_BUILD_TOOLCHAIN) \
|
||||
--cargo-profile "$(CARGO_PROFILE)" --multi-bit
|
||||
|
||||
.PHONY: test_integer # Run all the tests for integer
|
||||
test_integer: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache -p $(TFHE_SPEC) -- integer::
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache -p tfhe -- integer::
|
||||
|
||||
.PHONY: test_high_level_api # Run all the tests for high_level_api
|
||||
test_high_level_api: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p $(TFHE_SPEC) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p tfhe \
|
||||
-- high_level_api::
|
||||
|
||||
.PHONY: test_user_doc # Run tests from the .md documentation
|
||||
test_user_doc: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) --doc \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p $(TFHE_SPEC) \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer,internal-keycache -p tfhe \
|
||||
-- test_user_docs::
|
||||
|
||||
.PHONY: test_regex_engine # Run tests for regex_engine example
|
||||
@@ -471,70 +327,14 @@ test_examples: test_sha256_bool test_regex_engine
|
||||
.PHONY: test_trivium # Run tests for trivium
|
||||
test_trivium: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
-p tfhe-trivium -- --test-threads=1 trivium::
|
||||
trivium --features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
|
||||
-- --test-threads=1
|
||||
|
||||
.PHONY: test_kreyvium # Run tests for kreyvium
|
||||
test_kreyvium: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
-p tfhe-trivium -- --test-threads=1 kreyvium::
|
||||
|
||||
.PHONY: test_concrete_csprng # Run concrete-csprng tests
|
||||
test_concrete_csprng:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-csprng
|
||||
|
||||
.PHONY: test_float # Run minifloat bivariate test
|
||||
test_float: test_float_add test_float_sub test_float_mul test_float_div test_float_cos test_float_sin test_float_relu test_float_sigmoid test_minifloat
|
||||
|
||||
.PHONY: test_minifloat # Run minifloat bivariate test
|
||||
test_minifloat:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint -p tfhe float_wopbs_bivariate -- --nocapture
|
||||
|
||||
.PHONY: test_float_cos # Run floating points cosine test
|
||||
test_float_cos:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::float_cos" -- --exact --nocapture
|
||||
|
||||
.PHONY: test_float_sin # Run floating points sine test
|
||||
test_float_sin:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::float_sin" -- --exact --nocapture
|
||||
|
||||
.PHONY: test_float_mul # Run floating points multiplication test
|
||||
test_float_mul:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::test_float_mul" -- --exact --nocapture
|
||||
|
||||
.PHONY: test_float_add # Run floating points addition test
|
||||
test_float_add:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::test_float_add" -- --exact --nocapture
|
||||
|
||||
.PHONY: test_float_sub # Run floating points subtraction test
|
||||
test_float_sub:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::test_float_sub" -- --exact --nocapture
|
||||
|
||||
.PHONY: test_float_div # Run floating points division test
|
||||
test_float_div:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::test_float_div" -- --exact --nocapture
|
||||
|
||||
.PHONY: test_float_relu # Run floating points relu test
|
||||
test_float_relu:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::test_float_relu" -- --exact --nocapture
|
||||
|
||||
.PHONY: test_float_sigmoid # Run floating points sigmoid test
|
||||
test_float_sigmoid:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::test_float_sigmoid" -- --exact --nocapture
|
||||
|
||||
.PHONY: test_float_depth_test # Run floating points depth test
|
||||
test_float_depth_test:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE) -p concrete-float "server_key::tests::depth_test_parallelized" -- --exact --nocapture
|
||||
kreyvium --features=$(TARGET_ARCH_FEATURE),boolean,shortint,integer \
|
||||
-- --test-threads=1
|
||||
|
||||
.PHONY: doc # Build rust doc
|
||||
doc: install_rs_check_toolchain
|
||||
@@ -566,18 +366,18 @@ format_doc_latex:
|
||||
.PHONY: check_compile_tests # Build tests in debug without running them
|
||||
check_compile_tests:
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --no-run \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,boolean,shortint,integer,internal-keycache,safe-deserialization \
|
||||
-p $(TFHE_SPEC)
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,boolean,shortint,integer,internal-keycache \
|
||||
-p tfhe
|
||||
|
||||
@if [[ "$(OS)" == "Linux" || "$(OS)" == "Darwin" ]]; then \
|
||||
"$(MAKE)" build_c_api && \
|
||||
"$(MAKE)" build_c_api; \
|
||||
./scripts/c_api_tests.sh --build-only; \
|
||||
fi
|
||||
|
||||
.PHONY: build_nodejs_test_docker # Build a docker image with tools to run nodejs tests for wasm API
|
||||
build_nodejs_test_docker:
|
||||
DOCKER_BUILDKIT=1 docker build --build-arg RUST_TOOLCHAIN="$(RS_BUILD_TOOLCHAIN)" \
|
||||
-f docker/Dockerfile.wasm_tests --build-arg NODE_VERSION=$(NODE_VERSION) -t tfhe-wasm-tests .
|
||||
-f docker/Dockerfile.wasm_tests -t tfhe-wasm-tests .
|
||||
|
||||
.PHONY: test_nodejs_wasm_api_in_docker # Run tests for the nodejs on wasm API in a docker container
|
||||
test_nodejs_wasm_api_in_docker: build_nodejs_test_docker
|
||||
@@ -601,8 +401,7 @@ test_web_js_api_parallel: build_web_js_api_parallel
|
||||
.PHONY: ci_test_web_js_api_parallel # Run tests for the web wasm api
|
||||
ci_test_web_js_api_parallel: build_web_js_api_parallel
|
||||
source ~/.nvm/nvm.sh && \
|
||||
nvm install $(NODE_VERSION) && \
|
||||
nvm use $(NODE_VERSION) && \
|
||||
nvm use node && \
|
||||
$(MAKE) -C tfhe/web_wasm_parallel_tests test-ci
|
||||
|
||||
.PHONY: no_tfhe_typo # Check we did not invert the h and f in tfhe
|
||||
@@ -613,50 +412,31 @@ no_tfhe_typo:
|
||||
no_dbg_log:
|
||||
@./scripts/no_dbg_calls.sh
|
||||
|
||||
.PHONY: dieharder_csprng # Run the dieharder test suite on our CSPRNG implementation
|
||||
dieharder_csprng: install_dieharder build_concrete_csprng
|
||||
./scripts/dieharder_test.sh
|
||||
|
||||
#
|
||||
# Benchmarks
|
||||
#
|
||||
|
||||
.PHONY: bench_integer # Run benchmarks for unsigned integer
|
||||
.PHONY: bench_integer # Run benchmarks for integer
|
||||
bench_integer: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench integer-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC) --
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p tfhe --
|
||||
|
||||
.PHONY: bench_integer_multi_bit # Run benchmarks for unsigned integer using multi-bit parameters
|
||||
.PHONY: bench_integer_multi_bit # Run benchmarks for integer using multi-bit parameters
|
||||
bench_integer_multi_bit: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=MULTI_BIT \
|
||||
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench integer-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC) --
|
||||
|
||||
.PHONY: bench_signed_integer # Run benchmarks for signed integer
|
||||
bench_signed_integer: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench integer-signed-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC) --
|
||||
|
||||
.PHONY: bench_signed_integer_multi_bit # Run benchmarks for signed integer using multi-bit parameters
|
||||
bench_signed_integer_multi_bit: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_TYPE=MULTI_BIT \
|
||||
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) __TFHE_RS_FAST_BENCH=$(FAST_BENCH) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench integer-signed-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC) --
|
||||
--features=$(TARGET_ARCH_FEATURE),integer,internal-keycache,$(AVX512_FEATURE) -p tfhe --
|
||||
|
||||
.PHONY: bench_shortint # Run benchmarks for shortint
|
||||
bench_shortint: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" __TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench shortint-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p tfhe
|
||||
|
||||
.PHONY: bench_shortint_multi_bit # Run benchmarks for shortint using multi-bit parameters
|
||||
bench_shortint_multi_bit: install_rs_check_toolchain
|
||||
@@ -664,20 +444,20 @@ bench_shortint_multi_bit: install_rs_check_toolchain
|
||||
__TFHE_RS_BENCH_OP_FLAVOR=$(BENCH_OP_FLAVOR) \
|
||||
cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench shortint-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC) --
|
||||
--features=$(TARGET_ARCH_FEATURE),shortint,internal-keycache,$(AVX512_FEATURE) -p tfhe --
|
||||
|
||||
|
||||
.PHONY: bench_boolean # Run benchmarks for boolean
|
||||
bench_boolean: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench boolean-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,internal-keycache,$(AVX512_FEATURE) -p tfhe
|
||||
|
||||
.PHONY: bench_pbs # Run benchmarks for PBS
|
||||
bench_pbs: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench pbs-bench \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache,$(AVX512_FEATURE) -p $(TFHE_SPEC)
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache,$(AVX512_FEATURE) -p tfhe
|
||||
|
||||
.PHONY: bench_web_js_api_parallel # Run benchmarks for the web wasm api
|
||||
bench_web_js_api_parallel: build_web_js_api_parallel
|
||||
@@ -689,53 +469,9 @@ ci_bench_web_js_api_parallel: build_web_js_api_parallel
|
||||
nvm use node && \
|
||||
$(MAKE) -C tfhe/web_wasm_parallel_tests bench-ci
|
||||
|
||||
.PHONY: bench_float # Run benchmarks for the floating points
|
||||
bench_float: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench float-bench
|
||||
|
||||
.PHONY: bench_float_8bit # Run benchmarks for the floating points
|
||||
bench_float_8bit: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench float-bench -- PARAM_8
|
||||
|
||||
|
||||
.PHONY: bench_float_16bit # Run benchmarks for the floating points
|
||||
bench_float_16bit: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench float-bench -- PARAM_16
|
||||
|
||||
|
||||
.PHONY: bench_float_32bit # Run benchmarks for the floating points
|
||||
bench_float_32bit: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench float-bench -- PARAM_32
|
||||
|
||||
.PHONY: bench_float_64bit # Run benchmarks for the floating points
|
||||
bench_float_64bit: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench float-bench -- PARAM_64
|
||||
|
||||
.PHONY: bench_minifloat # Run benchmarks for Wopbs floating points
|
||||
bench_minifloat: install_rs_check_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench \
|
||||
--bench float-wopbs-bench
|
||||
|
||||
#
|
||||
# Utility tools
|
||||
#
|
||||
.PHONY: gen_key_cache # Run the script to generate keys and cache them for shortint tests
|
||||
gen_key_cache: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) run --profile $(CARGO_PROFILE) \
|
||||
--example generates_test_keys \
|
||||
--features=$(TARGET_ARCH_FEATURE),boolean,shortint,internal-keycache -- \
|
||||
$(MULTI_BIT_ONLY) $(COVERAGE_ONLY)
|
||||
|
||||
.PHONY: gen_key_cache_core_crypto # Run function to generate keys and cache them for core_crypto tests
|
||||
gen_key_cache_core_crypto: install_rs_build_toolchain
|
||||
RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_BUILD_TOOLCHAIN) test --tests --profile $(CARGO_PROFILE) \
|
||||
--features=$(TARGET_ARCH_FEATURE),experimental,internal-keycache -p $(TFHE_SPEC) -- --nocapture \
|
||||
core_crypto::keycache::generate_keys
|
||||
|
||||
.PHONY: measure_hlapi_compact_pk_ct_sizes # Measure sizes of public keys and ciphertext for high-level API
|
||||
measure_hlapi_compact_pk_ct_sizes: install_rs_check_toolchain
|
||||
@@ -805,7 +541,7 @@ pcc: no_tfhe_typo no_dbg_log check_fmt lint_doc clippy_all check_compile_tests
|
||||
fpcc: no_tfhe_typo no_dbg_log check_fmt lint_doc clippy_fast check_compile_tests
|
||||
|
||||
.PHONY: conformance # Automatically fix problems that can be fixed
|
||||
conformance: fix_newline fmt
|
||||
conformance: fmt
|
||||
|
||||
.PHONY: help # Generate list of targets with descriptions
|
||||
help:
|
||||
|
||||
299
README.md
299
README.md
@@ -1,160 +1,177 @@
|
||||
# Artifact:TFHE Gets Real: an Efficient and Flexible Homomorphic Floating-Point Arithmetic
|
||||
<p align="center">
|
||||
<!-- product name logo -->
|
||||
<img width=600 src="https://user-images.githubusercontent.com/5758427/231206749-8f146b97-3c5a-4201-8388-3ffa88580415.png">
|
||||
</p>
|
||||
<hr/>
|
||||
<p align="center">
|
||||
<a href="https://docs.zama.ai/tfhe-rs"> 📒 Read documentation</a> | <a href="https://zama.ai/community"> 💛 Community support</a>
|
||||
</p>
|
||||
<p align="center">
|
||||
<!-- Version badge using shields.io -->
|
||||
<a href="https://github.com/zama-ai/tfhe-rs/releases">
|
||||
<img src="https://img.shields.io/github/v/release/zama-ai/tfhe-rs?style=flat-square">
|
||||
</a>
|
||||
<!-- Zama Bounty Program -->
|
||||
<a href="https://github.com/zama-ai/bounty-program">
|
||||
<img src="https://img.shields.io/badge/Contribute-Zama%20Bounty%20Program-yellow?style=flat-square">
|
||||
</a>
|
||||
</p>
|
||||
<hr/>
|
||||
|
||||
|
||||
## Description
|
||||
**TFHE-rs** is a pure Rust implementation of TFHE for boolean and integer
|
||||
arithmetics over encrypted data. It includes:
|
||||
- a **Rust** API
|
||||
- a **C** API
|
||||
- and a **client-side WASM** API
|
||||
|
||||
**TFHE-rs** is meant for developers and researchers who want full control over
|
||||
what they can do with TFHE, while not having to worry about the low level
|
||||
implementation. The goal is to have a stable, simple, high-performance, and
|
||||
production-ready library for all the advanced features of TFHE.
|
||||
|
||||
In what follows, we provide instructions on how to run the benchmarks from the paper entitled **TFHE Gets Real: An Efficient and Flexible Homomorphic Floating-Point Arithmetic**.
|
||||
In particular, the benchmarks presented in **Table 5**, **Table 6**, **Table 7**, and the experiments shown in **Table 8** can be easily reproduced using this code. The implementation of the techniques described in the aforementioned paper has been integrated into the **TFHE-rs** library, version 0.5.0. The modified or added source files are organized into two different paths.
|
||||
## Getting Started
|
||||
The steps to run a first example are described below.
|
||||
|
||||
The Minifloats (Section 3.1) are located in *tfhe/src/float-wopbs*
|
||||
- Test files are located in *tfhe/src/float_wopbs/server_key/tests.rs*
|
||||
- Benchmarks are located in *tfhe/benches/float_wopbs/bench.rs*
|
||||
### Cargo.toml configuration
|
||||
To use the latest version of `TFHE-rs` in your project, you first need to add it as a dependency in your `Cargo.toml`:
|
||||
|
||||
+ For x86_64-based machines running Unix-like OSes:
|
||||
|
||||
The homomorphic floating points (Section 3.2) are located in *tfhe/concrete-float/*
|
||||
- Test files are located *tfhe/concrete-float/src/server_key/tests.rs*
|
||||
- Benchmarks are located in *tfhe/concrete-float/benches/bench.rs*
|
||||
|
||||
|
||||
## Dependencies
|
||||
|
||||
Tested on Linux and Mac OS with Rust version >= 1.80 (see [here](https://www.rust-lang.org/tools/install) a guide to install Rust).
|
||||
Complete list of dependencies and a guide on how to install TFHE-rs can be found in the online documentation [here](https://docs.zama.ai/tfhe-rs/0.5-3/getting-started/installation) or in the local file [here](./README_TFHE-rs.md).
|
||||
|
||||
## How to run benchmarks
|
||||
At the root of the project (i.e., in the TFHE-rs folder), enter the following commands to run the benchmarks:
|
||||
|
||||
- ```make bench_minifloat```: returns the timings associated to the Minifloats (**Table 6**).
|
||||
- ```make bench_float```: returns the timings associated to the HFP (**Table 5**, **Table 7**).
|
||||
These benchmarks first launch the parallelized and then the sequential experiments.
|
||||
This outputs the timings depending on the input precision.
|
||||
**This takes more than 6 hours to run**.
|
||||
|
||||
To run benchmarks for a specific precision over homomorphic floating points, here are the dedicated commands:
|
||||
- ```make bench_float_8bit```: Runs benchmarks for only 8-bit floating point *(around 15 min)*.
|
||||
- ```make bench_float_16bit```: Runs benchmarks for only 16-bit floating point *(around 30 min)*.
|
||||
- ```make bench_float_32bit```: Runs benchmarks for only 32-bit floating point *(around 1h40)*.
|
||||
- ```make bench_float_64bit```: Runs benchmarks for only 64-bit floating point *(around 6h30)*.
|
||||
|
||||
|
||||
We recall that the benchmarks were performed on AWS using an **m6i.metal** instance with an Intel Xeon 8375C (Ice Lake) processor running at 3.5 GHz, 128 vCPUs, and 512 GiB of memory.
|
||||
|
||||
### Understanding Benchmark Output (Criterion.rs)
|
||||
|
||||
This project uses [Criterion.rs](https://docs.rs/criterion/latest/criterion/) for benchmarking. Criterion is a powerful and statistically robust benchmarking framework for Rust, and it may produce outputs that are unfamiliar at first glance. This section explains how to interpret them.
|
||||
|
||||
#### Sample Output Structure
|
||||
|
||||
A typical benchmark result looks like this:
|
||||
|
||||
```
|
||||
test_float time: [53.2 µs 54.0 µs 54.8 µs]
|
||||
change: [+0.2% +1.0% +1.8%] (p = 0.002)
|
||||
Found 3 outliers among 100 measurements (3.00%)
|
||||
3 (3.00%) high mild
|
||||
```toml
|
||||
tfhe = { version = "*", features = ["boolean", "shortint", "integer", "x86_64-unix"] }
|
||||
```
|
||||
|
||||
**Here's what this means:**
|
||||
+ For Apple Silicon or aarch64-based machines running Unix-like OSes:
|
||||
|
||||
- `time: [low est. median high est.]`: The estimated execution time of the function.
|
||||
- `change`: The performance change compared to a previous run (if available).
|
||||
- `outliers`: Some runs deviated from the typical time. Criterion detects and accounts for these using statistical methods.
|
||||
|
||||
---
|
||||
|
||||
#### Common Warnings and What They Mean
|
||||
|
||||
##### `Found X outliers among Y measurements`
|
||||
|
||||
Criterion runs each benchmark many times (default: 100) to get statistically significant results.
|
||||
An *outlier* is a run that was significantly faster or slower than the others.
|
||||
|
||||
- **Why does this happen?** Often, it's due to **other processes on the machine** (e.g., background services, OS interrupts, or CPU scheduling) affecting performance temporarily.
|
||||
- **Why it doesn't invalidate results:** Criterion uses statistical techniques to minimize the impact of these outliers when estimating performance.
|
||||
- **Best practice to reduce outliers:** Run the benchmarks on a **freshly rebooted machine**, with as few background processes as possible. Ideally, let the system idle for a minute after boot to stabilize before running benchmarks.
|
||||
|
||||
##### `Unable to complete 100 samples in 5.0s.`
|
||||
|
||||
The benchmark took longer than the expected 5 seconds.
|
||||
This is merely a warning indicating that the full set of 100 samples could not be collected within the default 5-second measurement window.
|
||||
|
||||
- **No action is required**: Criterion will still proceed to run all 100 samples, and the results remain statistically valid.
|
||||
- **Why the warning appears**: It's there to inform you that benchmarking is taking longer than expected and to help you tune settings if needed.
|
||||
- **Optional**: If you're constrained by time (e.g., running in CI), you can:
|
||||
- Reduce the sample size (e.g., to 10 or 20 samples).
|
||||
- Or increase the measurement time using:
|
||||
```bash
|
||||
cargo bench -- --measurement-time 30
|
||||
```
|
||||
|
||||
## How to run the tests
|
||||
### MiniFloats
|
||||
|
||||
To run the tests related to the **minifloats**, run the following command:
|
||||
- ```make test_minifloat```: Runs a bivariate operation between two minifloats.
|
||||
|
||||
|
||||
The **minifloat** test is available in the file *tfhe/src/float_wopbs/server_key/tests.rs*.
|
||||
|
||||
|
||||
|
||||
### Homomorphic Floating Points
|
||||
At the root of the project (i.e., in the TFHE-rs folder), enter the following commands to run the tests per operation on the **homomorphic floating points**:
|
||||
- ```make test_float_add```: Runs a 32-bit floating-point addition with two random inputs.
|
||||
- ```make test_float_sub```: Runs a 32-bit floating-point subtraction with two random inputs.
|
||||
- ```make test_float_mul```: Runs a 32-bit floating-point multiplication with two random inputs.
|
||||
- ```make test_float_div```: Runs a 32-bit floating-point division with two random inputs.
|
||||
- ```make test_float_cos```: Runs the experiment from **Table 8** with a random input value.
|
||||
- ```make test_float_sin```: Runs the experiment from **Table 8** with a random input value.
|
||||
- ```make test_float_relu```: Runs a 32-bit floating-point relu with a random input.
|
||||
- ```make test_float_sigmoid```: Runs a 32-bit floating-point sigmoid with a random input.
|
||||
- ```make test_float```: Runs all previous tests for operations on 32-bit floating-points.
|
||||
- ```make test_float_depth_test```: This command runs the following experiment:
|
||||
- **Step 1**: Create 3 blocks, each composed of a clear 32-bit floating point, a clear 64-bit floating point, and a 32-bit homomorphic floating point.
|
||||
- **Step 2**: Choose two blocks randomly among the 3 blocks and randomly select a parallelized operation (addition, subtraction, or multiplication).
|
||||
- **Step 3**: Compute the selected operation between the two selected blocks and store the result randomly in one of the two selected blocks.
|
||||
(The operation is performed respectively between the two 64-bit floating points, the two 32-bit floating points, and homomorphically between the two 32-bit homomorphic floating points.)
|
||||
- Repeat Steps 2 and 3 for 50 iterations.
|
||||
- To avoid reaching + or - infinity, or **NaN**, when the clear 64-bit floating point reaches a fixed bound, compute a multiplication to rescale the value close to 1.
|
||||
This operation is also performed homomorphically for the encrypted data. This test takes several minutes.
|
||||
|
||||
The tests are located in the file *tfhe/concrete-float/src/server_key/tests.rs*.
|
||||
|
||||
Due to the representation being close to, but not exactly the same as, a given representation, the obtained result is not identical to the one obtained in clear.
|
||||
To consider a test as "passed", we accept a difference of less than 0.1% compared to the 64-bit floating-point clear results.
|
||||
Note that using 8 or 16-bit homomorphic floating points might return errors due to a lack of precision and due to the comparisons with clear 64-bit floating points.
|
||||
|
||||
In each test, the different results are presented in the following format:
|
||||
```
|
||||
--------------------
|
||||
"Name":
|
||||
|
||||
Result :
|
||||
Clear 32-bits:
|
||||
Clear 64-bits:
|
||||
|
||||
--------------------
|
||||
```toml
|
||||
tfhe = { version = "*", features = ["boolean", "shortint", "integer", "aarch64-unix"] }
|
||||
```
|
||||
where ```name``` stands for the name of the ciphertext or the name of the operation, result always corresponds to the decryption of a homomorphic floating point, and Clear ``` 32-bits``` and Clear ``` 64-bits``` correspond to the clear floating-point witness.
|
||||
|
||||
All tests in *tfhe/concrete-float/src/server_key/tests.rs* are conducted for 32-bit floating-point precision, as it provides the best ratio between execution time and precision.
|
||||
To change the parameter set used, the parameters in the following ``` const ``` must be uncommented (lines 79 to 87 in the file *tfhe/concrete-float/src/server_key/tests.rs*).
|
||||
Note: users with ARM devices must use `TFHE-rs` by compiling using the `nightly` toolchain.
|
||||
|
||||
|
||||
```rust
|
||||
const PARAMS: [(&str, Parameters); 1] =
|
||||
[
|
||||
//named_param!(PARAM_FP_64_BITS),
|
||||
named_param!(PARAM_FP_32_BITS),
|
||||
//named_param!(PARAM_FP_16_BITS),
|
||||
//named_param!(PARAM_FP_8_BITS),
|
||||
];
|
||||
+ For x86_64-based machines with the [`rdseed instruction`](https://en.wikipedia.org/wiki/RDRAND)
|
||||
running Windows:
|
||||
|
||||
```toml
|
||||
tfhe = { version = "*", features = ["boolean", "shortint", "integer", "x86_64"] }
|
||||
```
|
||||
|
||||
Note that the number in ``` [(\&str, Parameters); 1] ``` should correspond to the number of tested parameters, e.g., if another parameter sets is uncommented, this line becomes: ``` [(\&str, Parameters); 2] ```.
|
||||
The parameter ```PARAM_X``` corresponds to the parameters used in **Table 5**, and ```PARAM_TCHES_X``` corresponds to the parameters used in **Table 7**.
|
||||
Note: aarch64-based machines are not yet supported for Windows as it's currently missing an entropy source to be able to seed the [CSPRNGs](https://en.wikipedia.org/wiki/Cryptographically_secure_pseudorandom_number_generator) used in TFHE-rs
|
||||
|
||||
|
||||
## A simple example
|
||||
|
||||
Here is a full example:
|
||||
|
||||
``` rust
|
||||
use tfhe::prelude::*;
|
||||
use tfhe::{generate_keys, set_server_key, ConfigBuilder, FheUint32, FheUint8};
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Basic configuration to use homomorphic integers
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
|
||||
// Key generation
|
||||
let (client_key, server_keys) = generate_keys(config);
|
||||
|
||||
let clear_a = 1344u32;
|
||||
let clear_b = 5u32;
|
||||
let clear_c = 7u8;
|
||||
|
||||
// Encrypting the input data using the (private) client_key
|
||||
// FheUint32: Encrypted equivalent to u32
|
||||
let mut encrypted_a = FheUint32::try_encrypt(clear_a, &client_key)?;
|
||||
let encrypted_b = FheUint32::try_encrypt(clear_b, &client_key)?;
|
||||
|
||||
// FheUint8: Encrypted equivalent to u8
|
||||
let encrypted_c = FheUint8::try_encrypt(clear_c, &client_key)?;
|
||||
|
||||
// On the server side:
|
||||
set_server_key(server_keys);
|
||||
|
||||
// Clear equivalent computations: 1344 * 8 = 10752
|
||||
let encrypted_res_mul = &encrypted_a * &encrypted_b;
|
||||
|
||||
// Clear equivalent computations: 1344 >> 8 = 42
|
||||
encrypted_a = &encrypted_res_mul >> &encrypted_b;
|
||||
|
||||
// Clear equivalent computations: let casted_a = a as u8;
|
||||
let casted_a: FheUint8 = encrypted_a.cast_into();
|
||||
|
||||
// Clear equivalent computations: min(42, 7) = 7
|
||||
let encrypted_res_min = &casted_a.min(&encrypted_c);
|
||||
|
||||
// Operation between clear and encrypted data:
|
||||
// Clear equivalent computations: 7 & 1 = 1
|
||||
let encrypted_res = encrypted_res_min & 1_u8;
|
||||
|
||||
// Decrypting on the client side:
|
||||
let clear_res: u8 = encrypted_res.decrypt(&client_key);
|
||||
assert_eq!(clear_res, 1_u8);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
To run this code, use the following command:
|
||||
<p align="center"> <code> cargo run --release </code> </p>
|
||||
|
||||
Note that when running code that uses `tfhe-rs`, it is highly recommended
|
||||
to run in release mode with cargo's `--release` flag to have the best performances possible,
|
||||
|
||||
|
||||
## Contributing
|
||||
|
||||
There are two ways to contribute to TFHE-rs:
|
||||
|
||||
- you can open issues to report bugs or typos, or to suggest new ideas
|
||||
- you can ask to become an official contributor by emailing [hello@zama.ai](mailto:hello@zama.ai).
|
||||
(becoming an approved contributor involves signing our Contributor License Agreement (CLA))
|
||||
|
||||
Only approved contributors can send pull requests, so please make sure to get in touch before you do!
|
||||
|
||||
## Credits
|
||||
|
||||
This library uses several dependencies and we would like to thank the contributors of those
|
||||
libraries.
|
||||
|
||||
## Need support?
|
||||
<a target="_blank" href="https://community.zama.ai">
|
||||
<img src="https://user-images.githubusercontent.com/5758427/231115030-21195b55-2629-4c01-9809-be5059243999.png">
|
||||
</a>
|
||||
|
||||
## Citing TFHE-rs
|
||||
|
||||
To cite TFHE-rs in academic papers, please use the following entry:
|
||||
|
||||
```text
|
||||
@Misc{TFHE-rs,
|
||||
title={{TFHE-rs: A Pure Rust Implementation of the TFHE Scheme for Boolean and Integer Arithmetics Over Encrypted Data}},
|
||||
author={Zama},
|
||||
year={2022},
|
||||
note={\url{https://github.com/zama-ai/tfhe-rs}},
|
||||
}
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
This software is distributed under the BSD-3-Clause-Clear license. If you have any questions,
|
||||
please contact us at `hello@zama.ai`.
|
||||
|
||||
## Disclaimers
|
||||
|
||||
### Security Estimation
|
||||
|
||||
Security estimations are done using the
|
||||
[Lattice Estimator](https://github.com/malb/lattice-estimator)
|
||||
with `red_cost_model = reduction.RC.BDGL16`.
|
||||
|
||||
When a new update is published in the Lattice Estimator, we update parameters accordingly.
|
||||
|
||||
### Side-Channel Attacks
|
||||
|
||||
Mitigation for side channel attacks have not yet been implemented in TFHE-rs,
|
||||
and will be released in upcoming versions.
|
||||
|
||||
@@ -17,7 +17,7 @@ path = "../../tfhe"
|
||||
features = [ "boolean", "shortint", "integer", "aarch64-unix" ]
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { version = "0.5.1", features = [ "html_reports" ]}
|
||||
criterion = { version = "0.4", features = [ "html_reports" ]}
|
||||
|
||||
[[bench]]
|
||||
name = "trivium"
|
||||
|
||||
@@ -120,7 +120,7 @@ fn main() {
|
||||
|
||||
# FHE byte Trivium implementation
|
||||
|
||||
The same objects have also been implemented to stream bytes instead of booleans. They can be constructed and used in the same way via the functions `TriviumStreamByte::<u8>::new` and
|
||||
The same objects have also been implemented to stream bytes insead of booleans. They can be constructed and used in the same way via the functions `TriviumStreamByte::<u8>::new` and
|
||||
`TriviumStreamByte::<FheUint8>::new` with the same arguments as before. The `FheUint8` version is significantly slower than the `FheBool` version, because not running
|
||||
with the same cryptographic parameters. Its interest lie in its trans-ciphering capabilities: `TriviumStreamByte<FheUint8>` implements the trait `TransCiphering`,
|
||||
meaning it implements the functions `trans_encrypt_64`. This function takes as input a `FheUint64` and outputs a `FheUint64`, the output being
|
||||
|
||||
@@ -6,7 +6,7 @@ use tfhe_trivium::KreyviumStream;
|
||||
use criterion::Criterion;
|
||||
|
||||
pub fn kreyvium_bool_gen(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled().enable_default_bool().build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
|
||||
@@ -41,7 +41,7 @@ pub fn kreyvium_bool_gen(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn kreyvium_bool_warmup(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled().enable_default_bool().build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
|
||||
|
||||
@@ -6,8 +6,9 @@ use tfhe_trivium::{KreyviumStreamByte, TransCiphering};
|
||||
use criterion::Criterion;
|
||||
|
||||
pub fn kreyvium_byte_gen(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default()
|
||||
.enable_function_evaluation()
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.enable_function_evaluation_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
@@ -35,8 +36,9 @@ pub fn kreyvium_byte_gen(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn kreyvium_byte_trans(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default()
|
||||
.enable_function_evaluation()
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.enable_function_evaluation_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
@@ -65,8 +67,9 @@ pub fn kreyvium_byte_trans(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn kreyvium_byte_warmup(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default()
|
||||
.enable_function_evaluation()
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.enable_function_evaluation_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
|
||||
@@ -8,7 +8,9 @@ use tfhe_trivium::{KreyviumStreamShortint, TransCiphering};
|
||||
use criterion::Criterion;
|
||||
|
||||
pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (hl_client_key, hl_server_key) = generate_keys(config);
|
||||
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
|
||||
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
|
||||
@@ -58,7 +60,9 @@ pub fn kreyvium_shortint_warmup(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn kreyvium_shortint_gen(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (hl_client_key, hl_server_key) = generate_keys(config);
|
||||
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
|
||||
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
|
||||
@@ -103,7 +107,9 @@ pub fn kreyvium_shortint_gen(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn kreyvium_shortint_trans(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (hl_client_key, hl_server_key) = generate_keys(config);
|
||||
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
|
||||
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
|
||||
|
||||
@@ -6,7 +6,7 @@ use tfhe_trivium::TriviumStream;
|
||||
use criterion::Criterion;
|
||||
|
||||
pub fn trivium_bool_gen(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled().enable_default_bool().build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB".to_string();
|
||||
@@ -41,7 +41,7 @@ pub fn trivium_bool_gen(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn trivium_bool_warmup(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled().enable_default_bool().build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB".to_string();
|
||||
|
||||
@@ -6,7 +6,9 @@ use tfhe_trivium::{TransCiphering, TriviumStreamByte};
|
||||
use criterion::Criterion;
|
||||
|
||||
pub fn trivium_byte_gen(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB".to_string();
|
||||
@@ -33,7 +35,9 @@ pub fn trivium_byte_gen(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn trivium_byte_trans(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB".to_string();
|
||||
@@ -61,7 +65,9 @@ pub fn trivium_byte_trans(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn trivium_byte_warmup(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB".to_string();
|
||||
|
||||
@@ -8,7 +8,9 @@ use tfhe_trivium::{TransCiphering, TriviumStreamShortint};
|
||||
use criterion::Criterion;
|
||||
|
||||
pub fn trivium_shortint_warmup(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (hl_client_key, hl_server_key) = generate_keys(config);
|
||||
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
|
||||
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
|
||||
@@ -58,7 +60,9 @@ pub fn trivium_shortint_warmup(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn trivium_shortint_gen(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (hl_client_key, hl_server_key) = generate_keys(config);
|
||||
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
|
||||
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
|
||||
@@ -103,7 +107,9 @@ pub fn trivium_shortint_gen(c: &mut Criterion) {
|
||||
}
|
||||
|
||||
pub fn trivium_shortint_trans(c: &mut Criterion) {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (hl_client_key, hl_server_key) = generate_keys(config);
|
||||
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
|
||||
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//! This module implements the Kreyvium stream cipher, using booleans or FheBool
|
||||
//! for the representation of the inner bits.
|
||||
//! for the representaion of the inner bits.
|
||||
|
||||
use crate::static_deque::StaticDeque;
|
||||
|
||||
@@ -35,7 +35,7 @@ pub struct KreyviumStream<T> {
|
||||
}
|
||||
|
||||
impl KreyviumStream<bool> {
|
||||
/// Constructor for `KreyviumStream<bool>`: arguments are the secret key and the input vector.
|
||||
/// Contructor for `KreyviumStream<bool>`: arguments are the secret key and the input vector.
|
||||
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
|
||||
/// returning)
|
||||
pub fn new(mut key: [bool; 128], mut iv: [bool; 128]) -> KreyviumStream<bool> {
|
||||
@@ -118,7 +118,7 @@ where
|
||||
T: KreyviumBoolInput<T> + std::marker::Send + std::marker::Sync,
|
||||
for<'a> &'a T: KreyviumBoolInput<T>,
|
||||
{
|
||||
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
|
||||
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
|
||||
/// server key
|
||||
fn new_from_registers(
|
||||
a_register: [T; 93],
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//! This module implements the Kreyvium stream cipher, using u8 or FheUint8
|
||||
//! for the representation of the inner bits.
|
||||
//! for the representaion of the inner bits.
|
||||
|
||||
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
|
||||
|
||||
@@ -31,7 +31,7 @@ impl KreyviumByteInput<FheUint8> for &FheUint8 {}
|
||||
/// representation of bits (u8 or FheUint8). To be able to compute FHE operations, it also owns
|
||||
/// an Option for a ServerKey.
|
||||
/// Since the original Kreyvium registers' sizes are not a multiple of 8, these registers (which
|
||||
/// store byte-like objects) have a size that is the eighth of the closest multiple of 8 above the
|
||||
/// store byte-like objects) have a size that is the eigth of the closest multiple of 8 above the
|
||||
/// originals' sizes.
|
||||
pub struct KreyviumStreamByte<T> {
|
||||
a_byte: StaticByteDeque<12, T>,
|
||||
@@ -43,7 +43,7 @@ pub struct KreyviumStreamByte<T> {
|
||||
}
|
||||
|
||||
impl KreyviumStreamByte<u8> {
|
||||
/// Constructor for `KreyviumStreamByte<u8>`: arguments are the secret key and the input vector.
|
||||
/// Contructor for `KreyviumStreamByte<u8>`: arguments are the secret key and the input vector.
|
||||
/// Outputs a KreyviumStream object already initialized (1152 steps have been run before
|
||||
/// returning)
|
||||
pub fn new(key_bytes: [u8; 16], iv_bytes: [u8; 16]) -> KreyviumStreamByte<u8> {
|
||||
@@ -146,7 +146,7 @@ where
|
||||
T: KreyviumByteInput<T> + Send,
|
||||
for<'a> &'a T: KreyviumByteInput<T>,
|
||||
{
|
||||
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
|
||||
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
|
||||
/// server key
|
||||
fn new_from_registers(
|
||||
a_register: [T; 12],
|
||||
|
||||
@@ -19,7 +19,7 @@ pub struct KreyviumStreamShortint {
|
||||
}
|
||||
|
||||
impl KreyviumStreamShortint {
|
||||
/// Constructor for KreyviumStreamShortint: arguments are the secret key and the input vector,
|
||||
/// Contructor for KreyviumStreamShortint: arguments are the secret key and the input vector,
|
||||
/// and a ServerKey reference. Outputs a KreyviumStream object already initialized (1152
|
||||
/// steps have been run before returning)
|
||||
pub fn new(
|
||||
|
||||
@@ -170,7 +170,7 @@ fn kreyvium_test_4() {
|
||||
|
||||
#[test]
|
||||
fn kreyvium_test_fhe_long() {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled().enable_default_bool().build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB000000000000".to_string();
|
||||
@@ -217,7 +217,9 @@ use tfhe::shortint::prelude::*;
|
||||
|
||||
#[test]
|
||||
fn kreyvium_test_shortint_long() {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (hl_client_key, hl_server_key) = generate_keys(config);
|
||||
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
|
||||
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
|
||||
@@ -300,8 +302,9 @@ fn kreyvium_test_clear_byte() {
|
||||
|
||||
#[test]
|
||||
fn kreyvium_test_byte_long() {
|
||||
let config = ConfigBuilder::default()
|
||||
.enable_function_evaluation()
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.enable_function_evaluation_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
@@ -339,8 +342,9 @@ fn kreyvium_test_byte_long() {
|
||||
|
||||
#[test]
|
||||
fn kreyvium_test_fhe_byte_transciphering_long() {
|
||||
let config = ConfigBuilder::default()
|
||||
.enable_function_evaluation()
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.enable_function_evaluation_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//! This module implements the StaticByteDeque struct: a deque of bytes. The idea
|
||||
//! is that this is a wrapper around StaticDeque, but StaticByteDeque has an additional
|
||||
//! functionality: it can construct the "intermediate" bytes, made of parts of other bytes.
|
||||
//! functionnality: it can construct the "intermediate" bytes, made of parts of other bytes.
|
||||
//! This is pretending to store bits, and allows accessing bits in chunks of 8 consecutive.
|
||||
|
||||
use crate::static_deque::StaticDeque;
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
use core::ops::{Index, IndexMut};
|
||||
|
||||
/// StaticDeque: a struct implementing a deque whose size is known at compile time.
|
||||
/// It has 2 members: the static array containing the data (never empty), and a cursor
|
||||
/// It has 2 members: the static array conatining the data (never empty), and a cursor
|
||||
/// equal to the index of the oldest element (and the next one to be overwritten).
|
||||
#[derive(Clone)]
|
||||
pub struct StaticDeque<const N: usize, T> {
|
||||
|
||||
@@ -4,7 +4,6 @@
|
||||
use crate::{KreyviumStreamByte, KreyviumStreamShortint, TriviumStreamByte, TriviumStreamShortint};
|
||||
use tfhe::shortint::Ciphertext;
|
||||
|
||||
use tfhe::prelude::*;
|
||||
use tfhe::{set_server_key, unset_server_key, FheUint64, FheUint8, ServerKey};
|
||||
|
||||
use rayon::prelude::*;
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
mod trivium_bool;
|
||||
pub use trivium_bool::TriviumStream;
|
||||
#[allow(clippy::module_inception)]
|
||||
mod trivium;
|
||||
pub use trivium::TriviumStream;
|
||||
|
||||
mod trivium_byte;
|
||||
pub use trivium_byte::TriviumStreamByte;
|
||||
|
||||
@@ -232,7 +232,7 @@ fn trivium_test_clear_byte() {
|
||||
|
||||
#[test]
|
||||
fn trivium_test_fhe_long() {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled().enable_default_bool().build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB".to_string();
|
||||
@@ -277,7 +277,9 @@ fn trivium_test_fhe_long() {
|
||||
|
||||
#[test]
|
||||
fn trivium_test_fhe_byte_long() {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB".to_string();
|
||||
@@ -314,7 +316,9 @@ fn trivium_test_fhe_byte_long() {
|
||||
|
||||
#[test]
|
||||
fn trivium_test_fhe_byte_transciphering_long() {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (client_key, server_key) = generate_keys(config);
|
||||
|
||||
let key_string = "0053A6F94C9FF24598EB".to_string();
|
||||
@@ -353,7 +357,9 @@ use tfhe::shortint::prelude::*;
|
||||
|
||||
#[test]
|
||||
fn trivium_test_shortint_long() {
|
||||
let config = ConfigBuilder::default().build();
|
||||
let config = ConfigBuilder::all_disabled()
|
||||
.enable_default_integers()
|
||||
.build();
|
||||
let (hl_client_key, hl_server_key) = generate_keys(config);
|
||||
let underlying_ck: tfhe::shortint::ClientKey = (*hl_client_key.as_ref()).clone().into();
|
||||
let underlying_sk: tfhe::shortint::ServerKey = (*hl_server_key.as_ref()).clone().into();
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
//! This module implements the Trivium stream cipher, using booleans or FheBool
|
||||
//! for the representation of the inner bits.
|
||||
//! for the representaion of the inner bits.
|
||||
|
||||
use crate::static_deque::StaticDeque;
|
||||
|
||||
@@ -33,7 +33,7 @@ pub struct TriviumStream<T> {
|
||||
}
|
||||
|
||||
impl TriviumStream<bool> {
|
||||
/// Constructor for `TriviumStream<bool>`: arguments are the secret key and the input vector.
|
||||
/// Contructor for `TriviumStream<bool>`: arguments are the secret key and the input vector.
|
||||
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
|
||||
/// returning)
|
||||
pub fn new(key: [bool; 80], iv: [bool; 80]) -> TriviumStream<bool> {
|
||||
@@ -94,7 +94,7 @@ where
|
||||
T: TriviumBoolInput<T> + std::marker::Send + std::marker::Sync,
|
||||
for<'a> &'a T: TriviumBoolInput<T>,
|
||||
{
|
||||
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
|
||||
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
|
||||
/// server key
|
||||
fn new_from_registers(
|
||||
a_register: [T; 93],
|
||||
@@ -1,5 +1,5 @@
|
||||
//! This module implements the Trivium stream cipher, using u8 or FheUint8
|
||||
//! for the representation of the inner bits.
|
||||
//! for the representaion of the inner bits.
|
||||
|
||||
use crate::static_deque::{StaticByteDeque, StaticByteDequeInput};
|
||||
|
||||
@@ -31,7 +31,7 @@ impl TriviumByteInput<FheUint8> for &FheUint8 {}
|
||||
/// representation of bits (u8 or FheUint8). To be able to compute FHE operations, it also owns
|
||||
/// an Option for a ServerKey.
|
||||
/// Since the original Trivium registers' sizes are not a multiple of 8, these registers (which
|
||||
/// store byte-like objects) have a size that is the eighth of the closest multiple of 8 above the
|
||||
/// store byte-like objects) have a size that is the eigth of the closest multiple of 8 above the
|
||||
/// originals' sizes.
|
||||
pub struct TriviumStreamByte<T> {
|
||||
a_byte: StaticByteDeque<12, T>,
|
||||
@@ -41,7 +41,7 @@ pub struct TriviumStreamByte<T> {
|
||||
}
|
||||
|
||||
impl TriviumStreamByte<u8> {
|
||||
/// Constructor for `TriviumStreamByte<u8>`: arguments are the secret key and the input vector.
|
||||
/// Contructor for `TriviumStreamByte<u8>`: arguments are the secret key and the input vector.
|
||||
/// Outputs a TriviumStream object already initialized (1152 steps have been run before
|
||||
/// returning)
|
||||
pub fn new(key: [u8; 10], iv: [u8; 10]) -> TriviumStreamByte<u8> {
|
||||
@@ -111,7 +111,7 @@ where
|
||||
T: TriviumByteInput<T> + Send,
|
||||
for<'a> &'a T: TriviumByteInput<T>,
|
||||
{
|
||||
/// Internal generic constructor: arguments are already prepared registers, and an optional FHE
|
||||
/// Internal generic contructor: arguments are already prepared registers, and an optional FHE
|
||||
/// server key
|
||||
fn new_from_registers(
|
||||
a_register: [T; 12],
|
||||
|
||||
@@ -17,9 +17,9 @@ pub struct TriviumStreamShortint {
|
||||
}
|
||||
|
||||
impl TriviumStreamShortint {
|
||||
/// Constructor for TriviumStreamShortint: arguments are the secret key and the input vector,
|
||||
/// and a ServerKey reference. Outputs a TriviumStream object already initialized (1152
|
||||
/// steps have been run before returning)
|
||||
/// Contructor for TriviumStreamShortint: arguments are the secret key and the input vector, and
|
||||
/// a ServerKey reference. Outputs a TriviumStream object already initialized (1152 steps
|
||||
/// have been run before returning)
|
||||
pub fn new(
|
||||
key: [Ciphertext; 80],
|
||||
iv: [u64; 80],
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
{
|
||||
"m6i.metal": 7.168,
|
||||
"hpc7a.96xlarge": 7.7252
|
||||
"m6i.metal": 7.168
|
||||
}
|
||||
|
||||
@@ -36,24 +36,23 @@ def check_security(filename):
|
||||
try:
|
||||
# The lattice estimator is not able to manage such large dimension.
|
||||
# If we have the security for smaller `n` then we have security for larger ones.
|
||||
if param.n > 16384:
|
||||
if param.n == 32768:
|
||||
param = param.updated(n = 16384)
|
||||
|
||||
usvp_level = LWE.primal_usvp(param, red_cost_model = model)
|
||||
dual_level = LWE.dual_hybrid(param, red_cost_model = model)
|
||||
|
||||
estimator_level = log(min(usvp_level["rop"], dual_level["rop"]),2 )
|
||||
security_level = f"security level = {estimator_level} bits"
|
||||
if estimator_level < 127:
|
||||
print("FAIL\t({security_level})")
|
||||
reason = f"attained {security_level} target is 128 bits"
|
||||
print("FAIL")
|
||||
reason = f"attained security level = {estimator_level} bits target is 128 bits"
|
||||
to_update.append((param, reason))
|
||||
continue
|
||||
except Exception as err:
|
||||
print("FAIL")
|
||||
to_update.append((param, f"{repr(err)}"))
|
||||
else:
|
||||
print(f"OK\t({security_level})")
|
||||
print("OK")
|
||||
|
||||
return to_update
|
||||
|
||||
@@ -73,4 +72,4 @@ if __name__ == "__main__":
|
||||
print(f"[{param.tag}] reason: {reason} (param)")
|
||||
sys.exit(int(1)) # Explicit conversion is needed to make this call work
|
||||
else:
|
||||
print("All parameters passed the security check")
|
||||
print("All parameters passed the security check")
|
||||
@@ -20,10 +20,7 @@ def main(args):
|
||||
bench_function_id = bench_data["function_id"]
|
||||
|
||||
split = bench_function_id.split("::")
|
||||
if split.len() == 5: # Signed integers
|
||||
(_, _, function_name, parameter_set, bits) = split
|
||||
else: # Unsigned integers
|
||||
(_, function_name, parameter_set, bits) = split
|
||||
(_, function_name, parameter_set, bits) = split
|
||||
|
||||
if "_scalar_" in bits:
|
||||
(bits, scalar) = bits.split("_bits_scalar_")
|
||||
|
||||
50
ci/slab.toml
50
ci/slab.toml
@@ -1,37 +1,32 @@
|
||||
[profile.cpu-big]
|
||||
region = "eu-west-3"
|
||||
image_id = "ami-051942e4055555752"
|
||||
instance_type = "m6i.32xlarge"
|
||||
|
||||
[profile.cpu-big_fallback]
|
||||
region = "us-east-1"
|
||||
image_id = "ami-04e3bb9aebb6786df"
|
||||
image_id = "ami-0ab73f5bd11708a85"
|
||||
instance_type = "m6i.32xlarge"
|
||||
|
||||
[profile.cpu-small]
|
||||
region = "eu-west-3"
|
||||
image_id = "ami-051942e4055555752"
|
||||
image_id = "ami-0ab73f5bd11708a85"
|
||||
instance_type = "m6i.4xlarge"
|
||||
|
||||
[profile.bench]
|
||||
region = "eu-west-1"
|
||||
image_id = "ami-0e88d98b86aff13de"
|
||||
instance_type = "hpc7a.96xlarge"
|
||||
region = "eu-west-3"
|
||||
image_id = "ami-0ab73f5bd11708a85"
|
||||
instance_type = "m6i.metal"
|
||||
|
||||
[command.cpu_test]
|
||||
workflow = "aws_tfhe_tests.yml"
|
||||
profile = "cpu-big"
|
||||
check_run_name = "CPU AWS Tests"
|
||||
|
||||
[command.cpu_unsigned_integer_test]
|
||||
[command.cpu_integer_test]
|
||||
workflow = "aws_tfhe_integer_tests.yml"
|
||||
profile = "cpu-big"
|
||||
check_run_name = "CPU Unsigned Integer AWS Tests"
|
||||
check_run_name = "CPU Integer AWS Tests"
|
||||
|
||||
[command.cpu_signed_integer_test]
|
||||
workflow = "aws_tfhe_signed_integer_tests.yml"
|
||||
[command.cpu_multi_bit_test]
|
||||
workflow = "aws_tfhe_multi_bit_tests.yml"
|
||||
profile = "cpu-big"
|
||||
check_run_name = "CPU Signed Integer AWS Tests"
|
||||
check_run_name = "CPU AWS Multi Bit Tests"
|
||||
|
||||
[command.cpu_wasm_test]
|
||||
workflow = "aws_tfhe_wasm_tests.yml"
|
||||
@@ -48,11 +43,6 @@ workflow = "integer_full_benchmark.yml"
|
||||
profile = "bench"
|
||||
check_run_name = "Integer CPU AWS Benchmarks Full Suite"
|
||||
|
||||
[command.signed_integer_full_bench]
|
||||
workflow = "signed_integer_full_benchmark.yml"
|
||||
profile = "bench"
|
||||
check_run_name = "Signed Integer CPU AWS Benchmarks Full Suite"
|
||||
|
||||
[command.integer_bench]
|
||||
workflow = "integer_benchmark.yml"
|
||||
profile = "bench"
|
||||
@@ -63,16 +53,6 @@ workflow = "integer_multi_bit_benchmark.yml"
|
||||
profile = "bench"
|
||||
check_run_name = "Integer multi bit CPU AWS Benchmarks"
|
||||
|
||||
[command.signed_integer_bench]
|
||||
workflow = "signed_integer_benchmark.yml"
|
||||
profile = "bench"
|
||||
check_run_name = "Signed integer CPU AWS Benchmarks"
|
||||
|
||||
[command.signed_integer_multi_bit_bench]
|
||||
workflow = "signed_integer_multi_bit_benchmark.yml"
|
||||
profile = "bench"
|
||||
check_run_name = "Signed integer multi bit CPU AWS Benchmarks"
|
||||
|
||||
[command.shortint_full_bench]
|
||||
workflow = "shortint_full_benchmark.yml"
|
||||
profile = "bench"
|
||||
@@ -97,13 +77,3 @@ check_run_name = "PBS CPU AWS Benchmarks"
|
||||
workflow = "wasm_client_benchmark.yml"
|
||||
profile = "cpu-small"
|
||||
check_run_name = "WASM Client AWS Benchmarks"
|
||||
|
||||
[command.csprng_randomness_testing]
|
||||
workflow = "csprng_randomness_testing.yml"
|
||||
profile = "cpu-small"
|
||||
check_run_name = "CSPRNG randomness testing"
|
||||
|
||||
[command.code_coverage]
|
||||
workflow = "code_coverage.yml"
|
||||
profile = "cpu-small"
|
||||
check_run_name = "Code coverage"
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
coverage:
|
||||
status:
|
||||
# Disable patch checks in GitHub until all tfhe-rs layers have coverage implemented.
|
||||
patch: false
|
||||
@@ -1,53 +0,0 @@
|
||||
[package]
|
||||
name = "concrete-csprng"
|
||||
version = "0.4.0"
|
||||
edition = "2021"
|
||||
license = "BSD-3-Clause-Clear"
|
||||
description = "Cryptographically Secure PRNG used in the TFHE-rs library."
|
||||
homepage = "https://zama.ai/"
|
||||
documentation = "https://docs.zama.ai/tfhe-rs"
|
||||
repository = "https://github.com/zama-ai/tfhe-rs"
|
||||
readme = "README.md"
|
||||
keywords = ["fully", "homomorphic", "encryption", "fhe", "cryptography"]
|
||||
rust-version = "1.72"
|
||||
|
||||
[dependencies]
|
||||
aes = "0.8.2"
|
||||
rayon = { version = "1.5.0", optional = true }
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies]
|
||||
libc = "0.2.133"
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.8.3"
|
||||
criterion = "0.5.1"
|
||||
clap = "=4.4.4"
|
||||
|
||||
[features]
|
||||
parallel = ["rayon"]
|
||||
seeder_x86_64_rdseed = []
|
||||
seeder_unix = []
|
||||
generator_x86_64_aesni = []
|
||||
generator_fallback = []
|
||||
generator_aarch64_aes = []
|
||||
|
||||
x86_64 = [
|
||||
"parallel",
|
||||
"seeder_x86_64_rdseed",
|
||||
"generator_x86_64_aesni",
|
||||
"generator_fallback",
|
||||
]
|
||||
x86_64-unix = ["x86_64", "seeder_unix"]
|
||||
aarch64 = ["parallel", "generator_aarch64_aes", "generator_fallback"]
|
||||
aarch64-unix = ["aarch64", "seeder_unix"]
|
||||
|
||||
[[bench]]
|
||||
name = "benchmark"
|
||||
path = "benches/benchmark.rs"
|
||||
harness = false
|
||||
required-features = ["seeder_x86_64_rdseed", "generator_x86_64_aesni"]
|
||||
|
||||
[[example]]
|
||||
name = "generate"
|
||||
path = "examples/generate.rs"
|
||||
required-features = ["seeder_unix", "generator_fallback"]
|
||||
@@ -1,28 +0,0 @@
|
||||
BSD 3-Clause Clear License
|
||||
|
||||
Copyright © 2023 ZAMA.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or other
|
||||
materials provided with the distribution.
|
||||
|
||||
3. Neither the name of ZAMA nor the names of its contributors may be used to endorse
|
||||
or promote products derived from this software without specific prior written permission.
|
||||
|
||||
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE.
|
||||
THIS SOFTWARE IS PROVIDED BY THE ZAMA AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
|
||||
ZAMA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
||||
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
@@ -1,23 +0,0 @@
|
||||
# Concrete CSPRNG
|
||||
|
||||
This crate contains a fast *Cryptographically Secure Pseudoramdon Number Generator*, used in the
|
||||
['concrete-core'](https://crates.io/crates/concrete-core) library, you can find it [here](../concrete-core/) in this repo.
|
||||
|
||||
The implementation is based on the AES blockcipher used in CTR mode, as described in the ISO/IEC
|
||||
18033-4 standard.
|
||||
|
||||
Two implementations are available, an accelerated one on x86_64 CPUs with the `aes` feature and the `sse2` feature, and a pure software one that can be used on other platforms.
|
||||
|
||||
The crate also makes two seeders available, one needing the x86_64 feature `rdseed` and another one based on the Unix random device `/dev/random` the latter requires the user to provide a secret.
|
||||
|
||||
## Running the benchmarks
|
||||
|
||||
To execute the benchmarks on an x86_64 platform:
|
||||
```shell
|
||||
RUSTFLAGS="-Ctarget-cpu=native" cargo bench --features=seeder_x86_64_rdseed,generator_x86_64_aesni
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
This software is distributed under the BSD-3-Clause-Clear license. If you have any questions,
|
||||
please contact us at `hello@zama.ai`.
|
||||
@@ -1,54 +0,0 @@
|
||||
use concrete_csprng::generators::{
|
||||
AesniRandomGenerator, BytesPerChild, ChildrenCount, RandomGenerator,
|
||||
};
|
||||
use concrete_csprng::seeders::{RdseedSeeder, Seeder};
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
|
||||
// The number of bytes to generate during one benchmark iteration.
|
||||
const N_GEN: usize = 1_000_000;
|
||||
|
||||
fn parent_generate(c: &mut Criterion) {
|
||||
let mut seeder = RdseedSeeder;
|
||||
let mut generator = AesniRandomGenerator::new(seeder.seed());
|
||||
c.bench_function("parent_generate", |b| {
|
||||
b.iter(|| {
|
||||
(0..N_GEN).for_each(|_| {
|
||||
generator.next();
|
||||
})
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn child_generate(c: &mut Criterion) {
|
||||
let mut seeder = RdseedSeeder;
|
||||
let mut generator = AesniRandomGenerator::new(seeder.seed());
|
||||
let mut generator = generator
|
||||
.try_fork(ChildrenCount(1), BytesPerChild(N_GEN * 10_000))
|
||||
.unwrap()
|
||||
.next()
|
||||
.unwrap();
|
||||
c.bench_function("child_generate", |b| {
|
||||
b.iter(|| {
|
||||
(0..N_GEN).for_each(|_| {
|
||||
generator.next();
|
||||
})
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn fork(c: &mut Criterion) {
|
||||
let mut seeder = RdseedSeeder;
|
||||
let mut generator = AesniRandomGenerator::new(seeder.seed());
|
||||
c.bench_function("fork", |b| {
|
||||
b.iter(|| {
|
||||
black_box(
|
||||
generator
|
||||
.try_fork(ChildrenCount(2048), BytesPerChild(2048))
|
||||
.unwrap(),
|
||||
)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, parent_generate, child_generate, fork);
|
||||
criterion_main!(benches);
|
||||
@@ -1,112 +0,0 @@
|
||||
// To have clear error messages during compilation about why some piece of code may not be available
|
||||
// we decided to check the features compatibility with the target configuration in this script.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
|
||||
// See https://doc.rust-lang.org/reference/conditional-compilation.html#target_arch for various
|
||||
// compilation configuration
|
||||
|
||||
// Can be easily extended if needed
|
||||
pub struct FeatureRequirement {
|
||||
pub feature_name: &'static str,
|
||||
// target_arch requirement
|
||||
pub feature_req_target_arch: Option<&'static str>,
|
||||
// target_family requirement
|
||||
pub feature_req_target_family: Option<&'static str>,
|
||||
}
|
||||
|
||||
// We implement a version of default that is const which is not possible through the Default trait
|
||||
impl FeatureRequirement {
|
||||
// As we cannot use cfg!(feature = "feature_name") with something else than a literal, we need
|
||||
// a reference to the HashMap we populate with the enabled features
|
||||
fn is_activated(&self, build_activated_features: &HashMap<&'static str, bool>) -> bool {
|
||||
*build_activated_features.get(self.feature_name).unwrap()
|
||||
}
|
||||
|
||||
// panics if the requirements are not met
|
||||
fn check_requirements(&self) {
|
||||
let target_arch = get_target_arch_cfg();
|
||||
if let Some(feature_req_target_arch) = self.feature_req_target_arch {
|
||||
if feature_req_target_arch != target_arch {
|
||||
panic!(
|
||||
"Feature `{}` requires target_arch `{}`, current cfg: `{}`",
|
||||
self.feature_name, feature_req_target_arch, target_arch
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
let target_family = get_target_family_cfg();
|
||||
if let Some(feature_req_target_family) = self.feature_req_target_family {
|
||||
if feature_req_target_family != target_family {
|
||||
panic!(
|
||||
"Feature `{}` requires target_family `{}`, current cfg: `{}`",
|
||||
self.feature_name, feature_req_target_family, target_family
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// const vecs are not yet a thing so use a fixed size array (update the array size when adding
|
||||
// requirements)
|
||||
static FEATURE_REQUIREMENTS: [FeatureRequirement; 4] = [
|
||||
FeatureRequirement {
|
||||
feature_name: "seeder_x86_64_rdseed",
|
||||
feature_req_target_arch: Some("x86_64"),
|
||||
feature_req_target_family: None,
|
||||
},
|
||||
FeatureRequirement {
|
||||
feature_name: "generator_x86_64_aesni",
|
||||
feature_req_target_arch: Some("x86_64"),
|
||||
feature_req_target_family: None,
|
||||
},
|
||||
FeatureRequirement {
|
||||
feature_name: "seeder_unix",
|
||||
feature_req_target_arch: None,
|
||||
feature_req_target_family: Some("unix"),
|
||||
},
|
||||
FeatureRequirement {
|
||||
feature_name: "generator_aarch64_aes",
|
||||
feature_req_target_arch: Some("aarch64"),
|
||||
feature_req_target_family: None,
|
||||
},
|
||||
];
|
||||
|
||||
// For a "feature_name" feature_cfg!("feature_name") expands to
|
||||
// ("feature_name", cfg!(feature = "feature_name"))
|
||||
macro_rules! feature_cfg {
|
||||
($feat_name:literal) => {
|
||||
($feat_name, cfg!(feature = $feat_name))
|
||||
};
|
||||
}
|
||||
|
||||
// Static HashMap would require an additional crate (phf or lazy static e.g.), so we just write a
|
||||
// function that returns the HashMap we are interested in
|
||||
fn get_feature_enabled_status() -> HashMap<&'static str, bool> {
|
||||
HashMap::from([
|
||||
feature_cfg!("seeder_x86_64_rdseed"),
|
||||
feature_cfg!("generator_x86_64_aesni"),
|
||||
feature_cfg!("seeder_unix"),
|
||||
feature_cfg!("generator_aarch64_aes"),
|
||||
])
|
||||
}
|
||||
|
||||
// See https://stackoverflow.com/a/43435335/18088947 for the inspiration of this code
|
||||
fn get_target_arch_cfg() -> String {
|
||||
env::var("CARGO_CFG_TARGET_ARCH").expect("CARGO_CFG_TARGET_ARCH is not set")
|
||||
}
|
||||
|
||||
fn get_target_family_cfg() -> String {
|
||||
env::var("CARGO_CFG_TARGET_FAMILY").expect("CARGO_CFG_TARGET_FAMILY is not set")
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let feature_enabled_status = get_feature_enabled_status();
|
||||
|
||||
// This will panic if some requirements for a feature are not met
|
||||
FEATURE_REQUIREMENTS
|
||||
.iter()
|
||||
.filter(|&req| FeatureRequirement::is_activated(req, &feature_enabled_status))
|
||||
.for_each(FeatureRequirement::check_requirements);
|
||||
}
|
||||
@@ -1,113 +0,0 @@
|
||||
//! This program uses the concrete csprng to generate an infinite stream of random bytes on
|
||||
//! the program stdout. It can also generate a fixed number of bytes by passing a value along the
|
||||
//! optional argument `--bytes_total`. For testing purpose.
|
||||
use clap::{value_parser, Arg, Command};
|
||||
#[cfg(feature = "generator_x86_64_aesni")]
|
||||
use concrete_csprng::generators::AesniRandomGenerator as ActivatedRandomGenerator;
|
||||
#[cfg(feature = "generator_aarch64_aes")]
|
||||
use concrete_csprng::generators::NeonAesRandomGenerator as ActivatedRandomGenerator;
|
||||
#[cfg(all(
|
||||
not(feature = "generator_x86_64_aesni"),
|
||||
not(feature = "generator_aarch64_aes"),
|
||||
feature = "generator_fallback"
|
||||
))]
|
||||
use concrete_csprng::generators::SoftwareRandomGenerator as ActivatedRandomGenerator;
|
||||
|
||||
use concrete_csprng::generators::RandomGenerator;
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
use concrete_csprng::seeders::AppleSecureEnclaveSeeder as ActivatedSeeder;
|
||||
#[cfg(all(not(target_os = "macos"), feature = "seeder_x86_64_rdseed"))]
|
||||
use concrete_csprng::seeders::RdseedSeeder as ActivatedSeeder;
|
||||
#[cfg(all(
|
||||
not(target_os = "macos"),
|
||||
not(feature = "seeder_x86_64_rdseed"),
|
||||
feature = "seeder_unix"
|
||||
))]
|
||||
use concrete_csprng::seeders::UnixSeeder as ActivatedSeeder;
|
||||
|
||||
use concrete_csprng::seeders::Seeder;
|
||||
|
||||
use std::io::prelude::*;
|
||||
use std::io::{stdout, StdoutLock};
|
||||
|
||||
fn write_bytes(
|
||||
buffer: &mut [u8],
|
||||
generator: &mut ActivatedRandomGenerator,
|
||||
stdout: &mut StdoutLock<'_>,
|
||||
) -> std::io::Result<()> {
|
||||
buffer.iter_mut().zip(generator).for_each(|(b, g)| *b = g);
|
||||
stdout.write_all(buffer)
|
||||
}
|
||||
|
||||
fn infinite_bytes_generation(
|
||||
buffer: &mut [u8],
|
||||
generator: &mut ActivatedRandomGenerator,
|
||||
stdout: &mut StdoutLock<'_>,
|
||||
) {
|
||||
while write_bytes(buffer, generator, stdout).is_ok() {}
|
||||
}
|
||||
|
||||
fn bytes_generation(
|
||||
bytes_total: usize,
|
||||
buffer: &mut [u8],
|
||||
generator: &mut ActivatedRandomGenerator,
|
||||
stdout: &mut StdoutLock<'_>,
|
||||
) {
|
||||
let quotient = bytes_total / buffer.len();
|
||||
let remaining = bytes_total % buffer.len();
|
||||
|
||||
for _ in 0..quotient {
|
||||
write_bytes(buffer, generator, stdout).unwrap();
|
||||
}
|
||||
|
||||
write_bytes(&mut buffer[0..remaining], generator, stdout).unwrap()
|
||||
}
|
||||
|
||||
pub fn main() {
|
||||
let matches = Command::new(
|
||||
"Generate a stream of random numbers, specify no flags for infinite generation",
|
||||
)
|
||||
.arg(
|
||||
Arg::new("bytes_total")
|
||||
.short('b')
|
||||
.long("bytes_total")
|
||||
.value_parser(value_parser!(usize))
|
||||
.help("Total number of bytes that has to be generated"),
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
// Ugly hack to be able to use UnixSeeder
|
||||
#[cfg(all(
|
||||
not(target_os = "macos"),
|
||||
not(feature = "seeder_x86_64_rdseed"),
|
||||
feature = "seeder_unix"
|
||||
))]
|
||||
let new_seeder = || ActivatedSeeder::new(0);
|
||||
#[cfg(not(all(
|
||||
not(target_os = "macos"),
|
||||
not(feature = "seeder_x86_64_rdseed"),
|
||||
feature = "seeder_unix"
|
||||
)))]
|
||||
let new_seeder = || ActivatedSeeder;
|
||||
|
||||
let mut seeder = new_seeder();
|
||||
let seed = seeder.seed();
|
||||
// Don't print on std out
|
||||
eprintln!("seed={seed:?}");
|
||||
let mut generator = ActivatedRandomGenerator::new(seed);
|
||||
let stdout = stdout();
|
||||
let mut buffer = [0u8; 16];
|
||||
|
||||
// lock stdout as there is a single thread running
|
||||
let mut stdout = stdout.lock();
|
||||
|
||||
match matches.get_one::<usize>("bytes_total") {
|
||||
Some(&total) => {
|
||||
bytes_generation(total, &mut buffer, &mut generator, &mut stdout);
|
||||
}
|
||||
None => {
|
||||
infinite_bytes_generation(&mut buffer, &mut generator, &mut stdout);
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
use crate::generators::aes_ctr::index::AesIndex;
|
||||
use crate::generators::aes_ctr::BYTES_PER_BATCH;
|
||||
|
||||
/// Represents a key used in the AES block cipher.
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct AesKey(pub u128);
|
||||
|
||||
/// A trait for AES block ciphers.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// The block cipher is used in a batched manner (to reduce amortized cost on special hardware).
|
||||
/// For this reason we only expose a `generate_batch` method.
|
||||
pub trait AesBlockCipher: Clone + Send + Sync {
|
||||
/// Instantiate a new generator from a secret key.
|
||||
fn new(key: AesKey) -> Self;
|
||||
/// Generates the batch corresponding to the given index.
|
||||
fn generate_batch(&mut self, index: AesIndex) -> [u8; BYTES_PER_BATCH];
|
||||
}
|
||||
@@ -1,379 +0,0 @@
|
||||
use crate::generators::aes_ctr::block_cipher::{AesBlockCipher, AesKey};
|
||||
use crate::generators::aes_ctr::index::TableIndex;
|
||||
use crate::generators::aes_ctr::states::{BufferPointer, ShiftAction, State};
|
||||
use crate::generators::aes_ctr::BYTES_PER_BATCH;
|
||||
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError};
|
||||
|
||||
// Usually, to work with iterators and parallel iterators, we would use opaque types such as
|
||||
// `impl Iterator<..>`. Unfortunately, it is not yet possible to return existential types in
|
||||
// traits, which we would need for `RandomGenerator`. For this reason, we have to use the
|
||||
// full type name where needed. Hence the following trait aliases definition:
|
||||
|
||||
/// A type alias for the children iterator closure type.
|
||||
pub type ChildrenClosure<BlockCipher> =
|
||||
fn((usize, (Box<BlockCipher>, TableIndex, BytesPerChild))) -> AesCtrGenerator<BlockCipher>;
|
||||
|
||||
/// A type alias for the children iterator type.
|
||||
pub type ChildrenIterator<BlockCipher> = std::iter::Map<
|
||||
std::iter::Zip<
|
||||
std::ops::Range<usize>,
|
||||
std::iter::Repeat<(Box<BlockCipher>, TableIndex, BytesPerChild)>,
|
||||
>,
|
||||
ChildrenClosure<BlockCipher>,
|
||||
>;
|
||||
|
||||
/// A type implementing the `RandomGenerator` api using the AES block cipher in counter mode.
|
||||
#[derive(Clone)]
|
||||
pub struct AesCtrGenerator<BlockCipher: AesBlockCipher> {
|
||||
// The block cipher used in the background
|
||||
pub(crate) block_cipher: Box<BlockCipher>,
|
||||
// The state corresponding to the latest outputted byte.
|
||||
pub(crate) state: State,
|
||||
// The last legal index. This makes bound check faster.
|
||||
pub(crate) last: TableIndex,
|
||||
// The buffer containing the current batch of aes calls.
|
||||
pub(crate) buffer: [u8; BYTES_PER_BATCH],
|
||||
}
|
||||
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
impl<BlockCipher: AesBlockCipher> AesCtrGenerator<BlockCipher> {
|
||||
/// Generates a new csprng.
|
||||
///
|
||||
/// Note :
|
||||
/// ------
|
||||
///
|
||||
/// The `start_index` given as input, points to the first byte that will be outputted by the
|
||||
/// generator. If not given, this one is automatically set to the second table index. The
|
||||
/// first table index is not used to prevent an edge case from happening: since `state` is
|
||||
/// supposed to contain the index of the previous byte, the initial value must be decremented.
|
||||
/// Using the second value prevents wrapping to the max index, which would make the bound
|
||||
/// checking fail.
|
||||
///
|
||||
/// The `bound_index` given as input, points to the first byte that can __not__ be legally
|
||||
/// outputted by the generator. If not given, the bound is automatically set to the last
|
||||
/// table index.
|
||||
pub fn new(
|
||||
key: AesKey,
|
||||
start_index: Option<TableIndex>,
|
||||
bound_index: Option<TableIndex>,
|
||||
) -> AesCtrGenerator<BlockCipher> {
|
||||
AesCtrGenerator::from_block_cipher(
|
||||
Box::new(BlockCipher::new(key)),
|
||||
start_index.unwrap_or(TableIndex::SECOND),
|
||||
bound_index.unwrap_or(TableIndex::LAST),
|
||||
)
|
||||
}
|
||||
|
||||
/// Generates a csprng from an existing block cipher.
|
||||
pub fn from_block_cipher(
|
||||
block_cipher: Box<BlockCipher>,
|
||||
start_index: TableIndex,
|
||||
bound_index: TableIndex,
|
||||
) -> AesCtrGenerator<BlockCipher> {
|
||||
assert!(start_index < bound_index);
|
||||
let last = bound_index.decremented();
|
||||
let buffer = [0u8; BYTES_PER_BATCH];
|
||||
let state = State::new(start_index);
|
||||
AesCtrGenerator {
|
||||
block_cipher,
|
||||
state,
|
||||
last,
|
||||
buffer,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the table index related to the previous random byte.
|
||||
pub fn table_index(&self) -> TableIndex {
|
||||
self.state.table_index()
|
||||
}
|
||||
|
||||
/// Returns the bound of the generator if any.
|
||||
///
|
||||
/// The bound is the table index of the first byte that can not be outputted by the generator.
|
||||
pub fn get_bound(&self) -> TableIndex {
|
||||
self.last.incremented()
|
||||
}
|
||||
|
||||
/// Returns whether the generator is bounded or not.
|
||||
pub fn is_bounded(&self) -> bool {
|
||||
self.get_bound() != TableIndex::LAST
|
||||
}
|
||||
|
||||
/// Computes the number of bytes that can still be outputted by the generator.
|
||||
///
|
||||
/// Note :
|
||||
/// ------
|
||||
///
|
||||
/// Note that `ByteCount` uses the `u128` datatype to store the byte count. Unfortunately, the
|
||||
/// number of remaining bytes is in ⟦0;2¹³² -1⟧. When the number is greater than 2¹²⁸ - 1,
|
||||
/// we saturate the count at 2¹²⁸ - 1.
|
||||
pub fn remaining_bytes(&self) -> ByteCount {
|
||||
TableIndex::distance(&self.last, &self.state.table_index()).unwrap()
|
||||
}
|
||||
|
||||
/// Outputs the next random byte.
|
||||
pub fn generate_next(&mut self) -> u8 {
|
||||
self.next()
|
||||
.expect("Tried to generate a byte after the bound.")
|
||||
}
|
||||
|
||||
/// Tries to fork the current generator into `n_child` generators each able to output
|
||||
/// `child_bytes` random bytes.
|
||||
pub fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<ChildrenIterator<BlockCipher>, ForkError> {
|
||||
if n_children.0 == 0 {
|
||||
return Err(ForkError::ZeroChildrenCount);
|
||||
}
|
||||
if n_bytes.0 == 0 {
|
||||
return Err(ForkError::ZeroBytesPerChild);
|
||||
}
|
||||
if !self.is_fork_in_bound(n_children, n_bytes) {
|
||||
return Err(ForkError::ForkTooLarge);
|
||||
}
|
||||
|
||||
// The state currently stored in the parent generator points to the table index of the last
|
||||
// generated byte. The first index to be generated is the next one:
|
||||
let first_index = self.state.table_index().incremented();
|
||||
let output = (0..n_children.0)
|
||||
.zip(std::iter::repeat((
|
||||
self.block_cipher.clone(),
|
||||
first_index,
|
||||
n_bytes,
|
||||
)))
|
||||
.map(
|
||||
// This map is a little weird because we need to cast the closure to a fn pointer
|
||||
// that matches the signature of `ChildrenIterator<BlockCipher>`.
|
||||
// Unfortunately, the compiler does not manage to coerce this one
|
||||
// automatically.
|
||||
(|(i, (block_cipher, first_index, n_bytes))| {
|
||||
// The first index to be outputted by the child is the `first_index` shifted by
|
||||
// the proper amount of `child_bytes`.
|
||||
let child_first_index = first_index.increased(n_bytes.0 * i);
|
||||
// The bound of the child is the first index of its next sibling.
|
||||
let child_bound_index = first_index.increased(n_bytes.0 * (i + 1));
|
||||
AesCtrGenerator::from_block_cipher(
|
||||
block_cipher,
|
||||
child_first_index,
|
||||
child_bound_index,
|
||||
)
|
||||
}) as ChildrenClosure<BlockCipher>,
|
||||
);
|
||||
// The parent next index is the bound of the last child.
|
||||
let next_index = first_index.increased(n_bytes.0 * n_children.0);
|
||||
self.state = State::new(next_index);
|
||||
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
pub(crate) fn is_fork_in_bound(
|
||||
&self,
|
||||
n_child: ChildrenCount,
|
||||
child_bytes: BytesPerChild,
|
||||
) -> bool {
|
||||
let mut end = self.state.table_index();
|
||||
end.increase(n_child.0 * child_bytes.0);
|
||||
end <= self.last
|
||||
}
|
||||
}
|
||||
|
||||
impl<BlockCipher: AesBlockCipher> Iterator for AesCtrGenerator<BlockCipher> {
|
||||
type Item = u8;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.state.table_index() >= self.last {
|
||||
None
|
||||
} else {
|
||||
match self.state.increment() {
|
||||
ShiftAction::OutputByte(BufferPointer(ptr)) => Some(self.buffer[ptr]),
|
||||
ShiftAction::RefreshBatchAndOutputByte(aes_index, BufferPointer(ptr)) => {
|
||||
self.buffer = self.block_cipher.generate_batch(aes_index);
|
||||
Some(self.buffer[ptr])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod aes_ctr_generic_test {
|
||||
#![allow(unused)] // to please clippy when tests are not activated
|
||||
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::index::{AesIndex, ByteIndex};
|
||||
use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
const REPEATS: usize = 1_000_000;
|
||||
|
||||
pub fn any_table_index() -> impl Iterator<Item = TableIndex> {
|
||||
std::iter::repeat_with(|| {
|
||||
TableIndex::new(
|
||||
AesIndex(thread_rng().gen()),
|
||||
ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn any_usize() -> impl Iterator<Item = usize> {
|
||||
std::iter::repeat_with(|| thread_rng().gen())
|
||||
}
|
||||
|
||||
pub fn any_children_count() -> impl Iterator<Item = ChildrenCount> {
|
||||
std::iter::repeat_with(|| ChildrenCount(thread_rng().gen::<usize>() % 2048 + 1))
|
||||
}
|
||||
|
||||
pub fn any_bytes_per_child() -> impl Iterator<Item = BytesPerChild> {
|
||||
std::iter::repeat_with(|| BytesPerChild(thread_rng().gen::<usize>() % 2048 + 1))
|
||||
}
|
||||
|
||||
pub fn any_key() -> impl Iterator<Item = AesKey> {
|
||||
std::iter::repeat_with(|| AesKey(thread_rng().gen()))
|
||||
}
|
||||
|
||||
/// Output a valid fork:
|
||||
/// a table index t,
|
||||
/// a number of children nc,
|
||||
/// a number of bytes per children nb
|
||||
/// and a positive integer i such that:
|
||||
/// increase(t, nc*nb+i) < MAX with MAX the largest table index.
|
||||
///
|
||||
/// Put differently, if we initialize a parent generator at t and fork it with (nc, nb), our
|
||||
/// parent generator current index gets shifted to an index, distant of at least i bytes of
|
||||
/// the max index.
|
||||
pub fn any_valid_fork(
|
||||
) -> impl Iterator<Item = (TableIndex, ChildrenCount, BytesPerChild, usize)> {
|
||||
any_table_index()
|
||||
.zip(any_children_count())
|
||||
.zip(any_bytes_per_child())
|
||||
.zip(any_usize())
|
||||
.map(|(((t, nc), nb), i)| (t, nc, nb, i))
|
||||
.filter(|(t, nc, nb, i)| {
|
||||
TableIndex::distance(&TableIndex::LAST, t).unwrap().0 > (nc.0 * nb.0 + i) as u128
|
||||
})
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the table index of the first child is the same as the table index of
|
||||
/// the parent before the fork.
|
||||
pub fn prop_fork_first_state_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
let first_child = forked_generator.try_fork(nc, nb).unwrap().next().unwrap();
|
||||
assert_eq!(original_generator.table_index(), first_child.table_index());
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the table index of the first byte outputted by the parent after the
|
||||
/// fork, is the bound of the last child of the fork.
|
||||
pub fn prop_fork_last_bound_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let mut parent_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let last_child = parent_generator.try_fork(nc, nb).unwrap().last().unwrap();
|
||||
assert_eq!(
|
||||
parent_generator.table_index().incremented(),
|
||||
last_child.get_bound()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the bound of the parent does not change.
|
||||
pub fn prop_fork_parent_bound_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
forked_generator.try_fork(nc, nb).unwrap().last().unwrap();
|
||||
assert_eq!(original_generator.get_bound(), forked_generator.get_bound());
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the parent table index is increased of the number of children
|
||||
/// multiplied by the number of bytes per child.
|
||||
pub fn prop_fork_parent_state_table_index<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
forked_generator.try_fork(nc, nb).unwrap().last().unwrap();
|
||||
assert_eq!(
|
||||
forked_generator.table_index(),
|
||||
// Decrement accounts for the fact that the table index stored is the previous one
|
||||
t.increased(nc.0 * nb.0).decremented()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the bytes outputted by the children in the fork order form the same
|
||||
/// sequence the parent would have had yielded no fork had happened.
|
||||
pub fn prop_fork<G: AesBlockCipher>() {
|
||||
for _ in 0..1000 {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let bytes_to_go = nc.0 * nb.0;
|
||||
let original_generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let mut forked_generator = original_generator.clone();
|
||||
let initial_output: Vec<u8> = original_generator.take(bytes_to_go).collect();
|
||||
let forked_output: Vec<u8> = forked_generator
|
||||
.try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.flat_map(|child| child.collect::<Vec<_>>())
|
||||
.collect();
|
||||
assert_eq!(initial_output, forked_output);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, all children got a number of remaining bytes equals to the number of
|
||||
/// bytes per child given as fork input.
|
||||
pub fn prop_fork_children_remaining_bytes<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let mut generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
assert!(generator
|
||||
.try_fork(nc, nb)
|
||||
.unwrap()
|
||||
.all(|c| c.remaining_bytes().0 == nb.0 as u128));
|
||||
}
|
||||
}
|
||||
|
||||
/// Check the property:
|
||||
/// On a valid fork, the number of remaining bybtes of the parent is reduced by the number
|
||||
/// of children multiplied by the number of bytes per child.
|
||||
pub fn prop_fork_parent_remaining_bytes<G: AesBlockCipher>() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, nc, nb, i) = any_valid_fork().next().unwrap();
|
||||
let k = any_key().next().unwrap();
|
||||
let bytes_to_go = nc.0 * nb.0;
|
||||
let mut generator =
|
||||
AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
|
||||
let before_remaining_bytes = generator.remaining_bytes();
|
||||
let _ = generator.try_fork(nc, nb).unwrap();
|
||||
let after_remaining_bytes = generator.remaining_bytes();
|
||||
assert_eq!(
|
||||
before_remaining_bytes.0 - after_remaining_bytes.0,
|
||||
bytes_to_go as u128
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,389 +0,0 @@
|
||||
use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
|
||||
use crate::generators::ByteCount;
|
||||
use std::cmp::Ordering;
|
||||
|
||||
/// A structure representing an [aes index](#coarse-grained-pseudo-random-table-lookup).
|
||||
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
|
||||
pub struct AesIndex(pub u128);
|
||||
|
||||
/// A structure representing a [byte index](#fine-grained-pseudo-random-table-lookup).
|
||||
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
|
||||
pub struct ByteIndex(pub usize);
|
||||
|
||||
/// A structure representing a [table index](#fine-grained-pseudo-random-table-lookup)
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct TableIndex {
|
||||
pub(crate) aes_index: AesIndex,
|
||||
pub(crate) byte_index: ByteIndex,
|
||||
}
|
||||
|
||||
impl TableIndex {
|
||||
/// The first table index.
|
||||
pub const FIRST: TableIndex = TableIndex {
|
||||
aes_index: AesIndex(0),
|
||||
byte_index: ByteIndex(0),
|
||||
};
|
||||
|
||||
/// The second table index.
|
||||
pub const SECOND: TableIndex = TableIndex {
|
||||
aes_index: AesIndex(0),
|
||||
byte_index: ByteIndex(1),
|
||||
};
|
||||
|
||||
/// The last table index.
|
||||
pub const LAST: TableIndex = TableIndex {
|
||||
aes_index: AesIndex(u128::MAX),
|
||||
byte_index: ByteIndex(BYTES_PER_AES_CALL - 1),
|
||||
};
|
||||
|
||||
/// Creates a table index from an aes index and a byte index.
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
pub fn new(aes_index: AesIndex, byte_index: ByteIndex) -> Self {
|
||||
assert!(byte_index.0 < BYTES_PER_AES_CALL);
|
||||
TableIndex {
|
||||
aes_index,
|
||||
byte_index,
|
||||
}
|
||||
}
|
||||
|
||||
/// Shifts the table index forward of `shift` bytes.
|
||||
pub fn increase(&mut self, shift: usize) {
|
||||
// Compute full shifts to avoid overflows
|
||||
let full_aes_shifts = shift / BYTES_PER_AES_CALL;
|
||||
let shift_remainder = shift % BYTES_PER_AES_CALL;
|
||||
|
||||
// Get the additional shift if any
|
||||
let new_byte_index = self.byte_index.0 + shift_remainder;
|
||||
let full_aes_shifts = full_aes_shifts + new_byte_index / BYTES_PER_AES_CALL;
|
||||
|
||||
// Store the reaminder in the byte index
|
||||
self.byte_index.0 = new_byte_index % BYTES_PER_AES_CALL;
|
||||
|
||||
self.aes_index.0 = self.aes_index.0.wrapping_add(full_aes_shifts as u128);
|
||||
}
|
||||
|
||||
/// Shifts the table index backward of `shift` bytes.
|
||||
pub fn decrease(&mut self, shift: usize) {
|
||||
let remainder = shift % BYTES_PER_AES_CALL;
|
||||
if remainder <= self.byte_index.0 {
|
||||
self.aes_index.0 = self
|
||||
.aes_index
|
||||
.0
|
||||
.wrapping_sub((shift / BYTES_PER_AES_CALL) as u128);
|
||||
self.byte_index.0 -= remainder;
|
||||
} else {
|
||||
self.aes_index.0 = self
|
||||
.aes_index
|
||||
.0
|
||||
.wrapping_sub((shift / BYTES_PER_AES_CALL) as u128 + 1);
|
||||
self.byte_index.0 += BYTES_PER_AES_CALL - remainder;
|
||||
}
|
||||
}
|
||||
|
||||
/// Shifts the table index forward of one byte.
|
||||
pub fn increment(&mut self) {
|
||||
self.increase(1)
|
||||
}
|
||||
|
||||
/// Shifts the table index backward of one byte.
|
||||
pub fn decrement(&mut self) {
|
||||
self.decrease(1)
|
||||
}
|
||||
|
||||
/// Returns the table index shifted forward by `shift` bytes.
|
||||
pub fn increased(mut self, shift: usize) -> Self {
|
||||
self.increase(shift);
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the table index shifted backward by `shift` bytes.
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
pub fn decreased(mut self, shift: usize) -> Self {
|
||||
self.decrease(shift);
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the table index to the next byte.
|
||||
pub fn incremented(mut self) -> Self {
|
||||
self.increment();
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the table index to the previous byte.
|
||||
pub fn decremented(mut self) -> Self {
|
||||
self.decrement();
|
||||
self
|
||||
}
|
||||
|
||||
/// Returns the distance between two table indices in bytes.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// This method assumes that the `larger` input is, well, larger than the `smaller` input. If
|
||||
/// this is not the case, the method returns `None`. Also, note that `ByteCount` uses the
|
||||
/// `u128` datatype to store the byte count. Unfortunately, the number of bytes between two
|
||||
/// table indices is in ⟦0;2¹³² -1⟧. When the distance is greater than 2¹²⁸ - 1, we saturate
|
||||
/// the count at 2¹²⁸ - 1.
|
||||
pub fn distance(larger: &Self, smaller: &Self) -> Option<ByteCount> {
|
||||
match std::cmp::Ord::cmp(larger, smaller) {
|
||||
Ordering::Less => None,
|
||||
Ordering::Equal => Some(ByteCount(0)),
|
||||
Ordering::Greater => {
|
||||
let mut result = larger.aes_index.0 - smaller.aes_index.0;
|
||||
result = result.saturating_mul(BYTES_PER_AES_CALL as u128);
|
||||
result = result.saturating_add(larger.byte_index.0 as u128);
|
||||
result = result.saturating_sub(smaller.byte_index.0 as u128);
|
||||
Some(ByteCount(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for TableIndex {}
|
||||
|
||||
impl PartialEq<Self> for TableIndex {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
matches!(self.partial_cmp(other), Some(Ordering::Equal))
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd<Self> for TableIndex {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for TableIndex {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
match self.aes_index.cmp(&other.aes_index) {
|
||||
Ordering::Equal => self.byte_index.cmp(&other.byte_index),
|
||||
other => other,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
const REPEATS: usize = 1_000_000;
|
||||
|
||||
fn any_table_index() -> impl Iterator<Item = TableIndex> {
|
||||
std::iter::repeat_with(|| {
|
||||
TableIndex::new(
|
||||
AesIndex(thread_rng().gen()),
|
||||
ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
fn any_usize() -> impl Iterator<Item = usize> {
|
||||
std::iter::repeat_with(|| thread_rng().gen())
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
/// Verifies that the constructor of `TableIndex` panics when the byte index is too large.
|
||||
fn test_table_index_new_panic() {
|
||||
TableIndex::new(AesIndex(12), ByteIndex(144));
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Verifies that the `TableIndex` wraps nicely with predecessor
|
||||
fn test_table_index_predecessor_edge() {
|
||||
assert_eq!(TableIndex::FIRST.decremented(), TableIndex::LAST);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Verifies that the `TableIndex` wraps nicely with successor
|
||||
fn test_table_index_successor_edge() {
|
||||
assert_eq!(TableIndex::LAST.incremented(), TableIndex::FIRST);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check that the table index distance saturates nicely.
|
||||
fn prop_table_index_distance_saturates() {
|
||||
assert_eq!(
|
||||
TableIndex::distance(&TableIndex::LAST, &TableIndex::FIRST)
|
||||
.unwrap()
|
||||
.0,
|
||||
u128::MAX
|
||||
)
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t,
|
||||
/// distance(t, t) = Some(0).
|
||||
fn prop_table_index_distance_zero() {
|
||||
for _ in 0..REPEATS {
|
||||
let t = any_table_index().next().unwrap();
|
||||
assert_eq!(TableIndex::distance(&t, &t), Some(ByteCount(0)));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t1, t2 such that t1 < t2,
|
||||
/// distance(t1, t2) = None.
|
||||
fn prop_table_index_distance_wrong_order_none() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t1, t2) = any_table_index()
|
||||
.zip(any_table_index())
|
||||
.find(|(t1, t2)| t1 < t2)
|
||||
.unwrap();
|
||||
assert_eq!(TableIndex::distance(&t1, &t2), None);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t1, t2 such that t1 > t2,
|
||||
/// distance(t1, t2) = Some(v) where v is strictly positive.
|
||||
fn prop_table_index_distance_some_positive() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t1, t2) = any_table_index()
|
||||
.zip(any_table_index())
|
||||
.find(|(t1, t2)| t1 > t2)
|
||||
.unwrap();
|
||||
assert!(matches!(TableIndex::distance(&t1, &t2), Some(ByteCount(v)) if v > 0));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive i such that i < distance (MAX, t) with MAX the largest
|
||||
/// table index,
|
||||
/// distance(t.increased(i), t) = Some(i).
|
||||
fn prop_table_index_distance_increase() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, inc) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.find(|(t, inc)| {
|
||||
(*inc as u128) < TableIndex::distance(&TableIndex::LAST, t).unwrap().0
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
TableIndex::distance(&t.increased(inc), &t).unwrap().0 as usize,
|
||||
inc
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, t =? t = true.
|
||||
fn prop_table_index_equality() {
|
||||
for _ in 0..REPEATS {
|
||||
let t = any_table_index().next().unwrap();
|
||||
assert_eq!(
|
||||
std::cmp::PartialOrd::partial_cmp(&t, &t),
|
||||
Some(std::cmp::Ordering::Equal)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive i such that i < distance (MAX, t) with MAX the largest
|
||||
/// table index,
|
||||
/// t.increased(i) >? t = true.
|
||||
fn prop_table_index_greater() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, inc) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.find(|(t, inc)| {
|
||||
(*inc as u128) < TableIndex::distance(&TableIndex::LAST, t).unwrap().0
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
std::cmp::PartialOrd::partial_cmp(&t.increased(inc), &t),
|
||||
Some(std::cmp::Ordering::Greater),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive i such that i < distance (t, 0) with MAX the largest
|
||||
/// table index,
|
||||
/// t.decreased(i) <? t = true.
|
||||
fn prop_table_index_less() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, inc) = any_table_index()
|
||||
.zip(any_usize())
|
||||
.find(|(t, inc)| {
|
||||
(*inc as u128) < TableIndex::distance(t, &TableIndex::FIRST).unwrap().0
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
std::cmp::PartialOrd::partial_cmp(&t.decreased(inc), &t),
|
||||
Some(std::cmp::Ordering::Less)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t,
|
||||
/// successor(predecessor(t)) = t.
|
||||
fn prop_table_index_decrement_increment() {
|
||||
for _ in 0..REPEATS {
|
||||
let t = any_table_index().next().unwrap();
|
||||
assert_eq!(t.decremented().incremented(), t);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t,
|
||||
/// predecessor(successor(t)) = t.
|
||||
fn prop_table_index_increment_decrement() {
|
||||
for _ in 0..REPEATS {
|
||||
let t = any_table_index().next().unwrap();
|
||||
assert_eq!(t.incremented().decremented(), t);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive integer i,
|
||||
/// increase(decrease(t, i), i) = t.
|
||||
fn prop_table_index_increase_decrease() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, i) = any_table_index().zip(any_usize()).next().unwrap();
|
||||
assert_eq!(t.increased(i).decreased(i), t);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check the property:
|
||||
/// For all table indices t, positive integer i,
|
||||
/// decrease(increase(t, i), i) = t.
|
||||
fn prop_table_index_decrease_increase() {
|
||||
for _ in 0..REPEATS {
|
||||
let (t, i) = any_table_index().zip(any_usize()).next().unwrap();
|
||||
assert_eq!(t.decreased(i).increased(i), t);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Check that a big increase does not overflow
|
||||
fn prop_table_increase_max_no_overflow() {
|
||||
let first = TableIndex::FIRST;
|
||||
// Increase so that ByteIndex is at 1usize
|
||||
let second = first.increased(1);
|
||||
|
||||
// Now increase by usize::MAX, as the underlying byte index stores a usize this may overflow
|
||||
// depending on implementation, ensure it does not overflow
|
||||
let big_increase = second.increased(usize::MAX);
|
||||
let total_full_aes_shifts = (1u128 + usize::MAX as u128) / BYTES_PER_AES_CALL as u128;
|
||||
|
||||
assert_eq!(
|
||||
big_increase,
|
||||
TableIndex::new(AesIndex(total_full_aes_shifts), ByteIndex(0))
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,223 +0,0 @@
|
||||
//! A module implementing the random generator api with batched aes calls.
|
||||
//!
|
||||
//! This module provides a generic [`AesCtrGenerator`] structure which implements the
|
||||
//! [`super::RandomGenerator`] api using the AES block cipher in counter mode. That is, the
|
||||
//! generator holds a state (i.e. counter) which is incremented iteratively, to produce the stream
|
||||
//! of random values:
|
||||
//! ```ascii
|
||||
//! state=0 state=1 state=2
|
||||
//! ╔══↧══╗ ╔══↧══╗ ╔══↧══╗
|
||||
//! key ↦ AES ║ key ↦ AES ║ key ↦ AES ║ ...
|
||||
//! ╚══↧══╝ ╚══↧══╝ ╚══↧══╝
|
||||
//! output0 output1 output2
|
||||
//!
|
||||
//! t=0 t=1 t=2
|
||||
//! ```
|
||||
//!
|
||||
//! The [`AesCtrGenerator`] structure is generic over the AES block ciphers, which are
|
||||
//! represented by the [`AesBlockCipher`] trait. Consequently, implementers only need to implement
|
||||
//! the `AesBlockCipher` trait, to benefit from the whole api of the `AesCtrGenerator` structure.
|
||||
//!
|
||||
//! In the following section, we give details on the implementation of this generic generator.
|
||||
//!
|
||||
//! Coarse-grained pseudo-random lookup table
|
||||
//! =========================================
|
||||
//!
|
||||
//! To generate random values, we use the AES block cipher in counter mode. If we denote f the aes
|
||||
//! encryption function, we have:
|
||||
//! ```ascii
|
||||
//! f: ⟦0;2¹²⁸ -1⟧ X ⟦0;2¹²⁸ -1⟧ ↦ ⟦0;2¹²⁸ -1⟧
|
||||
//! f(secret_key, input) ↦ output
|
||||
//! ```
|
||||
|
||||
//! If we fix the secret key to a value k, we have a function fₖ from ⟦0;2¹²⁸ -1⟧ to ⟦0;2¹²⁸-1⟧,
|
||||
//! transforming the state of the counter into a pseudo random value. Essentially, this fₖ
|
||||
//! function can be considered as a the following lookup table, containing 2¹²⁸ pseudo-random
|
||||
//! values:
|
||||
//! ```ascii
|
||||
//! ╭──────────────┬──────────────┬─────┬──────────────╮
|
||||
//! │ 0 │ 1 │ │ 2¹²⁸ -1 │
|
||||
//! ├──────────────┼──────────────┼─────┼──────────────┤
|
||||
//! │ fₖ(0) │ fₖ(1) │ │ fₖ(2¹²⁸ -1) │
|
||||
//! ╔═══════↧══════╦═══════↧══════╦═════╦═══════↧══════╗
|
||||
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
|
||||
//! ║┃ u128 ┃║┃ u128 ┃║ ... ║┃ u128 ┃║
|
||||
//! ║┗━━━━━━━━━━━━┛║┗━━━━━━━━━━━━┛║ ║┗━━━━━━━━━━━━┛║
|
||||
//! ╚══════════════╩══════════════╩═════╩══════════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! An input to the fₖ function is called an _aes index_ (also called state or counter in the
|
||||
//! standards) of the pseudo-random table. The [`AesIndex`] structure defined in this module
|
||||
//! represents such an index in the code.
|
||||
//!
|
||||
//! Fine-grained pseudo-random table lookup
|
||||
//! =======================================
|
||||
//!
|
||||
//! Since we want to deliver the pseudo-random bytes one by one, we have to come with a finer
|
||||
//! grained indexing. Fortunately, each `u128` value outputted by fₖ can be seen as a table of 16
|
||||
//! `u8`:
|
||||
//! ```ascii
|
||||
//! ╭──────────────┬──────────────┬─────┬──────────────╮
|
||||
//! │ 0 │ 1 │ │ 2¹²⁸ -1 │
|
||||
//! ├──────────────┼──────────────┼─────┼──────────────┤
|
||||
//! │ fₖ(0) │ fₖ(1) │ │ fₖ(2¹²⁸ -1) │
|
||||
//! ╔═══════↧══════╦═══════↧══════╦═════╦═══════↧══════╗
|
||||
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
|
||||
//! ║┃ u128 ┃║┃ u128 ┃║ ║┃ u128 ┃║
|
||||
//! ║┣━━┯━━┯━━━┯━━┫║┣━━┯━━┯━━━┯━━┫║ ... ║┣━━┯━━┯━━━┯━━┫║
|
||||
//! ║┃u8│u8│...│u8┃║┃u8│u8│...│u8┃║ ║┃u8│u8│...│u8┃║
|
||||
//! ║┗━━┷━━┷━━━┷━━┛║┗━━┷━━┷━━━┷━━┛║ ║┗━━┷━━┷━━━┷━━┛║
|
||||
//! ╚══════════════╩══════════════╩═════╩══════════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! We introduce a second function to select a chunk of 8 bits:
|
||||
//! ```ascii
|
||||
//! g: ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ ↦ ⟦0;2⁸ -1⟧
|
||||
//! g(big_int, index) ↦ byte
|
||||
//! ```
|
||||
//!
|
||||
//! If we fix the `u128` value to a value e, we have a function gₑ from ⟦0;15⟧ to ⟦0;2⁸ -1⟧
|
||||
//! transforming an index into a pseudo-random byte:
|
||||
//! ```ascii
|
||||
//! ┏━━━━━━━━┯━━━━━━━━┯━━━┯━━━━━━━━┓
|
||||
//! ┃ u8 │ u8 │...│ u8 ┃
|
||||
//! ┗━━━━━━━━┷━━━━━━━━┷━━━┷━━━━━━━━┛
|
||||
//! │ gₑ(0) │ gₑ(1) │ │ gₑ(15) │
|
||||
//! ╰────────┴────────┴───┴────────╯
|
||||
//! ```
|
||||
//!
|
||||
//! We call this input to the gₑ function, a _byte index_ of the pseudo-random table. The
|
||||
//! [`ByteIndex`] structure defined in this module represents such an index in the code.
|
||||
//!
|
||||
//! By using both the g and the fₖ functions, we can define a new function l which allows to index
|
||||
//! any byte of the pseudo-random table:
|
||||
//! ```ascii
|
||||
//! l: ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ ↦ ⟦0;2⁸ -1⟧
|
||||
//! l(aes_index, byte_index) ↦ g(fₖ(aes_index), byte_index)
|
||||
//! ```
|
||||
//!
|
||||
//! In this sense, any member of ⟦0;2¹²⁸ -1⟧ X ⟦0;15⟧ uniquely defines a byte in this pseudo-random
|
||||
//! table:
|
||||
//! ```ascii
|
||||
//! e = fₖ(a)
|
||||
//! ╔══════════════╦═══════↧══════╦═════╦══════════════╗
|
||||
//! ║┏━━━━━━━━━━━━┓║┏━━━━━━━━━━━━┓║ ║┏━━━━━━━━━━━━┓║
|
||||
//! ║┃ u128 ┃║┃ u128 ┃║ ║┃ u128 ┃║
|
||||
//! ║┣━━┯━━┯━━━┯━━┫║┣━━┯━━┯━━━┯━━┫║ ... ║┣━━┯━━┯━━━┯━━┫║
|
||||
//! ║┃u8│u8│...│u8┃║┃u8│u8│...│u8┃║ ║┃u8│u8│...│u8┃║
|
||||
//! ║┗━━┷━━┷━━━┷━━┛║┗━━┷↥━┷━━━┷━━┛║ ║┗━━┷━━┷━━━┷━━┛║
|
||||
//! ║ ║│ gₑ(b) │║ ║ ║
|
||||
//! ║ ║╰────────────╯║ ║ ║
|
||||
//! ╚══════════════╩══════════════╩═════╩══════════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! We call this input to the l function, a _table index_ of the pseudo-random table. The
|
||||
//! [`TableIndex`] structure defined in this module represents such an index in the code.
|
||||
//!
|
||||
//! Prngs current table index
|
||||
//! =========================
|
||||
//!
|
||||
//! When created, a prng is given an initial _table index_, denoted (a₀, b₀), which identifies the
|
||||
//! first byte of the table to be outputted by the prng. Then, each time the prng is queried for a
|
||||
//! new value, the byte corresponding to the current _table index_ is returned, and the current
|
||||
//! _table index_ is incremented:
|
||||
//! ```ascii
|
||||
//! e = fₖ(a₀) e = fₖ(a₁)
|
||||
//! ╔═════↧═════╦═══════════╦═════╦═══════════╗ ╔═══════════╦═════↧═════╦═════╦═══════════╗
|
||||
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║ ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
|
||||
//! ║┃ │ │...│ ┃║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║┃ │ │...│ ┃║ ║┃ │ │...│ ┃║
|
||||
//! ║┗━┷━┷━━━┷↥┛║┗━┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ → ║┗━┷━┷━━━┷━┛║┗↥┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║
|
||||
//! ║│ gₑ(b₀) │║ ║ ║ ║ ║ ║│ gₑ(b₁) │║ ║ ║
|
||||
//! ║╰─────────╯║ ║ ║ ║ ║ ║╰─────────╯║ ║ ║
|
||||
//! ╚═══════════╩═══════════╩═════╩═══════════╝ ╚═══════════╩═══════════╩═════╩═══════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! Prng bound
|
||||
//! ==========
|
||||
//!
|
||||
//! When created, a prng is also given a _bound_ (aₘ, bₘ) , that is a table index which it is not
|
||||
//! allowed to exceed:
|
||||
//! ```ascii
|
||||
//! e = fₖ(a₀)
|
||||
//! ╔═════↧═════╦═══════════╦═════╦═══════════╗
|
||||
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
|
||||
//! ║┃ │ │...│ ┃║┃ │╳│...│╳┃║ ║┃╳│╳│...│╳┃║
|
||||
//! ║┗━┷━┷━━━┷↥┛║┗━┷━┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ The current byte can be returned.
|
||||
//! ║│ gₑ(b₀) │║ ║ ║ ║
|
||||
//! ║╰─────────╯║ ║ ║ ║
|
||||
//! ╚═══════════╩═══════════╩═════╩═══════════╝
|
||||
//!
|
||||
//! e = fₖ(aₘ)
|
||||
//! ╔═══════════╦═════↧═════╦═════╦═══════════╗
|
||||
//! ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║┏━┯━┯━━━┯━┓║
|
||||
//! ║┃ │ │...│ ┃║┃ │╳│...│╳┃║ ║┃╳│╳│...│╳┃║ The table index reached the bound,
|
||||
//! ║┗━┷━┷━━━┷━┛║┗━┷↥┷━━━┷━┛║ ║┗━┷━┷━━━┷━┛║ the current byte can not be
|
||||
//! ║ ║│ gₑ(bₘ) │║ ║ ║ returned.
|
||||
//! ║ ║╰─────────╯║ ║ ║
|
||||
//! ╚═══════════╩═══════════╩═════╩═══════════╝
|
||||
//! ```
|
||||
//!
|
||||
//! Buffering
|
||||
//! =========
|
||||
//!
|
||||
//! Calling the aes function every time we need to output a single byte would be a huge waste of
|
||||
//! resources. In practice, we call aes 8 times in a row, for 8 successive values of aes index, and
|
||||
//! store the results in a buffer. For platforms which have a dedicated aes chip, this allows
//! filling the unit pipeline and reduces the amortized cost of the aes function.
|
||||
//!
|
||||
//! Together with the current table index of the prng, we also store a pointer p (initialized at
|
||||
//! p₀=b₀) to the current byte in the buffer. If we denote v the lookup function we have :
|
||||
//! ```ascii
|
||||
//! e = fₖ(a₀) Buffer(length=128)
|
||||
//! ╔═════╦═══════════╦═════↧═════╦═══════════╦═════╗ ┏━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓
|
||||
//! ║ ... ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║ ┃▓│▓│▓│▓│▓│▓│▓│▓│...│▓┃
|
||||
//! ║ ║┃ │ │...│ ┃║┃▓│▓│...│▓┃║┃▓│▓│...│▓┃║ ║ ┗━┷↥┷━┷━┷━┷━┷━┷━┷━━━┷━┛
|
||||
//! ║ ║┗━┷━┷━━━┷━┛║┗━┷↥┷━━━┷━┛║┗━┷━┷━━━┷━┛║ ║ │ v(p₀) │
|
||||
//! ║ ║ ║│ gₑ(b₀) │║ ║ ║ ╰─────────────────────╯
|
||||
//! ║ ║ ║╰─────────╯║ ║ ║
|
||||
//! ╚═════╩═══════════╩═══════════╩═══════════╩═════╝
|
||||
//! ```
|
||||
//!
|
||||
//! We call this input to the v function, a _buffer pointer_. The [`BufferPointer`] structure
|
||||
//! defined in this module represents such a pointer in the code.
|
||||
//!
|
||||
//! When the table index is incremented, the buffer pointer is incremented alongside:
|
||||
//! ```ascii
|
||||
//! e = fₖ(a) Buffer(length=128)
|
||||
//! ╔═════╦═══════════╦═════↧═════╦═══════════╦═════╗ ┏━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓
|
||||
//! ║ ... ║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║┏━┯━┯━━━┯━┓║ ... ║ ┃▓│▓│▓│▓│▓│▓│▓│▓│...│▓┃
|
||||
//! ║ ║┃ │ │...│ ┃║┃▓│▓│...│▓┃║┃▓│▓│...│▓┃║ ║ ┗━┷━┷↥┷━┷━┷━┷━┷━┷━━━┷━┛
|
||||
//! ║ ║┗━┷━┷━━━┷━┛║┗━┷━┷↥━━┷━┛║┗━┷━┷━━━┷━┛║ ║ │ v(p) │
|
||||
//! ║ ║ ║│ gₑ(b) │║ ║ ║ ╰─────────────────────╯
|
||||
//! ║ ║ ║╰─────────╯║ ║ ║
|
||||
//! ╚═════╩═══════════╩═══════════╩═══════════╩═════╝
|
||||
//! ```
|
||||
//!
|
||||
//! When the buffer pointer is incremented it is checked against the size of the buffer, and if
|
||||
//! necessary, a new batch of aes index values is generated.
|
||||
|
||||
/// Number of aes calls performed in a row for one batch (helps fill the pipeline of hardware
/// aes units, reducing the amortized cost of one call).
pub const AES_CALLS_PER_BATCH: usize = 8;
/// Number of bytes outputted by a single aes call (one 128-bit block).
pub const BYTES_PER_AES_CALL: usize = 128 / 8;
/// Total number of pseudo-random bytes produced by one batch of aes calls.
pub const BYTES_PER_BATCH: usize = BYTES_PER_AES_CALL * AES_CALLS_PER_BATCH;
|
||||
|
||||
/// A module containing structures to manage table indices.
mod index;
pub use index::*;

/// A module containing structures to manage table indices and buffer pointers together properly.
mod states;
pub use states::*;

/// A module containing an abstraction for aes block ciphers.
mod block_cipher;
pub use block_cipher::*;

/// A module containing a generic implementation of a random generator.
mod generic;
pub use generic::*;

/// A module extending `generic` to the `rayon` paradigm.
// Only compiled (and re-exported) when the "parallel" feature is enabled.
#[cfg(feature = "parallel")]
mod parallel;
#[cfg(feature = "parallel")]
pub use parallel::*;
|
||||
@@ -1,222 +0,0 @@
|
||||
use crate::generators::aes_ctr::{
|
||||
AesBlockCipher, AesCtrGenerator, ChildrenClosure, State, TableIndex,
|
||||
};
|
||||
use crate::generators::{BytesPerChild, ChildrenCount, ForkError};
|
||||
|
||||
/// A type alias for the parallel children iterator type.
///
/// The closure slot of the `Map` adaptor is an `fn` pointer rather than an anonymous closure
/// type: this is what makes the full iterator type nameable, and it matches the explicit
/// `as ChildrenClosure<BlockCipher>` cast performed in `par_try_fork`.
pub type ParallelChildrenIterator<BlockCipher> = rayon::iter::Map<
    rayon::iter::Zip<
        rayon::range::Iter<usize>,
        rayon::iter::RepeatN<(Box<BlockCipher>, TableIndex, BytesPerChild)>,
    >,
    fn((usize, (Box<BlockCipher>, TableIndex, BytesPerChild))) -> AesCtrGenerator<BlockCipher>,
>;
|
||||
|
||||
impl<BlockCipher: AesBlockCipher> AesCtrGenerator<BlockCipher> {
    /// Tries to fork the current generator into `n_children` generators each able to output
    /// `n_bytes` random bytes, delivered as a parallel (`rayon`) iterator.
    ///
    /// On success, the parent's state is advanced past the whole range handed out to the
    /// children, so parent and children never output overlapping bytes.
    ///
    /// # Errors
    ///
    /// Returns `ZeroChildrenCount` or `ZeroBytesPerChild` on degenerate inputs, and
    /// `ForkTooLarge` when the requested range would exceed the parent's bound.
    ///
    /// # Notes
    ///
    /// This method requires the "parallel" feature.
    pub fn par_try_fork(
        &mut self,
        n_children: ChildrenCount,
        n_bytes: BytesPerChild,
    ) -> Result<ParallelChildrenIterator<BlockCipher>, ForkError>
    where
        BlockCipher: Send + Sync,
    {
        use rayon::prelude::*;

        if n_children.0 == 0 {
            return Err(ForkError::ZeroChildrenCount);
        }
        if n_bytes.0 == 0 {
            return Err(ForkError::ZeroBytesPerChild);
        }
        if !self.is_fork_in_bound(n_children, n_bytes) {
            return Err(ForkError::ForkTooLarge);
        }

        // The state currently stored in the parent generator points to the table index of the last
        // generated byte. The first index to be generated is the next one :
        let first_index = self.state.table_index().incremented();
        let output = (0..n_children.0)
            .into_par_iter()
            // Each child gets its own copy of the block cipher plus the shared fork parameters.
            .zip(rayon::iter::repeatn(
                (self.block_cipher.clone(), first_index, n_bytes),
                n_children.0,
            ))
            .map(
                // This map is a little weird because we need to cast the closure to a fn pointer
                // that matches the signature of `ChildrenIterator<BlockCipher>`. Unfortunately,
                // the compiler does not manage to coerce this one automatically.
                (|(i, (block_cipher, first_index, n_bytes))| {
                    // The first index to be outputted by the child is the `first_index` shifted by
                    // the proper amount of `child_bytes`.
                    let child_first_index = first_index.increased(n_bytes.0 * i);
                    // The bound of the child is the first index of its next sibling.
                    let child_bound_index = first_index.increased(n_bytes.0 * (i + 1));
                    AesCtrGenerator::from_block_cipher(
                        block_cipher,
                        child_first_index,
                        child_bound_index,
                    )
                }) as ChildrenClosure<BlockCipher>,
            );
        // The parent next index is the bound of the last child.
        let next_index = first_index.increased(n_bytes.0 * n_children.0);
        self.state = State::new(next_index);

        Ok(output)
    }
}
|
||||
|
||||
#[cfg(test)]
pub mod aes_ctr_parallel_generic_tests {
    //! Generic properties of `par_try_fork`, parameterized over the block cipher so that each
    //! concrete backend can instantiate them as `#[test]`s.

    use super::*;
    use crate::generators::aes_ctr::aes_ctr_generic_test::{any_key, any_valid_fork};
    use rayon::prelude::*;

    const REPEATS: usize = 1_000_000;

    /// Check the property:
    ///     On a valid fork, the table index of the first child is the same as the table index of
    ///     the parent before the fork.
    pub fn prop_fork_first_state_table_index<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            // (t, nc, nb, i): start index, children count, bytes per child, and extra slack
            // between the end of the forked range and the parent bound.
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let original_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let mut forked_generator = original_generator.clone();
            let first_child = forked_generator
                .par_try_fork(nc, nb)
                .unwrap()
                .find_first(|_| true)
                .unwrap();
            assert_eq!(original_generator.table_index(), first_child.table_index());
        }
    }

    /// Check the property:
    ///     On a valid fork, the table index of the first byte outputted by the parent after the
    ///     fork, is the bound of the last child of the fork.
    pub fn prop_fork_last_bound_table_index<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let mut parent_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let last_child = parent_generator
                .par_try_fork(nc, nb)
                .unwrap()
                .find_last(|_| true)
                .unwrap();
            assert_eq!(
                parent_generator.table_index().incremented(),
                last_child.get_bound()
            );
        }
    }

    /// Check the property:
    ///     On a valid fork, the bound of the parent does not change.
    pub fn prop_fork_parent_bound_table_index<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let original_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let mut forked_generator = original_generator.clone();
            forked_generator
                .par_try_fork(nc, nb)
                .unwrap()
                .find_last(|_| true)
                .unwrap();
            assert_eq!(original_generator.get_bound(), forked_generator.get_bound());
        }
    }

    /// Check the property:
    ///     On a valid fork, the parent table index is increased of the number of children
    ///     multiplied by the number of bytes per child.
    pub fn prop_fork_parent_state_table_index<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let original_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let mut forked_generator = original_generator.clone();
            forked_generator
                .par_try_fork(nc, nb)
                .unwrap()
                .find_last(|_| true)
                .unwrap();
            assert_eq!(
                forked_generator.table_index(),
                // Decrement accounts for the fact that the table index stored is the previous one
                t.increased(nc.0 * nb.0).decremented()
            );
        }
    }

    /// Check the property:
    ///     On a valid fork, the bytes outputted by the children in the fork order form the same
    ///     sequence the parent would have had outputted no fork had happened.
    pub fn prop_fork<G: AesBlockCipher>() {
        // Fewer repetitions than the other properties: each iteration consumes every byte of
        // the forked range, which is comparatively expensive.
        for _ in 0..1000 {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let bytes_to_go = nc.0 * nb.0;
            let original_generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let mut forked_generator = original_generator.clone();
            let initial_output: Vec<u8> = original_generator.take(bytes_to_go).collect();
            let forked_output: Vec<u8> = forked_generator
                .par_try_fork(nc, nb)
                .unwrap()
                .flat_map(|child| child.collect::<Vec<_>>())
                .collect();
            assert_eq!(initial_output, forked_output);
        }
    }

    /// Check the property:
    ///     On a valid fork, all children got a number of remaining bytes equals to the number of
    ///     bytes per child given as fork input.
    pub fn prop_fork_children_remaining_bytes<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let mut generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            assert!(generator
                .par_try_fork(nc, nb)
                .unwrap()
                .all(|c| c.remaining_bytes().0 == nb.0 as u128));
        }
    }

    /// Check the property:
    ///     On a valid fork, the number of remaining bytes of the parent is reduced by the
    ///     number of children multiplied by the number of bytes per child.
    pub fn prop_fork_parent_remaining_bytes<G: AesBlockCipher>() {
        for _ in 0..REPEATS {
            let (t, nc, nb, i) = any_valid_fork().next().unwrap();
            let k = any_key().next().unwrap();
            let bytes_to_go = nc.0 * nb.0;
            let mut generator =
                AesCtrGenerator::<G>::new(k, Some(t), Some(t.increased(nc.0 * nb.0 + i)));
            let before_remaining_bytes = generator.remaining_bytes();
            let _ = generator.par_try_fork(nc, nb).unwrap();
            let after_remaining_bytes = generator.remaining_bytes();
            assert_eq!(
                before_remaining_bytes.0 - after_remaining_bytes.0,
                bytes_to_go as u128
            );
        }
    }
}
|
||||
@@ -1,176 +0,0 @@
|
||||
use crate::generators::aes_ctr::index::{AesIndex, TableIndex};
|
||||
use crate::generators::aes_ctr::BYTES_PER_BATCH;
|
||||
|
||||
/// A pointer to the next byte to be outputted by the generator.
///
/// This is an index into the current batch buffer, always in `[0, BYTES_PER_BATCH)`.
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
pub struct BufferPointer(pub usize);
|
||||
|
||||
/// A structure representing the current state of generator using batched aes-ctr approach.
#[derive(Debug, Clone, Copy)]
pub struct State {
    // Table index of the last outputted byte (see `State::new` for the off-by-one convention).
    table_index: TableIndex,
    // Position, in the current batch buffer, of the last outputted byte.
    buffer_pointer: BufferPointer,
}
|
||||
|
||||
/// A structure representing the action to be taken by the generator after shifting its state.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ShiftAction {
    /// The buffer still holds the requested byte: output the byte pointed to by the 0-th field.
    OutputByte(BufferPointer),
    /// Refresh the buffer by generating a new batch starting at the given aes index, then output
    /// the byte pointed to by the buffer pointer.
    RefreshBatchAndOutputByte(AesIndex, BufferPointer),
}
|
||||
|
||||
impl State {
    /// Creates a new state from the initial table index.
    ///
    /// Note :
    /// ------
    ///
    /// The `table_index` input, is the __first__ table index that will be outputted on the next
    /// call to `increment`. Put differently, the current table index of the newly created state
    /// is the predecessor of this one.
    ///
    /// # Panics
    ///
    /// Panics if `table_index` is `TableIndex::FIRST` (it cannot be decremented without
    /// wrapping).
    pub fn new(table_index: TableIndex) -> Self {
        // We ensure that the table index is not the first one, to prevent wrapping on `decrement`,
        // and outputting `RefreshBatchAndOutputByte(AesIndex::MAX, ...)` on the first increment
        // (which would lead to loading a non continuous batch).
        assert_ne!(table_index, TableIndex::FIRST);
        State {
            // To ensure that the first outputted table index is the proper one, we decrement the
            // table index.
            table_index: table_index.decremented(),
            // To ensure that the first `ShiftAction` will be a `RefreshBatchAndOutputByte`, we set
            // the buffer to the last allowed value.
            buffer_pointer: BufferPointer(BYTES_PER_BATCH - 1),
        }
    }

    /// Shifts the state forward of `shift` bytes, returning the action the generator must take
    /// to output the byte at the new position.
    pub fn increase(&mut self, shift: usize) -> ShiftAction {
        self.table_index.increase(shift);
        let total_batch_index = self.buffer_pointer.0 + shift;
        if total_batch_index > BYTES_PER_BATCH - 1 {
            // The shifted pointer runs past the end of the buffer: a new batch must be generated,
            // starting at the current aes index. The fresh batch is aligned on an aes call, so the
            // in-buffer position is the byte index within that call.
            self.buffer_pointer.0 = self.table_index.byte_index.0;
            ShiftAction::RefreshBatchAndOutputByte(self.table_index.aes_index, self.buffer_pointer)
        } else {
            // The requested byte is already in the buffer.
            self.buffer_pointer.0 = total_batch_index;
            ShiftAction::OutputByte(self.buffer_pointer)
        }
    }

    /// Shifts the state forward of one byte.
    pub fn increment(&mut self) -> ShiftAction {
        self.increase(1)
    }

    /// Returns the current table index (the index of the last outputted byte).
    pub fn table_index(&self) -> TableIndex {
        self.table_index
    }
}
|
||||
|
||||
impl Default for State {
|
||||
fn default() -> Self {
|
||||
State::new(TableIndex::FIRST)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    //! Property tests for `State` transitions.
    use super::*;
    use crate::generators::aes_ctr::index::ByteIndex;
    use crate::generators::aes_ctr::BYTES_PER_AES_CALL;
    use rand::{thread_rng, Rng};

    const REPEATS: usize = 1_000_000;

    /// Yields random table indices whose byte index is valid within one aes block.
    fn any_table_index() -> impl Iterator<Item = TableIndex> {
        std::iter::repeat_with(|| {
            TableIndex::new(
                AesIndex(thread_rng().gen()),
                ByteIndex(thread_rng().gen::<usize>() % BYTES_PER_AES_CALL),
            )
        })
    }

    /// Yields unbounded random usize values.
    fn any_usize() -> impl Iterator<Item = usize> {
        std::iter::repeat_with(|| thread_rng().gen())
    }

    #[test]
    /// Check the property:
    /// For all table indices t,
    /// State::new(t).increment() = RefreshBatchAndOutputByte(t.aes_index, t.byte_index)
    fn prop_state_new_increment() {
        for _ in 0..REPEATS {
            let (t, mut s) = any_table_index()
                .map(|t| (t, State::new(t)))
                .next()
                .unwrap();
            assert!(matches!(
                s.increment(),
                ShiftAction::RefreshBatchAndOutputByte(t_, BufferPointer(p_)) if t_ == t.aes_index && p_ == t.byte_index.0
            ))
        }
    }

    #[test]
    /// Check the property:
    /// For all states s, table indices t, positive integer i
    /// if s = State::new(t), then t.increased(i) = s.increased(i-1).table_index().
    fn prop_state_increase_table_index() {
        for _ in 0..REPEATS {
            let (t, mut s, i) = any_table_index()
                .zip(any_usize())
                .map(|(t, i)| (t, State::new(t), i))
                .next()
                .unwrap();
            s.increase(i);
            assert_eq!(s.table_index(), t.increased(i - 1))
        }
    }

    #[test]
    /// Check the property:
    /// For all table indices t, positive integer i such as t.byte_index + i < 127,
    /// if s = State::new(t), and s.increment() was executed, then
    /// s.increase(i) = OutputByte(t.byte_index + i).
    fn prop_state_increase_small() {
        for _ in 0..REPEATS {
            // `i` is capped so that the shifted pointer stays inside the current batch.
            let (t, mut s, i) = any_table_index()
                .zip(any_usize())
                .map(|(t, i)| (t, State::new(t), i % BYTES_PER_BATCH))
                .find(|(t, _, i)| t.byte_index.0 + i < BYTES_PER_BATCH - 1)
                .unwrap();
            s.increment();
            assert!(matches!(
                s.increase(i),
                ShiftAction::OutputByte(BufferPointer(p_)) if p_ == t.byte_index.0 + i
            ));
        }
    }

    #[test]
    /// Check the property:
    /// For all table indices t, positive integer i such as t.byte_index + i >= 127,
    /// if s = State::new(t), and s.increment() was executed, then
    /// s.increase(i) = RefreshBatchAndOutputByte(
    ///     t.increased(i).aes_index,
    ///     t.increased(i).byte_index).
    fn prop_state_increase_large() {
        for _ in 0..REPEATS {
            // Here `i` is large enough that the shifted pointer spills past the batch.
            let (t, mut s, i) = any_table_index()
                .zip(any_usize())
                .map(|(t, i)| (t, State::new(t), i))
                .find(|(t, _, i)| t.byte_index.0 + i >= BYTES_PER_BATCH - 1)
                .unwrap();
            s.increment();
            assert!(matches!(
                s.increase(i),
                ShiftAction::RefreshBatchAndOutputByte(t_, BufferPointer(p_))
                    if t_ == t.increased(i).aes_index && p_ == t.increased(i).byte_index.0
            ));
        }
    }
}
|
||||
@@ -1,184 +0,0 @@
|
||||
use crate::generators::aes_ctr::{AesBlockCipher, AesIndex, AesKey, BYTES_PER_BATCH};
|
||||
use core::arch::aarch64::{
|
||||
uint8x16_t, vaeseq_u8, vaesmcq_u8, vdupq_n_u32, vdupq_n_u8, veorq_u8, vgetq_lane_u32,
|
||||
vreinterpretq_u32_u8, vreinterpretq_u8_u32,
|
||||
};
|
||||
use std::arch::is_aarch64_feature_detected;
|
||||
use std::mem::transmute;
|
||||
|
||||
// Round constants for the aes-128 key schedule (FIPS-197).
const RCONS: [u32; 10] = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36];
// An aes-128 key is made of 4 32-bit words.
const NUM_WORDS_IN_KEY: usize = 4;
const NUM_ROUNDS: usize = 10;
// One round key per round, plus the initial whitening key.
const NUM_ROUND_KEYS: usize = NUM_ROUNDS + 1;
|
||||
|
||||
/// An aes block cipher implementation which uses `neon` and `aes` instructions.
#[derive(Clone)]
pub struct ArmAesBlockCipher {
    // Expanded key schedule, computed once in `new` and reused for every batch.
    round_keys: [uint8x16_t; NUM_ROUND_KEYS],
}
|
||||
|
||||
impl AesBlockCipher for ArmAesBlockCipher {
    /// Creates a block cipher from an aes key, expanding the key schedule once.
    ///
    /// # Panics
    ///
    /// Panics if the `aes` or `neon` aarch64 CPU features are not detected at runtime.
    fn new(key: AesKey) -> ArmAesBlockCipher {
        let aes_detected = is_aarch64_feature_detected!("aes");
        let neon_detected = is_aarch64_feature_detected!("neon");

        if !(aes_detected && neon_detected) {
            panic!(
                "The ArmAesBlockCipher requires both aes and neon aarch64 CPU features.\n\
                aes feature available: {}\nneon feature available: {}\n.",
                aes_detected, neon_detected
            )
        }

        // SAFETY: aes and neon availability was checked just above.
        let round_keys = unsafe { generate_round_keys(key) };
        ArmAesBlockCipher { round_keys }
    }

    /// Generates one batch of pseudo-random bytes by encrypting consecutive counter values
    /// starting at `aes_ctr`.
    fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
        #[target_feature(enable = "aes,neon")]
        unsafe fn implementation(
            this: &ArmAesBlockCipher,
            AesIndex(aes_ctr): AesIndex,
        ) -> [u8; BYTES_PER_BATCH] {
            let mut output = [0u8; BYTES_PER_BATCH];
            // Each aes call encrypts one 128-bit block (16 bytes): fill the batch 16 bytes
            // at a time, one counter value per chunk.
            for (i, out) in output.chunks_exact_mut(16).enumerate() {
                let encrypted = encrypt(aes_ctr + (i as u128), &this.round_keys);
                out.copy_from_slice(&encrypted.to_ne_bytes());
            }
            output
        }
        // SAFETY: we checked for aes and neon availability in `Self::new`, which is the only
        // way to construct an `ArmAesBlockCipher`.
        unsafe { implementation(self, AesIndex(aes_ctr)) }
    }
}
|
||||
|
||||
/// Does the AES SubWord operation for the Key Expansion step.
///
/// # SAFETY
///
/// You must make sure the CPU's arch is `aarch64` and has
/// `neon` and `aes` features.
#[inline(always)]
unsafe fn sub_word(word: u32) -> u32 {
    let data = vreinterpretq_u8_u32(vdupq_n_u32(word));
    let zero_key = vdupq_n_u8(0u8);
    let temp = vaeseq_u8(data, zero_key);
    // vaeseq_u8 does SubBytes(ShiftRows(XOR(data, key))).
    // But because we used an all-zero aes key, the XOR did not alter data.
    // We now have temp = SubBytes(ShiftRows(data))

    // Since in the AES ShiftRows operation the first row is not shifted,
    // lane 0 already holds our SubWord(word) result.
    vgetq_lane_u32::<0>(vreinterpretq_u32_u8(temp))
}
|
||||
|
||||
/// Reinterprets a 128-bit NEON vector as a `u128` (native endianness).
#[inline(always)]
fn uint8x16_t_to_u128(input: uint8x16_t) -> u128 {
    // SAFETY: `uint8x16_t` and `u128` are both exactly 16 bytes, so the bitwise
    // reinterpretation is well defined.
    unsafe { transmute(input) }
}
|
||||
|
||||
/// Reinterprets a `u128` as a 128-bit NEON vector (native endianness).
#[inline(always)]
fn u128_to_uint8x16_t(input: u128) -> uint8x16_t {
    // SAFETY: `u128` and `uint8x16_t` are both exactly 16 bytes, and any bit pattern is a
    // valid `uint8x16_t`.
    unsafe { transmute(input) }
}
|
||||
|
||||
/// Expands an aes-128 key into the full schedule of `NUM_ROUND_KEYS` round keys
/// (FIPS-197 key expansion).
#[target_feature(enable = "aes,neon")]
unsafe fn generate_round_keys(key: AesKey) -> [uint8x16_t; NUM_ROUND_KEYS] {
    // SAFETY: an all-zero bit pattern is a valid `uint8x16_t`.
    let mut round_keys: [uint8x16_t; NUM_ROUND_KEYS] = std::mem::zeroed();
    round_keys[0] = u128_to_uint8x16_t(key.0);

    // View the schedule as a flat slice of 32-bit words, as in the FIPS-197 description.
    // SAFETY: the array is NUM_ROUND_KEYS 16-byte vectors, i.e. exactly
    // NUM_ROUND_KEYS * NUM_WORDS_IN_KEY `u32` words, and `u32` accepts any bit pattern.
    let words = std::slice::from_raw_parts_mut(
        round_keys.as_mut_ptr() as *mut u32,
        NUM_ROUND_KEYS * NUM_WORDS_IN_KEY,
    );

    debug_assert_eq!(words.len(), 44);

    // Skip the words of the first key, it is already set above.
    for i in NUM_WORDS_IN_KEY..words.len() {
        if (i % NUM_WORDS_IN_KEY) == 0 {
            // First word of a round key: combine with the rotated SubWord of the previous
            // word and the round constant.
            words[i] = words[i - NUM_WORDS_IN_KEY]
                ^ sub_word(words[i - 1]).rotate_right(8)
                ^ RCONS[(i / NUM_WORDS_IN_KEY) - 1];
        } else {
            words[i] = words[i - NUM_WORDS_IN_KEY] ^ words[i - 1];
        }
        // Note: aes-256 (Nk = 8) has an extra SubWord step when i mod Nk == 4,
        // but that cannot happen with 128-bit keys.
    }

    round_keys
}
|
||||
|
||||
/// Encrypts a 128-bit message with the given aes-128 round keys.
///
/// # SAFETY
///
/// You must make sure the CPU's arch is `aarch64` and has
/// `neon` and `aes` features.
#[inline(always)]
unsafe fn encrypt(message: u128, keys: &[uint8x16_t; NUM_ROUND_KEYS]) -> u128 {
    // Notes:
    // According to the [ARM Manual](https://developer.arm.com/documentation/ddi0487/gb/):
    // `vaeseq_u8` is the following AES operations:
    // 1. AddRoundKey (XOR)
    // 2. ShiftRows
    // 3. SubBytes
    // `vaesmcq_u8` is MixColumns
    let mut data: uint8x16_t = u128_to_uint8x16_t(message);

    // First NUM_ROUNDS - 1 rounds: AddRoundKey + ShiftRows + SubBytes followed by MixColumns,
    // consuming keys[0..NUM_ROUNDS - 1].
    for &key in keys.iter().take(NUM_ROUNDS - 1) {
        data = vaesmcq_u8(vaeseq_u8(data, key));
    }

    // Final round has no MixColumns; the last round key is applied as a plain XOR.
    data = vaeseq_u8(data, keys[NUM_ROUNDS - 1]);
    data = veorq_u8(data, keys[NUM_ROUND_KEYS - 1]);

    uint8x16_t_to_u128(data)
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    // Test vector for aes128, from the FIPS publication 197
    const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
    // Expected expanded key schedule for CIPHER_KEY (FIPS-197, appendix A.1).
    const KEY_SCHEDULE: [u128; 11] = [
        u128::from_be(0x000102030405060708090a0b0c0d0e0f),
        u128::from_be(0xd6aa74fdd2af72fadaa678f1d6ab76fe),
        u128::from_be(0xb692cf0b643dbdf1be9bc5006830b3fe),
        u128::from_be(0xb6ff744ed2c2c9bf6c590cbf0469bf41),
        u128::from_be(0x47f7f7bc95353e03f96c32bcfd058dfd),
        u128::from_be(0x3caaa3e8a99f9deb50f3af57adf622aa),
        u128::from_be(0x5e390f7df7a69296a7553dc10aa31f6b),
        u128::from_be(0x14f9701ae35fe28c440adf4d4ea9c026),
        u128::from_be(0x47438735a41c65b9e016baf4aebf7ad2),
        u128::from_be(0x549932d1f08557681093ed9cbe2c974e),
        u128::from_be(0x13111d7fe3944a17f307a78b4d2b30c5),
    ];
    const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
    const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);

    #[test]
    fn test_generate_key_schedule() {
        // Checks that the round keys are correctly generated from the sample key from FIPS
        let key = AesKey(CIPHER_KEY);
        let keys = unsafe { generate_round_keys(key) };
        for (expected, actual) in KEY_SCHEDULE.iter().zip(keys.iter()) {
            assert_eq!(*expected, uint8x16_t_to_u128(*actual));
        }
    }

    #[test]
    fn test_encrypt_message() {
        // Checks that encrypting the FIPS-197 sample plaintext yields the expected ciphertext.
        let message = PLAINTEXT;
        let key = AesKey(CIPHER_KEY);
        let keys = unsafe { generate_round_keys(key) };
        let ciphertext = unsafe { encrypt(message, &keys) };
        assert_eq!(CIPHERTEXT, ciphertext);
    }
}
|
||||
@@ -1,110 +0,0 @@
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
|
||||
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
|
||||
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
|
||||
use crate::seeders::Seed;
|
||||
|
||||
/// A random number generator using the aarch64 `neon` and `aes` instructions.
pub struct NeonAesRandomGenerator(pub(super) AesCtrGenerator<ArmAesBlockCipher>);
|
||||
|
||||
/// The children iterator used by [`NeonAesRandomGenerator`].
///
/// Outputs children generators one by one.
pub struct ArmAesChildrenIterator(ChildrenIterator<ArmAesBlockCipher>);
|
||||
|
||||
impl Iterator for ArmAesChildrenIterator {
|
||||
type Item = NeonAesRandomGenerator;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next().map(NeonAesRandomGenerator)
|
||||
}
|
||||
}
|
||||
|
||||
impl RandomGenerator for NeonAesRandomGenerator {
|
||||
type ChildrenIter = ArmAesChildrenIterator;
|
||||
fn new(seed: Seed) -> Self {
|
||||
NeonAesRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
|
||||
}
|
||||
fn remaining_bytes(&self) -> ByteCount {
|
||||
self.0.remaining_bytes()
|
||||
}
|
||||
fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ChildrenIter, ForkError> {
|
||||
self.0
|
||||
.try_fork(n_children, n_bytes)
|
||||
.map(ArmAesChildrenIterator)
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for NeonAesRandomGenerator {
|
||||
type Item = u8;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    //! Instantiates the generic aes-ctr and generator property tests for the
    //! aarch64 (neon + aes) backend.
    use crate::generators::aes_ctr::aes_ctr_generic_test;
    use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
    use crate::generators::{generator_generic_test, NeonAesRandomGenerator};

    #[test]
    fn prop_fork_first_state_table_index() {
        aes_ctr_generic_test::prop_fork_first_state_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_last_bound_table_index() {
        aes_ctr_generic_test::prop_fork_last_bound_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_bound_table_index() {
        aes_ctr_generic_test::prop_fork_parent_bound_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_state_table_index() {
        aes_ctr_generic_test::prop_fork_parent_state_table_index::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork() {
        aes_ctr_generic_test::prop_fork::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_children_remaining_bytes() {
        aes_ctr_generic_test::prop_fork_children_remaining_bytes::<ArmAesBlockCipher>();
    }

    #[test]
    fn prop_fork_parent_remaining_bytes() {
        aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<ArmAesBlockCipher>();
    }

    #[test]
    fn test_roughly_uniform() {
        generator_generic_test::test_roughly_uniform::<NeonAesRandomGenerator>();
    }

    #[test]
    fn test_generator_determinism() {
        generator_generic_test::test_generator_determinism::<NeonAesRandomGenerator>();
    }

    #[test]
    fn test_fork() {
        generator_generic_test::test_fork_children::<NeonAesRandomGenerator>();
    }

    #[test]
    #[should_panic(expected = "expected test panic")]
    fn test_bounded_panic() {
        generator_generic_test::test_bounded_none_should_panic::<NeonAesRandomGenerator>();
    }
}
|
||||
@@ -1,16 +0,0 @@
|
||||
//! A module implementing a random number generator, using the aarch64 `neon` and `aes`
|
||||
//! instructions.
|
||||
//!
|
||||
//! This module implements a cryptographically secure pseudorandom number generator
|
||||
//! (CS-PRNG), using a fast block cipher. The implementation is based on the
|
||||
//! [intel aesni white paper 323641-001 revision 3.0](https://www.intel.com/content/dam/doc/white-paper/advanced-encryption-standard-new-instructions-set-paper.pdf).
|
||||
|
||||
mod block_cipher;
|
||||
|
||||
mod generator;
|
||||
pub use generator::*;
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
mod parallel;
|
||||
#[cfg(feature = "parallel")]
|
||||
pub use parallel::*;
|
||||
@@ -1,95 +0,0 @@
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
|
||||
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
|
||||
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
|
||||
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rayon::prelude::*;
|
||||
|
||||
/// The parallel children iterator used by [`NeonAesRandomGenerator`].
|
||||
///
|
||||
/// Outputs the children generators one by one.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub struct ParallelArmAesChildrenIterator(
|
||||
rayon::iter::Map<
|
||||
ParallelChildrenIterator<ArmAesBlockCipher>,
|
||||
fn(AesCtrGenerator<ArmAesBlockCipher>) -> NeonAesRandomGenerator,
|
||||
>,
|
||||
);
|
||||
|
||||
impl ParallelIterator for ParallelArmAesChildrenIterator {
|
||||
type Item = NeonAesRandomGenerator;
|
||||
fn drive_unindexed<C>(self, consumer: C) -> C::Result
|
||||
where
|
||||
C: UnindexedConsumer<Self::Item>,
|
||||
{
|
||||
self.0.drive_unindexed(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexedParallelIterator for ParallelArmAesChildrenIterator {
|
||||
fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
|
||||
self.0.drive(consumer)
|
||||
}
|
||||
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
|
||||
self.0.with_producer(callback)
|
||||
}
|
||||
}
|
||||
|
||||
impl ParallelRandomGenerator for NeonAesRandomGenerator {
|
||||
type ParChildrenIter = ParallelArmAesChildrenIterator;
|
||||
|
||||
fn par_try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ParChildrenIter, ForkError> {
|
||||
self.0
|
||||
.par_try_fork(n_children, n_bytes)
|
||||
.map(|iterator| ParallelArmAesChildrenIterator(iterator.map(NeonAesRandomGenerator)))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
mod test {
|
||||
use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
|
||||
use crate::generators::implem::aarch64::block_cipher::ArmAesBlockCipher;
|
||||
|
||||
#[test]
|
||||
fn prop_fork_first_state_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<ArmAesBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_last_bound_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<ArmAesBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_bound_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<ArmAesBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_state_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<ArmAesBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_ttt() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork::<ArmAesBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_children_remaining_bytes() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<ArmAesBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_remaining_bytes() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<ArmAesBlockCipher>();
|
||||
}
|
||||
}
|
||||
@@ -1,231 +0,0 @@
|
||||
use crate::generators::aes_ctr::{AesBlockCipher, AesIndex, AesKey, BYTES_PER_BATCH};
|
||||
use std::arch::x86_64::{
|
||||
__m128i, _mm_aesenc_si128, _mm_aesenclast_si128, _mm_aeskeygenassist_si128, _mm_shuffle_epi32,
|
||||
_mm_slli_si128, _mm_store_si128, _mm_xor_si128,
|
||||
};
|
||||
use std::mem::transmute;
|
||||
|
||||
/// An aes block cipher implementation which uses `aesni` instructions.
|
||||
#[derive(Clone)]
|
||||
pub struct AesniBlockCipher {
|
||||
// The set of round keys used for the aes encryption
|
||||
round_keys: [__m128i; 11],
|
||||
}
|
||||
|
||||
impl AesBlockCipher for AesniBlockCipher {
|
||||
fn new(key: AesKey) -> AesniBlockCipher {
|
||||
let aes_detected = is_x86_feature_detected!("aes");
|
||||
let sse2_detected = is_x86_feature_detected!("sse2");
|
||||
|
||||
if !(aes_detected && sse2_detected) {
|
||||
panic!(
|
||||
"The AesniBlockCipher requires both aes and sse2 x86 CPU features.\n\
|
||||
aes feature available: {}\nsse2 feature available: {}\n.",
|
||||
aes_detected, sse2_detected
|
||||
)
|
||||
}
|
||||
|
||||
// SAFETY: we checked for aes and sse2 availability
|
||||
let round_keys = unsafe { generate_round_keys(key) };
|
||||
AesniBlockCipher { round_keys }
|
||||
}
|
||||
|
||||
fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
|
||||
#[target_feature(enable = "sse2,aes")]
|
||||
unsafe fn implementation(
|
||||
this: &AesniBlockCipher,
|
||||
AesIndex(aes_ctr): AesIndex,
|
||||
) -> [u8; BYTES_PER_BATCH] {
|
||||
si128arr_to_u8arr(aes_encrypt_many(
|
||||
u128_to_si128(aes_ctr),
|
||||
u128_to_si128(aes_ctr + 1),
|
||||
u128_to_si128(aes_ctr + 2),
|
||||
u128_to_si128(aes_ctr + 3),
|
||||
u128_to_si128(aes_ctr + 4),
|
||||
u128_to_si128(aes_ctr + 5),
|
||||
u128_to_si128(aes_ctr + 6),
|
||||
u128_to_si128(aes_ctr + 7),
|
||||
&this.round_keys,
|
||||
))
|
||||
}
|
||||
// SAFETY: we checked for aes and sse2 availability in `Self::new`
|
||||
unsafe { implementation(self, AesIndex(aes_ctr)) }
|
||||
}
|
||||
}
|
||||
|
||||
#[target_feature(enable = "sse2,aes")]
|
||||
unsafe fn generate_round_keys(key: AesKey) -> [__m128i; 11] {
|
||||
let key = u128_to_si128(key.0);
|
||||
let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
|
||||
aes_128_key_expansion(key, &mut keys);
|
||||
keys
|
||||
}
|
||||
|
||||
// Uses aes to encrypt many values at once. This allows a substantial speedup (around 30%)
|
||||
// compared to the naive approach.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[inline(always)]
|
||||
fn aes_encrypt_many(
|
||||
message_1: __m128i,
|
||||
message_2: __m128i,
|
||||
message_3: __m128i,
|
||||
message_4: __m128i,
|
||||
message_5: __m128i,
|
||||
message_6: __m128i,
|
||||
message_7: __m128i,
|
||||
message_8: __m128i,
|
||||
keys: &[__m128i; 11],
|
||||
) -> [__m128i; 8] {
|
||||
unsafe {
|
||||
let mut tmp_1 = _mm_xor_si128(message_1, keys[0]);
|
||||
let mut tmp_2 = _mm_xor_si128(message_2, keys[0]);
|
||||
let mut tmp_3 = _mm_xor_si128(message_3, keys[0]);
|
||||
let mut tmp_4 = _mm_xor_si128(message_4, keys[0]);
|
||||
let mut tmp_5 = _mm_xor_si128(message_5, keys[0]);
|
||||
let mut tmp_6 = _mm_xor_si128(message_6, keys[0]);
|
||||
let mut tmp_7 = _mm_xor_si128(message_7, keys[0]);
|
||||
let mut tmp_8 = _mm_xor_si128(message_8, keys[0]);
|
||||
|
||||
for key in keys.iter().take(10).skip(1) {
|
||||
tmp_1 = _mm_aesenc_si128(tmp_1, *key);
|
||||
tmp_2 = _mm_aesenc_si128(tmp_2, *key);
|
||||
tmp_3 = _mm_aesenc_si128(tmp_3, *key);
|
||||
tmp_4 = _mm_aesenc_si128(tmp_4, *key);
|
||||
tmp_5 = _mm_aesenc_si128(tmp_5, *key);
|
||||
tmp_6 = _mm_aesenc_si128(tmp_6, *key);
|
||||
tmp_7 = _mm_aesenc_si128(tmp_7, *key);
|
||||
tmp_8 = _mm_aesenc_si128(tmp_8, *key);
|
||||
}
|
||||
|
||||
tmp_1 = _mm_aesenclast_si128(tmp_1, keys[10]);
|
||||
tmp_2 = _mm_aesenclast_si128(tmp_2, keys[10]);
|
||||
tmp_3 = _mm_aesenclast_si128(tmp_3, keys[10]);
|
||||
tmp_4 = _mm_aesenclast_si128(tmp_4, keys[10]);
|
||||
tmp_5 = _mm_aesenclast_si128(tmp_5, keys[10]);
|
||||
tmp_6 = _mm_aesenclast_si128(tmp_6, keys[10]);
|
||||
tmp_7 = _mm_aesenclast_si128(tmp_7, keys[10]);
|
||||
tmp_8 = _mm_aesenclast_si128(tmp_8, keys[10]);
|
||||
|
||||
[tmp_1, tmp_2, tmp_3, tmp_4, tmp_5, tmp_6, tmp_7, tmp_8]
|
||||
}
|
||||
}
|
||||
|
||||
fn aes_128_assist(temp1: __m128i, temp2: __m128i) -> __m128i {
|
||||
let mut temp3: __m128i;
|
||||
let mut temp2 = temp2;
|
||||
let mut temp1 = temp1;
|
||||
unsafe {
|
||||
temp2 = _mm_shuffle_epi32(temp2, 0xff);
|
||||
temp3 = _mm_slli_si128(temp1, 0x4);
|
||||
temp1 = _mm_xor_si128(temp1, temp3);
|
||||
temp3 = _mm_slli_si128(temp3, 0x4);
|
||||
temp1 = _mm_xor_si128(temp1, temp3);
|
||||
temp3 = _mm_slli_si128(temp3, 0x4);
|
||||
temp1 = _mm_xor_si128(temp1, temp3);
|
||||
temp1 = _mm_xor_si128(temp1, temp2);
|
||||
}
|
||||
temp1
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn aes_128_key_expansion(key: __m128i, keys: &mut [__m128i; 11]) {
|
||||
let (mut temp1, mut temp2): (__m128i, __m128i);
|
||||
temp1 = key;
|
||||
unsafe {
|
||||
_mm_store_si128(keys.as_mut_ptr(), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x01);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(1), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x02);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(2), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x04);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(3), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x08);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(4), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x10);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(5), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x20);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(6), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x40);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(7), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x80);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(8), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x1b);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(9), temp1);
|
||||
temp2 = _mm_aeskeygenassist_si128(temp1, 0x36);
|
||||
temp1 = aes_128_assist(temp1, temp2);
|
||||
_mm_store_si128(keys.as_mut_ptr().offset(10), temp1);
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn u128_to_si128(input: u128) -> __m128i {
|
||||
unsafe { transmute(input) }
|
||||
}
|
||||
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
fn si128_to_u128(input: __m128i) -> u128 {
|
||||
unsafe { transmute(input) }
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn si128arr_to_u8arr(input: [__m128i; 8]) -> [u8; BYTES_PER_BATCH] {
|
||||
unsafe { transmute(input) }
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
// Test vector for aes128, from the FIPS publication 197
|
||||
const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
|
||||
const KEY_SCHEDULE: [u128; 11] = [
|
||||
u128::from_be(0x000102030405060708090a0b0c0d0e0f),
|
||||
u128::from_be(0xd6aa74fdd2af72fadaa678f1d6ab76fe),
|
||||
u128::from_be(0xb692cf0b643dbdf1be9bc5006830b3fe),
|
||||
u128::from_be(0xb6ff744ed2c2c9bf6c590cbf0469bf41),
|
||||
u128::from_be(0x47f7f7bc95353e03f96c32bcfd058dfd),
|
||||
u128::from_be(0x3caaa3e8a99f9deb50f3af57adf622aa),
|
||||
u128::from_be(0x5e390f7df7a69296a7553dc10aa31f6b),
|
||||
u128::from_be(0x14f9701ae35fe28c440adf4d4ea9c026),
|
||||
u128::from_be(0x47438735a41c65b9e016baf4aebf7ad2),
|
||||
u128::from_be(0x549932d1f08557681093ed9cbe2c974e),
|
||||
u128::from_be(0x13111d7fe3944a17f307a78b4d2b30c5),
|
||||
];
|
||||
const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
|
||||
const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);
|
||||
|
||||
#[test]
|
||||
fn test_generate_key_schedule() {
|
||||
// Checks that the round keys are correctly generated from the sample key from FIPS
|
||||
let key = u128_to_si128(CIPHER_KEY);
|
||||
let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
|
||||
aes_128_key_expansion(key, &mut keys);
|
||||
for (expected, actual) in KEY_SCHEDULE.iter().zip(keys.iter()) {
|
||||
assert_eq!(*expected, si128_to_u128(*actual));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_encrypt_many_messages() {
|
||||
// Checks that encrypting many plaintext at the same time gives the correct output.
|
||||
let message = u128_to_si128(PLAINTEXT);
|
||||
let key = u128_to_si128(CIPHER_KEY);
|
||||
let mut keys: [__m128i; 11] = [u128_to_si128(0); 11];
|
||||
aes_128_key_expansion(key, &mut keys);
|
||||
let ciphertexts = aes_encrypt_many(
|
||||
message, message, message, message, message, message, message, message, &keys,
|
||||
);
|
||||
for ct in &ciphertexts {
|
||||
assert_eq!(CIPHERTEXT, si128_to_u128(*ct));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
|
||||
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
|
||||
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
|
||||
use crate::seeders::Seed;
|
||||
|
||||
/// A random number generator using the `aesni` instructions.
|
||||
pub struct AesniRandomGenerator(pub(super) AesCtrGenerator<AesniBlockCipher>);
|
||||
|
||||
/// The children iterator used by [`AesniRandomGenerator`].
|
||||
///
|
||||
/// Outputs children generators one by one.
|
||||
pub struct AesniChildrenIterator(ChildrenIterator<AesniBlockCipher>);
|
||||
|
||||
impl Iterator for AesniChildrenIterator {
|
||||
type Item = AesniRandomGenerator;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next().map(AesniRandomGenerator)
|
||||
}
|
||||
}
|
||||
|
||||
impl RandomGenerator for AesniRandomGenerator {
|
||||
type ChildrenIter = AesniChildrenIterator;
|
||||
fn new(seed: Seed) -> Self {
|
||||
AesniRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
|
||||
}
|
||||
fn remaining_bytes(&self) -> ByteCount {
|
||||
self.0.remaining_bytes()
|
||||
}
|
||||
fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ChildrenIter, ForkError> {
|
||||
self.0
|
||||
.try_fork(n_children, n_bytes)
|
||||
.map(AesniChildrenIterator)
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for AesniRandomGenerator {
|
||||
type Item = u8;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::generators::aes_ctr::aes_ctr_generic_test;
|
||||
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
|
||||
use crate::generators::{generator_generic_test, AesniRandomGenerator};
|
||||
|
||||
#[test]
|
||||
fn prop_fork_first_state_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_first_state_table_index::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_last_bound_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_last_bound_table_index::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_bound_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_parent_bound_table_index::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_state_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_parent_state_table_index::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork() {
|
||||
aes_ctr_generic_test::prop_fork::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_children_remaining_bytes() {
|
||||
aes_ctr_generic_test::prop_fork_children_remaining_bytes::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_remaining_bytes() {
|
||||
aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_roughly_uniform() {
|
||||
generator_generic_test::test_roughly_uniform::<AesniRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generator_determinism() {
|
||||
generator_generic_test::test_generator_determinism::<AesniRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fork() {
|
||||
generator_generic_test::test_fork_children::<AesniRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "expected test panic")]
|
||||
fn test_bounded_panic() {
|
||||
generator_generic_test::test_bounded_none_should_panic::<AesniRandomGenerator>();
|
||||
}
|
||||
}
|
||||
@@ -1,15 +0,0 @@
|
||||
//! A module implementing a random number generator, using the x86_64 `aesni` instructions.
|
||||
//!
|
||||
//! This module implements a cryptographically secure pseudorandom number generator
|
||||
//! (CS-PRNG), using a fast block cipher. The implementation is based on the
|
||||
//! [intel aesni white paper 323641-001 revision 3.0](https://www.intel.com/content/dam/doc/white-paper/advanced-encryption-standard-new-instructions-set-paper.pdf).
|
||||
|
||||
mod block_cipher;
|
||||
|
||||
mod generator;
|
||||
pub use generator::*;
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
mod parallel;
|
||||
#[cfg(feature = "parallel")]
|
||||
pub use parallel::*;
|
||||
@@ -1,95 +0,0 @@
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
|
||||
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
|
||||
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
|
||||
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rayon::prelude::*;
|
||||
|
||||
/// The parallel children iterator used by [`AesniRandomGenerator`].
|
||||
///
|
||||
/// Outputs the children generators one by one.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub struct ParallelAesniChildrenIterator(
|
||||
rayon::iter::Map<
|
||||
ParallelChildrenIterator<AesniBlockCipher>,
|
||||
fn(AesCtrGenerator<AesniBlockCipher>) -> AesniRandomGenerator,
|
||||
>,
|
||||
);
|
||||
|
||||
impl ParallelIterator for ParallelAesniChildrenIterator {
|
||||
type Item = AesniRandomGenerator;
|
||||
fn drive_unindexed<C>(self, consumer: C) -> C::Result
|
||||
where
|
||||
C: UnindexedConsumer<Self::Item>,
|
||||
{
|
||||
self.0.drive_unindexed(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexedParallelIterator for ParallelAesniChildrenIterator {
|
||||
fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
|
||||
self.0.drive(consumer)
|
||||
}
|
||||
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
|
||||
self.0.with_producer(callback)
|
||||
}
|
||||
}
|
||||
|
||||
impl ParallelRandomGenerator for AesniRandomGenerator {
|
||||
type ParChildrenIter = ParallelAesniChildrenIterator;
|
||||
|
||||
fn par_try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ParChildrenIter, ForkError> {
|
||||
self.0
|
||||
.par_try_fork(n_children, n_bytes)
|
||||
.map(|iterator| ParallelAesniChildrenIterator(iterator.map(AesniRandomGenerator)))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
mod test {
|
||||
use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
|
||||
use crate::generators::implem::aesni::block_cipher::AesniBlockCipher;
|
||||
|
||||
#[test]
|
||||
fn prop_fork_first_state_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_last_bound_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_bound_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_state_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_ttt() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_children_remaining_bytes() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<AesniBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_remaining_bytes() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<AesniBlockCipher>();
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
#[cfg(feature = "generator_x86_64_aesni")]
|
||||
mod aesni;
|
||||
#[cfg(feature = "generator_x86_64_aesni")]
|
||||
pub use aesni::*;
|
||||
|
||||
#[cfg(feature = "generator_aarch64_aes")]
|
||||
mod aarch64;
|
||||
#[cfg(feature = "generator_aarch64_aes")]
|
||||
pub use aarch64::*;
|
||||
|
||||
#[cfg(feature = "generator_fallback")]
|
||||
mod soft;
|
||||
#[cfg(feature = "generator_fallback")]
|
||||
pub use soft::*;
|
||||
@@ -1,114 +0,0 @@
|
||||
use crate::generators::aes_ctr::{
|
||||
AesBlockCipher, AesIndex, AesKey, AES_CALLS_PER_BATCH, BYTES_PER_AES_CALL, BYTES_PER_BATCH,
|
||||
};
|
||||
use aes::cipher::generic_array::GenericArray;
|
||||
use aes::cipher::{BlockEncrypt, KeyInit};
|
||||
use aes::Aes128;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SoftwareBlockCipher {
|
||||
// Aes structure
|
||||
aes: Aes128,
|
||||
}
|
||||
|
||||
impl AesBlockCipher for SoftwareBlockCipher {
|
||||
fn new(key: AesKey) -> SoftwareBlockCipher {
|
||||
let key: [u8; BYTES_PER_AES_CALL] = key.0.to_ne_bytes();
|
||||
let key = GenericArray::clone_from_slice(&key[..]);
|
||||
let aes = Aes128::new(&key);
|
||||
SoftwareBlockCipher { aes }
|
||||
}
|
||||
|
||||
fn generate_batch(&mut self, AesIndex(aes_ctr): AesIndex) -> [u8; BYTES_PER_BATCH] {
|
||||
aes_encrypt_many(
|
||||
aes_ctr,
|
||||
aes_ctr + 1,
|
||||
aes_ctr + 2,
|
||||
aes_ctr + 3,
|
||||
aes_ctr + 4,
|
||||
aes_ctr + 5,
|
||||
aes_ctr + 6,
|
||||
aes_ctr + 7,
|
||||
&self.aes,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Uses aes to encrypt many values at once. This allows a substantial speedup (around 30%)
|
||||
// compared to the naive approach.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn aes_encrypt_many(
|
||||
message_1: u128,
|
||||
message_2: u128,
|
||||
message_3: u128,
|
||||
message_4: u128,
|
||||
message_5: u128,
|
||||
message_6: u128,
|
||||
message_7: u128,
|
||||
message_8: u128,
|
||||
cipher: &Aes128,
|
||||
) -> [u8; BYTES_PER_BATCH] {
|
||||
let mut b1 = GenericArray::clone_from_slice(&message_1.to_ne_bytes()[..]);
|
||||
let mut b2 = GenericArray::clone_from_slice(&message_2.to_ne_bytes()[..]);
|
||||
let mut b3 = GenericArray::clone_from_slice(&message_3.to_ne_bytes()[..]);
|
||||
let mut b4 = GenericArray::clone_from_slice(&message_4.to_ne_bytes()[..]);
|
||||
let mut b5 = GenericArray::clone_from_slice(&message_5.to_ne_bytes()[..]);
|
||||
let mut b6 = GenericArray::clone_from_slice(&message_6.to_ne_bytes()[..]);
|
||||
let mut b7 = GenericArray::clone_from_slice(&message_7.to_ne_bytes()[..]);
|
||||
let mut b8 = GenericArray::clone_from_slice(&message_8.to_ne_bytes()[..]);
|
||||
|
||||
cipher.encrypt_block(&mut b1);
|
||||
cipher.encrypt_block(&mut b2);
|
||||
cipher.encrypt_block(&mut b3);
|
||||
cipher.encrypt_block(&mut b4);
|
||||
cipher.encrypt_block(&mut b5);
|
||||
cipher.encrypt_block(&mut b6);
|
||||
cipher.encrypt_block(&mut b7);
|
||||
cipher.encrypt_block(&mut b8);
|
||||
|
||||
let output_array: [[u8; BYTES_PER_AES_CALL]; AES_CALLS_PER_BATCH] = [
|
||||
b1.into(),
|
||||
b2.into(),
|
||||
b3.into(),
|
||||
b4.into(),
|
||||
b5.into(),
|
||||
b6.into(),
|
||||
b7.into(),
|
||||
b8.into(),
|
||||
];
|
||||
|
||||
unsafe { *{ output_array.as_ptr() as *const [u8; BYTES_PER_BATCH] } }
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use std::convert::TryInto;
|
||||
|
||||
// Test vector for aes128, from the FIPS publication 197
|
||||
const CIPHER_KEY: u128 = u128::from_be(0x000102030405060708090a0b0c0d0e0f);
|
||||
const PLAINTEXT: u128 = u128::from_be(0x00112233445566778899aabbccddeeff);
|
||||
const CIPHERTEXT: u128 = u128::from_be(0x69c4e0d86a7b0430d8cdb78070b4c55a);
|
||||
|
||||
#[test]
|
||||
fn test_encrypt_many_messages() {
|
||||
// Checks that encrypting many plaintext at the same time gives the correct output.
|
||||
let key: [u8; BYTES_PER_AES_CALL] = CIPHER_KEY.to_ne_bytes();
|
||||
let aes = Aes128::new(&GenericArray::from(key));
|
||||
let ciphertexts = aes_encrypt_many(
|
||||
PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT, PLAINTEXT,
|
||||
&aes,
|
||||
);
|
||||
let ciphertexts: [u8; BYTES_PER_BATCH] = ciphertexts[..].try_into().unwrap();
|
||||
for i in 0..8 {
|
||||
assert_eq!(
|
||||
u128::from_ne_bytes(
|
||||
ciphertexts[BYTES_PER_AES_CALL * i..BYTES_PER_AES_CALL * (i + 1)]
|
||||
.try_into()
|
||||
.unwrap()
|
||||
),
|
||||
CIPHERTEXT
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, AesKey, ChildrenIterator};
|
||||
use crate::generators::implem::soft::block_cipher::SoftwareBlockCipher;
|
||||
use crate::generators::{ByteCount, BytesPerChild, ChildrenCount, ForkError, RandomGenerator};
|
||||
use crate::seeders::Seed;
|
||||
|
||||
/// A random number generator using a software implementation.
|
||||
pub struct SoftwareRandomGenerator(pub(super) AesCtrGenerator<SoftwareBlockCipher>);
|
||||
|
||||
/// The children iterator used by [`SoftwareRandomGenerator`].
|
||||
///
|
||||
/// Outputs children generators one by one.
|
||||
pub struct SoftwareChildrenIterator(ChildrenIterator<SoftwareBlockCipher>);
|
||||
|
||||
impl Iterator for SoftwareChildrenIterator {
|
||||
type Item = SoftwareRandomGenerator;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next().map(SoftwareRandomGenerator)
|
||||
}
|
||||
}
|
||||
|
||||
impl RandomGenerator for SoftwareRandomGenerator {
|
||||
type ChildrenIter = SoftwareChildrenIterator;
|
||||
fn new(seed: Seed) -> Self {
|
||||
SoftwareRandomGenerator(AesCtrGenerator::new(AesKey(seed.0), None, None))
|
||||
}
|
||||
fn remaining_bytes(&self) -> ByteCount {
|
||||
self.0.remaining_bytes()
|
||||
}
|
||||
fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ChildrenIter, ForkError> {
|
||||
self.0
|
||||
.try_fork(n_children, n_bytes)
|
||||
.map(SoftwareChildrenIterator)
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for SoftwareRandomGenerator {
|
||||
type Item = u8;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.0.next()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::aes_ctr_generic_test;
|
||||
use crate::generators::generator_generic_test;
|
||||
|
||||
#[test]
|
||||
fn prop_fork_first_state_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_first_state_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_last_bound_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_last_bound_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_bound_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_parent_bound_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_state_table_index() {
|
||||
aes_ctr_generic_test::prop_fork_parent_state_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork() {
|
||||
aes_ctr_generic_test::prop_fork::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_children_remaining_bytes() {
|
||||
aes_ctr_generic_test::prop_fork_children_remaining_bytes::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_remaining_bytes() {
|
||||
aes_ctr_generic_test::prop_fork_parent_remaining_bytes::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_roughly_uniform() {
|
||||
generator_generic_test::test_roughly_uniform::<SoftwareRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fork() {
|
||||
generator_generic_test::test_fork_children::<SoftwareRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generator_determinism() {
|
||||
generator_generic_test::test_generator_determinism::<SoftwareRandomGenerator>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic(expected = "expected test panic")]
|
||||
fn test_bounded_panic() {
|
||||
generator_generic_test::test_bounded_none_should_panic::<SoftwareRandomGenerator>();
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
//! A module using a software fallback implementation of random number generator.
|
||||
|
||||
mod block_cipher;
|
||||
|
||||
mod generator;
|
||||
pub use generator::*;
|
||||
|
||||
#[cfg(feature = "parallel")]
|
||||
mod parallel;
|
||||
#[cfg(feature = "parallel")]
|
||||
pub use parallel::*;
|
||||
@@ -1,94 +0,0 @@
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::{AesCtrGenerator, ParallelChildrenIterator};
|
||||
use crate::generators::implem::soft::block_cipher::SoftwareBlockCipher;
|
||||
use crate::generators::{BytesPerChild, ChildrenCount, ForkError, ParallelRandomGenerator};
|
||||
use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
|
||||
use rayon::prelude::*;
|
||||
|
||||
/// The parallel children iterator used by [`SoftwareRandomGenerator`].
|
||||
///
|
||||
/// Outputs the children generators one by one.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub struct ParallelSoftwareChildrenIterator(
|
||||
rayon::iter::Map<
|
||||
ParallelChildrenIterator<SoftwareBlockCipher>,
|
||||
fn(AesCtrGenerator<SoftwareBlockCipher>) -> SoftwareRandomGenerator,
|
||||
>,
|
||||
);
|
||||
|
||||
impl ParallelIterator for ParallelSoftwareChildrenIterator {
|
||||
type Item = SoftwareRandomGenerator;
|
||||
fn drive_unindexed<C>(self, consumer: C) -> C::Result
|
||||
where
|
||||
C: UnindexedConsumer<Self::Item>,
|
||||
{
|
||||
self.0.drive_unindexed(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
impl IndexedParallelIterator for ParallelSoftwareChildrenIterator {
|
||||
fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
|
||||
self.0.drive(consumer)
|
||||
}
|
||||
fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
|
||||
self.0.with_producer(callback)
|
||||
}
|
||||
}
|
||||
|
||||
impl ParallelRandomGenerator for SoftwareRandomGenerator {
|
||||
type ParChildrenIter = ParallelSoftwareChildrenIterator;
|
||||
|
||||
fn par_try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ParChildrenIter, ForkError> {
|
||||
self.0
|
||||
.par_try_fork(n_children, n_bytes)
|
||||
.map(|iterator| ParallelSoftwareChildrenIterator(iterator.map(SoftwareRandomGenerator)))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::generators::aes_ctr::aes_ctr_parallel_generic_tests;
|
||||
|
||||
#[test]
|
||||
fn prop_fork_first_state_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_first_state_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_last_bound_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_last_bound_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_bound_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_bound_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_state_table_index() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_state_table_index::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_children_remaining_bytes() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_children_remaining_bytes::<SoftwareBlockCipher>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prop_fork_parent_remaining_bytes() {
|
||||
aes_ctr_parallel_generic_tests::prop_fork_parent_remaining_bytes::<SoftwareBlockCipher>();
|
||||
}
|
||||
}
|
||||
@@ -1,235 +0,0 @@
|
||||
//! A module containing random generators objects.
|
||||
//!
|
||||
//! See [crate-level](`crate`) explanations.
|
||||
use crate::seeders::Seed;
|
||||
use std::error::Error;
|
||||
use std::fmt::{Display, Formatter};
|
||||
|
||||
/// The number of children created when a generator is forked.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct ChildrenCount(pub usize);
|
||||
|
||||
/// The number of bytes each child can generate, when a generator is forked.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct BytesPerChild(pub usize);
|
||||
|
||||
/// A structure representing the number of bytes between two table indices.
|
||||
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq)]
|
||||
pub struct ByteCount(pub u128);
|
||||
|
||||
/// An error occurring during a generator fork.
|
||||
#[derive(Debug)]
|
||||
pub enum ForkError {
|
||||
ForkTooLarge,
|
||||
ZeroChildrenCount,
|
||||
ZeroBytesPerChild,
|
||||
}
|
||||
|
||||
impl Display for ForkError {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
ForkError::ForkTooLarge => {
|
||||
write!(
|
||||
f,
|
||||
"The children generators would output bytes after the parent bound. "
|
||||
)
|
||||
}
|
||||
ForkError::ZeroChildrenCount => {
|
||||
write!(
|
||||
f,
|
||||
"The number of children in the fork must be greater than zero."
|
||||
)
|
||||
}
|
||||
ForkError::ZeroBytesPerChild => {
|
||||
write!(
|
||||
f,
|
||||
"The number of bytes per child must be greater than zero."
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl Error for ForkError {}
|
||||
|
||||
/// A trait for cryptographically secure pseudo-random generators.
|
||||
///
|
||||
/// See the [crate-level](#crate) documentation for details.
|
||||
pub trait RandomGenerator: Iterator<Item = u8> {
|
||||
/// The iterator over children generators, returned by `try_fork` in case of success.
|
||||
type ChildrenIter: Iterator<Item = Self>;
|
||||
|
||||
/// Creates a new generator from a seed.
|
||||
///
|
||||
/// This operation is usually costly to perform, as the aes round keys need to be generated from
|
||||
/// the seed.
|
||||
fn new(seed: Seed) -> Self;
|
||||
|
||||
/// Returns the number of bytes that can still be outputted by the generator before reaching its
|
||||
/// bound.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// A fresh generator can generate 2¹³² bytes. Unfortunately, no rust integer type in is able
|
||||
/// to encode such a large number. Consequently [`ByteCount`] uses the largest integer type
|
||||
/// available to encode this value: the `u128` type. For this reason, this method does not
|
||||
/// effectively return the number of remaining bytes, but instead
|
||||
/// `min(2¹²⁸-1, remaining_bytes)`.
|
||||
fn remaining_bytes(&self) -> ByteCount;
|
||||
|
||||
/// Returns the next byte of the stream, if the generator did not yet reach its bound.
|
||||
fn next_byte(&mut self) -> Option<u8> {
|
||||
self.next()
|
||||
}
|
||||
|
||||
/// Tries to fork the generator into an iterator of `n_children` new generators, each able to
|
||||
/// output `n_bytes` bytes.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// To be successful, the number of remaining bytes for the parent generator must be larger than
|
||||
/// `n_children*n_bytes`.
|
||||
fn try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ChildrenIter, ForkError>;
|
||||
}
|
||||
|
||||
/// A trait extending [`RandomGenerator`] to the parallel iterators of `rayon`.
|
||||
#[cfg(feature = "parallel")]
|
||||
pub trait ParallelRandomGenerator: RandomGenerator + Send {
|
||||
/// The iterator over children generators, returned by `par_try_fork` in case of success.
|
||||
type ParChildrenIter: rayon::prelude::IndexedParallelIterator<Item = Self>;
|
||||
|
||||
/// Tries to fork the generator into a parallel iterator of `n_children` new generators, each
|
||||
/// able to output `n_bytes` bytes.
|
||||
///
|
||||
/// Note:
|
||||
/// -----
|
||||
///
|
||||
/// To be successful, the number of remaining bytes for the parent generator must be larger than
|
||||
/// `n_children*n_bytes`.
|
||||
fn par_try_fork(
|
||||
&mut self,
|
||||
n_children: ChildrenCount,
|
||||
n_bytes: BytesPerChild,
|
||||
) -> Result<Self::ParChildrenIter, ForkError>;
|
||||
}
|
||||
|
||||
mod aes_ctr;
|
||||
|
||||
mod implem;
|
||||
pub use implem::*;
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod generator_generic_test {
|
||||
#![allow(unused)] // to please clippy when tests are not activated
|
||||
use super::*;
|
||||
use rand::Rng;
|
||||
|
||||
const REPEATS: usize = 1_000;
|
||||
|
||||
fn any_seed() -> impl Iterator<Item = Seed> {
|
||||
std::iter::repeat_with(|| Seed(rand::thread_rng().gen()))
|
||||
}
|
||||
|
||||
fn some_children_count() -> impl Iterator<Item = ChildrenCount> {
|
||||
std::iter::repeat_with(|| ChildrenCount(rand::thread_rng().gen::<usize>() % 16 + 1))
|
||||
}
|
||||
|
||||
fn some_bytes_per_child() -> impl Iterator<Item = BytesPerChild> {
|
||||
std::iter::repeat_with(|| BytesPerChild(rand::thread_rng().gen::<usize>() % 128 + 1))
|
||||
}
|
||||
|
||||
/// Checks that the PRNG roughly generates uniform numbers.
|
||||
///
|
||||
/// To do that, we perform an histogram of the occurrences of each byte value, over a fixed
|
||||
/// number of samples and check that the empirical probabilities of the bins are close to
|
||||
/// the theoretical probabilities.
|
||||
pub fn test_roughly_uniform<G: RandomGenerator>() {
|
||||
// Number of bins to use for the histogram.
|
||||
const N_BINS: usize = u8::MAX as usize + 1;
|
||||
// Number of samples to use for the histogram.
|
||||
let n_samples = 10_000_000_usize;
|
||||
// Theoretical probability of a each bins.
|
||||
let expected_prob: f64 = 1. / N_BINS as f64;
|
||||
// Absolute error allowed on the empirical probabilities.
|
||||
// This value was tuned to make the test pass on an arguably correct state of
|
||||
// implementation. 10^-4 precision is arguably pretty fine for this rough test, but it would
|
||||
// be interesting to improve this test.
|
||||
let precision = 10f64.powi(-3);
|
||||
|
||||
for _ in 0..REPEATS {
|
||||
// We instantiate a new generator.
|
||||
let seed = any_seed().next().unwrap();
|
||||
let mut generator = G::new(seed);
|
||||
// We create a new histogram
|
||||
let mut counts = [0usize; N_BINS];
|
||||
// We fill the histogram.
|
||||
for _ in 0..n_samples {
|
||||
counts[generator.next_byte().unwrap() as usize] += 1;
|
||||
}
|
||||
// We check that the empirical probabilities are close enough to the theoretical one.
|
||||
counts
|
||||
.iter()
|
||||
.map(|a| (*a as f64) / (n_samples as f64))
|
||||
.for_each(|a| assert!((a - expected_prob).abs() < precision))
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks that given a state and a key, the PRNG is determinist.
|
||||
pub fn test_generator_determinism<G: RandomGenerator>() {
|
||||
for _ in 0..REPEATS {
|
||||
let seed = any_seed().next().unwrap();
|
||||
let mut first_generator = G::new(seed);
|
||||
let mut second_generator = G::new(seed);
|
||||
for _ in 0..1024 {
|
||||
assert_eq!(first_generator.next(), second_generator.next());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks that forks returns a bounded child, and that the proper number of bytes can be
|
||||
/// generated.
|
||||
pub fn test_fork_children<G: RandomGenerator>() {
|
||||
for _ in 0..REPEATS {
|
||||
let ((seed, n_children), n_bytes) = any_seed()
|
||||
.zip(some_children_count())
|
||||
.zip(some_bytes_per_child())
|
||||
.next()
|
||||
.unwrap();
|
||||
let mut gen = G::new(seed);
|
||||
let mut bounded = gen.try_fork(n_children, n_bytes).unwrap().next().unwrap();
|
||||
assert_eq!(bounded.remaining_bytes(), ByteCount(n_bytes.0 as u128));
|
||||
for _ in 0..n_bytes.0 {
|
||||
bounded.next().unwrap();
|
||||
}
|
||||
|
||||
// Assert we are at the bound
|
||||
assert!(bounded.next().is_none());
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks that a bounded prng returns none when exceeding the allowed number of bytes.
|
||||
///
|
||||
/// To properly check for panic use `#[should_panic(expected = "expected test panic")]` as an
|
||||
/// attribute on the test function.
|
||||
pub fn test_bounded_none_should_panic<G: RandomGenerator>() {
|
||||
let ((seed, n_children), n_bytes) = any_seed()
|
||||
.zip(some_children_count())
|
||||
.zip(some_bytes_per_child())
|
||||
.next()
|
||||
.unwrap();
|
||||
let mut gen = G::new(seed);
|
||||
let mut bounded = gen.try_fork(n_children, n_bytes).unwrap().next().unwrap();
|
||||
assert_eq!(bounded.remaining_bytes(), ByteCount(n_bytes.0 as u128));
|
||||
for _ in 0..n_bytes.0 {
|
||||
assert!(bounded.next().is_some());
|
||||
}
|
||||
|
||||
// One call too many, should panic
|
||||
bounded.next().ok_or("expected test panic").unwrap();
|
||||
}
|
||||
}
|
||||
@@ -1,114 +0,0 @@
|
||||
#![deny(rustdoc::broken_intra_doc_links)]
|
||||
//! Cryptographically secure pseudo random number generator.
|
||||
//!
|
||||
//! Welcome to the `concrete-csprng` documentation.
|
||||
//!
|
||||
//! This crate provides a fast cryptographically secure pseudo-random number generator, suited to
|
||||
//! work in a multithreaded setting.
|
||||
//!
|
||||
//! Random Generators
|
||||
//! =================
|
||||
//!
|
||||
//! The central abstraction of this crate is the [`RandomGenerator`](generators::RandomGenerator)
|
||||
//! trait, which is implemented by different types, each supporting a different platform. In
|
||||
//! essence, a type implementing [`RandomGenerator`](generators::RandomGenerator) is a type that
|
||||
//! outputs a new pseudo-random byte at each call to
|
||||
//! [`next_byte`](generators::RandomGenerator::next_byte). Such a generator `g` can be seen as
|
||||
//! enclosing a growing index into an imaginary array of pseudo-random bytes:
|
||||
//! ```ascii
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M-1 │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃ │ │ │ │ │ │ │ │ │ │...│ ┃ │
|
||||
//! ┗↥┷━┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
|
||||
//! g │
|
||||
//! │
|
||||
//! g.next_byte() │
|
||||
//! │
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M-1 │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃╳│ │ │ │ │ │ │ │ │ │...│ ┃ │
|
||||
//! ┗━┷↥┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
|
||||
//! g │
|
||||
//! │
|
||||
//! g.next_byte() │ legend:
|
||||
//! │ -------
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M-1 │ ↥ : next byte to be outputted by g
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │ │: byte not yet outputted by g
|
||||
//! ┃╳│╳│ │ │ │ │ │ │ │ │...│ ┃ │ │╳│: byte already outputted by g
|
||||
//! ┗━┷━┷↥┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
|
||||
//! g 🭭
|
||||
//! ```
|
||||
//!
|
||||
//! While being large, this imaginary array is still bounded to M = 2¹³² bytes. Consequently, a
|
||||
//! generator is always bounded to a maximal index. That is, there is always a max amount of
|
||||
//! elements of this array that can be outputted by the generator. By default, generators created
|
||||
//! via [`new`](generators::RandomGenerator::new) are always bounded to M-1.
|
||||
//!
|
||||
//! Tree partition of the pseudo-random stream
|
||||
//! ==========================================
|
||||
//!
|
||||
//! One particularity of this implementation is that you can use the
|
||||
//! [`try_fork`](generators::RandomGenerator::try_fork) method to create an arbitrary partition tree
|
||||
//! of a region of this array. Indeed, calling `try_fork(nc, nb)` outputs `nc` new generators, each
|
||||
//! able to output `nb` bytes. The `try_fork` method ensures that the states and bounds of the
|
||||
//! parent and children generators are set so as to prevent the same substream to be outputted
|
||||
//! twice:
|
||||
//! ```ascii
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃P│P│P│P│P│P│P│P│P│P│...│P┃ │
|
||||
//! ┗↥┷━┷━┷━┷━┷━┷━┷━┷━┷━┷━━━┷━┛ │
|
||||
//! p │
|
||||
//! │
|
||||
//! (a,b) = p.fork(2,4) │
|
||||
//! │
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃A│A│A│A│B│B│B│B│P│P│...│P┃ │
|
||||
//! ┗↥┷━┷━┷━┷↥┷━┷━┷━┷↥┷━┷━━━┷━┛ │
|
||||
//! a b p │
|
||||
//! │ legend:
|
||||
//! (c,d) = b.fork(2, 1) │ -------
|
||||
//! │ ↥ : next byte to be outputted by p
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │ │P│: byte to be outputted by p
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │╳│: byte already outputted
|
||||
//! ┃A│A│A│A│C│D│B│B│P│P│...│P┃ │
|
||||
//! ┗↥┷━┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
|
||||
//! a c d b p 🭭
|
||||
//! ```
|
||||
//!
|
||||
//! This makes it possible to consume the stream at different places. This is particularly useful in
|
||||
//! a multithreaded setting, in which we want to use the same generator from different independent
|
||||
//! threads:
|
||||
//!
|
||||
//! ```ascii
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃A│A│A│A│C│D│B│B│P│P│...│P┃ │
|
||||
//! ┗↥┷━┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
|
||||
//! a c d b p │
|
||||
//! │
|
||||
//! a.next_byte() │
|
||||
//! │
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │
|
||||
//! ┃╳│A│A│A│C│D│B│B│P│P│...│P┃ │
|
||||
//! ┗━┷↥┷━┷━┷↥┷↥┷↥┷━┷↥┷━┷━━━┷━┛ │
|
||||
//! a c d b p │
|
||||
//! │ legend:
|
||||
//! b.next_byte() │ -------
|
||||
//! │ ↥ : next byte to be outputted by p
|
||||
//! 0 1 2 3 4 5 6 7 8 9 M │ │P│: byte to be outputted by p
|
||||
//! ┏━┯━┯━┯━┯━┯━┯━┯━┯━┯━┯━━━┯━┓ │ │╳│: byte already outputted
|
||||
//! ┃╳│A│A│A│C│D│╳│B│P│P│...│P┃ │
|
||||
//! ┗━┷↥┷━┷━┷↥┷↥┷━┷↥┷↥┷━┷━━━┷━┛ │
|
||||
//! a c d b p 🭭
|
||||
//! ```
|
||||
//!
|
||||
//! Implementation
|
||||
//! ==============
|
||||
//!
|
||||
//! The implementation is based on the AES blockcipher used in counter (CTR) mode, as presented
|
||||
//! in the ISO/IEC 18033-4 document.
|
||||
pub mod generators;
|
||||
pub mod seeders;
|
||||
@@ -1,141 +0,0 @@
|
||||
use crate::seeders::{Seed, Seeder};
|
||||
use libc;
|
||||
use std::cmp::Ordering;
|
||||
|
||||
/// There is no `rseed` equivalent in the ARM specification until `ARMv8.5-A`.
|
||||
/// However it seems that these instructions are not exposed in `core::arch::aarch64`.
|
||||
///
|
||||
/// Our primary interest for supporting aarch64 targets is AppleSilicon support
|
||||
/// which for the M1 macs available, they are based on the `ARMv8.4-A` set.
|
||||
///
|
||||
/// So we fall back to using a function from Apple's API which
|
||||
/// uses the [Secure Enclave] to generate cryptographically secure random bytes.
|
||||
///
|
||||
/// [Secure Enclave]: https://support.apple.com/fr-fr/guide/security/sec59b0b31ff/web
|
||||
mod secure_enclave {
|
||||
pub enum __SecRandom {}
|
||||
pub type SecRandomRef = *const __SecRandom;
|
||||
use libc::{c_int, c_void};
|
||||
|
||||
#[link(name = "Security", kind = "framework")]
|
||||
extern "C" {
|
||||
pub static kSecRandomDefault: SecRandomRef;
|
||||
|
||||
pub fn SecRandomCopyBytes(rnd: SecRandomRef, count: usize, bytes: *mut c_void) -> c_int;
|
||||
}
|
||||
|
||||
pub fn generate_random_bytes(bytes: &mut [u8]) -> std::io::Result<()> {
|
||||
// As per Apple's documentation:
|
||||
// - https://developer.apple.com/documentation/security/randomization_services?language=objc
|
||||
// - https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc
|
||||
//
|
||||
// The `SecRandomCopyBytes` "Generate cryptographically secure random numbers"
|
||||
unsafe {
|
||||
let res = SecRandomCopyBytes(
|
||||
kSecRandomDefault,
|
||||
bytes.len(),
|
||||
bytes.as_mut_ptr() as *mut c_void,
|
||||
);
|
||||
if res != 0 {
|
||||
Err(std::io::Error::last_os_error())
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A seeder which uses the `SecRandomCopyBytes` function from Apple's `Security` framework.
|
||||
///
|
||||
/// <https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc>
|
||||
pub struct AppleSecureEnclaveSeeder;
|
||||
|
||||
impl Seeder for AppleSecureEnclaveSeeder {
|
||||
fn seed(&mut self) -> Seed {
|
||||
// 16 bytes == 128 bits
|
||||
let mut bytes = [0u8; 16];
|
||||
secure_enclave::generate_random_bytes(&mut bytes)
|
||||
.expect("Failure while using Apple secure enclave: {err:?}");
|
||||
|
||||
Seed(u128::from_le_bytes(bytes))
|
||||
}
|
||||
|
||||
fn is_available() -> bool {
|
||||
let os_version_sysctl_name = match std::ffi::CString::new("kern.osproductversion") {
|
||||
Ok(c_str) => c_str,
|
||||
_ => return false,
|
||||
};
|
||||
|
||||
// Big enough buffer to get a version output as an ASCII string
|
||||
const OUTPUT_BUFFER_SIZE: usize = 64;
|
||||
let mut output_buffer_size = OUTPUT_BUFFER_SIZE;
|
||||
let mut output_buffer = [0u8; OUTPUT_BUFFER_SIZE];
|
||||
let res = unsafe {
|
||||
libc::sysctlbyname(
|
||||
os_version_sysctl_name.as_ptr() as *const _ as *const _,
|
||||
&mut output_buffer as *mut _ as *mut _,
|
||||
&mut output_buffer_size as *mut _ as *mut _,
|
||||
std::ptr::null_mut(),
|
||||
0,
|
||||
)
|
||||
};
|
||||
|
||||
if res != 0 {
|
||||
return false;
|
||||
}
|
||||
|
||||
let result_c_str =
|
||||
match std::ffi::CStr::from_bytes_with_nul(&output_buffer[..output_buffer_size]) {
|
||||
Ok(c_str) => c_str,
|
||||
_ => return false,
|
||||
};
|
||||
|
||||
let result_string = match result_c_str.to_str() {
|
||||
Ok(str) => str,
|
||||
_ => return false,
|
||||
};
|
||||
|
||||
// Normally we get a major version and minor version
|
||||
let split_string: Vec<&str> = result_string.split('.').collect();
|
||||
|
||||
let mut major = -1;
|
||||
let mut minor = -1;
|
||||
|
||||
// Major part of the version string
|
||||
if !split_string.is_empty() {
|
||||
major = match split_string[0].parse() {
|
||||
Ok(major_from_str) => major_from_str,
|
||||
_ => return false,
|
||||
};
|
||||
}
|
||||
|
||||
// SecRandomCopyBytes is available starting with mac OS 10.7
|
||||
// https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc
|
||||
// This match pattern is recommended by clippy, so we oblige here
|
||||
match major.cmp(&10) {
|
||||
Ordering::Greater => true,
|
||||
Ordering::Equal => {
|
||||
// Minor part of the version string
|
||||
if split_string.len() >= 2 {
|
||||
minor = match split_string[1].parse() {
|
||||
Ok(minor_from_str) => minor_from_str,
|
||||
_ => return false,
|
||||
};
|
||||
}
|
||||
minor >= 7
|
||||
}
|
||||
Ordering::Less => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
|
||||
|
||||
#[test]
|
||||
fn check_bounded_sequence_difference() {
|
||||
check_seeder_fixed_sequences_different(|_| AppleSecureEnclaveSeeder);
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
#[cfg(target_os = "macos")]
|
||||
mod apple_secure_enclave_seeder;
|
||||
#[cfg(target_os = "macos")]
|
||||
pub use apple_secure_enclave_seeder::AppleSecureEnclaveSeeder;
|
||||
|
||||
#[cfg(feature = "seeder_x86_64_rdseed")]
|
||||
mod rdseed;
|
||||
#[cfg(feature = "seeder_x86_64_rdseed")]
|
||||
pub use rdseed::RdseedSeeder;
|
||||
|
||||
#[cfg(feature = "seeder_unix")]
|
||||
mod unix;
|
||||
#[cfg(feature = "seeder_unix")]
|
||||
pub use unix::UnixSeeder;
|
||||
@@ -1,51 +0,0 @@
|
||||
use crate::seeders::{Seed, Seeder};
|
||||
|
||||
/// A seeder which uses the `rdseed` x86_64 instruction.
|
||||
///
|
||||
/// The `rdseed` instruction allows to deliver seeds from a hardware source of entropy see
|
||||
/// <https://www.felixcloutier.com/x86/rdseed> .
|
||||
pub struct RdseedSeeder;
|
||||
|
||||
impl Seeder for RdseedSeeder {
|
||||
fn seed(&mut self) -> Seed {
|
||||
Seed(unsafe { rdseed_random_m128() })
|
||||
}
|
||||
|
||||
fn is_available() -> bool {
|
||||
is_x86_feature_detected!("rdseed")
|
||||
}
|
||||
}
|
||||
|
||||
// Generates a random 128 bits value from rdseed
|
||||
#[target_feature(enable = "rdseed")]
|
||||
unsafe fn rdseed_random_m128() -> u128 {
|
||||
let mut rand1: u64 = 0;
|
||||
let mut rand2: u64 = 0;
|
||||
let mut output_bytes = [0u8; 16];
|
||||
unsafe {
|
||||
loop {
|
||||
if core::arch::x86_64::_rdseed64_step(&mut rand1) == 1 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
loop {
|
||||
if core::arch::x86_64::_rdseed64_step(&mut rand2) == 1 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
output_bytes[0..8].copy_from_slice(&rand1.to_ne_bytes());
|
||||
output_bytes[8..16].copy_from_slice(&rand2.to_ne_bytes());
|
||||
u128::from_ne_bytes(output_bytes)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
|
||||
|
||||
#[test]
|
||||
fn check_bounded_sequence_difference() {
|
||||
check_seeder_fixed_sequences_different(|_| RdseedSeeder);
|
||||
}
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
use crate::seeders::{Seed, Seeder};
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
|
||||
/// A seeder which uses the `/dev/random` source on unix-like systems.
|
||||
pub struct UnixSeeder {
|
||||
counter: u128,
|
||||
secret: u128,
|
||||
file: File,
|
||||
}
|
||||
|
||||
impl UnixSeeder {
|
||||
/// Creates a new seeder from a user defined secret.
|
||||
///
|
||||
/// Important:
|
||||
/// ----------
|
||||
///
|
||||
/// This secret is used to ensure the quality of the seed in scenarios where `/dev/random` may
|
||||
/// be compromised.
|
||||
///
|
||||
/// The attack hypotheses are as follow:
|
||||
/// - `/dev/random` output can be predicted by a process running on the machine by just
|
||||
/// observing various states of the machine
|
||||
/// - The attacker cannot read data from the process where `concrete-csprng` is running
|
||||
///
|
||||
/// Using a secret in `concrete-csprng` allows to generate values that the attacker cannot
|
||||
/// predict, making this seeder secure on systems were `/dev/random` outputs can be
|
||||
/// predicted.
|
||||
pub fn new(secret: u128) -> UnixSeeder {
|
||||
let file = std::fs::File::open("/dev/random").expect("Failed to open /dev/random .");
|
||||
let counter = std::time::UNIX_EPOCH
|
||||
.elapsed()
|
||||
.expect("Failed to initialize unix seeder.")
|
||||
.as_nanos();
|
||||
UnixSeeder {
|
||||
secret,
|
||||
counter,
|
||||
file,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Seeder for UnixSeeder {
|
||||
fn seed(&mut self) -> Seed {
|
||||
let output = self.secret ^ self.counter ^ dev_random(&mut self.file);
|
||||
self.counter = self.counter.wrapping_add(1);
|
||||
Seed(output)
|
||||
}
|
||||
|
||||
fn is_available() -> bool {
|
||||
cfg!(target_family = "unix")
|
||||
}
|
||||
}
|
||||
|
||||
fn dev_random(random: &mut File) -> u128 {
|
||||
let mut buf = [0u8; 16];
|
||||
random
|
||||
.read_exact(&mut buf[..])
|
||||
.expect("Failed to read from /dev/random .");
|
||||
u128::from_ne_bytes(buf)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::seeders::generic_tests::check_seeder_fixed_sequences_different;
|
||||
|
||||
#[test]
|
||||
fn check_bounded_sequence_difference() {
|
||||
check_seeder_fixed_sequences_different(UnixSeeder::new);
|
||||
}
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
//! A module containing seeders objects.
|
||||
//!
|
||||
//! When initializing a generator, one needs to provide a [`Seed`], which is then used as key to the
|
||||
//! AES blockcipher. As a consequence, the quality of the outputs of the generator is directly
|
||||
//! conditioned by the quality of this seed. This module proposes different mechanisms to deliver
|
||||
//! seeds that can accommodate varying scenarios.
|
||||
|
||||
/// A seed value, used to initialize a generator.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub struct Seed(pub u128);
|
||||
|
||||
/// A trait representing a seeding strategy.
|
||||
pub trait Seeder {
|
||||
/// Generates a new seed.
|
||||
fn seed(&mut self) -> Seed;
|
||||
|
||||
/// Check whether the seeder can be used on the current machine. This function may check if some
|
||||
/// required CPU features are available or if some OS features are available for example.
|
||||
fn is_available() -> bool
|
||||
where
|
||||
Self: Sized;
|
||||
}
|
||||
|
||||
mod implem;
|
||||
// This import statement can be empty if seeder features are disabled, rustc's behavior changed to
|
||||
// warn of empty modules, we know this can happen, so allow it.
|
||||
#[allow(unused_imports)]
|
||||
pub use implem::*;
|
||||
|
||||
#[cfg(test)]
|
||||
mod generic_tests {
|
||||
use crate::seeders::Seeder;
|
||||
|
||||
/// Naively verifies that two fixed-size sequences generated by repeatedly calling the seeder
|
||||
/// are different.
|
||||
#[allow(unused)] // to please clippy when tests are not activated
|
||||
pub fn check_seeder_fixed_sequences_different<S: Seeder, F: Fn(u128) -> S>(
|
||||
construct_seeder: F,
|
||||
) {
|
||||
const SEQUENCE_SIZE: usize = 500;
|
||||
const REPEATS: usize = 10_000;
|
||||
for i in 0..REPEATS {
|
||||
let mut seeder = construct_seeder(i as u128);
|
||||
let orig_seed = seeder.seed();
|
||||
for _ in 0..SEQUENCE_SIZE {
|
||||
assert_ne!(seeder.seed(), orig_seed);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
[package]
|
||||
name = "concrete-float"
|
||||
version = "0.1.0-beta.0"
|
||||
edition = "2018"
|
||||
authors = ["Zama team"]
|
||||
license = "BSD-3-Clause-Clear"
|
||||
description = "Homomorphic Integer circuit interface for the concrete FHE library."
|
||||
homepage = "https://www.zama.ai/concrete-framework"
|
||||
documentation = "https://docs.zama.ai/home/"
|
||||
repository = "https://github.com/zama-ai/concrete"
|
||||
readme = "README.md"
|
||||
keywords = ["fully", "homomorphic", "encryption", "fhe", "cryptography"]
|
||||
|
||||
[dependencies]
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
aligned-vec = { version = "0.5", features = ["serde"] }
|
||||
dyn-stack = { version = "0.9" }
|
||||
rayon = "1.5"
|
||||
|
||||
lazy_static = { version = "1.4.0", optional = true }
|
||||
|
||||
tfhe = { path = "../tfhe", features = ["shortint", "integer"] }
|
||||
|
||||
[target.'cfg(target_arch = "x86_64")'.dependencies]
|
||||
tfhe = { path = "../tfhe", features = ["shortint", "integer", "x86_64-unix"] }
|
||||
|
||||
[target.'cfg(target_arch = "aarch64")'.dependencies]
|
||||
tfhe = { path = "../tfhe", features = ["shortint", "integer", "aarch64-unix"] }
|
||||
|
||||
[features]
|
||||
nightly-avx512 = ["tfhe/nightly-avx512"]
|
||||
seeder_x86_64_rdseed = []
|
||||
seeder_unix = []
|
||||
generator_x86_64_aesni = []
|
||||
generator_fallback = []
|
||||
generator_aarch64_aes = []
|
||||
|
||||
x86_64 = [
|
||||
"seeder_x86_64_rdseed",
|
||||
"generator_x86_64_aesni",
|
||||
"generator_fallback",
|
||||
]
|
||||
x86_64-unix = ["x86_64", "seeder_unix"]
|
||||
aarch64 = [ "generator_aarch64_aes", "generator_fallback"]
|
||||
aarch64-unix = ["aarch64", "seeder_unix"]
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = "0.5.1"
|
||||
lazy_static = "1.4.0"
|
||||
bincode = "1.3.3"
|
||||
paste = "1.0.7"
|
||||
rand = "0.8.4"
|
||||
doc-comment = "0.3.3"
|
||||
#concrete-shortint = { path = "../tfhe", features = ["internal-keycache"] }
|
||||
|
||||
#[features]
|
||||
# Keychache used to speed up tests and benches
|
||||
# by not requiring to regererate keys at each launch
|
||||
#internal-keycache = ["lazy_static", "shortint/src/internal-keycache"]
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
rustdoc-args = ["--html-in-header", "katex-header.html"]
|
||||
|
||||
[[bench]]
|
||||
name = "float-bench"
|
||||
path = "benches/bench.rs"
|
||||
harness = false
|
||||
required-features = []
|
||||
@@ -1,32 +0,0 @@
|
||||
BSD 3-Clause Clear License
|
||||
|
||||
Copyright © 2022 ZAMA.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or other
|
||||
materials provided with the distribution.
|
||||
|
||||
3. Neither the name of ZAMA nor the names of its contributors may be used to endorse
|
||||
or promote products derived from this software without specific prior written permission.
|
||||
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE*.
|
||||
THIS SOFTWARE IS PROVIDED BY THE ZAMA AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
|
||||
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
|
||||
ZAMA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
||||
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
*In addition to the rights carried by this license, ZAMA grants to the user a non-exclusive,
|
||||
free and non-commercial license on all patents filed in its name relating to the open-source
|
||||
code (the "Patents") for the sole purpose of evaluation, development, research, prototyping
|
||||
and experimentation.
|
||||
@@ -1,11 +0,0 @@
|
||||
# concrete Integer
|
||||
|
||||
`concrete-integer` is a Rust library built on top of `concrete-shortint`, it
|
||||
combines multiple `shortint` to handle encrypted integers of "arbitrary"
|
||||
size.
|
||||
|
||||
## License
|
||||
|
||||
This software is distributed under the BSD-3-Clause-Clear license. If you have any questions,
|
||||
please contact us at `hello@zama.ai`.
|
||||
|
||||
@@ -1,304 +0,0 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use concrete_float::gen_keys;
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use rand::Rng;
|
||||
|
||||
// Previous Parameters
|
||||
#[allow(unused_imports)]
|
||||
use concrete_float::parameters::{FINAL_PARAM_16,
|
||||
FINAL_PARAM_2_2_32, FINAL_PARAM_32,
|
||||
FINAL_PARAM_64, FINAL_PARAM_8,
|
||||
FINAL_WOP_PARAM_15, FINAL_WOP_PARAM_16,
|
||||
FINAL_WOP_PARAM_2_2_32, FINAL_WOP_PARAM_32,
|
||||
FINAL_WOP_PARAM_64, FINAL_WOP_PARAM_8,
|
||||
FINAL_PARAM_64_TCHESS, FINAL_PARAM_32_TCHESS,
|
||||
FINAL_WOP_PARAM_64_TCHESS, FINAL_WOP_PARAM_32_TCHESS};
|
||||
|
||||
use concrete_float::parameters::{FINAL_PARAM_16_BIS, FINAL_PARAM_32_BIS,
|
||||
FINAL_PARAM_64_BIS, FINAL_PARAM_8_BIS,
|
||||
FINAL_WOP_PARAM_16_BIS, FINAL_WOP_PARAM_32_BIS,
|
||||
FINAL_WOP_PARAM_64_BIS, FINAL_WOP_PARAM_8_BIS};
|
||||
use tfhe::shortint;
|
||||
|
||||
macro_rules! named_param {
|
||||
($param:ident) => {
|
||||
(stringify!($param), $param)
|
||||
};
|
||||
}
|
||||
|
||||
criterion_main!(float_parallelized, float);
|
||||
|
||||
struct Parameters {
|
||||
pbsparameters: shortint::ClassicPBSParameters,
|
||||
wopbsparameters: shortint::WopbsParameters,
|
||||
len_man: usize,
|
||||
len_exp: usize,
|
||||
}
|
||||
|
||||
//Parameter for a Floating point 64-bits equivalent
|
||||
const PARAM_64: Parameters = Parameters {
|
||||
pbsparameters: FINAL_PARAM_64_BIS,
|
||||
wopbsparameters: FINAL_WOP_PARAM_64_BIS,
|
||||
len_man: 27,
|
||||
len_exp: 5,
|
||||
};
|
||||
|
||||
|
||||
//Parameter for a Floating point 32-bits equivalent
|
||||
const PARAM_32: Parameters = Parameters {
|
||||
pbsparameters: FINAL_PARAM_32_BIS,
|
||||
wopbsparameters: FINAL_WOP_PARAM_32_BIS,
|
||||
len_man: 13,
|
||||
len_exp: 4,
|
||||
};
|
||||
|
||||
|
||||
//Parameter for a Floating point 16-bits equivalent
|
||||
const PARAM_16: Parameters = Parameters {
|
||||
pbsparameters: FINAL_PARAM_16_BIS,
|
||||
wopbsparameters: FINAL_WOP_PARAM_16_BIS,
|
||||
len_man: 6,
|
||||
len_exp: 3,
|
||||
};
|
||||
|
||||
|
||||
//Parameter for a Floating point 8-bits equivalent
|
||||
const PARAM_8: Parameters = Parameters {
|
||||
pbsparameters: FINAL_PARAM_8_BIS,
|
||||
wopbsparameters: FINAL_WOP_PARAM_8_BIS,
|
||||
len_man: 3,
|
||||
len_exp: 2,
|
||||
};
|
||||
|
||||
|
||||
//Parameter for a Floating point 64-bits equivalent
|
||||
//With failure probability smaller than PARAM_64
|
||||
const PARAM_TCHESS_64: Parameters = Parameters {
|
||||
pbsparameters: FINAL_PARAM_64_TCHESS,
|
||||
wopbsparameters: FINAL_WOP_PARAM_64_TCHESS,
|
||||
len_man: 27,
|
||||
len_exp: 5,
|
||||
};
|
||||
|
||||
|
||||
//Parameter for a Floating point 32-bits equivalent
|
||||
//With failure probability smaller than PARAM_32
|
||||
const PARAM_TCHESS_32: Parameters = Parameters {
|
||||
pbsparameters: FINAL_PARAM_32_TCHESS,
|
||||
wopbsparameters: FINAL_WOP_PARAM_32_TCHESS,
|
||||
len_man: 13,
|
||||
len_exp: 4,
|
||||
};
|
||||
|
||||
|
||||
const SERVER_KEY_BENCH_PARAMS: [(&str, Parameters);6] =
|
||||
[
|
||||
named_param!(PARAM_8),
|
||||
named_param!(PARAM_16),
|
||||
named_param!(PARAM_32),
|
||||
named_param!(PARAM_64),
|
||||
named_param!(PARAM_TCHESS_32),
|
||||
named_param!(PARAM_TCHESS_64),
|
||||
];
|
||||
|
||||
criterion_group!(
|
||||
float,
|
||||
add,
|
||||
mul,
|
||||
relu,
|
||||
sigmoid,
|
||||
);
|
||||
|
||||
criterion_group!(
|
||||
float_parallelized,
|
||||
add_parallelized,
|
||||
mul_parallelized,
|
||||
div_parallelized,,
|
||||
);
|
||||
|
||||
|
||||
fn relu(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("operation");
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param_name, param) in SERVER_KEY_BENCH_PARAMS {
|
||||
let (cks, sks) = gen_keys(
|
||||
param.pbsparameters,
|
||||
param.wopbsparameters,
|
||||
param.len_man,
|
||||
param.len_exp,
|
||||
);
|
||||
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct = cks.encrypt(msg);
|
||||
|
||||
let bench_id = format!("{}::{}", "Relu", param_name);
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
sks.relu(&ct);
|
||||
})
|
||||
});
|
||||
}
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn sigmoid(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("operation");
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param_name, param) in SERVER_KEY_BENCH_PARAMS {
|
||||
let (cks, sks) = gen_keys(
|
||||
param.pbsparameters,
|
||||
param.wopbsparameters,
|
||||
param.len_man,
|
||||
param.len_exp,
|
||||
);
|
||||
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct = cks.encrypt(msg);
|
||||
|
||||
let bench_id = format!("{}::{}", "sigmoid", param_name);
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
sks.sigmoid(&ct);
|
||||
})
|
||||
});
|
||||
}
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn mul(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("operation");
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param_name, param) in SERVER_KEY_BENCH_PARAMS {
|
||||
let (cks, sks) = gen_keys(
|
||||
param.pbsparameters,
|
||||
param.wopbsparameters,
|
||||
param.len_man,
|
||||
param.len_exp,
|
||||
);
|
||||
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct1 = cks.encrypt(msg);
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct2 = cks.encrypt(msg);
|
||||
|
||||
let bench_id = format!("{}::{}", "mul", param_name);
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
sks.mul_total(&ct1, &ct2);
|
||||
})
|
||||
});
|
||||
}
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn mul_parallelized(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("operation");
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param_name, param) in SERVER_KEY_BENCH_PARAMS {
|
||||
let (cks, sks) = gen_keys(
|
||||
param.pbsparameters,
|
||||
param.wopbsparameters,
|
||||
param.len_man,
|
||||
param.len_exp,
|
||||
);
|
||||
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct1 = cks.encrypt(msg);
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct2 = cks.encrypt(msg);
|
||||
|
||||
let bench_id = format!("{}::{}", "mul parallelized", param_name);
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
sks.mul_total_parallelized(&ct1, &ct2);
|
||||
})
|
||||
});
|
||||
}
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn div_parallelized(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("operation");
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param_name, param) in SERVER_KEY_BENCH_PARAMS {
|
||||
let (cks, sks) = gen_keys(
|
||||
param.pbsparameters,
|
||||
param.wopbsparameters,
|
||||
param.len_man,
|
||||
param.len_exp,
|
||||
);
|
||||
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct1 = cks.encrypt(msg);
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct2 = cks.encrypt(msg);
|
||||
|
||||
let bench_id = format!("{}::{}", "div parallelized", param_name);
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
sks.division(&ct1, &ct2);
|
||||
})
|
||||
});
|
||||
}
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn add(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("operation");
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param_name, param) in SERVER_KEY_BENCH_PARAMS {
|
||||
let (cks, sks) = gen_keys(
|
||||
param.pbsparameters,
|
||||
param.wopbsparameters,
|
||||
param.len_man,
|
||||
param.len_exp,
|
||||
);
|
||||
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct1 = cks.encrypt(msg);
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct2 = cks.encrypt(msg);
|
||||
|
||||
let bench_id = format!("{}::{}", "add", param_name);
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
sks.add_total(&ct1, &ct2);
|
||||
})
|
||||
});
|
||||
}
|
||||
bench_group.finish()
|
||||
}
|
||||
|
||||
fn add_parallelized(c: &mut Criterion) {
|
||||
let mut bench_group = c.benchmark_group("operation");
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for (param_name, param) in SERVER_KEY_BENCH_PARAMS {
|
||||
let (cks, sks) = gen_keys(
|
||||
param.pbsparameters,
|
||||
param.wopbsparameters,
|
||||
param.len_man,
|
||||
param.len_exp,
|
||||
);
|
||||
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct1 = cks.encrypt(msg);
|
||||
let msg = rng.gen::<f32>() as f64;
|
||||
let ct2 = cks.encrypt(msg);
|
||||
|
||||
let bench_id = format!("{}::{}", "add parallelized", param_name);
|
||||
bench_group.bench_function(&bench_id, |b| {
|
||||
b.iter(|| {
|
||||
sks.add_total_parallelized(&ct1, &ct2);
|
||||
})
|
||||
});
|
||||
}
|
||||
bench_group.finish()
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
# Concrete-Integer User Guide
|
||||
|
||||
[Introduction](introduction.md)
|
||||
|
||||
# Getting Started
|
||||
|
||||
[Installation](getting_started/installation.md)
|
||||
|
||||
[Writing Your First Circuit](getting_started/first_circuit.md)
|
||||
|
||||
[Types Of Operations](getting_started/operation_types.md)
|
||||
|
||||
[List of Operations](getting_started/operation_list.md)
|
||||
|
||||
[Cryptographic Parameters](getting_started/parameters.md)
|
||||
|
||||
|
||||
# How to
|
||||
|
||||
[Serialization / Deserialization](tutorials/serialization.md)
|
||||
@@ -1,105 +0,0 @@
|
||||
# Writing Your First Circuit
|
||||
|
||||
|
||||
## Key Types
|
||||
|
||||
`concrete-integer` provides 2 basic key types:
|
||||
- `ClientKey`
|
||||
- `ServerKey`
|
||||
|
||||
The `ClientKey` is the key that encrypts and decrypts messages,
|
||||
thus this key is meant to be kept private and should never be shared.
|
||||
This key is created from parameter values that will dictate both the security and efficiency
|
||||
of computations. The parameters also set the maximum number of bits of message encrypted
|
||||
in a ciphertext.
|
||||
|
||||
The `ServerKey` is the key that is used to actually do the FHE computations. It contains (among other things)
|
||||
a bootstrapping key and a keyswitching key.
|
||||
This key is created from a `ClientKey` that needs to be shared to the server, therefore it is not
|
||||
meant to be kept private.
|
||||
A user with a `ServerKey` can compute on the encrypted data sent by the owner of the associated
|
||||
`ClientKey`.
|
||||
|
||||
To reflect that, computation/operation methods are tied to the `ServerKey` type.
|
||||
|
||||
|
||||
## 1. Key Generation
|
||||
|
||||
To generate the keys, a user needs two parameters:
|
||||
- A set of `shortint` cryptographic parameters.
|
||||
- The number of ciphertexts used to encrypt an integer (we call them "shortint blocks").
|
||||
|
||||
|
||||
For this example we are going to build a pair of keys that can encrypt an **8-bit** integer
|
||||
by using **4** shortint blocks that store **2** bits of message each.
|
||||
|
||||
|
||||
```rust
|
||||
use concrete_integer::gen_keys;
|
||||
use concrete_shortint::parameters::PARAM_MESSAGE_2_CARRY_2;
|
||||
|
||||
fn main() {
|
||||
// We generate a set of client/server keys, using the default parameters:
|
||||
let num_block = 4;
|
||||
let (client_key, server_key) = gen_keys(&PARAM_MESSAGE_2_CARRY_2, num_block);
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
## 2. Encrypting values
|
||||
|
||||
|
||||
Once we have our keys we can encrypt values:
|
||||
|
||||
```rust
|
||||
use concrete_integer::gen_keys;
|
||||
use concrete_shortint::parameters::PARAM_MESSAGE_2_CARRY_2;
|
||||
|
||||
fn main() {
|
||||
// We generate a set of client/server keys, using the default parameters:
|
||||
let num_block = 4;
|
||||
let (client_key, server_key) = gen_keys(&PARAM_MESSAGE_2_CARRY_2, num_block);
|
||||
|
||||
let msg1 = 128;
|
||||
let msg2 = 13;
|
||||
|
||||
// We use the client key to encrypt two messages:
|
||||
let ct_1 = client_key.encrypt(msg1);
|
||||
let ct_2 = client_key.encrypt(msg2);
|
||||
}
|
||||
```
|
||||
|
||||
## 3. Computing and decrypting
|
||||
|
||||
With our `server_key`, and encrypted values, we can now do an addition
|
||||
and then decrypt the result.
|
||||
|
||||
```rust
|
||||
use concrete_integer::gen_keys;
|
||||
use concrete_shortint::parameters::PARAM_MESSAGE_2_CARRY_2;
|
||||
|
||||
fn main() {
|
||||
// We generate a set of client/server keys, using the default parameters:
|
||||
let num_block = 4;
|
||||
let (client_key, server_key) = gen_keys(&PARAM_MESSAGE_2_CARRY_2, num_block);
|
||||
|
||||
let msg1 = 128;
|
||||
let msg2 = 13;
|
||||
|
||||
// message_modulus^vec_length
|
||||
let modulus = client_key.parameters().message_modulus.0.pow(num_block as u32) as u64;
|
||||
|
||||
// We use the client key to encrypt two messages:
|
||||
let ct_1 = client_key.encrypt(msg1);
|
||||
let ct_2 = client_key.encrypt(msg2);
|
||||
|
||||
// We use the server public key to execute an integer circuit:
|
||||
let ct_3 = server_key.unchecked_add(&ct_1, &ct_2);
|
||||
|
||||
// We use the client key to decrypt the output of the circuit:
|
||||
let output = client_key.decrypt(&ct_3);
|
||||
|
||||
assert_eq!(output, (msg1 + msg2) % modulus);
|
||||
}
|
||||
```
|
||||
@@ -1,49 +0,0 @@
|
||||
# Installation
|
||||
|
||||
## Cargo.toml
|
||||
|
||||
To use `concrete-integer`, you will need to add it to the list of dependencies
|
||||
of your project, by updating your `Cargo.toml` file.
|
||||
|
||||
```toml
|
||||
concrete-integer = "0.1.0"
|
||||
```
|
||||
|
||||
### Supported platforms
|
||||
|
||||
|
||||
As `concrete-integer` relies on `concrete-shortint`, which in turn relies on `concrete-core`,
|
||||
the support ted platforms supported are:
|
||||
- `x86_64 Linux`
|
||||
- `x86_64 macOS`.
|
||||
|
||||
Windows users can use `concrete-integer` through the `WSL`.
|
||||
|
||||
macOS users which have the newer M1 (`arm64`) devices can use `concrete-integer` by cross-compiling to
|
||||
`x86_64` and run their program with Rosetta.
|
||||
|
||||
First install the needed Rust toolchain:
|
||||
|
||||
```console
|
||||
# Install the macOS x86_64 toolchain (you only need to do this once)
|
||||
rustup toolchain install --force-non-host stable-x86_64-apple-darwin
|
||||
```
|
||||
|
||||
Then you can either:
|
||||
|
||||
- Manually specify the toolchain to use in each of the cargo commands:
|
||||
|
||||
For example:
|
||||
|
||||
```console
|
||||
cargo +stable-x86_64-apple-darwin build
|
||||
cargo +stable-x86_64-apple-darwin test
|
||||
```
|
||||
|
||||
- Or override the toolchain to use for the current project:
|
||||
|
||||
```console
|
||||
rustup override set stable-x86_64-apple-darwin
|
||||
# cargo will use the `stable-x86_64-apple-darwin` toolchain.
|
||||
cargo build
|
||||
```
|
||||
@@ -1,15 +0,0 @@
|
||||
# List of available operations
|
||||
|
||||
`concrete-integer` comes with a set of already implemented functions:
|
||||
|
||||
|
||||
- addition between two ciphertexts
|
||||
- addition between a ciphertext and an unencrypted scalar
|
||||
- multiplication of a ciphertext by an unencrypted scalar
|
||||
- bitwise shift `<<`, `>>`
|
||||
- bitwise and, or and xor
|
||||
- multiplication between two ciphertexts
|
||||
- subtraction of a ciphertext by another ciphertext
|
||||
- subtraction of a ciphertext by an unencrypted scalar
|
||||
- negation of a ciphertext
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
# How Integers are represented
|
||||
|
||||
|
||||
In `concrete-integer`, the encrypted data is split amongst many ciphertexts
|
||||
encrypted using the `concrete-shortint` library.
|
||||
|
||||
This crate implements two ways to represent an integer:
|
||||
- the Radix representation
|
||||
- the CRT (Chinese Reminder Theorem) representation
|
||||
|
||||
## Radix based Integers
|
||||
The first possibility to represent a large integer is to use a radix-based decomposition on the
|
||||
plaintexts. Let $$B \in \mathbb{N}$$ be a basis such that the size of $$B$$ is smaller (or equal)
|
||||
to four bits.
|
||||
Then, an integer $$m \in \mathbb{N}$$ can be written as $$m = m_0 + m_1*B + m_2*B^2 + ... $$, where
|
||||
each $$m_i$$ is strictly smaller than $$B$$. Each $$m_i$$ is then independently encrypted. In
|
||||
the end, an Integer ciphertext is defined as a set of Shortint ciphertexts.
|
||||
|
||||
In practice, the definition of an Integer requires the basis and the number of blocks. This is
|
||||
done at the key creation step.
|
||||
```rust
|
||||
use concrete_integer::gen_keys;
|
||||
use concrete_shortint::parameters::PARAM_MESSAGE_2_CARRY_2;
|
||||
|
||||
fn main() {
|
||||
// We generate a set of client/server keys, using the default parameters:
|
||||
let num_block = 4;
|
||||
let (client_key, server_key) = gen_keys(&PARAM_MESSAGE_2_CARRY_2, num_block);
|
||||
}
|
||||
```
|
||||
|
||||
In this example, the keys are dedicated to Integers decomposed as four blocks using the basis
|
||||
$$B=2^2$$. Otherwise said, they allow to work on Integers modulus $$(2^2)^4 = 2^8$$.
|
||||
|
||||
|
||||
In this representation, the correctness of operations requires to propagate the carries
|
||||
between the ciphertext. This operation is costly since it relies on the computation of many
|
||||
programmable bootstrapping over Shortints.
|
||||
|
||||
|
||||
## CRT based Integers
|
||||
The second approach to represent large integers is based on the Chinese Remainder Theorem.
|
||||
In this cases, the basis $$B$$ is composed of several integers $$b_i$$, such that there are
|
||||
pairwise coprime, and each b_i has a size smaller than four bits. Then, the Integer will be
|
||||
defined modulus $$\prod b_i$$. For an integer $$m$$, its CRT decomposition is simply defined as
|
||||
$$m % b_0, m % b_1, ...$$. Each part is then encrypted as a Shortint ciphertext. In
|
||||
the end, an Integer ciphertext is defined as a set of Shortint ciphertexts.
|
||||
|
||||
An example of such a basis
|
||||
could be $$B = [2, 3, 5]$$. This means that the Integer is defined modulus $$2*3*5 = 30$$.
|
||||
|
||||
This representation has many advantages: no carry propagation is required, so that only cleaning
|
||||
the carry buffer of each ciphertexts is enough. This implies that operations can easily be
|
||||
parallelized. Moreover, it allows to efficiently compute PBS in the case where the function is
|
||||
CRT compliant.
|
||||
|
||||
A variant of the CRT is proposed, where each block might be associated to a different key couple.
|
||||
In the end, a keychain is required to the computations, but performance might be improved.
|
||||
|
||||
|
||||
|
||||
# Types of operations
|
||||
|
||||
|
||||
Much like `concrete-shortint`, the operations available via a `ServerKey` may come in different variants:
|
||||
|
||||
- operations that take their inputs as encrypted values.
|
||||
- scalar operations take at least one non-encrypted value as input.
|
||||
|
||||
For example, the addition has both variants:
|
||||
|
||||
- `ServerKey::unchecked_add` which takes two encrypted values and adds them.
|
||||
- `ServerKey::unchecked_scalar_add` which takes an encrypted value and a clear value (the
|
||||
so-called scalar) and adds them.
|
||||
|
||||
Each operation may come in different 'flavors':
|
||||
|
||||
- `unchecked`: Always does the operation, without checking if the result may exceed the capacity of
|
||||
the plaintext space.
|
||||
- `checked`: Checks are done before computing the operation, returning an error if operation
|
||||
cannot be done safely.
|
||||
- `smart`: Always does the operation, if the operation cannot be computed safely, the smart operation
|
||||
will propagate the carry buffer to make the operation possible.
|
||||
|
||||
Not all operations have these 3 flavors, as some of them are implemented in a way that the operation
|
||||
is always possible without ever exceeding the plaintext space capacity.
|
||||
@@ -1,6 +0,0 @@
|
||||
# Use of parameters
|
||||
|
||||
|
||||
`concrete-integer` does not come with its own set of parameters, instead it uses
|
||||
parameters from the `concrete-shortint` crate. Currently, only the parameters
|
||||
`PARAM_MESSAGE_{X}_CARRY_{X}` with `X` in [1,4] can be used in `concrete-integer`.
|
||||
@@ -1,47 +0,0 @@
|
||||
# The tree programmable bootstrapping
|
||||
|
||||
In `concrete-integer`, the user can evaluate any function on an encrypted ciphertext. To do so the user must first
|
||||
create a `treepbs key`, choose a function to evaluate and give them as parameters to the `tree programmable bootstrapping`.
|
||||
|
||||
Two versions of the tree pbs are implemented: the `standard` version that computes a result according to every encrypted
|
||||
bit (message and carry), and the `base` version that only takes into account the message bits of each block.
|
||||
|
||||
{% hint style="warning" %}
|
||||
|
||||
The `tree pbs` is quite slow, therefore its use is currently restricted to two and three blocks integer ciphertexts.
|
||||
|
||||
{% endhint %}
|
||||
|
||||
```rust
|
||||
use concrete_integer::gen_keys;
|
||||
use concrete_shortint::parameters::PARAM_MESSAGE_2_CARRY_2;
|
||||
use concrete_integer::treepbs::TreepbsKey;
|
||||
|
||||
fn main() {
|
||||
let num_block = 2;
|
||||
// Generate the client key and the server key:
|
||||
let (cks, sks) = gen_keys(&PARAM_MESSAGE_2_CARRY_2, num_block);
|
||||
|
||||
let msg: u64 = 27;
|
||||
let ct = cks.encrypt(msg);
|
||||
|
||||
// message_modulus^vec_length
|
||||
let modulus = cks.parameters().message_modulus.0.pow(2 as u32) as u64;
|
||||
|
||||
let treepbs_key = TreepbsKey::new(&cks);
|
||||
|
||||
let f = |x: u64| x * x;
|
||||
|
||||
// evaluate f
|
||||
let vec_res = treepbs_key.two_block_pbs(&sks, &ct, f);
|
||||
|
||||
// decryption
|
||||
let res = cks.decrypt(&vec_res);
|
||||
|
||||
let clear = f(msg) % modulus;
|
||||
assert_eq!(res, clear);
|
||||
}
|
||||
```
|
||||
|
||||
# The WOP programmable bootstrapping
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user