Mirror of https://github.com/zama-ai/tfhe-rs.git (synced 2026-01-08 22:28:01 -05:00)
chore(ci): add regression benchmark workflow
Regression benchmarks are meant to be run on pull requests. They can be launched in two flavors:

* issue comment: using a command like "/bench --backend cpu"
* adding a label: `bench-perfs-cpu` or `bench-perfs-gpu`

Benchmark definitions are written in TOML and located at ci/regression.toml. While not exhaustive, it can easily be modified by reading the embedded documentation.

"/bench" commands are parsed by a Python script located at ci/perf_regression.py. This script produces output files that contain cargo commands and a shell script generating custom environment variables. The Python script and the generated files are meant to be used only by the workflow benchmark_perf_regression.yml.
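For illustration, a pull-request comment invocation could look like one of the following (the option names come from ci/perf_regression.py; the targets and values shown are only examples):

    /bench --backend cpu
    /bench --backend gpu --profile multi-h100 --target integer-bench=mul,div --env bench_type=latency

Adding the `bench-perfs-cpu` (or `bench-perfs-gpu`) label is equivalent to commenting "/bench --backend cpu" (or "--backend gpu") with that backend's default profile.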
.github/workflows/benchmark_perf_regression.yml (new file, 300 lines, vendored)
@@ -0,0 +1,300 @@
# Run performance regression benchmarks and return parsed results to associated pull-request.
name: benchmark_perf_regression

on:
  issue_comment:
    types: created
  pull_request:
    types: [ labeled ]

env:
  CARGO_TERM_COLOR: always
  RESULTS_FILENAME: parsed_benchmark_results_${{ github.sha }}.json
  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
  RUST_BACKTRACE: "full"
  RUST_MIN_STACK: "8388608"
  SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
  SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
  SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
  SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

permissions: { }

jobs:
  verify-actor:
    name: benchmark_perf_regression/verify-actor
    uses: ./.github/workflows/verify_commit_actor.yml
    secrets:
      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
      READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

  prepare-benchmarks:
    name: benchmark_perf_regression/prepare-benchmarks
    needs: verify-actor
    runs-on: ubuntu-latest
    if: (github.event_name == 'pull_request' &&
      (contains(github.event.label.name, 'bench-perfs-cpu') ||
      contains(github.event.label.name, 'bench-perfs-gpu'))) ||
      (github.event.issue.pull_request &&
      github.event_name == 'issue_comment' &&
      startsWith(github.event.comment.body, '/bench'))
    outputs:
      commands: ${{ steps.set_commands.outputs.commands }}
      slab-backend: ${{ steps.set_slab_details.outputs.backend }}
      slab-profile: ${{ steps.set_slab_details.outputs.profile }}
      hardware-name: ${{ steps.get_hardware_name.outputs.name }}
      custom-env: ${{ steps.get_custom_env.outputs.custom_env }}
    steps:
      - name: Checkout tfhe-rs repo
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          persist-credentials: 'false'
          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

      - name: Generate cpu benchmarks command from label
        if: (github.event_name == 'pull_request' && contains(github.event.label.name, 'bench-perfs-cpu'))
        run: |
          echo "DEFAULT_BENCH_OPTIONS=--backend cpu" >> "${GITHUB_ENV}"

      - name: Generate gpu benchmarks command from label
        if: (github.event_name == 'pull_request' && contains(github.event.label.name, 'bench-perfs-gpu'))
        run: |
          echo "DEFAULT_BENCH_OPTIONS=--backend gpu" >> "${GITHUB_ENV}"

      # TODO add support for HPU backend

      - name: Generate cargo commands and env from label
        if: github.event_name == 'pull_request'
        run: |
          python3 ci/perf_regression.py parse_profile --issue-comment "/bench ${DEFAULT_BENCH_OPTIONS}"
          echo "COMMANDS=$(cat ci/perf_regression_generated_commands.json)" >> "${GITHUB_ENV}"

      - name: Dump issue comment into file # To avoid possible code-injection
        if: github.event_name == 'issue_comment'
        run: |
          echo "${COMMENT_BODY}" >> dumped_comment.txt
        env:
          COMMENT_BODY: ${{ github.event.comment.body }}

      - name: Generate cargo commands and env
        if: github.event_name == 'issue_comment'
        run: |
          python3 ci/perf_regression.py parse_profile --issue-comment "$(cat dumped_comment.txt)"
          echo "COMMANDS=$(cat ci/perf_regression_generated_commands.json)" >> "${GITHUB_ENV}"

      - name: Set commands output
        id: set_commands
        run: | # zizmor: ignore[template-injection] this env variable is safe
          echo "commands=${{ toJSON(env.COMMANDS) }}" >> "${GITHUB_OUTPUT}"

      - name: Set Slab details outputs
        id: set_slab_details
        run: |
          echo "backend=$(cat ci/perf_regression_slab_backend_config.txt)" >> "${GITHUB_OUTPUT}"
          echo "profile=$(cat ci/perf_regression_slab_profile_config.txt)" >> "${GITHUB_OUTPUT}"

      - name: Get hardware name
        id: get_hardware_name
        run: | # zizmor: ignore[template-injection] these interpolations are safe
          HARDWARE_NAME=$(python3 ci/hardware_finder.py "${{ steps.set_slab_details.outputs.backend }}" "${{ steps.set_slab_details.outputs.profile }}");
          echo "name=${HARDWARE_NAME}" >> "${GITHUB_OUTPUT}"

      - name: Get custom env vars
        id: get_custom_env
        run: |
          echo "custom_env=$(cat ci/perf_regression_custom_env.sh)" >> "${GITHUB_OUTPUT}"

  setup-instance:
    name: benchmark_perf_regression/setup-instance
    needs: prepare-benchmarks
    runs-on: ubuntu-latest
    outputs:
      runner-name: ${{ steps.start-instance.outputs.label }}
    steps:
      - name: Start instance
        id: start-instance
        uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
        with:
          mode: start
          github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
          slab-url: ${{ secrets.SLAB_BASE_URL }}
          job-secret: ${{ secrets.JOB_SECRET }}
          backend: ${{ needs.prepare-benchmarks.outputs.slab-backend }}
          profile: ${{ needs.prepare-benchmarks.outputs.slab-profile }}

  install-cuda-dependencies-if-required:
    name: benchmark_perf_regression/install-cuda-dependencies-if-required
    needs: [ prepare-benchmarks, setup-instance ]
    runs-on: ${{ needs.setup-instance.outputs.runner-name }}
    strategy:
      matrix:
        # explicit include-based build matrix, of known valid options
        include:
          - cuda: "12.8"
            gcc: 11
    steps:
      - name: Checkout tfhe-rs repo
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
          persist-credentials: 'false'
          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

      - name: Setup Hyperstack dependencies
        if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
        uses: ./.github/actions/gpu_setup
        with:
          cuda-version: ${{ matrix.cuda }}
          gcc-version: ${{ matrix.gcc }}

  regression-benchmarks:
    name: benchmark_perf_regression/regression-benchmarks
    needs: [ prepare-benchmarks, setup-instance, install-cuda-dependencies-if-required ]
    runs-on: ${{ needs.setup-instance.outputs.runner-name }}
    concurrency:
      group: ${{ github.workflow_ref }}_${{ needs.prepare-benchmarks.outputs.slab-backend }}_${{ needs.prepare-benchmarks.outputs.slab-profile }}
      cancel-in-progress: true
    timeout-minutes: 720 # 12 hours
    strategy:
      max-parallel: 1
      matrix:
        command: ${{ fromJson(needs.prepare-benchmarks.outputs.commands) }}
    steps:
      - name: Checkout tfhe-rs repo with tags
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          fetch-depth: 0
          persist-credentials: 'false'
          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

      - name: Get benchmark details
        run: |
          COMMIT_DATE=$(git --no-pager show -s --format=%cd --date=iso8601-strict "${SHA}");
          {
            echo "BENCH_DATE=$(date --iso-8601=seconds)";
            echo "COMMIT_DATE=${COMMIT_DATE}";
            echo "COMMIT_HASH=$(git describe --tags --dirty)";
          } >> "${GITHUB_ENV}"
        env:
          SHA: ${{ github.sha }}

      - name: Export custom env variables
        run: | # zizmor: ignore[template-injection] this env variable is safe
          {
            ${{ needs.prepare-benchmarks.outputs.custom-env }}
          } >> "$GITHUB_ENV"

      # Re-export environment variables, as the dependencies setup performed this task in the previous job.
      # Local env variables are cleaned at the end of each job.
      - name: Export CUDA variables
        if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
        shell: bash
        run: |
          echo "CUDA_PATH=$CUDA_PATH" >> "${GITHUB_ENV}"
          echo "PATH=$PATH:$CUDA_PATH/bin" >> "${GITHUB_PATH}"
          echo "LD_LIBRARY_PATH=$CUDA_PATH/lib64:$LD_LIBRARY_PATH" >> "${GITHUB_ENV}"
          echo "CUDA_MODULE_LOADER=EAGER" >> "${GITHUB_ENV}"
        env:
          CUDA_PATH: /usr/local/cuda-12.8

      - name: Export gcc and g++ variables
        if: needs.prepare-benchmarks.outputs.slab-backend == 'hyperstack'
        shell: bash
        run: |
          {
            echo "CC=/usr/bin/gcc-${GCC_VERSION}";
            echo "CXX=/usr/bin/g++-${GCC_VERSION}";
            echo "CUDAHOSTCXX=/usr/bin/g++-${GCC_VERSION}";
          } >> "${GITHUB_ENV}"
        env:
          GCC_VERSION: 11

      - name: Install rust
        uses: dtolnay/rust-toolchain@b3b07ba8b418998c39fb20f53e8b695cdcc8de1b # zizmor: ignore[stale-action-refs] this action doesn't create releases
        with:
          toolchain: nightly

      - name: Checkout Slab repo
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
          repository: zama-ai/slab
          path: slab
          persist-credentials: 'false'
          token: ${{ secrets.REPO_CHECKOUT_TOKEN }}

      - name: Run regression benchmarks
        run: |
          make BENCH_CUSTOM_COMMAND="${BENCH_COMMAND}" bench_custom
        env:
          BENCH_COMMAND: ${{ matrix.command }}

      - name: Parse results
        run: |
          python3 ./ci/benchmark_parser.py target/criterion "${RESULTS_FILENAME}" \
            --database tfhe_rs \
            --hardware "${HARDWARE_NAME}" \
            --project-version "${COMMIT_HASH}" \
            --branch "${REF_NAME}" \
            --commit-date "${COMMIT_DATE}" \
            --bench-date "${BENCH_DATE}" \
            --walk-subdirs \
            --name-suffix regression \
            --bench-type "${BENCH_TYPE}"
        env:
          REF_NAME: ${{ github.ref_name }}
          BENCH_TYPE: ${{ env.__TFHE_RS_BENCH_TYPE }}
          HARDWARE_NAME: ${{ needs.prepare-benchmarks.outputs.hardware-name }}

      - name: Upload parsed results artifact
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02
        with:
          name: ${{ github.sha }}_regression
          path: ${{ env.RESULTS_FILENAME }}

      - name: Send data to Slab
        shell: bash
        run: |
          python3 slab/scripts/data_sender.py "${RESULTS_FILENAME}" "${JOB_SECRET}" \
            --slab-url "${SLAB_URL}"
        env:
          JOB_SECRET: ${{ secrets.JOB_SECRET }}
          SLAB_URL: ${{ secrets.SLAB_URL }}

  slack-notify:
    name: benchmark_perf_regression/slack-notify
    needs: [ prepare-benchmarks, setup-instance, regression-benchmarks ]
    runs-on: ubuntu-latest
    if: ${{ failure() }}
    continue-on-error: true
    steps:
      - name: Send message
        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
        env:
          SLACK_COLOR: ${{ needs.regression-benchmarks.result }}
          SLACK_MESSAGE: "Performance regression benchmarks finished with status: ${{ needs.regression-benchmarks.result }}. (${{ env.ACTION_RUN_URL }})"

  # TODO Add job for regression calculation

  teardown-instance:
    name: benchmark_perf_regression/teardown-instance
    if: ${{ always() && needs.setup-instance.result == 'success' }}
    needs: [ setup-instance, regression-benchmarks ]
    runs-on: ubuntu-latest
    steps:
      - name: Stop instance
        id: stop-instance
        uses: zama-ai/slab-github-runner@79939325c3c429837c10d6041e4fd8589d328bac
        with:
          mode: stop
          github-token: ${{ secrets.SLAB_ACTION_TOKEN }}
          slab-url: ${{ secrets.SLAB_BASE_URL }}
          job-secret: ${{ secrets.JOB_SECRET }}
          label: ${{ needs.setup-instance.outputs.runner-name }}

      - name: Slack Notification
        if: ${{ failure() }}
        continue-on-error: true
        uses: rtCamp/action-slack-notify@e31e87e03dd19038e411e38ae27cbad084a90661
        env:
          SLACK_COLOR: ${{ job.status }}
          SLACK_MESSAGE: "Instance teardown (regression-benchmarks) finished with status: ${{ job.status }}. (${{ env.ACTION_RUN_URL }})"
.github/workflows/make_release.yml (5 lines changed, vendored)
@@ -38,9 +38,10 @@ permissions: {}
 jobs:
   verify-tag:
     name: make_release/verify-tag
-    uses: ./.github/workflows/verify_tagged_commit.yml
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_commit_actor.yml
     secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
       READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

   package:
.github/workflows/make_release_cuda.yml (5 lines changed, vendored)
@@ -20,9 +20,10 @@ permissions: {}
 jobs:
   verify-tag:
     name: make_release_cuda/verify-tag
-    uses: ./.github/workflows/verify_tagged_commit.yml
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_commit_actor.yml
     secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
       READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

   setup-instance:
.github/workflows/make_release_hpu.yml (5 lines changed, vendored)
@@ -20,9 +20,10 @@ permissions: {}
 jobs:
   verify-tag:
     name: make_release_hpu/verify-tag
-    uses: ./.github/workflows/verify_tagged_commit.yml
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_commit_actor.yml
     secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
       READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

   package:
.github/workflows/make_release_tfhe_csprng.yml (5 lines changed, vendored)
@@ -20,9 +20,10 @@ permissions: {}
 jobs:
   verify-tag:
     name: make_release_tfhe_csprng/verify-tag
-    uses: ./.github/workflows/verify_tagged_commit.yml
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_commit_actor.yml
     secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
       READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

   package:
.github/workflows/make_release_tfhe_fft.yml (5 lines changed, vendored)
@@ -21,9 +21,10 @@ permissions: {}
 jobs:
   verify-tag:
     name: make_release_tfhe_fft/verify-tag
-    uses: ./.github/workflows/verify_tagged_commit.yml
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_commit_actor.yml
     secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
       READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

   package:
.github/workflows/make_release_tfhe_ntt.yml (5 lines changed, vendored)
@@ -21,9 +21,10 @@ permissions: {}
 jobs:
   verify-tag:
     name: make_release_tfhe_ntt/verify-tag
-    uses: ./.github/workflows/verify_tagged_commit.yml
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_commit_actor.yml
     secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
       READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

   package:
.github/workflows/make_release_tfhe_versionable.yml (5 lines changed, vendored)
@@ -15,9 +15,10 @@ permissions: {}
 jobs:
   verify-tag:
     name: make_release_tfhe_versionable/verify-tag
-    uses: ./.github/workflows/verify_tagged_commit.yml
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_commit_actor.yml
     secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
       READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

   package-derive:
.github/workflows/make_release_zk_pok.yml (6 lines changed, vendored)
@@ -20,14 +20,16 @@ permissions: { }
 jobs:
   verify-tag:
     name: make_release_zk_pok/verify-tag
-    uses: ./.github/workflows/verify_tagged_commit.yml
+    if: startsWith(github.ref, 'refs/tags/')
+    uses: ./.github/workflows/verify_commit_actor.yml
     secrets:
-      RELEASE_TEAM: ${{ secrets.RELEASE_TEAM }}
+      ALLOWED_TEAM: ${{ secrets.RELEASE_TEAM }}
       READ_ORG_TOKEN: ${{ secrets.READ_ORG_TOKEN }}

   package:
     name: make_release_zk_pok/package
     runs-on: ubuntu-latest
+    needs: verify-tag
     outputs:
       hash: ${{ steps.hash.outputs.hash }}
     steps:
.github/workflows/verify_commit_actor.yml (renamed from verify_tagged_commit.yml, vendored)
@@ -1,10 +1,10 @@
-# Verify a tagged commit
-name: verify_tagged_commit
+# Verify a commit actor
+name: verify_commit_actor

 on:
   workflow_call:
     secrets:
-      RELEASE_TEAM:
+      ALLOWED_TEAM:
         required: true
       READ_ORG_TOKEN:
         required: true
@@ -12,10 +12,9 @@ on:
 permissions: {}

 jobs:
-  checks:
-    name: verify_tagged_commit/checks
+  check-actor:
+    name: verify_commit_actor/check-actor
     runs-on: ubuntu-latest
-    if: startsWith(github.ref, 'refs/tags/')
     steps:
       # Check triggering actor membership
       - name: Actor verification
@@ -24,7 +23,7 @@ jobs:
         with:
           username: ${{ github.triggering_actor }}
           org: ${{ github.repository_owner }}
-          team: ${{ secrets.RELEASE_TEAM }}
+          team: ${{ secrets.ALLOWED_TEAM }}
           github_token: ${{ secrets.READ_ORG_TOKEN }}

       - name: Actor authorized
Makefile (6 lines changed)
@@ -21,6 +21,7 @@ BENCH_OP_FLAVOR?=DEFAULT
 BENCH_TYPE?=latency
 BENCH_PARAM_TYPE?=classical
 BENCH_PARAMS_SET?=default
+BENCH_CUSTOM_COMMAND:=
 NODE_VERSION=22.6
 BACKWARD_COMPAT_DATA_DIR=utils/tfhe-backward-compat-data
 WASM_PACK_VERSION="0.13.1"
@@ -1564,6 +1565,11 @@ bench_hlapi_noise_squash_gpu: install_rs_check_toolchain
 	--bench hlapi-noise-squash \
 	--features=integer,gpu,internal-keycache,pbs-stats,nightly-avx512 -p tfhe-benchmark --

+
+.PHONY: bench_custom # Run benchmarks with a user-defined command
+bench_custom: install_rs_check_toolchain
+	RUSTFLAGS="$(RUSTFLAGS)" cargo $(CARGO_RS_CHECK_TOOLCHAIN) bench -p tfhe-benchmark $(BENCH_CUSTOM_COMMAND)
+
 #
 # Utility tools
 #
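For reference, the benchmark workflow invokes this target with one generated command per matrix entry; run locally, an invocation would look roughly like the following (the feature list is illustrative — the real one is the target's required-features from tfhe-benchmark/Cargo.toml plus the backend features added by ci/perf_regression.py):

    make BENCH_CUSTOM_COMMAND="--bench integer-bench --features=integer,internal-keycache,nightly-avx512 -- '::mul_parallelized::\|::div_parallelized::'" bench_custom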
ci/hardware_finder.py (new file, 96 lines)
@@ -0,0 +1,96 @@
"""
hardware_finder
---------------

This script parses the ci/slab.toml file to find the hardware name associated with a given pair of backend and profile names.
"""

import argparse
import enum
import pathlib
import sys
import tomllib
from typing import Any

parser = argparse.ArgumentParser()
parser.add_argument(
    "backend",
    choices=["aws", "hyperstack"],
    help="Backend instance provider",
)
parser.add_argument(
    "profile",
    help="Instance profile name",
)

SLAB_FILE = pathlib.Path("ci/slab.toml")


class Backend(enum.StrEnum):
    Aws = "aws"
    Hyperstack = "hyperstack"
    Hpu = "hpu"  # Only v80 is supported for now

    @staticmethod
    def from_str(label):
        match label.lower():
            case "aws":
                return Backend.Aws
            case "hyperstack":
                return Backend.Hyperstack
            case _:
                raise NotImplementedError


def parse_toml_file(path):
    """
    Parse TOML file.

    :param path: path to TOML file
    :return: file content as :class:`dict`
    """
    try:
        return tomllib.loads(pathlib.Path(path).read_text())
    except tomllib.TOMLDecodeError as err:
        raise RuntimeError(f"failed to parse definition file (error: {err})")


def find_hardware_name(config_file: dict[str, Any], backend: Backend, profile: str):
    """
    Find hardware name associated with :class:`Backend` and :class:`str` profile name.

    :param config_file: parsed slab.toml file
    :param backend: backend name
    :param profile: profile name

    :return: hardware name as :class:`str`
    """
    try:
        definition = config_file["backend"][backend.value][profile]
    except KeyError:
        section_name = f"backend.{backend.value}.{profile}"
        raise KeyError(f"no definition found for `[{section_name}]` in {SLAB_FILE}")

    match backend:
        case Backend.Aws:
            return definition["instance_type"]
        case Backend.Hyperstack:
            return definition["flavor_name"]
        case _:
            raise NotImplementedError


if __name__ == "__main__":
    args = parser.parse_args()

    parsed_toml = parse_toml_file(SLAB_FILE)
    backend = Backend.from_str(args.backend)
    try:
        hardware_name = find_hardware_name(parsed_toml, backend, args.profile)
    except Exception as err:
        print(
            f"failed to find hardware name for ({args.backend}, {args.profile}): {err}"
        )
        sys.exit(1)
    else:
        print(hardware_name)
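A quick usage sketch, matching the call made in benchmark_perf_regression.yml (the printed value depends on what ci/slab.toml defines for the backend/profile pair):

    $ python3 ci/hardware_finder.py hyperstack single-h100
    # prints the `flavor_name` defined under [backend.hyperstack.single-h100] in ci/slab.toml
    $ python3 ci/hardware_finder.py aws bench
    # prints the `instance_type` defined under [backend.aws.bench] in ci/slab.toml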
ci/perf_regression.py (new file, 456 lines)
@@ -0,0 +1,456 @@
"""
perf_regression
---------------

This script allows zama-ai developers to run performance regression benchmarks.
It is capable of launching any performance benchmark available in the `tfhe-benchmark` crate.
Used in a GitHub actions workflow, it can parse an issue comment and generate arguments to be fed
to a `cargo bench` command.

To define what to run and where, a TOML file is used to define targets; check `ci/regression.toml` for an
explanation of all possible fields.
One can also provide a fully custom profile via the issue comment string; see :func:`parse_issue_comment` for details.

This script is also capable of checking for performance regression based on previous benchmark results.
It works by providing a result file containing the baseline values and the results of the last run.
"""

import argparse
import enum
import pathlib
import sys
import tomllib

parser = argparse.ArgumentParser()
parser.add_argument(
    "command",
    choices=["parse_profile", "check_regression"],
    help="Command to run",
)
parser.add_argument(
    "--issue-comment",
    dest="issue_comment",
    help="GitHub issue comment defining the regression benchmark profile to use",
)

COMMENT_IDENTIFIER = "/bench"

CWD = pathlib.Path(__file__).parent
REPO_ROOT = CWD.parent
PROFILE_DEFINITION_PATH = CWD.joinpath("regression.toml")
BENCH_TARGETS_PATH = REPO_ROOT.joinpath("tfhe-benchmark/Cargo.toml")
# Files generated after parsing an issue comment
GENERATED_COMMANDS_PATH = CWD.joinpath("perf_regression_generated_commands.json")
CUSTOM_ENV_PATH = CWD.joinpath("perf_regression_custom_env.sh")


class ProfileOption(enum.Enum):
    Backend = 1
    RegressionProfile = 2
    Slab = 3
    BenchmarkTarget = 4
    EnvironmentVariable = 5

    @staticmethod
    def from_str(label):
        match label.lower():
            case "backend":
                return ProfileOption.Backend
            case "profile" | "regression-profile" | "regression_profile":
                return ProfileOption.RegressionProfile
            case "slab":
                return ProfileOption.Slab
            case "target":
                return ProfileOption.BenchmarkTarget
            case "env":
                return ProfileOption.EnvironmentVariable
            case _:
                raise NotImplementedError


class TfheBackend(enum.StrEnum):
    Cpu = "cpu"
    Gpu = "gpu"
    Hpu = "hpu"  # Only v80 is supported for now

    @staticmethod
    def from_str(label):
        match label.lower():
            case "cpu":
                return TfheBackend.Cpu
            case "gpu":
                return TfheBackend.Gpu
            case "hpu":
                return TfheBackend.Hpu
            case _:
                raise NotImplementedError


def parse_toml_file(path):
    """
    Parse TOML file.

    :param path: path to TOML file
    :return: file content as :class:`dict`
    """
    try:
        return tomllib.loads(pathlib.Path(path).read_text())
    except tomllib.TOMLDecodeError as err:
        raise RuntimeError(f"failed to parse definition file (error: {err})")


def _parse_bench_targets():
    parsed = {}

    for item in parse_toml_file(BENCH_TARGETS_PATH)["bench"]:
        bench_name = item["name"]
        key = bench_name.title().replace("-", "").replace("_", "")
        parsed[key] = bench_name

    return enum.Enum("TargetOption", parsed)


# This Enum is built at runtime to ensure we have the most up-to-date benchmark targets.
TargetOption = _parse_bench_targets()


class SlabOption(enum.Enum):
    Backend = 1
    Profile = 2

    @staticmethod
    def from_str(label):
        match label.lower():
            case "backend":
                return SlabOption.Backend
            case "profile":
                return SlabOption.Profile
            case _:
                raise NotImplementedError


class EnvOption(enum.StrEnum):
    FastBench = "__TFHE_RS_FAST_BENCH"
    BenchOpFlavor = "__TFHE_RS_BENCH_OP_FLAVOR"
    BenchType = "__TFHE_RS_BENCH_TYPE"
    BenchParamType = "__TFHE_RS_PARAM_TYPE"
    BenchParamsSet = "__TFHE_RS_PARAMS_SET"

    @staticmethod
    def from_str(label):
        match label.lower():
            case "fast_bench":
                return EnvOption.FastBench
            case "bench_op_flavor":
                return EnvOption.BenchOpFlavor
            case "bench_type":
                return EnvOption.BenchType
            case "bench_param_type":
                return EnvOption.BenchParamType
            case "bench_params_set":
                return EnvOption.BenchParamsSet
            case _:
                raise NotImplementedError


def _parse_option_content(content):
    key, _, value = content.partition("=")
    return key, value


class ProfileDefinition:
    def __init__(self, tfhe_rs_targets: list[dict]):
        """
        Regression profile definition builder capable of generating Cargo commands and custom environment variables for
        benchmarks to run.

        :param tfhe_rs_targets: parsed TOML from tfhe-benchmark crate containing cargo targets definition
        """
        self.backend = None
        self.regression_profile = "default"
        self.targets = {}
        self.slab_backend = None
        self.slab_profile = None

        self.env_vars = {
            EnvOption.FastBench: "false",
            EnvOption.BenchOpFlavor: "default",
            EnvOption.BenchType: "latency",
            EnvOption.BenchParamType: "classical",
            EnvOption.BenchParamsSet: "default",
        }

        # TargetOption.check_targets_consistency(tfhe_rs_targets)

        self.tfhe_rs_targets = self._build_tfhe_rs_targets(tfhe_rs_targets)

    def __str__(self):
        return f"ProfileDefinition(backend={self.backend}, regression_profile={self.regression_profile}, targets={self.targets}, slab_backend={self.slab_backend}, slab_profile={self.slab_profile}, env_vars={self.env_vars})"

    def set_field_from_option(self, option: ProfileOption, value: str):
        """
        Set a profile definition field based on a user input value.

        :param option: profile option field
        :param value: profile option value
        """
        match option:
            case ProfileOption.Backend:
                self.backend = TfheBackend.from_str(value)
            case ProfileOption.RegressionProfile:
                self.regression_profile = value
            case ProfileOption.BenchmarkTarget:
                key, value = _parse_option_content(value)
                for target_option in TargetOption:
                    if target_option.value == key:
                        trgt = target_option
                        operations = value.replace(" ", "").split(",")
                        try:
                            self.targets[trgt].extend(operations)
                        except KeyError:
                            self.targets[trgt] = operations
                        break
                else:
                    raise KeyError(f"unknown benchmark target `{key}`")
            case ProfileOption.Slab:
                key, value = _parse_option_content(value)
                if key == "backend":
                    self.slab_backend = value
                elif key == "profile":
                    self.slab_profile = value
            case ProfileOption.EnvironmentVariable:
                key, value = _parse_option_content(value)
                self.env_vars[EnvOption.from_str(key)] = value
            case _:
                raise NotImplementedError

    def set_defaults_from_definitions_file(self, definitions: dict):
        """
        Set profile definition fields based on definitions file.

        :param definitions: definitions parsed from file.
        """
        base_error_msg = "failed to set regression profile values"

        if not self.backend:
            raise ValueError(f"{base_error_msg}: no backend specified")

        try:
            backend_defs = definitions[self.backend]
        except KeyError:
            raise KeyError(
                f"{base_error_msg}: no definitions found for `{self.backend}` backend"
            )

        try:
            profile_def = backend_defs[self.regression_profile]
        except KeyError:
            raise KeyError(
                f"{base_error_msg}: no definition found for `{self.backend}.{self.regression_profile}` profile"
            )

        for key, value in profile_def.items():
            try:
                option = ProfileOption.from_str(key)
            except NotImplementedError:
                print(
                    f"ignoring unknown option name `{key}` in definition `{self.backend}.{self.regression_profile}`"
                )
                continue

            match option:
                case ProfileOption.BenchmarkTarget:
                    for target_key, ops in value.items():
                        for target_option in TargetOption:
                            if target_option.value == target_key:
                                trgt = target_option
                                if trgt not in self.targets:
                                    self.targets[trgt] = ops
                                break
                        else:
                            raise KeyError(f"unknown benchmark target `{target_key}`")
                case ProfileOption.Slab:
                    for slab_key, val in value.items():
                        if slab_key == "backend":
                            self.slab_backend = val
                        elif slab_key == "profile":
                            self.slab_profile = val
                case ProfileOption.EnvironmentVariable:
                    for env_key, val in value.items():
                        self.env_vars[EnvOption.from_str(env_key)] = val
                case _:
                    continue

    def _build_tfhe_rs_targets(self, tfhe_rs_targets: list[dict]):
        targets = {}
        for key in TargetOption:
            required_features = []
            for item in tfhe_rs_targets:
                if item["name"] == key.value:
                    required_features = item["required-features"]
                    break

            targets[key] = {"target": key.value, "required_features": required_features}

        return targets

    def _build_features(self, target):
        features = self.tfhe_rs_targets[target]["required_features"]

        match self.backend:
            case TfheBackend.Cpu:
                features.append("nightly-avx512")
            case TfheBackend.Gpu:
                features.extend(["gpu", "nightly-avx512"])
            case TfheBackend.Hpu:
                features.extend(["hpu", "hpu-v80"])

        return features

    def generate_cargo_commands(self):
        """
        Generate Cargo commands to run benchmarks.

        :return: :class:`list` of :class:`str` of Cargo commands
        """
        commands = []
        for key, ops in self.targets.items():
            features = self._build_features(key)
            ops_filter = [f"::{op}::" for op in ops]
            commands.append(
                f"--bench {self.tfhe_rs_targets[key]["target"]} --features={','.join(features)} -- '{"\\|".join(ops_filter)}'"
            )

        return commands


def parse_issue_comment(comment):
    """
    Parse GitHub issue comment string. To be parsable, the string must be formatted as:
    `/bench <benchmark_args>`.

    Note that multiline commands and groups of commands are not supported.

    :param comment: :class:`str`

    :return: :class:`list` of (:class:`ProfileOption`, :class:`str`)
    """
    identifier, profile_arguments = comment.split(" ", maxsplit=1)

    if identifier != COMMENT_IDENTIFIER:
        raise ValueError(
            f"unknown issue comment identifier (expected: `{COMMENT_IDENTIFIER}`, got `{identifier}`)"
        )

    arguments_pairs = []
    for raw_pair in profile_arguments.split("--")[1:]:
        name, value = raw_pair.split(" ", maxsplit=1)
        try:
            profile_option = ProfileOption.from_str(name)
        except NotImplementedError:
            raise ValueError(f"unknown profile option `{name}`")
        else:
            arguments_pairs.append((profile_option, value.strip()))

    return arguments_pairs


def build_definition(profile_args_pairs, profile_definitions):
    """
    Build regression profile definition from user inputs and definitions file.

    :param profile_args_pairs: pairs of profile options and their value parsed from a string
    :param profile_definitions: parsed profile definitions file

    :return: :class:`ProfileDefinition`
    """
    bench_targets = parse_toml_file(BENCH_TARGETS_PATH)["bench"]
    definition = ProfileDefinition(bench_targets)

    for profile_option, value in profile_args_pairs:
        definition.set_field_from_option(profile_option, value)

    definition.set_defaults_from_definitions_file(profile_definitions)

    return definition


def write_commands_to_file(commands):
    """
    Write commands to a file.
    This file is meant to be read as a string and passed to the `toJSON()` GitHub actions function.

    :param commands: :class:`list` of commands to write
    """
    with GENERATED_COMMANDS_PATH.open("w") as f:
        f.write("[")
        for command in commands[:-1]:
            f.write(f'"{command}", ')
        f.write(f'"{commands[-1]}"]')


def write_env_to_file(env_vars: dict[EnvOption, str]):
    """
    Write environment variables to a file.
    This file is meant to be executed in a GitHub actions step. The variables contained in it are sent to
    a GITHUB_ENV file, so the following workflow steps are able to use these variables.

    :param env_vars: dict of environment variables to write
    """
    with CUSTOM_ENV_PATH.open("w") as f:
        if not env_vars:
            f.write("echo 'no env vars to set';\n")
            return

        for key, v in env_vars.items():
            f.write(f'echo "{key.value}={v}";')


def write_backend_config_to_file(backend, profile):
    """
    Write backend and profile configuration to different files to ease parsing.

    :param backend:
    :param profile:
    :return:
    """
    for filepart, content in [("backend", backend), ("profile", profile)]:
        pathlib.Path(f"ci/perf_regression_slab_{filepart}_config.txt").write_text(
            f"{content}\n"
        )


# TODO Perform regression computing by providing a file containing results from database that would be parsed

if __name__ == "__main__":
    args = parser.parse_args()

    if args.command == "parse_profile":
        comment = args.issue_comment
        if not comment:
            print(
                f"cannot run `{args.command}` command: please specify the issue comment with `--issue-comment` argument"
            )
            sys.exit(1)

        try:
            profile_args_pairs = parse_issue_comment(comment)
            profile_definitions = parse_toml_file(PROFILE_DEFINITION_PATH)

            definition = build_definition(profile_args_pairs, profile_definitions)
            commands = definition.generate_cargo_commands()
        except Exception as err:
            print(f"failed to generate commands (error: {err})")
            sys.exit(2)

        try:
            write_commands_to_file(commands)
            write_env_to_file(definition.env_vars)
            write_backend_config_to_file(
                definition.slab_backend, definition.slab_profile
            )
        except Exception as err:
            print(f"failed to write commands/env to file (error: {err})")
            sys.exit(3)
    elif args.command == "check_regression":
        pass
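As a sketch of the parse_profile flow (the file names are the ones hard-coded above; the generated command is only an illustration of the format produced by generate_cargo_commands, with the crate's required-features elided):

    $ python3 ci/perf_regression.py parse_profile --issue-comment "/bench --backend cpu"
    # writes, next to the script:
    #   perf_regression_generated_commands.json  -> ["--bench integer-bench --features=<required-features>,nightly-avx512 -- '::add_parallelized::\|::mul_parallelized::\|::div_parallelized::'"]
    #   perf_regression_custom_env.sh            -> one echo line per __TFHE_RS_* variable, e.g. echo "__TFHE_RS_FAST_BENCH=TRUE";
    #   perf_regression_slab_backend_config.txt  -> aws
    #   perf_regression_slab_profile_config.txt  -> bench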
ci/regression.toml (new file, 61 lines)
@@ -0,0 +1,61 @@
# Benchmark regression profile structure is defined as:
#
# [<tfhe-rs_backend>.<regression_profile_name>]
# target.<target_name> = ["<operation_name>", ]
# env.<variable_name> = "<variable_value>"
# slab.backend = "<provider_name>"
# slab.profile = "<slab_profile_name>"
#
# Each tfhe-rs_backend **must** have one regression_profile_name named `default`.
#
# Details:
# --------
#
# > tfhe-rs_backend: name of the backend to use to run the benchmarks
#     Possible values are:
#       * cpu
#       * gpu
#       * hpu
#
# > regression_profile_name: any string (containing only dash or underscore as special chars)
#     Each tfhe-rs backend should have a default profile.
#
# > target.<target_name>: list of operations to benchmark on the given tfhe-rs benchmark target
#     A profile can have multiple targets.
#     Possible values for target_name are listed in the tfhe-benchmark/Cargo.toml file under the `[[bench]]` sections, in the
#     `name` field.
#
# > env.<variable_name>: environment variable that will be used to alter the benchmark execution environment
#     Possible values for variable_name are (case-insensitive):
#       * FAST_BENCH
#       * BENCH_OP_FLAVOR
#       * BENCH_TYPE
#       * BENCH_PARAM_TYPE
#       * BENCH_PARAMS_SET
#
# > slab.backend: name of the on-demand instance provider
#     Possible values are:
#       * aws
#       * hyperstack
#
# > slab.profile: on-demand instance profile to use for the benchmark
#     See the ci/slab.toml file for the list of all supported profiles.

[gpu.default]
target.integer-bench = ["mul", "div"]
target.hlapi-dex = ["dex_swap"]
slab.backend = "hyperstack"
slab.profile = "single-h100"
env.fast_bench = "TRUE"

[gpu.multi-h100]
target.integer-bench = ["mul", "div"]
target.hlapi-dex = ["dex_swap"]
slab.backend = "hyperstack"
slab.profile = "multi-h100"

[cpu.default]
target.integer-bench = ["add_parallelized", "mul_parallelized", "div_parallelized"]
slab.backend = "aws"
slab.profile = "bench"
env.fast_bench = "TRUE"
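As an example of extending this file, a hypothetical CPU profile could be added and then selected with "/bench --backend cpu --profile throughput" (the section name and values below are purely illustrative):

    [cpu.throughput]
    target.integer-bench = ["add_parallelized", "mul_parallelized"]
    slab.backend = "aws"
    slab.profile = "bench"
    env.bench_type = "throughput"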