refactor: use the new python library in benchmarks

Author: Umut
Date: 2021-12-24 15:43:34 +03:00
Parent: 73d8aebd06
Commit: 73596b3b7d
97 changed files with 173 additions and 4579 deletions

View File

@@ -323,10 +323,6 @@ jobs:
if: ${{ github.event_name == 'schedule' && steps.conformance.outcome == 'success' && !cancelled() }}
run: |
make pytest_nb
- name: PyTest Progress Tracker
if: ${{ steps.conformance.outcome == 'success' && !cancelled() }}
run: |
make pytest_progress_tracker
- name: Test coverage
id: coverage
if: ${{ always() && steps.pytest.outcome != 'skipped' && !cancelled() }}

View File

@@ -49,16 +49,16 @@ jobs:
key: ${{ secrets.BENCHMARKS_EC2_SSH_KEY }}
command_timeout: 240m
script: |
cd ~/concretefhe-internal
cd ~/concrete-framework-internal
git pull
make docker_publish_measurements
docker system prune -f
- name: Copy AMD EC2 Instance Logs
run: scp -o StrictHostKeyChecking=no -i ~/ssh-key ${{ secrets.BENCHMARKS_EC2_USERNAME }}@${{ steps.amd-public-ip.outputs.value }}:~/concretefhe-internal/logs/latest.log ~/latest.log
run: scp -o StrictHostKeyChecking=no -i ~/ssh-key ${{ secrets.BENCHMARKS_EC2_USERNAME }}@${{ steps.amd-public-ip.outputs.value }}:~/concrete-framework-internal/logs/latest.log ~/latest.log
- name: Copy AMD EC2 Instance Logs
run: scp -o StrictHostKeyChecking=no -i ~/ssh-key ${{ secrets.BENCHMARKS_EC2_USERNAME }}@${{ steps.amd-public-ip.outputs.value }}:~/concretefhe-internal/.benchmarks/findings.json ~/findings.json
run: scp -o StrictHostKeyChecking=no -i ~/ssh-key ${{ secrets.BENCHMARKS_EC2_USERNAME }}@${{ steps.amd-public-ip.outputs.value }}:~/concrete-framework-internal/.benchmarks/findings.json ~/findings.json
- name: Stop AMD EC2 Instance
if: ${{ always() }}
@@ -104,16 +104,16 @@ jobs:
key: ${{ secrets.BENCHMARKS_EC2_SSH_KEY }}
command_timeout: 240m
script: |
cd ~/concretefhe-internal
cd ~/concrete-framework-internal
git pull
make docker_publish_measurements
docker system prune -f
- name: Copy Intel EC2 Instance Logs
run: scp -o StrictHostKeyChecking=no -i ~/ssh-key ${{ secrets.BENCHMARKS_EC2_USERNAME }}@${{ steps.intel-public-ip.outputs.value }}:~/concretefhe-internal/logs/latest.log ~/latest.log
run: scp -o StrictHostKeyChecking=no -i ~/ssh-key ${{ secrets.BENCHMARKS_EC2_USERNAME }}@${{ steps.intel-public-ip.outputs.value }}:~/concrete-framework-internal/logs/latest.log ~/latest.log
- name: Copy Intel EC2 Instance Findings
run: scp -o StrictHostKeyChecking=no -i ~/ssh-key ${{ secrets.BENCHMARKS_EC2_USERNAME }}@${{ steps.intel-public-ip.outputs.value }}:~/concretefhe-internal/.benchmarks/findings.json ~/findings.json
run: scp -o StrictHostKeyChecking=no -i ~/ssh-key ${{ secrets.BENCHMARKS_EC2_USERNAME }}@${{ steps.intel-public-ip.outputs.value }}:~/concrete-framework-internal/.benchmarks/findings.json ~/findings.json
- name: Stop Intel EC2 Instance
if: ${{ always() }}

.gitignore (vendored): 2 changed lines
View File

@@ -134,7 +134,7 @@ dmypy.json
.pyre/
# Benchmark Artifacts
.benchmarks
progress.json
# concrete compilation artifacts
.artifacts

View File

@@ -42,11 +42,6 @@ check_python_format:
check_finalize_nb:
poetry run python ./script/nbmake_utils/notebook_finalize.py docs --check
.PHONY: check_benchmarks # Run benchmark checks (to validate they work fine)
check_benchmarks:
poetry run python script/progress_tracker_utils/extract_machine_info.py
poetry run python script/progress_tracker_utils/measure.py benchmarks --check
.PHONY: pylint # Run pylint
pylint:
$(MAKE) --keep-going pylint_src pylint_tests pylint_benchmarks pylint_script
@@ -69,9 +64,7 @@ pylint_benchmarks:
.PHONY: pylint_script # Run pylint on scripts
pylint_script:
@# disable linting python files under `progress_tracker_utils/test_scripts` folder
@# because they are intentionally ill-formed so that progress tracker can be tested
find ./script/ -type f -name "*.py" -not -path "./script/progress_tracker_utils/test_scripts/*" | xargs poetry run pylint --rcfile=pylintrc
find ./script/ -type f -name "*.py" | xargs poetry run pylint --rcfile=pylintrc
.PHONY: flake8 # Run flake8
flake8:
@@ -90,7 +83,7 @@ pcc:
--no-print-directory pcc_internal
PCC_DEPS := check_python_format check_finalize_nb python_linting mypy_ci pydocstyle shell_lint
PCC_DEPS += check_version_coherence check_supported_functions check_benchmarks check_licenses
PCC_DEPS += check_version_coherence check_supported_functions check_licenses
# Not commented on purpose for make help, since internal
.PHONY: pcc_internal
@@ -107,11 +100,6 @@ pytest:
--randomly-dont-reorganize \
--cov-report=term-missing:skip-covered tests/
.PHONY: pytest_progress_tracker # Run pytest for progress tracker
pytest_progress_tracker:
poetry run python script/progress_tracker_utils/extract_machine_info.py
poetry run pytest -svv script/progress_tracker_utils/test_progress_tracker.py
# Not a huge fan of ignoring missing imports, but some packages do not have typing stubs
.PHONY: mypy # Run mypy
mypy:
@@ -186,11 +174,7 @@ docker_clean_volumes:
docker_cv: docker_clean_volumes
.PHONY: docker_publish_measurements # Run benchmarks in docker and publish results
docker_publish_measurements: docker_build
mkdir -p .benchmarks
@# Poetry is not installed on the benchmark servers
@# Thus, we ran `extract_machine_info.py` script using native python
python script/progress_tracker_utils/extract_machine_info.py
docker_publish_measurements: docker_rebuild
docker run --rm --volume /"$$(pwd)":/src \
--volume $(DEV_CONTAINER_VENV_VOLUME):/home/dev_user/dev_venv \
--volume $(DEV_CONTAINER_CACHE_VOLUME):/home/dev_user/.cache \
@@ -230,8 +214,10 @@ pytest_nb:
.PHONY: benchmark # Launch benchmark
benchmark:
poetry run python script/progress_tracker_utils/extract_machine_info.py
poetry run python script/progress_tracker_utils/measure.py benchmarks
rm -rf progress.json && \
for script in benchmarks/*.py; do \
poetry run python $$script; \
done
.PHONY: jupyter # Launch jupyter notebook
jupyter:

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: 124 - x
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return 124 - x
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(2 ** 3),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** 3 - 1)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: 124 - x (Tensor)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return 124 - x
x = hnp.EncryptedTensor(hnp.UnsignedInteger(6), shape=(3,))
inputset = [np.random.randint(0, 2 ** 6, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 6, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,49 +0,0 @@
# bench: Unit Target: np.concatenate((c, x))
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return np.concatenate((c, x))
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
c = np.arange(20).reshape((4, 5))
inputset = [np.random.randint(0, 2 ** 3, size=(4, 5)) for _ in range(128)]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 5), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,50 +0,0 @@
# bench: Unit Target: np.matmul(c, x)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
c = np.arange(1, 7).reshape((3, 2))
def function_to_compile(x):
return np.matmul(c, x)
x = hnp.EncryptedTensor(hnp.UnsignedInteger(2), shape=(2, 3))
inputset = [np.random.randint(0, 2 ** 2, size=(2, 3)) for _ in range(128)]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 2, size=(2, 3), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,8 +1,16 @@
import concrete.numpy as hnp
from concrete.numpy import compile as compile_
# This is only for benchmarks to speed up compilation times
# pylint: disable=protected-access
compile_._COMPILE_FHE_INSECURE_KEY_CACHE_DIR = "/tmp/keycache"
# pylint: enable=protected-access
BENCHMARK_CONFIGURATION = hnp.CompilationConfiguration(
check_every_input_in_inputset=True,
dump_artifacts_on_unexpected_failures=True,
enable_topological_optimizations=True,
check_every_input_in_inputset=True,
enable_unsafe_features=True,
treat_warnings_as_errors=True,
use_insecure_key_cache=True,
)
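
The model benchmarks further down in this commit show the replacement for the old `# bench:` comment annotations: a `@progress.track([...])` decorator on `main`, `with progress.measure(...)` as a context manager around each timed region, and `progress.measure(id=..., label=..., value=..., alert=...)` for recorded metrics, all compiled against the `BENCHMARK_CONFIGURATION` defined above. As a rough sketch only, here is how one of the deleted unit benchmarks (the `124 - x` script) might look if rewritten in that style; it is assembled from the API usage visible elsewhere in this diff, is not a file shipped by this commit, and it assumes that `hnp.compile_numpy_function` and the `("<", 100)` alert form remain valid in the new library.

import random

import numpy as np
import progress
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


# Hypothetical sketch: the progress calls mirror the GLM/regression benchmarks in
# this commit; the hnp.compile_numpy_function call is carried over unchanged from
# the deleted script and is assumed to still exist in the new library.
# The @progress.track decorator appears to run the function itself, which is why
# the __main__ guards are dropped throughout this commit.
@progress.track([{"id": "124-minus-x", "name": "124 - x", "parameters": {}}])
def main():
    def function_to_compile(x):
        return 124 - x

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))

    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        range(2 ** 3),
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )

    inputs = [[random.randint(0, 2 ** 3 - 1)] for _ in range(4)]
    labels = [function_to_compile(*sample) for sample in inputs]

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # Time each homomorphic evaluation, as the model benchmarks below do.
        with progress.measure(id="evaluation-time-ms", label="Evaluation Time (ms)"):
            result_i = engine.run(*input_i)
        if np.array_equal(result_i, label_i):
            correct += 1

    # Record accuracy and alert when it drops below 100%, replacing the old
    # "# bench: Alert: Accuracy (%) != 100" annotation; the comparison operator
    # accepted by `alert` is an assumption based on alert=(">", 7.5) below.
    progress.measure(
        id="accuracy-percent",
        label="Accuracy (%)",
        value=(correct / len(inputs)) * 100,
        alert=("<", 100),
    )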

View File

@@ -1,9 +1,8 @@
# bench: Full Target: Generalized Linear Model
from copy import deepcopy
from typing import Any, Dict
import numpy as np
import progress
from common import BENCHMARK_CONFIGURATION
from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
@@ -158,7 +157,8 @@ def score_concrete_glm_estimator(poisson_glm_pca, q_glm, df_test):
return score_estimator(y_pred, df_test["Frequency"], df_test["Exposure"])
def run_glm_benchmark():
@progress.track([{"id": "glm", "name": "Generalized Linear Model", "parameters": {}}])
def main():
"""
This is our main benchmark function. It gets a dataset, trains a GLM model,
then trains a GLM model on PCA reduced features, a QuantizedGLM model
@@ -239,21 +239,18 @@ def run_glm_benchmark():
test_data = poisson_glm_pca["pca"].transform(poisson_glm_pca["preprocessor"].transform(df_test))
q_test_data = q_glm.quantize_input(test_data)
# bench: Measure: Compilation Time (ms)
engine = q_glm.compile(
q_test_data,
BENCHMARK_CONFIGURATION,
show_mlir=False,
)
# bench: Measure: End
y_pred_fhe = np.zeros((test_data.shape[0],), np.float32)
for i, test_sample in enumerate(tqdm(q_test_data.qvalues)):
# bench: Measure: Evaluation Time (ms)
q_sample = np.expand_dims(test_sample, 1).transpose([1, 0]).astype(np.uint8)
q_pred_fhe = engine.run(q_sample)
y_pred_fhe[i] = q_glm.dequantize_output(q_pred_fhe)
# bench: Measure: End
with progress.measure(id="evaluation-time-ms", label="Evaluation Time (ms)"):
q_sample = np.expand_dims(test_sample, 1).transpose([1, 0]).astype(np.uint8)
q_pred_fhe = engine.run(q_sample)
y_pred_fhe[i] = q_glm.dequantize_output(q_pred_fhe)
dev_pca_quantized_fhe = score_estimator(y_pred_fhe, df_test["Frequency"], df_test["Exposure"])
@@ -263,14 +260,23 @@ def run_glm_benchmark():
difference = 0
print(f"Quantized deviance: {dev_pca_quantized}")
progress.measure(
id="non-homomorphic-loss",
label="Non Homomorphic Loss",
value=dev_pca_quantized,
)
print(f"FHE Quantized deviance: {dev_pca_quantized_fhe}")
progress.measure(
id="homomorphic-loss",
label="Homomorphic Loss",
value=dev_pca_quantized_fhe,
)
print(f"Percentage difference: {difference}%")
# bench: Measure: Non Homomorphic Loss = dev_pca_quantized
# bench: Measure: Homomorphic Loss = dev_pca_quantized_fhe
# bench: Measure: Relative Loss Difference (%) = difference
# bench: Alert: Relative Loss Difference (%) > 7.5
if __name__ == "__main__":
run_glm_benchmark()
progress.measure(
id="relative-loss-difference-percent",
label="Relative Loss Difference (%)",
value=difference,
alert=(">", 7.5),
)

View File

@@ -1,13 +1,9 @@
# bench: Full Target: Linear Regression
# Disable line length warnings as we have a looooong metric...
# flake8: noqa: E501
# pylint: disable=C0301
from copy import deepcopy
from typing import Any, Dict
import numpy as np
import progress
from common import BENCHMARK_CONFIGURATION
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
@@ -98,6 +94,7 @@ class QuantizedLinearRegression(QuantizedModule):
return q_input_arr
@progress.track([{"id": "linear-regression", "name": "Linear Regression", "parameters": {}}])
def main():
"""
Our linear regression benchmark. Use some synthetic data to train a regression model,
@@ -130,9 +127,10 @@ def main():
q_linreg = QuantizedLinearRegression.from_sklearn(linreg, calib_data)
# Compile the quantized model to FHE
# bench: Measure: Compilation Time (ms)
engine = q_linreg.compile(q_linreg.quantize_input(calib_data))
# bench: Measure: End
engine = q_linreg.compile(
q_linreg.quantize_input(calib_data),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure test error using the clear-sklearn, the clear-quantized and the FHE quantized model
# as R^2 coefficient for the test data
@@ -149,9 +147,8 @@ def main():
for i, x_i in enumerate(tqdm(x_test_q.qvalues)):
q_sample = np.expand_dims(x_i, 1).transpose([1, 0]).astype(np.uint8)
# bench: Measure: Evaluation Time (ms)
q_pred_fhe = engine.run(q_sample)
# bench: Measure: End
with progress.measure(id="evaluation-time-ms", label="Evaluation Time (ms)"):
q_pred_fhe = engine.run(q_sample)
y_test_pred_fhe[i] = q_linreg.dequantize_output(q_pred_fhe)
# Measure the error for the three versions of the classifier
@@ -165,16 +162,30 @@ def main():
)
print(f"Sklearn R^2: {sklearn_r2:.4f}")
progress.measure(
id="sklearn-r2",
label="Sklearn R^2",
value=sklearn_r2,
)
print(f"Non Homomorphic R^2: {non_homomorphic_test_error:.4f}")
progress.measure(
id="non-homomorphic-r2",
label="Non Homomorphic R^2",
value=non_homomorphic_test_error,
)
print(f"Homomorphic R^2: {homomorphic_test_error:.4f}")
print(f"Relative Difference Percentage: {difference:.2f}%")
progress.measure(
id="homomorphic-r2",
label="Homomorphic R^2",
value=homomorphic_test_error,
)
# bench: Measure: Sklearn R^2 = sklearn_r2
# bench: Measure: Non Homomorphic R^2 = non_homomorphic_test_error
# bench: Measure: Homomorphic R^2 = homomorphic_test_error
# bench: Measure: Relative Loss Difference (%) = difference
# bench: Alert: Relative Loss Difference (%) > 7.5
if __name__ == "__main__":
main()
print(f"Relative Loss Difference (%): {difference:.2f}%")
progress.measure(
id="relative-loss-difference-percent",
label="Relative Loss Difference (%)",
value=difference,
alert=(">", 7.5),
)

View File

@@ -1,13 +1,9 @@
# bench: Full Target: Logistic Regression
# Disable line length warnings as we have a looooong metric...
# flake8: noqa: E501
# pylint: disable=C0301
from copy import deepcopy
from typing import Any, Dict
import numpy as np
import progress
from common import BENCHMARK_CONFIGURATION
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
@@ -120,6 +116,7 @@ class QuantizedLogisticRegression(QuantizedModule):
return q_input_arr
@progress.track([{"id": "logistic-regression", "name": "Logistic Regression", "parameters": {}}])
def main():
"""Main benchmark function: generate some synthetic data for two class classification,
split train-test, train a sklearn classifier, calibrate and quantize it on the whole dataset
@@ -152,11 +149,10 @@ def main():
q_logreg = QuantizedLogisticRegression.from_sklearn(logreg, calib_data)
# Now, we can compile our model to FHE, taking as possible input set all of our dataset
X_q = q_logreg.quantize_input(X)
# bench: Measure: Compilation Time (ms)
engine = q_logreg.compile(X_q)
# bench: Measure: End
engine = q_logreg.compile(
q_logreg.quantize_input(X),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Start classifier evaluation
@@ -182,9 +178,9 @@ def main():
fhe_in_sample = np.expand_dims(x_i, 1).transpose([1, 0]).astype(np.uint8)
# bench: Measure: Evaluation Time (ms)
q_pred_fhe = engine.run(fhe_in_sample)
# bench: Measure: End
with progress.measure(id="evaluation-time-ms", label="Evaluation Time (ms)"):
q_pred_fhe = engine.run(fhe_in_sample)
y_score_fhe = q_logreg.dequantize_output(q_pred_fhe)
homomorphic_prediction = (y_score_fhe > 0.5).astype(np.int32)
@@ -204,18 +200,31 @@ def main():
homomorphic_accuracy = (homomorphic_correct / len(y_test)) * 100
difference = abs(homomorphic_accuracy - non_homomorphic_accuracy)
print()
print(f"Sklearn accuracy: {sklearn_acc:.4f}")
print(f"Non Homomorphic Accuracy: {non_homomorphic_accuracy:.4f}")
print(f"Homomorphic Accuracy: {homomorphic_accuracy:.4f}")
print(f"Difference Percentage: {difference:.2f}%")
print(f"Sklearn Accuracy (%): {sklearn_acc:.4f}")
progress.measure(
id="sklearn-accuracy-percent",
label="Sklearn Accuracy (%)",
value=sklearn_acc,
)
# bench: Measure: Sklearn accuracy = sklearn_acc
# bench: Measure: Non Homomorphic Accuracy = non_homomorphic_accuracy
# bench: Measure: Homomorphic Accuracy = homomorphic_accuracy
# bench: Measure: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference
# bench: Alert: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2
print(f"Non Homomorphic Accuracy (%): {non_homomorphic_accuracy:.4f}")
progress.measure(
id="non-homomorphic-accuracy-percent",
label="Non Homomorphic Accuracy (%)",
value=non_homomorphic_accuracy,
)
print(f"Homomorphic Accuracy (%): {homomorphic_accuracy:.4f}")
progress.measure(
id="homomorphic-accuracy-percent",
label="Homomorphic Accuracy (%)",
value=homomorphic_accuracy,
)
if __name__ == "__main__":
main()
print(f"Relative Accuracy Difference (%): {difference:.2f}%")
progress.measure(
id="relative-accuracy-difference-percent",
label="Relative Accuracy Difference (%)",
value=difference,
alert=(">", 2.0),
)

View File

@@ -1,60 +0,0 @@
# bench: Unit Target: Multi Table Lookup
import math
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
input_bits = 3
square_table = hnp.LookupTable([i ** 2 for i in range(2 ** input_bits)])
sqrt_table = hnp.LookupTable([int(math.sqrt(i)) for i in range(2 ** input_bits)])
multi_table = hnp.MultiLookupTable(
[
[square_table, sqrt_table],
[square_table, sqrt_table],
[square_table, sqrt_table],
]
)
def function_to_compile(x):
return multi_table[x]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(input_bits), shape=(3, 2))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[np.random.randint(0, 2 ** input_bits, size=(3, 2)) for _ in range(32)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(50):
sample_x = np.random.randint(0, 2 ** input_bits, size=(3, 2), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) < 99
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: Single Table Lookup
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
input_bits = 3
entries = [i ** 2 for i in range(2 ** input_bits)]
table = hnp.LookupTable(entries)
def function_to_compile(x):
return table[x]
x = hnp.EncryptedScalar(hnp.UnsignedInteger(input_bits))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(2 ** input_bits),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(10):
sample_x = random.randint(0, (2 ** input_bits) - 1)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,49 +0,0 @@
# bench: Unit Target: np.concatenate((x, c))
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return np.concatenate((x, c))
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
c = np.arange(20).reshape((4, 5))
inputset = [np.random.randint(0, 2 ** 3, size=(4, 5)) for _ in range(128)]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 5), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: np.concatenate((x, y))
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return np.concatenate((x, y))
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
inputset = [
(np.random.randint(0, 2 ** 3, size=(4, 5)), np.random.randint(0, 2 ** 3, size=(4, 5)))
for _ in range(128)
]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 5), dtype=np.uint8)
sample_y = np.random.randint(0, 2 ** 3, size=(4, 5), dtype=np.uint8)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[0]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[0]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[0, 0]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[0, 0]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3, 2))
inputset = [np.random.randint(0, 2 ** 3, size=(3, 2)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3, 2), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[1]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[1]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[1:]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[1:]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[:]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[:]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[:2]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[:2]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[:, 1]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[:, 1]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3, 2))
inputset = [np.random.randint(0, 2 ** 3, size=(3, 2)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3, 2), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,55 +0,0 @@
# bench: Unit Target: x[:, y] (Clear)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x[:, y]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))
y = hnp.ClearScalar(hnp.UnsignedInteger(2))
inputset = [
(np.random.randint(0, 2 ** 3, size=(2, 4)), random.randint(0, (2 ** 2) - 1))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(2, 4))
sample_y = random.randint(0, (2 ** 2) - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,55 +0,0 @@
# bench: Unit Target: x[:, y] (Encrypted)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x[:, y]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
inputset = [
(np.random.randint(0, 2 ** 3, size=(2, 4)), random.randint(0, (2 ** 2) - 1))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(2, 4))
sample_y = random.randint(0, (2 ** 2) - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
# bench: Unit Target: x[::-1, y, z] (Clear)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y, z):
return x[::-1, y, z]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(5, 4, 2))
y = hnp.ClearScalar(hnp.UnsignedInteger(2))
z = hnp.ClearScalar(hnp.UnsignedInteger(1))
inputset = [
(
np.random.randint(0, 2 ** 3, size=(5, 4, 2)),
random.randint(0, (2 ** 2) - 1),
random.randint(0, (2 ** 1) - 1),
)
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y, "z": z},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(5, 4, 2), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
sample_z = random.randint(0, (2 ** 1) - 1)
inputs.append([sample_x, sample_y, sample_z])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
# bench: Unit Target: x[::-1, y, z] (Encrypted)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y, z):
return x[::-1, y, z]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(5, 4, 2))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
z = hnp.EncryptedScalar(hnp.UnsignedInteger(1))
inputset = [
(
np.random.randint(0, 2 ** 3, size=(5, 4, 2)),
random.randint(0, (2 ** 2) - 1),
random.randint(0, (2 ** 1) - 1),
)
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y, "z": z},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(5, 4, 2))
sample_y = random.randint(0, (2 ** 2) - 1)
sample_z = random.randint(0, (2 ** 1) - 1)
inputs.append([sample_x, sample_y, sample_z])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[-1]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[-1]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,55 +0,0 @@
# bench: Unit Target: x[y, 1:] (Clear)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x[y, 1:]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
y = hnp.ClearScalar(hnp.UnsignedInteger(2))
inputset = [
(np.random.randint(0, 2 ** 3, size=(4, 5)), random.randint(0, (2 ** 2) - 1))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 5))
sample_y = random.randint(0, (2 ** 2) - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,55 +0,0 @@
# bench: Unit Target: x[y, 1:] (Encrypted)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x[y, 1:]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
inputset = [
(np.random.randint(0, 2 ** 3, size=(4, 5)), random.randint(0, (2 ** 2) - 1))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 5), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
# bench: Unit Target: x[y, :, z] (Clear)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y, z):
return x[y, :, z]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5, 2))
y = hnp.ClearScalar(hnp.UnsignedInteger(2))
z = hnp.ClearScalar(hnp.UnsignedInteger(1))
inputset = [
(
np.random.randint(0, 2 ** 3, size=(4, 5, 2)),
random.randint(0, (2 ** 2) - 1),
random.randint(0, (2 ** 1) - 1),
)
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y, "z": z},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 5, 2))
sample_y = random.randint(0, (2 ** 2) - 1)
sample_z = random.randint(0, (2 ** 1) - 1)
inputs.append([sample_x, sample_y, sample_z])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
# bench: Unit Target: x[y, :, z] (Encrypted)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y, z):
return x[y, :, z]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5, 2))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
z = hnp.EncryptedScalar(hnp.UnsignedInteger(1))
inputset = [
(
np.random.randint(0, 2 ** 3, size=(4, 5, 2)),
random.randint(0, (2 ** 2) - 1),
random.randint(0, (2 ** 1) - 1),
)
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y, "z": z},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 5, 2), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
sample_z = random.randint(0, (2 ** 1) - 1)
inputs.append([sample_x, sample_y, sample_z])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,55 +0,0 @@
# bench: Unit Target: x[y, :] (Clear)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x[y, :]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2))
y = hnp.ClearScalar(hnp.UnsignedInteger(2))
inputset = [
(np.random.randint(0, 2 ** 3, size=(4, 2)), random.randint(0, (2 ** 2) - 1))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 2), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,55 +0,0 @@
# bench: Unit Target: x[y, :] (Encrypted)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x[y, :]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
inputset = [
(np.random.randint(0, 2 ** 3, size=(4, 2)), random.randint(0, (2 ** 2) - 1))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 2), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
# bench: Unit Target: x[y, z, 0] (Clear)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y, z):
return x[y, z, 0]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2, 5))
y = hnp.ClearScalar(hnp.UnsignedInteger(2))
z = hnp.ClearScalar(hnp.UnsignedInteger(1))
inputset = [
(
np.random.randint(0, 2 ** 3, size=(4, 2, 5)),
random.randint(0, (2 ** 2) - 1),
random.randint(0, (2 ** 1) - 1),
)
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y, "z": z},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 2, 5))
sample_y = random.randint(0, (2 ** 2) - 1)
sample_z = random.randint(0, (2 ** 1) - 1)
inputs.append([sample_x, sample_y, sample_z])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
# bench: Unit Target: x[y, z, 0] (Encrypted)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y, z):
return x[y, z, 0]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2, 5))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
z = hnp.EncryptedScalar(hnp.UnsignedInteger(1))
inputset = [
(
np.random.randint(0, 2 ** 3, size=(4, 2, 5)),
random.randint(0, (2 ** 2) - 1),
random.randint(0, (2 ** 1) - 1),
)
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y, "z": z},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 2, 5), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
sample_z = random.randint(0, (2 ** 1) - 1)
inputs.append([sample_x, sample_y, sample_z])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
# bench: Unit Target: x[y, z] (Clear)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y, z):
return x[y, z]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2))
y = hnp.ClearScalar(hnp.UnsignedInteger(2))
z = hnp.ClearScalar(hnp.UnsignedInteger(1))
inputset = [
(
np.random.randint(0, 2 ** 3, size=(4, 2), dtype=np.uint8),
random.randint(0, (2 ** 2) - 1),
random.randint(0, (2 ** 1) - 1),
)
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y, "z": z},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 2), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
sample_z = random.randint(0, (2 ** 1) - 1)
inputs.append([sample_x, sample_y, sample_z])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,61 +0,0 @@
# bench: Unit Target: x[y, z] (Encrypted)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y, z):
return x[y, z]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
z = hnp.EncryptedScalar(hnp.UnsignedInteger(1))
inputset = [
(
np.random.randint(0, 2 ** 3, size=(4, 2)),
random.randint(0, (2 ** 2) - 1),
random.randint(0, (2 ** 1) - 1),
)
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y, "z": z},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4, 2))
sample_y = random.randint(0, (2 ** 2) - 1)
sample_z = random.randint(0, (2 ** 1) - 1)
inputs.append([sample_x, sample_y, sample_z])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,55 +0,0 @@
# bench: Unit Target: x[y] (Clear)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x[y]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4,))
y = hnp.ClearScalar(hnp.UnsignedInteger(2))
inputset = [
(np.random.randint(0, 2 ** 3, size=(4,)), random.randint(0, (2 ** 2) - 1))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4,), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,55 +0,0 @@
# bench: Unit Target: x[y] (Encrypted)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x[y]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4,))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
inputset = [
(np.random.randint(0, 2 ** 3, size=(4,)), random.randint(0, (2 ** 2) - 1))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(4,), dtype=np.uint8)
sample_y = random.randint(0, (2 ** 2) - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,50 +0,0 @@
# bench: Unit Target: np.matmul(x, c)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
c = np.arange(6).reshape((3, 2))
def function_to_compile(x):
return np.matmul(x, c)
x = hnp.EncryptedTensor(hnp.UnsignedInteger(2), shape=(2, 3))
inputset = [np.random.randint(0, 2 ** 2, size=(2, 3)) for _ in range(128)]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 2, size=(2, 3), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: np.matmul(x, y)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return np.matmul(x, y)
x = hnp.EncryptedTensor(hnp.UnsignedInteger(2), shape=(2, 3))
y = hnp.EncryptedTensor(hnp.UnsignedInteger(2), shape=(3, 2))
inputset = [
(np.random.randint(0, 2 ** 2, size=(2, 3)), np.random.randint(0, 2 ** 2, size=(3, 2)))
for _ in range(128)
]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 2, size=(2, 3), dtype=np.uint8)
sample_y = np.random.randint(0, 2 ** 2, size=(3, 2), dtype=np.uint8)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x - [1, 2, 3]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x - np.array([1, 2, 3])
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 2, size=(3,)) + np.array([1, 2, 3]) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(3, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x - [1, 2, 3] (Broadcasted)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x - np.array([1, 2, 3])
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 3))
inputset = [np.random.randint(0, 2 ** 2, size=(2, 3)) + np.array([1, 2, 3]) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(3, 2 ** 3, size=(2, 3), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x - 24
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x - 24
x = hnp.EncryptedScalar(hnp.UnsignedInteger(6))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(24, 2 ** 6),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(40, 40 + 2 ** 3 - 1)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x - 24 (Tensor)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x - 24
x = hnp.EncryptedTensor(hnp.UnsignedInteger(6), shape=(3,))
inputset = [np.random.randint(0, 2 ** 5, size=(3,)) + 24 for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(24, 2 ** 6, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: x - y
import itertools
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x - y
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
inputset = itertools.product(range(4, 8), range(0, 4))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(2 ** 2, 2 ** 3 - 1)
sample_y = random.randint(0, 2 ** 2 - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: x - y (Broadcasted Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x - y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedTensor(hnp.UnsignedInteger(2), shape=(2, 3))
inputset = [
(np.random.randint(4, 8, size=(3,)), np.random.randint(0, 4, size=(2, 3)))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(4, 2 ** 3, size=(3,), dtype=np.uint8)
sample_y = np.random.randint(0, 5, size=(2, 3), dtype=np.uint8)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,52 +0,0 @@
# bench: Unit Target: x - y (Tensor & Scalar)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x - y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
inputset = [(np.random.randint(4, 8, size=(3,)), random.randint(0, 3)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(3, 2 ** 3, size=(3,))
sample_y = random.randint(0, 3)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,52 +0,0 @@
# bench: Unit Target: x - y (Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x - y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedTensor(hnp.UnsignedInteger(2), shape=(3,))
inputset = [
(np.random.randint(4, 8, size=(3,)), np.random.randint(0, 4, size=(3,))) for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(3, 2 ** 3, size=(3,), dtype=np.uint8)
sample_y = np.random.randint(0, 4, size=(3,), dtype=np.uint8)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,49 +0,0 @@
# bench: Unit Target: np.negative(x)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
# Such that the result is positive
return 10 * np.ones(shape=(10, 6), dtype=np.uint8) + np.negative(x)
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(10, 6))
inputset = [np.random.randint(0, 2 ** 3, size=(10, 6)) for _ in range(128)]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(10, 6), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) < 95
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x + [1, 2, 3]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x + np.array([1, 2, 3])
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x + [1, 2, 3] (Broadcasted)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x + np.array([1, 2, 3])
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 3))
inputset = [np.random.randint(0, 2 ** 3, size=(2, 3)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(2, 3), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x + 42
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(2 ** 3),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** 3 - 1)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (10b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 10
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(2 ** max_precision - 42),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (11b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 11
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[random.randint(0, 2 ** max_precision - 1 - 42) for _ in range(128)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (12b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 12
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[random.randint(0, 2 ** max_precision - 1 - 42) for _ in range(128)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (13b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 13
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[random.randint(0, 2 ** max_precision - 1 - 42) for _ in range(128)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (14b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 14
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[random.randint(0, 2 ** max_precision - 1 - 42) for _ in range(128)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (15b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 15
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[random.randint(0, 2 ** max_precision - 1 - 42) for _ in range(128)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (16b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 16
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[random.randint(0, 2 ** max_precision - 1 - 42) for _ in range(128)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (32b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 32
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[random.randint(0, 2 ** max_precision - 1 - 42) for _ in range(128)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (8b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 8
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(2 ** max_precision - 42),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,51 +0,0 @@
# bench: Unit Target: x + 42 (9b)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
max_precision = 9
def function_to_compile(x):
return x + 42
x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(2 ** max_precision - 42),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** max_precision - 1 - 42)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x + 42 (Tensor)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x + 42
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,50 +0,0 @@
# bench: Unit Target: x + y
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x + y
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
[(random.randint(0, 7), random.randint(0, 7)) for _ in range(32)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** 3 - 1)
sample_y = random.randint(0, 2 ** 3 - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: x + y (Broadcasted Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x + y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 3))
inputset = [
(np.random.randint(0, 2 ** 3, size=(3,)), np.random.randint(0, 2 ** 3, size=(2, 3)))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
sample_y = np.random.randint(0, 2 ** 3, size=(2, 3), dtype=np.uint8)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,52 +0,0 @@
# bench: Unit Target: x + y (Tensor & Scalar)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x + y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
inputset = [(np.random.randint(0, 8, size=(3,)), random.randint(0, 7)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
sample_y = random.randint(0, 2 ** 3 - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: x + y (Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x + y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [
(np.random.randint(0, 2 ** 3, size=(3,)), np.random.randint(0, 2 ** 3, size=(3,)))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
sample_y = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: np.reshape(x, some_shape)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return np.reshape(x, (15, 4))
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(10, 6))
inputset = [np.random.randint(0, 2 ** 3, size=(10, 6)) for _ in range(128)]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(10, 6), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x[::-1]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x[::-1]
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(100):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x * [1, 2, 3]
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x * np.array([1, 2, 3])
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x * [1, 2, 3] (Broadcasted)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x * np.array([1, 2, 3])
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 3))
inputset = [np.random.randint(0, 2 ** 3, size=(2, 3)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(2, 3), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x * 7
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x * 7
x = hnp.EncryptedScalar(hnp.UnsignedInteger(4))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(2 ** 4),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** 4 - 1)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x * 7 (Tensor)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x * 7
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [np.random.randint(0, 2 ** 3, size=(3,)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: x * y
import itertools
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x * y
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
inputset = itertools.product(range(4, 8), range(0, 4))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(2 ** 2, 2 ** 3 - 1)
sample_y = random.randint(0, 2 ** 2 - 1)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: x * y (Broadcasted Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x * y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 3))
inputset = [
(np.random.randint(0, 2 ** 3, size=(3,)), np.random.randint(0, 2 ** 3, size=(2, 3)))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
sample_y = np.random.randint(0, 2 ** 3, size=(2, 3), dtype=np.uint8)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,52 +0,0 @@
# bench: Unit Target: x * y (Tensor & Scalar)
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x * y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
inputset = [(np.random.randint(0, 8, size=(3,)), random.randint(0, 7)) for _ in range(32)]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,))
sample_y = random.randint(0, 5)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,53 +0,0 @@
# bench: Unit Target: x * y (Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x, y):
return x * y
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))
inputset = [
(np.random.randint(0, 2 ** 3, size=(3,)), np.random.randint(0, 2 ** 3, size=(3,)))
for _ in range(32)
]
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
sample_y = np.random.randint(0, 2 ** 3, size=(3,), dtype=np.uint8)
inputs.append([sample_x, sample_y])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: x ** 2
import random
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return x ** 2
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
range(2 ** 3),
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
inputs = []
labels = []
for _ in range(4):
sample_x = random.randint(0, 2 ** 3 - 1)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,48 +0,0 @@
# bench: Unit Target: np.transpose(x)
import numpy as np
from common import BENCHMARK_CONFIGURATION
import concrete.numpy as hnp
def main():
def function_to_compile(x):
return np.transpose(x)
x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))
inputset = [np.random.randint(0, 2 ** 3, size=(2, 4)) for _ in range(128)]
inputs = []
labels = []
for _ in range(4):
sample_x = np.random.randint(0, 2 ** 3, size=(2, 4), dtype=np.uint8)
inputs.append([sample_x])
labels.append(function_to_compile(*inputs[-1]))
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# bench: Measure: End
correct = 0
for input_i, label_i in zip(inputs, labels):
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# bench: Measure: End
if np.array_equal(result_i, label_i):
correct += 1
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":
main()

View File

@@ -1,6 +1,6 @@
# Benchmarks
To track our progress over time, we have created a [progress tracker](https://progress.zama.ai) that:
To track our progress over time, we have created a [progress tracker](https://ml.progress.zama.ai) that:
- lists targets that we want to compile
- updates the status on the compilation of these functions
- tracks compilation and evaluation times on different hardware
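For context, every benchmark script removed in this commit drove the tracker through `# bench:` comment directives. A minimal, self-contained sketch of that convention follows; the target name, the timed work, and the accuracy value are illustrative, not taken from a real benchmark:

# bench: Unit Target: x + 1
import time

def main():
    # bench: Measure: Compilation Time (ms)
    time.sleep(0.1)  # stand-in for the work being timed
    # bench: Measure: End
    accuracy = 100.0
    # bench: Measure: Accuracy (%) = accuracy
    # bench: Alert: Accuracy (%) != 100

if __name__ == "__main__":
    main()

Timing directives wrap the block between `Measure: ...` and `Measure: End`, value directives use the `Measure: ... = expression` form, and `Alert:` flags the measurement on the tracker when the comparison holds.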

53
poetry.lock generated
View File

@@ -187,7 +187,7 @@ pycparser = "*"
[[package]]
name = "charset-normalizer"
version = "2.0.9"
version = "2.0.10"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "dev"
optional = false
@@ -1352,6 +1352,21 @@ category = "dev"
optional = false
python-versions = "*"
[[package]]
name = "py-progress-tracker"
version = "0.1.0"
description = "A simple benchmarking library"
category = "dev"
optional = false
python-versions = ">=3.8,<3.11"
[package.dependencies]
colorama = ">=0.4.4,<0.5.0"
psutil = ">=5.8.0,<6.0.0"
py-cpuinfo = ">=8.0.0,<9.0.0"
tabulate = ">=0.8.9,<0.9.0"
termcolor = ">=1.1.0,<2.0.0"
[[package]]
name = "pycodestyle"
version = "2.7.0"
@@ -2022,6 +2037,25 @@ python-versions = ">=3.5"
lint = ["flake8", "mypy", "docutils-stubs"]
test = ["pytest"]
[[package]]
name = "tabulate"
version = "0.8.9"
description = "Pretty-print tabular data"
category = "dev"
optional = false
python-versions = "*"
[package.extras]
widechars = ["wcwidth"]
[[package]]
name = "termcolor"
version = "1.1.0"
description = "ANSII Color formatting for output in terminal."
category = "dev"
optional = false
python-versions = "*"
[[package]]
name = "terminado"
version = "0.12.1"
@@ -2248,7 +2282,7 @@ full = ["pygraphviz"]
[metadata]
lock-version = "1.1"
python-versions = ">=3.8,<3.10"
content-hash = "4fbcb00a68b38cf9bfc977c6c750f4f76852bdd704f5346bc19752ca7c4b6905"
content-hash = "51dccbf357cf2a087c60beef5bc118d0ef469b5c5c7d72794550c4ea3c318f28"
[metadata.files]
alabaster = [
@@ -2379,8 +2413,8 @@ cffi = [
{file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"},
]
charset-normalizer = [
{file = "charset-normalizer-2.0.9.tar.gz", hash = "sha256:b0b883e8e874edfdece9c28f314e3dd5badf067342e42fb162203335ae61aa2c"},
{file = "charset_normalizer-2.0.9-py3-none-any.whl", hash = "sha256:1eecaa09422db5be9e29d7fc65664e6c33bd06f9ced7838578ba40d58bdf3721"},
{file = "charset-normalizer-2.0.10.tar.gz", hash = "sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd"},
{file = "charset_normalizer-2.0.10-py3-none-any.whl", hash = "sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455"},
]
click = [
{file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"},
@@ -3158,6 +3192,10 @@ py = [
py-cpuinfo = [
{file = "py-cpuinfo-8.0.0.tar.gz", hash = "sha256:5f269be0e08e33fd959de96b34cd4aeeeacac014dd8305f70eb28d06de2345c5"},
]
py-progress-tracker = [
{file = "py-progress-tracker-0.1.0.tar.gz", hash = "sha256:ebda5b1e9d87a6cb8d9af02c625f372d89e7b6f52d0637cb8476db25ea55f5b4"},
{file = "py_progress_tracker-0.1.0-py3-none-any.whl", hash = "sha256:5fcc8abaea1c46ea81fa2e99f2028cd988cf022db22bd7d70c684644534fccf9"},
]
pycodestyle = [
{file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"},
{file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"},
@@ -3560,6 +3598,13 @@ sphinxcontrib-serializinghtml = [
{file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
{file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
]
tabulate = [
{file = "tabulate-0.8.9-py3-none-any.whl", hash = "sha256:d7c013fe7abbc5e491394e10fa845f8f32fe54f8dc60c6622c6cf482d25d47e4"},
{file = "tabulate-0.8.9.tar.gz", hash = "sha256:eb1d13f25760052e8931f2ef80aaf6045a6cceb47514db8beab24cded16f13a7"},
]
termcolor = [
{file = "termcolor-1.1.0.tar.gz", hash = "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b"},
]
terminado = [
{file = "terminado-0.12.1-py3-none-any.whl", hash = "sha256:09fdde344324a1c9c6e610ee4ca165c4bb7f5bbf982fceeeb38998a988ef8452"},
{file = "terminado-0.12.1.tar.gz", hash = "sha256:b20fd93cc57c1678c799799d117874367cc07a3d2d55be95205b1a88fa08393f"},

View File

@@ -53,6 +53,7 @@ scikit-learn = "1.0.1"
pandas = "1.3.4"
pip-audit = "^1.1.1"
pytest-codeblocks = "^0.12.2"
py-progress-tracker = "^0.1.0"
[build-system]
requires = ["poetry-core>=1.0.0"]

View File

@@ -12,13 +12,15 @@ if ! source "${DEV_VENV_PATH}/bin/activate"; then
python3 -m venv "${DEV_VENV_PATH}"
# shellcheck disable=SC1090,SC1091
source "${DEV_VENV_PATH}/bin/activate"
cd /src/ && make setup_env
fi
cd /src/ && make sync_env
mkdir -p logs
initial_log=logs/$(date -u --iso-8601=seconds).log
mkdir -p logs
poetry run python script/progress_tracker_utils/measure.py benchmarks > "$initial_log"
make benchmark > "$initial_log"
final_log=logs/$(date -u --iso-8601=seconds).log
@@ -38,5 +40,5 @@ fi
curl \
-H 'Authorization: Bearer '"$PROGRESS_TRACKER_TOKEN"'' \
-H 'Content-Type: application/json' \
-d @.benchmarks/findings.json \
-d @progress.json \
-X POST "$PROGRESS_TRACKER_URL"/measurement

View File

@@ -1,56 +0,0 @@
"""Extract some info about the host machine."""
import json
import os
import platform
import urllib.parse
import cpuinfo
import dotenv
import psutil
def main():
"""Extract some info about the host machine."""
dotenv.load_dotenv()
specs = []
cpu_value = cpuinfo.get_cpu_info()["brand_raw"].replace("(R)", "®").replace("(TM)", "")
specs.append(["CPU", cpu_value])
vcpu_value = os.getenv("VCPU")
if vcpu_value is not None:
specs.append(["vCPU", vcpu_value])
ram_value = f"{psutil.virtual_memory().total / (1024 ** 3):.2f} GB"
specs.append(["RAM", ram_value])
os_value = os.getenv("OS_NAME")
if os_value is None:
os_value = f"{platform.system()} {platform.release()}"
specs.append(["OS", os_value])
name = os.getenv("MACHINE_NAME")
if name is None:
name = platform.node()
name = name.strip()
id_ = name.lower()
id_ = id_.replace(" ", "-")
id_ = id_.replace("_", "-")
id_ = id_.replace(".", "-")
id_ = id_.replace("(", "")
id_ = id_.replace(")", "")
id_ = id_.replace("$/h", "-dollars-per-hour")
id_ = id_.strip()
id_ = urllib.parse.quote_plus(id_)
os.makedirs(".benchmarks", exist_ok=True)
machine = {"id": id_, "name": name, "specs": specs}
with open(".benchmarks/machine.json", "w", encoding="utf-8") as f:
json.dump(machine, f, indent=2, ensure_ascii=False)
if __name__ == "__main__":
main()
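For reference, the machine description this (now removed) helper wrote, and which the measurement script below read back, had the following shape; every value here is invented for illustration:

# Illustrative contents of .benchmarks/machine.json (all values made up)
machine_example = {
    "id": "m5-4xlarge-0-77-dollars-per-hour",
    "name": "m5.4xlarge (0.77$/h)",
    "specs": [
        ["CPU", "Intel Xeon Platinum 8175M"],
        ["vCPU", "16"],
        ["RAM", "61.80 GB"],
        ["OS", "Ubuntu 20.04"],
    ],
}

The `id` follows the normalization above: spaces, underscores, and dots become dashes, parentheses are dropped, and `$/h` becomes `-dollars-per-hour`.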

View File

@@ -1,434 +0,0 @@
"""Measurement script for the progress tracker"""
import argparse
import json
import os
import pathlib
import shutil
import subprocess
import urllib
import tqdm
def name_to_id(name):
"""Convert a human readable name to a url friendly id (e.g., `x + y` to `x-plus-y`)"""
name = name.replace("-", "minus")
name = name.replace(" ** ", "-to-the-power-of-")
name = name.replace("+", "plus")
name = name.replace("*", "times")
name = name.replace("/", "over")
name = name.replace("%", "percent")
name = name.replace("&", "and")
name = name.replace(":", "colon")
name = name.replace(" ", "-")
name = name.replace("(", "")
name = name.replace(")", "")
name = name.replace("[", "")
name = name.replace("]", "")
name = name.replace(",", "")
name = name.replace(".", "-")
name = name.replace("^", "")
return urllib.parse.quote_plus(name.lower())
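Hand-tracing the replacement chain above for a few names that appear in the deleted benchmarks gives the ids below (derived by inspection of the code, not taken from tracker output; the asserts assume `name_to_id` is in scope):

assert name_to_id("x + y") == "x-plus-y"
assert name_to_id("x + 42 (Tensor)") == "x-plus-42-tensor"
assert name_to_id("Compilation Time (ms)") == "compilation-time-ms"
assert name_to_id("np.matmul(x, c)") == "np-matmulx-c"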
def register_alert(script, index, line, metrics, alerts):
"""Parse line, check its correctness, add it to list of alerts if it's valid"""
# Extract the alert details
alert_line = line.replace("# bench: Alert:", "")
# Parse the alert and append it to list of alerts
supported_operators = ["==", "!=", "<=", ">=", "<", ">"]
for operator in supported_operators:
alert_details = alert_line.split(f" {operator} ")
# An alert should be of the form `{metric} {operator} {constant}`
if len(alert_details) == 2:
metric_label = alert_details[0].strip()
metric_id = name_to_id(metric_label)
if metric_id not in metrics:
raise SyntaxError(
f"An alert is using an undefined metric `{metric_label}` "
f"(at line {index + 1} of {script})",
)
value_str = alert_details[1].strip()
try:
value = float(value_str)
alerts.append({"metric": metric_id, "comparison": operator, "value": value})
except ValueError as error:
raise SyntaxError(
f"An alert is not using a constant floating point for comparison "
f"(it uses `{value_str}` at line {index + 1} of {script})",
) from error
break
else:
raise SyntaxError(
f"An alert is not using any of the supported comparisons "
f"{', '.join(supported_operators)} "
f"(at line {index + 1} of {script})",
)
def identify_metrics_and_alerts(script, lines, metrics, alerts):
"""Identify the metrics of a script and make sure the annotations are well-formed"""
# Create a flag to detect `# Measure: End` without a measurement start
in_measurement = False
# Create a variable to remember the indentation of the start of the last measurement
measurement_indentation = 0
# Create a variable to remember the line number of the start of the last measurement
measurement_line = 0
# Identify measurements and store their name and id in `metrics`
for index, line in enumerate(lines):
# Get the indentation of the line
indentation = len(line) - len(line.lstrip())
# Strip the line for easier processing
line = line.strip()
# Check whether the line is a special line or not
if line == "# bench: Measure: End":
# Make sure a measurement is active already
if not in_measurement:
raise SyntaxError(
f"Measurements cannot end before they are defined "
f"(at line {index + 1} of {script})",
)
# Make sure indentation of the current line
# matches the indentation of the active measurement line
if indentation != measurement_indentation:
raise SyntaxError(
f"Measurements should finish with the same indentation as they are defined "
f"(at lines {measurement_line} and {index + 1} of {script})",
)
# Set in_measurement to false as the active measurement has ended
in_measurement = False
elif line.startswith("# bench: Measure:"):
# Make sure a measurement is not active already
if in_measurement:
raise SyntaxError(
f"Nested measurements are not supported "
f"(at lines {measurement_line} and {index + 1} of {script})",
)
# Extract the measurement details
measurement_details = line.replace("# bench: Measure:", "").split("=")
# Extract metric name and id
metric_label = measurement_details[0].strip()
metric_id = name_to_id(metric_label)
# Add metric id and metric name to `metrics`
metrics[metric_id] = metric_label
# Check if the measurement is a timing measurement (does not contain `= expression`)
if len(measurement_details) == 1:
# We need to see an end in the upcoming lines so update variables accordingly
in_measurement = True
measurement_line = index + 1
measurement_indentation = indentation
elif line.startswith("# bench: Alert:"):
register_alert(script, index, line, metrics, alerts)
# Make sure there isn't an active measurement that hasn't finished
if in_measurement:
raise SyntaxError(
f"Unfinished measurements are not supported "
f"(at line {measurement_line} of {script})",
)
def create_modified_script(script, lines, metrics):
"""Create a modified version of the script which can be used to perform measurements"""
with open(f".benchmarks/scripts/{script}", "w", encoding="utf-8") as f:
# Import must-have libraries
f.write("import json\n")
f.write("import time\n")
f.write("\n")
# Create a measurement dictionary to accumulate values
f.write("_measurements_ = {\n")
for metric_id in metrics.keys():
f.write(f' "{metric_id}": [],\n')
f.write("}\n")
# Create a variable to hold the id of the current metric
# This is required to determine where to save the measured value
current_metric_id = ""
# Copy the lines of the original script into the new script
for line in lines[1:]:
# And modify special lines along the way
if line.strip() == "# bench: Measure: End":
# Replace `# Measure: End` with
#
# _end_ = time.time()
# _measurements_["id"].append((_end_ - _start_) * 1000)
index = line.find("# bench: Measure: End")
line = line[:index]
f.write(f"{line}_end_ = time.time()\n")
value = "(_end_ - _start_) * 1000"
line += f'_measurements_["{current_metric_id}"].append({value})\n'
elif line.strip().startswith("# bench: Measure:"):
# Replace `# Measure: ...` with
#
# _start_ = time.time()
# Replace `# Measure: ... = expression` with
#
# _measurements_["id"].append(expression)
metric_details = line.replace("# bench: Measure:", "").split("=")
metric_label = metric_details[0].strip()
metric_id = name_to_id(metric_label)
index = line.find("# bench: Measure:")
line = line[:index]
if len(metric_details) == 1:
current_metric_id = metric_id
line += "_start_ = time.time()\n"
else:
value = metric_details[1]
line += f'_measurements_["{metric_id}"].append({value.strip()})\n'
# Write the possibly replaced line back
f.write(line)
# Dump measurements to a temporary file after the script is executed from start to end
f.write("\n")
f.write(f'with open(".benchmarks/scripts/{script}.measurements", "w") as f:\n')
f.write(" json.dump(_measurements_, f, indent=2)\n")
def perform_measurements(path, script, target_id, metrics, samples, result):
"""Run the modified script multiple times and update the result"""
# Create a flag to keep track of the working status
working = True
print()
print(path)
print("-" * len(str(path)))
# Run the modified script `samples` times and accumulate measurements
measurements = {metric_id: [] for metric_id in metrics.keys()}
with tqdm.tqdm(total=samples) as pbar:
for i in range(samples):
# Create the subprocess
process = subprocess.run(
["python", f".benchmarks/scripts/{script}"],
capture_output=True,
check=False,
)
# Print sample information
pbar.write(f" Sample {i + 1}")
pbar.write(f" {'-' * len(f'Sample {i + 1}')}")
# If the script raised an exception, discard everything for now
if process.returncode != 0:
working = False
pbar.write(f" Failed (exited with {process.returncode})")
pbar.write(f" --------------------{'-' * len(str(process.returncode))}-")
stderr = process.stderr.decode("utf-8")
for line in stderr.split("\n"):
if line.strip() != "":
pbar.write(f" {line}")
pbar.write("")
pbar.update(samples)
break
# Read the measurements and delete the temporary file
with open(f".benchmarks/scripts/{script}.measurements", encoding="utf-8") as f:
results = json.load(f)
os.unlink(f".benchmarks/scripts/{script}.measurements")
# Add the `results` of the current run to `measurements`
for metric_id in metrics.keys():
average = sum(results[metric_id]) / len(results[metric_id])
pbar.write(f" {metrics[metric_id]} = {average}")
for measurement in results[metric_id]:
measurements[metric_id].append(measurement)
pbar.write("")
pbar.update(1)
print()
result["targets"][target_id]["working"] = working
if working:
# Take average of all metrics and store them in `result`
result["targets"][target_id]["measurements"].update(
{metric_id: sum(metric) / len(metric) for metric_id, metric in measurements.items()}
)
# Add metrics of the current script to the result
for metric_id, metric_label in metrics.items():
if metric_id not in result["metrics"]:
result["metrics"][metric_id] = {"label": metric_label}
else:
# Delete measurements field of the current target
del result["targets"][target_id]["measurements"]
def get_scripts_to_benchmark(args):
"""Get the list of files to benchmark"""
base = pathlib.Path(args.base)
if args.files_to_benchmark is None:
scripts = list(base.glob("*.py"))
else:
scripts = [pathlib.Path(f) for f in args.files_to_benchmark]
if not args.check:
print("Will benchmark following files:\n")
print(" - " + "\n - ".join(str(s) for s in scripts))
# Clear the previous temporary scripts directory
shutil.rmtree(".benchmarks/scripts", ignore_errors=True)
# Copy the base directory to the new temporary scripts directory
shutil.copytree(base, ".benchmarks/scripts")
# Because we copy the entire base directory to the new temporary scripts directory,
# the modified scripts will have access to helper modules defined within the base directory
# (e.g., we copy `benchmarks/common.py` to `.benchmarks/scripts/common.py` which allows
# the modified `.benchmarks/scripts/x_plus_42.py` to access the `common` module)
return scripts
def main(args):
"""Measurement script for the progress tracker"""
samples = args.samples
with open(".benchmarks/machine.json", "r", encoding="utf-8") as f:
machine = json.load(f)
result = {"machine": machine, "metrics": {}, "targets": {}}
scripts = get_scripts_to_benchmark(args)
# Process each script under the base directory
for path in scripts:
# Read the script line by line
with open(path, "r", encoding="utf-8") as f:
lines = f.readlines()
# Find the first non-empty line
first_line = ""
for line in map(lambda line: line.strip(), lines):
if line != "":
first_line = line
break
# Check whether the script is a target or not
if first_line.startswith("# bench: Unit Target:"):
# Extract target name
target_name = first_line.replace("# bench: Unit Target:", "").strip()
is_unit = True
elif first_line.startswith("# bench: Full Target:"):
# Extract target name
target_name = first_line.replace("# bench: Full Target:", "").strip()
is_unit = False
else:
if not args.check:
print()
print(path)
print("-" * len(str(path)))
with tqdm.tqdm(total=samples) as pbar:
pbar.write(" Sample 1")
pbar.write(" --------")
pbar.write(
" Skipped (doesn't have a `# bench: Unit/Full Target:` directive)\n"
)
pbar.update(samples)
print()
continue
# Extract target id
target_id = name_to_id(target_name)
# Check whether the target is already registered
if target_id in result["targets"]:
raise RuntimeError(f"Target `{target_name}` is already registered")
# Create a dictionary to map `metric_id` to `metric_label`
metrics = {}
# Create a list to hold alerts in the form { "metric": ..., "comparison": ..., "value": ... }
alerts = []
# Identify metrics of the current script
identify_metrics_and_alerts(path, lines, metrics, alerts)
# Extract the script name
name = os.path.basename(path)
# Create another script to hold the modified version of the current script
create_modified_script(name, lines, metrics)
# Create an entry in the result for the current target
result["targets"][target_id] = {
"name": target_name,
"measurements": {},
"alerts": alerts,
"code": "\n".join(lines),
"isUnit": is_unit,
}
if not args.check:
# Perform and save measurements
perform_measurements(path, name, target_id, metrics, samples, result)
# Dump the latest results to the output file
with open(".benchmarks/findings.json", "w", encoding="utf-8") as f:
json.dump(result, f, indent=2, ensure_ascii=False)
# Delete the modified scripts unless the user asked to keep them
if not args.keep:
shutil.rmtree(".benchmarks/scripts", ignore_errors=True)
if not args.check:
print()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Measurement script for the progress tracker")
parser.add_argument("base", type=str, help="directory which contains the benchmarks")
parser.add_argument("--check", action="store_true", help="flag to enable just checking mode")
parser.add_argument("--samples", type=int, default=30, help="number of samples to take")
parser.add_argument("--keep", action="store_true", help="flag to keep measurement scripts")
parser.add_argument(
"--files_to_benchmark",
"-f",
nargs="+",
type=str,
default=None,
help="files to benchmark in base directory (with base directory as a prefix)",
)
main(parser.parse_args())
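
For illustration, the rewriting performed by `create_modified_script` above turns the `# bench:` directive comments into plain timing code. Taking the minimal benchmark used in the test scripts below (a Unit Target named X that prints 42), the generated copy under `.benchmarks/scripts` would look roughly like the sketch that follows. The metric id `printing_time_ms` and the import preamble are assumptions: `name_to_id` and the first part of `create_modified_script` appear earlier in the file and are not shown in this hunk.

import json  # assumed to be emitted by the preamble not shown in this hunk
import time  # assumed as well

_measurements_ = {
    "printing_time_ms": [],  # id assumed to come from name_to_id("Printing Time (ms)")
}
_start_ = time.time()  # replaces `# bench: Measure: Printing Time (ms)`
print(42)
_end_ = time.time()  # replaces `# bench: Measure: End`
_measurements_["printing_time_ms"].append((_end_ - _start_) * 1000)

with open(".benchmarks/scripts/<script>.measurements", "w") as f:  # <script> is the benchmark file name
    json.dump(_measurements_, f, indent=2)

The measurement script itself is driven by the argument parser above, e.g. `python measure.py benchmarks --samples 30 --keep`; the path to measure.py depends on where it lives in the repository.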

View File

@@ -1,137 +0,0 @@
"""Test file for progress tracker"""
from typing import List
import measure
import pytest
class Args:
"""Class to mimic the command line arguments that can be passed to measurement script."""
base: str
files_to_benchmark: List[str]
samples: int
keep: bool
check: bool
def __init__(self, files_to_benchmark: List[str]):
self.base = "script/progress_tracker_utils/test_scripts"
self.files_to_benchmark = files_to_benchmark
self.samples = 30
self.keep = False
self.check = True
def test_alert_on_undefined_metric():
"""Test function for alert directive on unefined metric"""
file = "script/progress_tracker_utils/test_scripts/alert_on_undefined_metric.py"
args = Args([file])
with pytest.raises(SyntaxError) as excinfo:
measure.main(args)
assert str(excinfo.value) == (
f"An alert is using an undefined metric `Accuracy (%)` (at line 7 of {file})"
)
def test_alert_invalid_comparison_constant():
"""Test function for alert directive with invalid constant"""
file = "script/progress_tracker_utils/test_scripts/alert_invalid_comparison_constant.py"
args = Args([file])
with pytest.raises(SyntaxError) as excinfo:
measure.main(args)
assert str(excinfo.value) == (
f"An alert is not using a constant floating point for comparison "
f'(it uses `"abc"` at line 7 of {file})'
)
def test_alert_invalid_comparison_operator():
"""Test function for alert directive that use invalid comparison operator"""
file = "script/progress_tracker_utils/test_scripts/alert_invalid_comparison_operator.py"
args = Args([file])
with pytest.raises(SyntaxError) as excinfo:
measure.main(args)
assert str(excinfo.value) == (
f"An alert is not using any of the supported comparisons ==, !=, <=, >=, <, > "
f"(at line 7 of {file})"
)
def test_measure_end_before_start():
"""Test function for measure end directive before measure directive"""
file = "script/progress_tracker_utils/test_scripts/measure_end_before_start.py"
args = Args([file])
with pytest.raises(SyntaxError) as excinfo:
measure.main(args)
assert str(excinfo.value) == (
f"Measurements cannot end before they are defined (at line 3 of {file})"
)
def test_measure_invalid_indentation():
"""Test function for invalid indentation of measure directives"""
file = "script/progress_tracker_utils/test_scripts/measure_invalid_indentation.py"
args = Args([file])
with pytest.raises(SyntaxError) as excinfo:
measure.main(args)
assert str(excinfo.value) == (
f"Measurements should finish with the same indentation as they are defined "
f"(at lines 4 and 6 of {file})"
)
def test_measure_nested():
"""Test function for nested measure directives"""
file = "script/progress_tracker_utils/test_scripts/measure_nested.py"
args = Args([file])
with pytest.raises(SyntaxError) as excinfo:
measure.main(args)
assert str(excinfo.value) == (
f"Nested measurements are not supported (at lines 3 and 7 of {file})"
)
def test_measure_unfinished():
"""Test function for measure directives without a measure end directive"""
file = "script/progress_tracker_utils/test_scripts/measure_unfinished.py"
args = Args([file])
with pytest.raises(SyntaxError) as excinfo:
measure.main(args)
assert str(excinfo.value) == (
f"Unfinished measurements are not supported (at line 3 of {file})"
)
def test_two_targets_with_the_same_name():
"""Test function for target name collisions"""
file1 = "script/progress_tracker_utils/test_scripts/two_targets_with_the_same_name.1.py"
file2 = "script/progress_tracker_utils/test_scripts/two_targets_with_the_same_name.2.py"
args = Args([file1, file2])
with pytest.raises(RuntimeError) as excinfo:
measure.main(args)
assert str(excinfo.value) == "Target `X` is already registered"

View File

@@ -1,7 +0,0 @@
# bench: Unit Target: X
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End
# bench: Alert: Printing Time (ms) != "abc"

View File

@@ -1,7 +0,0 @@
# bench: Unit Target: X
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End
# bench: Alert: Printing Time (ms) === 10

View File

@@ -1,7 +0,0 @@
# bench: Unit Target: X
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End
# bench: Alert: Accuracy (%) != 100

View File

@@ -1,7 +0,0 @@
# bench: Unit Target: X
# bench: Measure: End
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End

View File

@@ -1,6 +0,0 @@
# bench: Unit Target: X
if True:
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End

View File

@@ -1,13 +0,0 @@
# bench: Unit Target: X
# bench: Measure: Printing Time Of Three (ms)
print(42)
# bench: Measure: Printing Time Of One (ms)
print(42)
# bench: Measure: End
print(42)
# bench: Measure: End

View File

@@ -1,5 +0,0 @@
# bench: Unit Target: X
# bench: Measure: Printing Time Of Three (ms)
print(42)

View File

@@ -1,5 +0,0 @@
# bench: Full Target: X
# bench: Measure: Printing Time (ms)
print(420)
# bench: Measure: End

View File

@@ -1,5 +0,0 @@
# bench: Unit Target: X
# bench: Measure: Printing Time (ms)
print(42)
# bench: Measure: End
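
After a successful run over a minimal target like the one above, the findings.json dumped by the measurement script has roughly this shape. The numeric value, the `x` and `printing_time_ms` ids (which depend on `name_to_id`), and the machine block (read from `.benchmarks/machine.json`) are illustrative:

{
  "machine": { "...": "contents of .benchmarks/machine.json" },
  "metrics": {
    "printing_time_ms": { "label": "Printing Time (ms)" }
  },
  "targets": {
    "x": {
      "name": "X",
      "measurements": { "printing_time_ms": 0.05 },
      "alerts": [],
      "code": "# bench: Unit Target: X\n...",
      "isUnit": true,
      "working": true
    }
  }
}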