From 6edba1e10ccddd7cea927b7f9b29856036d5be13 Mon Sep 17 00:00:00 2001 From: Benoit Chevallier-Mames Date: Wed, 20 Oct 2021 16:31:11 +0200 Subject: [PATCH] chore(benchmarks): add benchmark scripts for more features refs #700 --- benchmarks/c_concatenate_x.py | 49 ++++++++++++++++++++++++++++++++ benchmarks/c_matmul_x.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_concatenate_c.py | 49 ++++++++++++++++++++++++++++++++ benchmarks/x_concatenate_y.py | 53 +++++++++++++++++++++++++++++++++++ benchmarks/x_matmul_c.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_matmul_y.py | 53 +++++++++++++++++++++++++++++++++++ benchmarks/x_negative.py | 48 +++++++++++++++++++++++++++++++ benchmarks/x_plus_42.py | 6 ++-- benchmarks/x_plus_42_10b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_11b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_12b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_13b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_14b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_15b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_16b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_32b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_8b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_plus_42_9b.py | 50 +++++++++++++++++++++++++++++++++ benchmarks/x_reshape.py | 48 +++++++++++++++++++++++++++++++ benchmarks/x_tranpose.py | 48 +++++++++++++++++++++++++++++++ 20 files changed, 951 insertions(+), 3 deletions(-) create mode 100644 benchmarks/c_concatenate_x.py create mode 100644 benchmarks/c_matmul_x.py create mode 100644 benchmarks/x_concatenate_c.py create mode 100644 benchmarks/x_concatenate_y.py create mode 100644 benchmarks/x_matmul_c.py create mode 100644 benchmarks/x_matmul_y.py create mode 100644 benchmarks/x_negative.py create mode 100644 benchmarks/x_plus_42_10b.py create mode 100644 benchmarks/x_plus_42_11b.py create mode 
100644 benchmarks/x_plus_42_12b.py create mode 100644 benchmarks/x_plus_42_13b.py create mode 100644 benchmarks/x_plus_42_14b.py create mode 100644 benchmarks/x_plus_42_15b.py create mode 100644 benchmarks/x_plus_42_16b.py create mode 100644 benchmarks/x_plus_42_32b.py create mode 100644 benchmarks/x_plus_42_8b.py create mode 100644 benchmarks/x_plus_42_9b.py create mode 100644 benchmarks/x_reshape.py create mode 100644 benchmarks/x_transpose.py diff --git a/benchmarks/c_concatenate_x.py b/benchmarks/c_concatenate_x.py new file mode 100644 index 000000000..939f52062 --- /dev/null +++ b/benchmarks/c_concatenate_x.py @@ -0,0 +1,49 @@ +# bench: Unit Target: np.concatenate((c, x)) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + def function_to_compile(x): + return np.concatenate((c, x)) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5)) + c = np.arange(20).reshape((4, 5)) + + inputset = [(np.random.randint(0, 2 ** 3, size=(4, 5)),) for _ in range(128)] + + inputs = [] + labels = [] + for _ in range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(4, 5)) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/c_matmul_x.py b/benchmarks/c_matmul_x.py new file mode 100644 index 000000000..cb04999eb --- /dev/null +++ b/benchmarks/c_matmul_x.py @@ -0,0 +1,50 @@ +# bench: Unit Target: 
np.matmul(c, x) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + c = np.arange(20, 30).reshape((5, 2)) + + def function_to_compile(x): + return np.matmul(c, x) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4)) + + inputset = [(np.random.randint(0, 2 ** 3, size=(2, 4)),) for _ in range(128)] + + inputs = [] + labels = [] + for _ in range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(2, 4)) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_concatenate_c.py b/benchmarks/x_concatenate_c.py new file mode 100644 index 000000000..8392c154d --- /dev/null +++ b/benchmarks/x_concatenate_c.py @@ -0,0 +1,49 @@ +# bench: Unit Target: np.concatenate((x, c)) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + def function_to_compile(x): + return np.concatenate((x, c)) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5)) + c = np.arange(20).reshape((4, 5)) + + inputset = [(np.random.randint(0, 2 ** 3, size=(4, 5)),) for _ in range(128)] + + inputs = [] + labels = [] + for _ in range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(4, 5)) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + 
function_to_compile, + {"x": x}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_concatenate_y.py b/benchmarks/x_concatenate_y.py new file mode 100644 index 000000000..e0b0e599b --- /dev/null +++ b/benchmarks/x_concatenate_y.py @@ -0,0 +1,53 @@ +# bench: Unit Target: np.concatenate((x, y)) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + def function_to_compile(x, y): + return np.concatenate((x, y)) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5)) + y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5)) + + inputset = [ + (np.random.randint(0, 2 ** 3, size=(4, 5)), np.random.randint(0, 2 ** 3, size=(4, 5))) + for _ in range(128) + ] + + inputs = [] + labels = [] + for _ in range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(4, 5)) + sample_y = np.random.randint(0, 2 ** 3, size=(4, 5)) + + inputs.append([sample_x, sample_y]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x, "y": y}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git 
a/benchmarks/x_matmul_c.py b/benchmarks/x_matmul_c.py new file mode 100644 index 000000000..a63c4797a --- /dev/null +++ b/benchmarks/x_matmul_c.py @@ -0,0 +1,50 @@ +# bench: Unit Target: np.matmul(x, c) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + c = np.arange(20).reshape((4, 5)) + + def function_to_compile(x): + return np.matmul(x, c) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4)) + + inputset = [(np.random.randint(0, 2 ** 3, size=(2, 4)),) for _ in range(128)] + + inputs = [] + labels = [] + for _ in range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(2, 4)) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_matmul_y.py b/benchmarks/x_matmul_y.py new file mode 100644 index 000000000..13ab3e485 --- /dev/null +++ b/benchmarks/x_matmul_y.py @@ -0,0 +1,53 @@ +# bench: Unit Target: np.matmul(x, y) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + def function_to_compile(x, y): + return np.matmul(x, y) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4)) + y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5)) + + inputset = [ + (np.random.randint(0, 2 ** 3, size=(2, 4)), np.random.randint(0, 2 ** 3, size=(4, 5))) + for _ in range(128) + ] + + inputs = [] + labels = [] + for _ in 
range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(2, 4)) + sample_y = np.random.randint(0, 2 ** 3, size=(4, 5)) + + inputs.append([sample_x, sample_y]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x, "y": y}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_negative.py b/benchmarks/x_negative.py new file mode 100644 index 000000000..0dff55aba --- /dev/null +++ b/benchmarks/x_negative.py @@ -0,0 +1,48 @@ +# bench: Unit Target: np.negative(x) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + def function_to_compile(x): + return np.negative(x) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(10, 6)) + + inputset = [(np.random.randint(0, 2 ** 3, size=(10, 6)),) for _ in range(128)] + + inputs = [] + labels = [] + for _ in range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(10, 6)) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: 
Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42.py b/benchmarks/x_plus_42.py index 9b28ddff3..1f9075999 100644 --- a/benchmarks/x_plus_42.py +++ b/benchmarks/x_plus_42.py @@ -11,13 +11,13 @@ def main(): def function_to_compile(x): return x + 42 - x = hnp.EncryptedScalar(hnp.UnsignedInteger(3)) + x = hnp.EncryptedScalar(hnp.UnsignedInteger(10)) # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, - [(i,) for i in range(2 ** 3)], + [(i,) for i in range(2 ** 10)], compilation_configuration=BENCHMARK_CONFIGURATION, ) # bench: Measure: End @@ -25,7 +25,7 @@ def main(): inputs = [] labels = [] for _ in range(4): - sample_x = random.randint(0, 2 ** 3 - 1) + sample_x = random.randint(0, 2 ** 10 - 1) inputs.append([sample_x]) labels.append(function_to_compile(*inputs[-1])) diff --git a/benchmarks/x_plus_42_10b.py b/benchmarks/x_plus_42_10b.py new file mode 100644 index 000000000..a04045ee0 --- /dev/null +++ b/benchmarks/x_plus_42_10b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (10b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 10 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(i,) for i in range(2 ** max_precision - 42)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # 
bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42_11b.py b/benchmarks/x_plus_42_11b.py new file mode 100644 index 000000000..dab6dc583 --- /dev/null +++ b/benchmarks/x_plus_42_11b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (11b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 11 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42_12b.py b/benchmarks/x_plus_42_12b.py new file mode 100644 index 000000000..8bd6005d6 --- /dev/null +++ b/benchmarks/x_plus_42_12b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (12b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 12 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + 
{"x": x}, + [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42_13b.py b/benchmarks/x_plus_42_13b.py new file mode 100644 index 000000000..d8d18d1ea --- /dev/null +++ b/benchmarks/x_plus_42_13b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (13b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 13 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff 
--git a/benchmarks/x_plus_42_14b.py b/benchmarks/x_plus_42_14b.py new file mode 100644 index 000000000..2e51a4acf --- /dev/null +++ b/benchmarks/x_plus_42_14b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (14b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 14 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42_15b.py b/benchmarks/x_plus_42_15b.py new file mode 100644 index 000000000..1411de3e9 --- /dev/null +++ b/benchmarks/x_plus_42_15b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (15b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 15 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # 
bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42_16b.py b/benchmarks/x_plus_42_16b.py new file mode 100644 index 000000000..5f6ce8ee3 --- /dev/null +++ b/benchmarks/x_plus_42_16b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (16b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 16 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42_32b.py b/benchmarks/x_plus_42_32b.py new file mode 100644 index 000000000..45b9cba5b --- /dev/null +++ 
b/benchmarks/x_plus_42_32b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (32b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 32 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42_8b.py b/benchmarks/x_plus_42_8b.py new file mode 100644 index 000000000..2c0aeb5fb --- /dev/null +++ b/benchmarks/x_plus_42_8b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (8b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 8 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(i,) for i in range(2 ** max_precision - 42)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + 
labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_plus_42_9b.py b/benchmarks/x_plus_42_9b.py new file mode 100644 index 000000000..b68cfc963 --- /dev/null +++ b/benchmarks/x_plus_42_9b.py @@ -0,0 +1,50 @@ +# bench: Unit Target: x + 42 (9b) + +import random + +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + + max_precision = 9 + + def function_to_compile(x): + return x + 42 + + x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision)) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + [(i,) for i in range(2 ** max_precision - 42)], + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + inputs = [] + labels = [] + for _ in range(4): + sample_x = random.randint(0, 2 ** max_precision - 1 - 42) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if result_i == label_i: + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_reshape.py b/benchmarks/x_reshape.py new file mode 100644 index 000000000..36461e713 --- /dev/null +++ b/benchmarks/x_reshape.py @@ -0,0 +1,48 @@ +# bench: Unit Target: np.reshape(x, some_shape) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + def 
function_to_compile(x): + return np.reshape(x, (15, 4)) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(10, 6)) + + inputset = [(np.random.randint(0, 2 ** 3, size=(10, 6)),) for _ in range(128)] + + inputs = [] + labels = [] + for _ in range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(10, 6)) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i = engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main() diff --git a/benchmarks/x_transpose.py b/benchmarks/x_transpose.py new file mode 100644 index 000000000..2965c9deb --- /dev/null +++ b/benchmarks/x_transpose.py @@ -0,0 +1,48 @@ +# bench: Unit Target: np.transpose(x) + +import numpy as np +from common import BENCHMARK_CONFIGURATION + +import concrete.numpy as hnp + + +def main(): + def function_to_compile(x): + return np.transpose(x) + + x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4)) + + inputset = [(np.random.randint(0, 2 ** 3, size=(2, 4)),) for _ in range(128)] + + inputs = [] + labels = [] + for _ in range(4): + sample_x = np.random.randint(0, 2 ** 3, size=(2, 4)) + + inputs.append([sample_x]) + labels.append(function_to_compile(*inputs[-1])) + + # bench: Measure: Compilation Time (ms) + engine = hnp.compile_numpy_function( + function_to_compile, + {"x": x}, + inputset, + compilation_configuration=BENCHMARK_CONFIGURATION, + ) + # bench: Measure: End + + correct = 0 + for input_i, label_i in zip(inputs, labels): + # bench: Measure: Evaluation Time (ms) + result_i 
= engine.run(*input_i) + # bench: Measure: End + + if np.array_equal(result_i, label_i): + correct += 1 + + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 + + +if __name__ == "__main__": + main()