From 67f50fb8ce9d1cab2a7d38ae233e7daf117e9242 Mon Sep 17 00:00:00 2001 From: Umut Date: Thu, 14 Oct 2021 17:04:34 +0300 Subject: [PATCH] refactor(benchmarks): use 'bench' header prefix for all benchmark directives --- benchmarks/124_minus_x.py | 14 ++++----- benchmarks/124_minus_x_tensor.py | 14 ++++----- benchmarks/linear_regression.py | 18 ++++++------ benchmarks/logistic_regression.py | 18 ++++++------ benchmarks/single_table_lookup.py | 14 ++++----- benchmarks/x_minus_1_2_3.py | 14 ++++----- benchmarks/x_minus_1_2_3_broadcasted.py | 14 ++++----- benchmarks/x_minus_24.py | 14 ++++----- benchmarks/x_minus_24_tensor.py | 14 ++++----- benchmarks/x_minus_y.py | 14 ++++----- benchmarks/x_minus_y_broadcasted_tensors.py | 14 ++++----- benchmarks/x_minus_y_tensor_and_scalar.py | 14 ++++----- benchmarks/x_minus_y_tensors.py | 14 ++++----- benchmarks/x_plus_1_2_3.py | 14 ++++----- benchmarks/x_plus_1_2_3_broadcasted.py | 14 ++++----- benchmarks/x_plus_42.py | 14 ++++----- benchmarks/x_plus_42_tensor.py | 14 ++++----- benchmarks/x_plus_y.py | 14 ++++----- benchmarks/x_plus_y_broadcasted_tensors.py | 14 ++++----- benchmarks/x_plus_y_tensor_and_scalar.py | 14 ++++----- benchmarks/x_plus_y_tensors.py | 14 ++++----- benchmarks/x_times_1_2_3.py | 14 ++++----- benchmarks/x_times_1_2_3_broadcasted.py | 14 ++++----- benchmarks/x_times_7.py | 14 ++++----- benchmarks/x_times_7_tensor.py | 14 ++++----- benchmarks/x_times_y.py | 14 ++++----- benchmarks/x_times_y_broadcasted_tensors.py | 14 ++++----- benchmarks/x_times_y_tensor_and_scalar.py | 14 ++++----- benchmarks/x_times_y_tensors.py | 14 ++++----- benchmarks/x_to_the_power_of_2.py | 14 ++++----- script/progress_tracker_utils/measure.py | 32 +++++++++++---------- 31 files changed, 231 insertions(+), 229 deletions(-) diff --git a/benchmarks/124_minus_x.py b/benchmarks/124_minus_x.py index aa74101d0..e178dfdd7 100644 --- a/benchmarks/124_minus_x.py +++ b/benchmarks/124_minus_x.py @@ -1,4 +1,4 @@ -# Unit Target: 124 - x +# bench: Unit Target: 124 - x import random @@ -13,14 +13,14 @@ def main(): x = hnp.EncryptedScalar(hnp.UnsignedInteger(3)) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, [(i,) for i in range(2 ** 3)], compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -32,15 +32,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/124_minus_x_tensor.py b/benchmarks/124_minus_x_tensor.py index 8b2db4834..3fa2c349b 100644 --- a/benchmarks/124_minus_x_tensor.py +++ b/benchmarks/124_minus_x_tensor.py @@ -1,4 +1,4 @@ -# Unit Target: 124 - x (Tensor) +# bench: Unit Target: 124 - x (Tensor) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 6, size=(3,)),) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, 
) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/linear_regression.py b/benchmarks/linear_regression.py index e0d15fb20..5b2f29a0c 100644 --- a/benchmarks/linear_regression.py +++ b/benchmarks/linear_regression.py @@ -1,4 +1,4 @@ -# Full Target: Linear Regression +# bench: Full Target: Linear Regression # Disable line length warnings as we have a looooong metric... # flake8: noqa: E501 @@ -172,14 +172,14 @@ def main(): for x_i in x_q: inputset.append((int(x_i[0]),)) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x_0": hnp.EncryptedScalar(hnp.UnsignedInteger(input_bits))}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End non_homomorphic_loss = 0 homomorphic_loss = 0 @@ -198,9 +198,9 @@ def main(): ) .dequantize()[0] ) - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) homomorphic_prediction = QuantizedArray(engine.run(*x_i), y_parameters).dequantize() - # Measure: End + # bench: Measure: End non_homomorphic_loss += (non_homomorphic_prediction - y_i) ** 2 homomorphic_loss += (homomorphic_prediction - y_i) ** 2 @@ -222,10 +222,10 @@ def main(): print(f"Homomorphic Loss: {homomorphic_loss:.4f}") print(f"Relative Difference Percentage: {difference:.2f}%") - # Measure: Non Homomorphic Loss = non_homomorphic_loss - # Measure: Homomorphic Loss = homomorphic_loss - # Measure: Relative Loss Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference - # Alert: Relative Loss Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2 + # bench: Measure: Non Homomorphic Loss = non_homomorphic_loss + # bench: Measure: Homomorphic Loss = homomorphic_loss + # bench: Measure: Relative Loss Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference + # bench: Alert: Relative Loss Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2 if __name__ == "__main__": diff --git a/benchmarks/logistic_regression.py b/benchmarks/logistic_regression.py index bd9f95a42..805f8f783 100644 --- a/benchmarks/logistic_regression.py +++ b/benchmarks/logistic_regression.py @@ -1,4 +1,4 @@ -# Full Target: Logistic Regression +# bench: Full Target: Logistic Regression # Disable line length warnings as we have a looooong metric... 
# flake8: noqa: E501 @@ -244,7 +244,7 @@ def main(): for x_i in x_q: inputset.append((int(x_i[0]), int(x_i[1]))) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, { @@ -254,7 +254,7 @@ def main(): inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End non_homomorphic_correct = 0 homomorphic_correct = 0 @@ -273,9 +273,9 @@ def main(): ) ).dequantize()[0] ) - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) homomorphic_prediction = round(QuantizedArray(engine.run(*x_i), y_parameters).dequantize()) - # Measure: End + # bench: Measure: End if non_homomorphic_prediction == y_i: non_homomorphic_correct += 1 @@ -299,10 +299,10 @@ def main(): print(f"Homomorphic Accuracy: {homomorphic_accuracy:.4f}") print(f"Difference Percentage: {difference:.2f}%") - # Measure: Non Homomorphic Accuracy = non_homomorphic_accuracy - # Measure: Homomorphic Accuracy = homomorphic_accuracy - # Measure: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference - # Alert: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2 + # bench: Measure: Non Homomorphic Accuracy = non_homomorphic_accuracy + # bench: Measure: Homomorphic Accuracy = homomorphic_accuracy + # bench: Measure: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference + # bench: Alert: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2 if __name__ == "__main__": diff --git a/benchmarks/single_table_lookup.py b/benchmarks/single_table_lookup.py index 2b650319a..5db7af140 100644 --- a/benchmarks/single_table_lookup.py +++ b/benchmarks/single_table_lookup.py @@ -1,4 +1,4 @@ -# Unit Target: Single Table Lookup +# bench: Unit Target: Single Table Lookup import random @@ -18,14 +18,14 @@ def main(): x = hnp.EncryptedScalar(hnp.UnsignedInteger(input_bits)) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, [(i,) for i in range(2 ** input_bits)], compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -37,15 +37,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_minus_1_2_3.py b/benchmarks/x_minus_1_2_3.py index 9b1f59071..68e1db305 100644 --- a/benchmarks/x_minus_1_2_3.py +++ b/benchmarks/x_minus_1_2_3.py @@ -1,4 +1,4 @@ -# Unit Target: x - [1, 2, 3] +# bench: Unit Target: x - [1, 2, 3] import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 2, size=(3,)) + np.array([1, 2, 3]),) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): 
correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_minus_1_2_3_broadcasted.py b/benchmarks/x_minus_1_2_3_broadcasted.py index 64f583384..7641653ab 100644 --- a/benchmarks/x_minus_1_2_3_broadcasted.py +++ b/benchmarks/x_minus_1_2_3_broadcasted.py @@ -1,4 +1,4 @@ -# Unit Target: x - [1, 2, 3] (Broadcasted) +# bench: Unit Target: x - [1, 2, 3] (Broadcasted) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -16,14 +16,14 @@ def main(): (np.random.randint(0, 2 ** 2, size=(2, 3)) + np.array([1, 2, 3]),) for _ in range(32) ] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -35,15 +35,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_minus_24.py b/benchmarks/x_minus_24.py index 2b03e8d8d..8efceaec0 100644 --- a/benchmarks/x_minus_24.py +++ b/benchmarks/x_minus_24.py @@ -1,4 +1,4 @@ -# Unit Target: x - 24 +# bench: Unit Target: x - 24 import random @@ -13,14 +13,14 @@ def main(): x = hnp.EncryptedScalar(hnp.UnsignedInteger(6)) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, [(i,) for i in range(24, 2 ** 6)], compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -32,15 +32,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_minus_24_tensor.py b/benchmarks/x_minus_24_tensor.py index 06bd641b7..209717586 100644 --- a/benchmarks/x_minus_24_tensor.py +++ b/benchmarks/x_minus_24_tensor.py @@ -1,4 +1,4 @@ -# Unit Target: x - 24 (Tensor) +# bench: Unit Target: x - 24 (Tensor) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 5, size=(3,)) + 24,) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End 
inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_minus_y.py b/benchmarks/x_minus_y.py index 3f6413bda..cc6fee277 100644 --- a/benchmarks/x_minus_y.py +++ b/benchmarks/x_minus_y.py @@ -1,4 +1,4 @@ -# Unit Target: x - y +# bench: Unit Target: x - y import itertools import random @@ -17,14 +17,14 @@ def main(): inputset = itertools.product(range(4, 8), range(0, 4)) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -37,15 +37,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_minus_y_broadcasted_tensors.py b/benchmarks/x_minus_y_broadcasted_tensors.py index c9b7b9077..bb88176f4 100644 --- a/benchmarks/x_minus_y_broadcasted_tensors.py +++ b/benchmarks/x_minus_y_broadcasted_tensors.py @@ -1,4 +1,4 @@ -# Unit Target: x - y (Broadcasted Tensors) +# bench: Unit Target: x - y (Broadcasted Tensors) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -18,14 +18,14 @@ def main(): for _ in range(32) ] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -38,15 +38,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_minus_y_tensor_and_scalar.py b/benchmarks/x_minus_y_tensor_and_scalar.py index 56f118d2d..480ee10ea 100644 --- a/benchmarks/x_minus_y_tensor_and_scalar.py +++ b/benchmarks/x_minus_y_tensor_and_scalar.py @@ -1,4 +1,4 @@ -# Unit Target: x - y (Tensor & Scalar) +# bench: Unit Target: x - y (Tensor & Scalar) import random @@ -17,14 +17,14 @@ def main(): inputset = [(np.random.randint(4, 8, size=(3,)), random.randint(0, 3)) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) 
- # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -37,15 +37,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_minus_y_tensors.py b/benchmarks/x_minus_y_tensors.py index f6f1ad44c..ab8892d21 100644 --- a/benchmarks/x_minus_y_tensors.py +++ b/benchmarks/x_minus_y_tensors.py @@ -1,4 +1,4 @@ -# Unit Target: x - y (Tensors) +# bench: Unit Target: x - y (Tensors) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -17,14 +17,14 @@ def main(): (np.random.randint(4, 8, size=(3,)), np.random.randint(0, 4, size=(3,))) for _ in range(32) ] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -37,15 +37,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_plus_1_2_3.py b/benchmarks/x_plus_1_2_3.py index 2292e3460..46a6f7742 100644 --- a/benchmarks/x_plus_1_2_3.py +++ b/benchmarks/x_plus_1_2_3.py @@ -1,4 +1,4 @@ -# Unit Target: x + [1, 2, 3] +# bench: Unit Target: x + [1, 2, 3] import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_plus_1_2_3_broadcasted.py b/benchmarks/x_plus_1_2_3_broadcasted.py index eb5e156ac..b1865cec6 100644 --- a/benchmarks/x_plus_1_2_3_broadcasted.py +++ b/benchmarks/x_plus_1_2_3_broadcasted.py @@ -1,4 +1,4 @@ -# Unit Target: x + [1, 2, 3] (Broadcasted) +# bench: Unit Target: x + [1, 2, 3] (Broadcasted) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 3, size=(2, 3)),) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time 
(ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_plus_42.py b/benchmarks/x_plus_42.py index 41b27b36a..9b28ddff3 100644 --- a/benchmarks/x_plus_42.py +++ b/benchmarks/x_plus_42.py @@ -1,4 +1,4 @@ -# Unit Target: x + 42 +# bench: Unit Target: x + 42 import random @@ -13,14 +13,14 @@ def main(): x = hnp.EncryptedScalar(hnp.UnsignedInteger(3)) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, [(i,) for i in range(2 ** 3)], compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -32,15 +32,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_plus_42_tensor.py b/benchmarks/x_plus_42_tensor.py index 9395926a1..2bbf7acc9 100644 --- a/benchmarks/x_plus_42_tensor.py +++ b/benchmarks/x_plus_42_tensor.py @@ -1,4 +1,4 @@ -# Unit Target: x + 42 (Tensor) +# bench: Unit Target: x + 42 (Tensor) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_plus_y.py b/benchmarks/x_plus_y.py index bdf944d92..e58f5f0d9 100644 --- a/benchmarks/x_plus_y.py +++ b/benchmarks/x_plus_y.py @@ -1,4 +1,4 @@ -# Unit Target: x + y +# bench: Unit Target: x + y import random @@ -14,14 +14,14 @@ def main(): x = hnp.EncryptedScalar(hnp.UnsignedInteger(3)) y = hnp.EncryptedScalar(hnp.UnsignedInteger(3)) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, [(random.randint(0, 7), 
random.randint(0, 7)) for _ in range(32)], compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -34,15 +34,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_plus_y_broadcasted_tensors.py b/benchmarks/x_plus_y_broadcasted_tensors.py index e3960e895..a917e3c52 100644 --- a/benchmarks/x_plus_y_broadcasted_tensors.py +++ b/benchmarks/x_plus_y_broadcasted_tensors.py @@ -1,4 +1,4 @@ -# Unit Target: x + y (Broadcasted Tensors) +# bench: Unit Target: x + y (Broadcasted Tensors) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -18,14 +18,14 @@ def main(): for _ in range(32) ] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -38,15 +38,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_plus_y_tensor_and_scalar.py b/benchmarks/x_plus_y_tensor_and_scalar.py index 8de52337c..fd108c0dd 100644 --- a/benchmarks/x_plus_y_tensor_and_scalar.py +++ b/benchmarks/x_plus_y_tensor_and_scalar.py @@ -1,4 +1,4 @@ -# Unit Target: x + y (Tensor & Scalar) +# bench: Unit Target: x + y (Tensor & Scalar) import random @@ -17,14 +17,14 @@ def main(): inputset = [(np.random.randint(0, 8, size=(3,)), random.randint(0, 7)) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -37,15 +37,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_plus_y_tensors.py b/benchmarks/x_plus_y_tensors.py index 79693d82f..6f9c1970d 100644 --- a/benchmarks/x_plus_y_tensors.py +++ b/benchmarks/x_plus_y_tensors.py @@ -1,4 +1,4 @@ -# Unit Target: x + y (Tensors) +# bench: Unit Target: x + y (Tensors) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -18,14 +18,14 @@ def main(): for _ in range(32) ] - # Measure: Compilation Time (ms) + # 
bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -38,15 +38,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_times_1_2_3.py b/benchmarks/x_times_1_2_3.py index c6bd26c7d..839509e62 100644 --- a/benchmarks/x_times_1_2_3.py +++ b/benchmarks/x_times_1_2_3.py @@ -1,4 +1,4 @@ -# Unit Target: x * [1, 2, 3] +# bench: Unit Target: x * [1, 2, 3] import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_times_1_2_3_broadcasted.py b/benchmarks/x_times_1_2_3_broadcasted.py index 41e2b25f7..fcd5299c0 100644 --- a/benchmarks/x_times_1_2_3_broadcasted.py +++ b/benchmarks/x_times_1_2_3_broadcasted.py @@ -1,4 +1,4 @@ -# Unit Target: x * [1, 2, 3] (Broadcasted) +# bench: Unit Target: x * [1, 2, 3] (Broadcasted) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 3, size=(2, 3)),) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_times_7.py b/benchmarks/x_times_7.py index f82454750..2c8ba90c1 100644 --- a/benchmarks/x_times_7.py +++ b/benchmarks/x_times_7.py @@ -1,4 +1,4 @@ -# Unit Target: x * 7 +# bench: Unit Target: x * 7 import random @@ -13,14 +13,14 @@ def main(): x = hnp.EncryptedScalar(hnp.UnsignedInteger(4)) - # Measure: Compilation Time (ms) + # 
bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, [(i,) for i in range(2 ** 4)], compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -32,15 +32,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_times_7_tensor.py b/benchmarks/x_times_7_tensor.py index f65cb6e92..ef0bd900b 100644 --- a/benchmarks/x_times_7_tensor.py +++ b/benchmarks/x_times_7_tensor.py @@ -1,4 +1,4 @@ -# Unit Target: x * 7 (Tensor) +# bench: Unit Target: x * 7 (Tensor) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -14,14 +14,14 @@ def main(): inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -33,15 +33,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_times_y.py b/benchmarks/x_times_y.py index fd8092ffc..b7c261cb3 100644 --- a/benchmarks/x_times_y.py +++ b/benchmarks/x_times_y.py @@ -1,4 +1,4 @@ -# Unit Target: x * y +# bench: Unit Target: x * y import itertools import random @@ -17,14 +17,14 @@ def main(): inputset = itertools.product(range(4, 8), range(0, 4)) - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -37,15 +37,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_times_y_broadcasted_tensors.py b/benchmarks/x_times_y_broadcasted_tensors.py index 18d50a74e..65ec15ce0 100644 --- a/benchmarks/x_times_y_broadcasted_tensors.py +++ b/benchmarks/x_times_y_broadcasted_tensors.py @@ -1,4 +1,4 @@ -# Unit Target: x * y (Broadcasted Tensors) +# bench: Unit Target: x * y (Broadcasted Tensors) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -18,14 +18,14 @@ def main(): for _ in range(32) ] - # Measure: 
Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -38,15 +38,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_times_y_tensor_and_scalar.py b/benchmarks/x_times_y_tensor_and_scalar.py index bd2cccaac..878bbae00 100644 --- a/benchmarks/x_times_y_tensor_and_scalar.py +++ b/benchmarks/x_times_y_tensor_and_scalar.py @@ -1,4 +1,4 @@ -# Unit Target: x * y (Tensor & Scalar) +# bench: Unit Target: x * y (Tensor & Scalar) import random @@ -17,14 +17,14 @@ def main(): inputset = [(np.random.randint(0, 8, size=(3,)), random.randint(0, 7)) for _ in range(32)] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -37,15 +37,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_times_y_tensors.py b/benchmarks/x_times_y_tensors.py index adefc6b4b..ca94ee518 100644 --- a/benchmarks/x_times_y_tensors.py +++ b/benchmarks/x_times_y_tensors.py @@ -1,4 +1,4 @@ -# Unit Target: x * y (Tensors) +# bench: Unit Target: x * y (Tensors) import numpy as np from common import BENCHMARK_CONFIGURATION @@ -18,14 +18,14 @@ def main(): for _ in range(32) ] - # Measure: Compilation Time (ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x, "y": y}, inputset, compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -38,15 +38,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/benchmarks/x_to_the_power_of_2.py b/benchmarks/x_to_the_power_of_2.py index 61a5f4577..863b3256f 100644 --- a/benchmarks/x_to_the_power_of_2.py +++ b/benchmarks/x_to_the_power_of_2.py @@ -1,4 +1,4 @@ -# Unit Target: x**2 +# bench: Unit Target: x**2 import random @@ -13,14 +13,14 @@ def main(): x = hnp.EncryptedScalar(hnp.UnsignedInteger(3)) - # Measure: Compilation Time 
(ms) + # bench: Measure: Compilation Time (ms) engine = hnp.compile_numpy_function( function_to_compile, {"x": x}, [(i,) for i in range(2 ** 3)], compilation_configuration=BENCHMARK_CONFIGURATION, ) - # Measure: End + # bench: Measure: End inputs = [] labels = [] @@ -32,15 +32,15 @@ def main(): correct = 0 for input_i, label_i in zip(inputs, labels): - # Measure: Evaluation Time (ms) + # bench: Measure: Evaluation Time (ms) result_i = engine.run(*input_i) - # Measure: End + # bench: Measure: End if result_i == label_i: correct += 1 - # Measure: Accuracy (%) = (correct / len(inputs)) * 100 - # Alert: Accuracy (%) != 100 + # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100 + # bench: Alert: Accuracy (%) != 100 if __name__ == "__main__": diff --git a/script/progress_tracker_utils/measure.py b/script/progress_tracker_utils/measure.py index 069d688fb..01e91aa47 100644 --- a/script/progress_tracker_utils/measure.py +++ b/script/progress_tracker_utils/measure.py @@ -31,7 +31,7 @@ def register_alert(script, index, line, metrics, alerts): """Parse line, check its correctness, add it to list of alerts if it's valid""" # Extract the alert details - alert_line = line.replace("# Alert:", "") + alert_line = line.replace("# bench: Alert:", "") # Parse the alert and append it to list of alerts supported_operators = ["==", "!=", "<=", ">=", "<", ">"] @@ -89,7 +89,7 @@ def identify_metrics_and_alerts(script, lines, metrics, alerts): line = line.strip() # Check whether the line is a special line or not - if line == "# Measure: End": + if line == "# bench: Measure: End": # Make sure a measurement is active already if not in_measurement: raise SyntaxError( @@ -107,7 +107,7 @@ def identify_metrics_and_alerts(script, lines, metrics, alerts): # Set in_measurement to false as the active measurement has ended in_measurement = False - elif line.startswith("# Measure:"): + elif line.startswith("# bench: Measure:"): # Make sure a measurement is not active already if in_measurement: raise SyntaxError( @@ -116,7 +116,7 @@ def identify_metrics_and_alerts(script, lines, metrics, alerts): ) # Extract the measurement details - measurement_details = line.replace("# Measure:", "").split("=") + measurement_details = line.replace("# bench: Measure:", "").split("=") # Extract metric name and id metric_label = measurement_details[0].strip() @@ -131,7 +131,7 @@ def identify_metrics_and_alerts(script, lines, metrics, alerts): in_measurement = True measurement_line = index + 1 measurement_indentation = indentation - elif line.startswith("# Alert:"): + elif line.startswith("# bench: Alert:"): register_alert(script, index, line, metrics, alerts) # Make sure there isn't an active measurement that hasn't finished @@ -164,20 +164,20 @@ def create_modified_script(script, lines, metrics): # Copy the lines of the original script into the new script for line in lines[1:]: # And modify special lines along the way - if line.strip() == "# Measure: End": + if line.strip() == "# bench: Measure: End": # Replace `# Measure: End` with # # _end_ = time.time() # _measurements_["id"].append((_end_ - _start_) * 1000) - index = line.find("# Measure: End") + index = line.find("# bench: Measure: End") line = line[:index] f.write(f"{line}_end_ = time.time()\n") value = "(_end_ - _start_) * 1000" line += f'_measurements_["{current_metric_id}"].append({value})\n' - elif line.strip().startswith("# Measure:"): + elif line.strip().startswith("# bench: Measure:"): # Replace `# Measure: ...` with # # _start_ = time.time() @@ -186,11 +186,11 @@ def 
create_modified_script(script, lines, metrics): # # _measurements_["id"].append(expression) - metric_details = line.replace("# Measure:", "").split("=") + metric_details = line.replace("# bench: Measure:", "").split("=") metric_label = metric_details[0].strip() metric_id = name_to_id(metric_label) - index = line.find("# Measure:") + index = line.find("# bench: Measure:") line = line[:index] if len(metric_details) == 1: @@ -321,13 +321,13 @@ def main(args): break # Check whether the script is a target or not - if first_line.startswith("# Unit Target:"): + if first_line.startswith("# bench: Unit Target:"): # Extract target name - target_name = first_line.replace("# Unit Target:", "").strip() + target_name = first_line.replace("# bench: Unit Target:", "").strip() is_unit = True - elif first_line.startswith("# Full Target:"): + elif first_line.startswith("# bench: Full Target:"): # Extract target name - target_name = first_line.replace("# Full Target:", "").strip() + target_name = first_line.replace("# bench: Full Target:", "").strip() is_unit = False else: print() @@ -337,7 +337,9 @@ def main(args): with tqdm.tqdm(total=samples) as pbar: pbar.write(" Sample 1") pbar.write(" --------") - pbar.write(" Skipped (doesn't have a `# Target:` directive)\n") + pbar.write( + " Skipped (doesn't have a `# bench: Unit/Full Target:` directive)\n" + ) pbar.update(samples) print()
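
For reference, the directive grammar parsed by script/progress_tracker_utils/measure.py after this change is:

  - First line: "# bench: Unit Target: <name>" or "# bench: Full Target: <name>"
  - Timed region: "# bench: Measure: <label>" (no "="), closed by "# bench: Measure: End"; by convention the label carries the unit, e.g. "Compilation Time (ms)"
  - Value metric: "# bench: Measure: <label> = <python expression>"
  - Alert: "# bench: Alert: <label> <op> <value>", where <op> is one of ==, !=, <=, >=, <, >

A minimal benchmark using the new prefix is sketched below. It mirrors benchmarks/x_plus_42.py from this patch with a different constant; note that the "import hnumpy as hnp" line is an assumption, since the hunks only show the `hnp.` call sites and never the import itself.

    # bench: Unit Target: x + 1

    import random

    import hnumpy as hnp  # assumed module name; the hunks only show the `hnp` alias
    from common import BENCHMARK_CONFIGURATION


    def main():
        def function_to_compile(x):
            return x + 1

        x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))

        # bench: Measure: Compilation Time (ms)
        engine = hnp.compile_numpy_function(
            function_to_compile,
            {"x": x},
            [(i,) for i in range(2 ** 3)],
            compilation_configuration=BENCHMARK_CONFIGURATION,
        )
        # bench: Measure: End

        inputs = []
        labels = []
        for _ in range(4):
            sample_x = random.randint(0, 2 ** 3 - 1)
            inputs.append((sample_x,))
            labels.append(function_to_compile(sample_x))

        correct = 0
        for input_i, label_i in zip(inputs, labels):
            # bench: Measure: Evaluation Time (ms)
            result_i = engine.run(*input_i)
            # bench: Measure: End

            if result_i == label_i:
                correct += 1

        # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
        # bench: Alert: Accuracy (%) != 100


    if __name__ == "__main__":
        main()

When measure.py processes such a script, create_modified_script rewrites each timed region into _start_ = time.time() / _end_ = time.time() pairs and appends (_end_ - _start_) * 1000 to _measurements_ under the metric's id, exactly as the hunks above show.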