refactor(benchmarks): use 'bench' header prefix for all benchmark directives

Umut
2021-10-14 17:04:34 +03:00
parent 95c48a419c
commit 67f50fb8ce
31 changed files with 231 additions and 229 deletions
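
For reference, a minimal sketch of what a benchmark script looks like after this change, assembled from the hunks below. Every `# bench:` directive line is taken verbatim from the diffs; the hnp import line, the sample count, and the input-generation loop are not visible in the hunks and are shown here only as illustrative assumptions.

# bench: Unit Target: x + 42
import random

import concrete.numpy as hnp  # assumed import path; the actual import is not shown in the hunks
from common import BENCHMARK_CONFIGURATION


def main():
    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(i,) for i in range(2 ** 3)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):  # illustrative sample count; the real loop is not shown in the hunks
        sample_x = random.randint(0, 2 ** 3 - 1)
        inputs.append([sample_x])
        labels.append(function_to_compile(sample_x))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End
        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()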


@@ -1,4 +1,4 @@
# Unit Target: 124 - x
# bench: Unit Target: 124 - x
import random
@@ -13,14 +13,14 @@ def main():
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[(i,) for i in range(2 ** 3)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -32,15 +32,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: 124 - x (Tensor)
# bench: Unit Target: 124 - x (Tensor)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 6, size=(3,)),) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Full Target: Linear Regression
# bench: Full Target: Linear Regression
# Disable line length warnings as we have a looooong metric...
# flake8: noqa: E501
@@ -172,14 +172,14 @@ def main():
for x_i in x_q:
inputset.append((int(x_i[0]),))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x_0": hnp.EncryptedScalar(hnp.UnsignedInteger(input_bits))},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
non_homomorphic_loss = 0
homomorphic_loss = 0
@@ -198,9 +198,9 @@ def main():
)
.dequantize()[0]
)
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
homomorphic_prediction = QuantizedArray(engine.run(*x_i), y_parameters).dequantize()
# Measure: End
# bench: Measure: End
non_homomorphic_loss += (non_homomorphic_prediction - y_i) ** 2
homomorphic_loss += (homomorphic_prediction - y_i) ** 2
@@ -222,10 +222,10 @@ def main():
print(f"Homomorphic Loss: {homomorphic_loss:.4f}")
print(f"Relative Difference Percentage: {difference:.2f}%")
# Measure: Non Homomorphic Loss = non_homomorphic_loss
# Measure: Homomorphic Loss = homomorphic_loss
# Measure: Relative Loss Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference
# Alert: Relative Loss Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2
# bench: Measure: Non Homomorphic Loss = non_homomorphic_loss
# bench: Measure: Homomorphic Loss = homomorphic_loss
# bench: Measure: Relative Loss Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference
# bench: Alert: Relative Loss Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Full Target: Logistic Regression
# bench: Full Target: Logistic Regression
# Disable line length warnings as we have a looooong metric...
# flake8: noqa: E501
@@ -244,7 +244,7 @@ def main():
for x_i in x_q:
inputset.append((int(x_i[0]), int(x_i[1])))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{
@@ -254,7 +254,7 @@ def main():
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
non_homomorphic_correct = 0
homomorphic_correct = 0
@@ -273,9 +273,9 @@ def main():
)
).dequantize()[0]
)
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
homomorphic_prediction = round(QuantizedArray(engine.run(*x_i), y_parameters).dequantize())
# Measure: End
# bench: Measure: End
if non_homomorphic_prediction == y_i:
non_homomorphic_correct += 1
@@ -299,10 +299,10 @@ def main():
print(f"Homomorphic Accuracy: {homomorphic_accuracy:.4f}")
print(f"Difference Percentage: {difference:.2f}%")
# Measure: Non Homomorphic Accuracy = non_homomorphic_accuracy
# Measure: Homomorphic Accuracy = homomorphic_accuracy
# Measure: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference
# Alert: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2
# bench: Measure: Non Homomorphic Accuracy = non_homomorphic_accuracy
# bench: Measure: Homomorphic Accuracy = homomorphic_accuracy
# bench: Measure: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) = difference
# bench: Alert: Accuracy Difference Between Homomorphic and Non Homomorphic Implementation (%) > 2
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: Single Table Lookup
# bench: Unit Target: Single Table Lookup
import random
@@ -18,14 +18,14 @@ def main():
x = hnp.EncryptedScalar(hnp.UnsignedInteger(input_bits))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[(i,) for i in range(2 ** input_bits)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -37,15 +37,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x - [1, 2, 3]
# bench: Unit Target: x - [1, 2, 3]
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 2, size=(3,)) + np.array([1, 2, 3]),) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x - [1, 2, 3] (Broadcasted)
# bench: Unit Target: x - [1, 2, 3] (Broadcasted)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -16,14 +16,14 @@ def main():
(np.random.randint(0, 2 ** 2, size=(2, 3)) + np.array([1, 2, 3]),) for _ in range(32)
]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -35,15 +35,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x - 24
# bench: Unit Target: x - 24
import random
@@ -13,14 +13,14 @@ def main():
x = hnp.EncryptedScalar(hnp.UnsignedInteger(6))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[(i,) for i in range(24, 2 ** 6)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -32,15 +32,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x - 24 (Tensor)
# bench: Unit Target: x - 24 (Tensor)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 5, size=(3,)) + 24,) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x - y
# bench: Unit Target: x - y
import itertools
import random
@@ -17,14 +17,14 @@ def main():
inputset = itertools.product(range(4, 8), range(0, 4))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -37,15 +37,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x - y (Broadcasted Tensors)
# bench: Unit Target: x - y (Broadcasted Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -18,14 +18,14 @@ def main():
for _ in range(32)
]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -38,15 +38,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x - y (Tensor & Scalar)
# bench: Unit Target: x - y (Tensor & Scalar)
import random
@@ -17,14 +17,14 @@ def main():
inputset = [(np.random.randint(4, 8, size=(3,)), random.randint(0, 3)) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -37,15 +37,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x - y (Tensors)
# bench: Unit Target: x - y (Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -17,14 +17,14 @@ def main():
(np.random.randint(4, 8, size=(3,)), np.random.randint(0, 4, size=(3,))) for _ in range(32)
]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -37,15 +37,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x + [1, 2, 3]
# bench: Unit Target: x + [1, 2, 3]
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x + [1, 2, 3] (Broadcasted)
# bench: Unit Target: x + [1, 2, 3] (Broadcasted)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 3, size=(2, 3)),) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x + 42
# bench: Unit Target: x + 42
import random
@@ -13,14 +13,14 @@ def main():
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[(i,) for i in range(2 ** 3)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -32,15 +32,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x + 42 (Tensor)
# bench: Unit Target: x + 42 (Tensor)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x + y
# bench: Unit Target: x + y
import random
@@ -14,14 +14,14 @@ def main():
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
y = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
[(random.randint(0, 7), random.randint(0, 7)) for _ in range(32)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -34,15 +34,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x + y (Broadcasted Tensors)
# bench: Unit Target: x + y (Broadcasted Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -18,14 +18,14 @@ def main():
for _ in range(32)
]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -38,15 +38,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x + y (Tensor & Scalar)
# bench: Unit Target: x + y (Tensor & Scalar)
import random
@@ -17,14 +17,14 @@ def main():
inputset = [(np.random.randint(0, 8, size=(3,)), random.randint(0, 7)) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -37,15 +37,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x + y (Tensors)
# bench: Unit Target: x + y (Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -18,14 +18,14 @@ def main():
for _ in range(32)
]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -38,15 +38,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x * [1, 2, 3]
# bench: Unit Target: x * [1, 2, 3]
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x * [1, 2, 3] (Broadcasted)
# bench: Unit Target: x * [1, 2, 3] (Broadcasted)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 3, size=(2, 3)),) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x * 7
# bench: Unit Target: x * 7
import random
@@ -13,14 +13,14 @@ def main():
x = hnp.EncryptedScalar(hnp.UnsignedInteger(4))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[(i,) for i in range(2 ** 4)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -32,15 +32,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x * 7 (Tensor)
# bench: Unit Target: x * 7 (Tensor)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -14,14 +14,14 @@ def main():
inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -33,15 +33,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x * y
# bench: Unit Target: x * y
import itertools
import random
@@ -17,14 +17,14 @@ def main():
inputset = itertools.product(range(4, 8), range(0, 4))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -37,15 +37,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x * y (Broadcasted Tensors)
# bench: Unit Target: x * y (Broadcasted Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -18,14 +18,14 @@ def main():
for _ in range(32)
]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -38,15 +38,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x * y (Tensor & Scalar)
# bench: Unit Target: x * y (Tensor & Scalar)
import random
@@ -17,14 +17,14 @@ def main():
inputset = [(np.random.randint(0, 8, size=(3,)), random.randint(0, 7)) for _ in range(32)]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -37,15 +37,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x * y (Tensors)
# bench: Unit Target: x * y (Tensors)
import numpy as np
from common import BENCHMARK_CONFIGURATION
@@ -18,14 +18,14 @@ def main():
for _ in range(32)
]
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x, "y": y},
inputset,
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -38,15 +38,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -1,4 +1,4 @@
# Unit Target: x**2
# bench: Unit Target: x**2
import random
@@ -13,14 +13,14 @@ def main():
x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
# Measure: Compilation Time (ms)
# bench: Measure: Compilation Time (ms)
engine = hnp.compile_numpy_function(
function_to_compile,
{"x": x},
[(i,) for i in range(2 ** 3)],
compilation_configuration=BENCHMARK_CONFIGURATION,
)
# Measure: End
# bench: Measure: End
inputs = []
labels = []
@@ -32,15 +32,15 @@ def main():
correct = 0
for input_i, label_i in zip(inputs, labels):
# Measure: Evaluation Time (ms)
# bench: Measure: Evaluation Time (ms)
result_i = engine.run(*input_i)
# Measure: End
# bench: Measure: End
if result_i == label_i:
correct += 1
# Measure: Accuracy (%) = (correct / len(inputs)) * 100
# Alert: Accuracy (%) != 100
# bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# bench: Alert: Accuracy (%) != 100
if __name__ == "__main__":


@@ -31,7 +31,7 @@ def register_alert(script, index, line, metrics, alerts):
"""Parse line, check its correctness, add it to list of alerts if it's valid"""
# Extract the alert details
alert_line = line.replace("# Alert:", "")
alert_line = line.replace("# bench: Alert:", "")
# Parse the alert and append it to list of alerts
supported_operators = ["==", "!=", "<=", ">=", "<", ">"]
@@ -89,7 +89,7 @@ def identify_metrics_and_alerts(script, lines, metrics, alerts):
line = line.strip()
# Check whether the line is a special line or not
if line == "# Measure: End":
if line == "# bench: Measure: End":
# Make sure a measurement is active already
if not in_measurement:
raise SyntaxError(
@@ -107,7 +107,7 @@ def identify_metrics_and_alerts(script, lines, metrics, alerts):
# Set in_measurement to false as the active measurement has ended
in_measurement = False
elif line.startswith("# Measure:"):
elif line.startswith("# bench: Measure:"):
# Make sure a measurement is not active already
if in_measurement:
raise SyntaxError(
@@ -116,7 +116,7 @@ def identify_metrics_and_alerts(script, lines, metrics, alerts):
)
# Extract the measurement details
measurement_details = line.replace("# Measure:", "").split("=")
measurement_details = line.replace("# bench: Measure:", "").split("=")
# Extract metric name and id
metric_label = measurement_details[0].strip()
@@ -131,7 +131,7 @@ def identify_metrics_and_alerts(script, lines, metrics, alerts):
in_measurement = True
measurement_line = index + 1
measurement_indentation = indentation
elif line.startswith("# Alert:"):
elif line.startswith("# bench: Alert:"):
register_alert(script, index, line, metrics, alerts)
# Make sure there isn't an active measurement that hasn't finished
@@ -164,20 +164,20 @@ def create_modified_script(script, lines, metrics):
# Copy the lines of the original script into the new script
for line in lines[1:]:
# And modify special lines along the way
if line.strip() == "# Measure: End":
if line.strip() == "# bench: Measure: End":
# Replace `# Measure: End` with
#
# _end_ = time.time()
# _measurements_["id"].append((_end_ - _start_) * 1000)
index = line.find("# Measure: End")
index = line.find("# bench: Measure: End")
line = line[:index]
f.write(f"{line}_end_ = time.time()\n")
value = "(_end_ - _start_) * 1000"
line += f'_measurements_["{current_metric_id}"].append({value})\n'
elif line.strip().startswith("# Measure:"):
elif line.strip().startswith("# bench: Measure:"):
# Replace `# Measure: ...` with
#
# _start_ = time.time()
@@ -186,11 +186,11 @@ def create_modified_script(script, lines, metrics):
#
# _measurements_["id"].append(expression)
metric_details = line.replace("# Measure:", "").split("=")
metric_details = line.replace("# bench: Measure:", "").split("=")
metric_label = metric_details[0].strip()
metric_id = name_to_id(metric_label)
index = line.find("# Measure:")
index = line.find("# bench: Measure:")
line = line[:index]
if len(metric_details) == 1:
@@ -321,13 +321,13 @@ def main(args):
break
# Check whether the script is a target or not
if first_line.startswith("# Unit Target:"):
if first_line.startswith("# bench: Unit Target:"):
# Extract target name
target_name = first_line.replace("# Unit Target:", "").strip()
target_name = first_line.replace("# bench: Unit Target:", "").strip()
is_unit = True
elif first_line.startswith("# Full Target:"):
elif first_line.startswith("# bench: Full Target:"):
# Extract target name
target_name = first_line.replace("# Full Target:", "").strip()
target_name = first_line.replace("# bench: Full Target:", "").strip()
is_unit = False
else:
print()
@@ -337,7 +337,9 @@ def main(args):
with tqdm.tqdm(total=samples) as pbar:
pbar.write(" Sample 1")
pbar.write(" --------")
pbar.write(" Skipped (doesn't have a `# Target:` directive)\n")
pbar.write(
" Skipped (doesn't have a `# bench: Unit/Full Target:` directive)\n"
)
pbar.update(samples)
print()
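
The comments in `create_modified_script` above describe the rewrite concretely. As a rough, self-contained sketch of what the generated script ends up executing: the metric ids "evaluation_time_ms" and "accuracy" are assumed stand-ins for whatever `name_to_id` produces, the placeholder computation stands in for `engine.run`, and the defaultdict initialization of `_measurements_` is an assumption, since only the append calls are visible in the hunks.

import time
from collections import defaultdict

# The generated script appends into a _measurements_ mapping keyed by metric id.
_measurements_ = defaultdict(list)

# A timed block, i.e. what
#     # bench: Measure: Evaluation Time (ms)
#     result_i = engine.run(*input_i)
#     # bench: Measure: End
# is rewritten into:
_start_ = time.time()
result_i = sum(range(1000))  # placeholder for engine.run(*input_i)
_end_ = time.time()
_measurements_["evaluation_time_ms"].append((_end_ - _start_) * 1000)  # metric id assumed

# An expression metric, i.e.
#     # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
# is rewritten into a direct append of the right-hand side:
correct, inputs = 4, [0, 1, 2, 3]
_measurements_["accuracy"].append((correct / len(inputs)) * 100)  # metric id assumed

# A line such as "# bench: Alert: Accuracy (%) != 100" is not turned into timing
# code; it is handled by register_alert, which parses the comparison using one of
# the supported operators (==, !=, <=, >=, <, >).

print(dict(_measurements_))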