feat(benchmarks): add dynamic indexing benchmarks

This commit is contained in:
Umut
2021-11-04 16:50:06 +03:00
parent 8e8e777c3d
commit 39ad5bd894
16 changed files with 928 additions and 0 deletions

View File

@@ -0,0 +1,55 @@
# bench: Unit Target: x[:, y] (Clear)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[:, y]` with a clear scalar index `y`."""

    def function_to_compile(x, y):
        return x[:, y]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))
    y = hnp.ClearScalar(hnp.UnsignedInteger(2))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (np.random.randint(0, 2 ** 3, size=(2, 4)), random.randint(0, (2 ** 2) - 1))
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(2, 4))
        sample_y = random.randint(0, (2 ** 2) - 1)

        inputs.append([sample_x, sample_y])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,55 @@
# bench: Unit Target: x[:, y] (Encrypted)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[:, y]` with an encrypted scalar index `y`."""

    def function_to_compile(x, y):
        return x[:, y]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))
    y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (np.random.randint(0, 2 ** 3, size=(2, 4)), random.randint(0, (2 ** 2) - 1))
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(2, 4))
        sample_y = random.randint(0, (2 ** 2) - 1)

        inputs.append([sample_x, sample_y])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,61 @@
# bench: Unit Target: x[::-1, y, z] (Clear)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[::-1, y, z]` with clear scalar indices."""

    def function_to_compile(x, y, z):
        return x[::-1, y, z]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(5, 4, 2))
    y = hnp.ClearScalar(hnp.UnsignedInteger(2))
    z = hnp.ClearScalar(hnp.UnsignedInteger(1))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (
            np.random.randint(0, 2 ** 3, size=(5, 4, 2)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        )
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y, "z": z},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(5, 4, 2))
        sample_y = random.randint(0, (2 ** 2) - 1)
        sample_z = random.randint(0, (2 ** 1) - 1)

        inputs.append([sample_x, sample_y, sample_z])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,61 @@
# bench: Unit Target: x[::-1, y, z] (Encrypted)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[::-1, y, z]` with encrypted scalar indices."""

    def function_to_compile(x, y, z):
        return x[::-1, y, z]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(5, 4, 2))
    y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
    z = hnp.EncryptedScalar(hnp.UnsignedInteger(1))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (
            np.random.randint(0, 2 ** 3, size=(5, 4, 2)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        )
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y, "z": z},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(5, 4, 2))
        sample_y = random.randint(0, (2 ** 2) - 1)
        sample_z = random.randint(0, (2 ** 1) - 1)

        inputs.append([sample_x, sample_y, sample_z])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,55 @@
# bench: Unit Target: x[y, 1:] (Clear)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, 1:]` with a clear scalar index `y`."""

    def function_to_compile(x, y):
        return x[y, 1:]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
    y = hnp.ClearScalar(hnp.UnsignedInteger(2))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (np.random.randint(0, 2 ** 3, size=(4, 5)), random.randint(0, (2 ** 2) - 1))
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 5))
        sample_y = random.randint(0, (2 ** 2) - 1)

        inputs.append([sample_x, sample_y])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,55 @@
# bench: Unit Target: x[y, 1:] (Encrypted)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, 1:]` with an encrypted scalar index `y`."""

    def function_to_compile(x, y):
        return x[y, 1:]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
    y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (np.random.randint(0, 2 ** 3, size=(4, 5)), random.randint(0, (2 ** 2) - 1))
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 5))
        sample_y = random.randint(0, (2 ** 2) - 1)

        inputs.append([sample_x, sample_y])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,61 @@
# bench: Unit Target: x[y, :, z] (Clear)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, :, z]` with clear scalar indices."""

    def function_to_compile(x, y, z):
        return x[y, :, z]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5, 2))
    y = hnp.ClearScalar(hnp.UnsignedInteger(2))
    z = hnp.ClearScalar(hnp.UnsignedInteger(1))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (
            np.random.randint(0, 2 ** 3, size=(4, 5, 2)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        )
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y, "z": z},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 5, 2))
        sample_y = random.randint(0, (2 ** 2) - 1)
        sample_z = random.randint(0, (2 ** 1) - 1)

        inputs.append([sample_x, sample_y, sample_z])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,61 @@
# bench: Unit Target: x[y, :, z] (Encrypted)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, :, z]` with encrypted scalar indices."""

    def function_to_compile(x, y, z):
        return x[y, :, z]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5, 2))
    y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
    z = hnp.EncryptedScalar(hnp.UnsignedInteger(1))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (
            np.random.randint(0, 2 ** 3, size=(4, 5, 2)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        )
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y, "z": z},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 5, 2))
        sample_y = random.randint(0, (2 ** 2) - 1)
        sample_z = random.randint(0, (2 ** 1) - 1)

        inputs.append([sample_x, sample_y, sample_z])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,55 @@
# bench: Unit Target: x[y, :] (Clear)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, :]` with a clear scalar index `y`."""

    def function_to_compile(x, y):
        return x[y, :]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2))
    y = hnp.ClearScalar(hnp.UnsignedInteger(2))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (np.random.randint(0, 2 ** 3, size=(4, 2)), random.randint(0, (2 ** 2) - 1))
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 2))
        sample_y = random.randint(0, (2 ** 2) - 1)

        inputs.append([sample_x, sample_y])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,55 @@
# bench: Unit Target: x[y, :] (Encrypted)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, :]` with an encrypted scalar index `y`."""

    def function_to_compile(x, y):
        return x[y, :]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2))
    y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (np.random.randint(0, 2 ** 3, size=(4, 2)), random.randint(0, (2 ** 2) - 1))
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 2))
        sample_y = random.randint(0, (2 ** 2) - 1)

        inputs.append([sample_x, sample_y])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # The result is a tensor; a bare `==` would be an element-wise
        # comparison whose truth value is ambiguous, so compare with
        # np.array_equal instead.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,61 @@
# bench: Unit Target: x[y, z, 0] (Clear)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, z, 0]` with clear scalar indices."""

    def function_to_compile(x, y, z):
        return x[y, z, 0]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2, 5))
    y = hnp.ClearScalar(hnp.UnsignedInteger(2))
    z = hnp.ClearScalar(hnp.UnsignedInteger(1))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (
            np.random.randint(0, 2 ** 3, size=(4, 2, 5)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        )
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y, "z": z},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    # Generate evaluation samples along with their expected outputs.
    inputs = []
    labels = []
    for _ in range(100):
        sample = [
            np.random.randint(0, 2 ** 3, size=(4, 2, 5)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        ]
        inputs.append(sample)
        labels.append(function_to_compile(*sample))

    correct = 0
    for sample, expected in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        actual = engine.run(*sample)
        # bench: Measure: End

        if actual == expected:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,61 @@
# bench: Unit Target: x[y, z, 0] (Encrypted)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, z, 0]` with encrypted scalar indices."""

    def function_to_compile(x, y, z):
        return x[y, z, 0]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2, 5))
    y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
    z = hnp.EncryptedScalar(hnp.UnsignedInteger(1))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (
            np.random.randint(0, 2 ** 3, size=(4, 2, 5)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        )
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y, "z": z},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    # Generate evaluation samples along with their expected outputs.
    inputs = []
    labels = []
    for _ in range(100):
        sample = [
            np.random.randint(0, 2 ** 3, size=(4, 2, 5)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        ]
        inputs.append(sample)
        labels.append(function_to_compile(*sample))

    correct = 0
    for sample, expected in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        actual = engine.run(*sample)
        # bench: Measure: End

        if actual == expected:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,61 @@
# bench: Unit Target: x[y, z] (Clear)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, z]` with clear scalar indices."""

    def function_to_compile(x, y, z):
        return x[y, z]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2))
    y = hnp.ClearScalar(hnp.UnsignedInteger(2))
    z = hnp.ClearScalar(hnp.UnsignedInteger(1))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (
            np.random.randint(0, 2 ** 3, size=(4, 2)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        )
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y, "z": z},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    # Generate evaluation samples along with their expected outputs.
    inputs = []
    labels = []
    for _ in range(100):
        sample = [
            np.random.randint(0, 2 ** 3, size=(4, 2)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        ]
        inputs.append(sample)
        labels.append(function_to_compile(*sample))

    correct = 0
    for sample, expected in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        actual = engine.run(*sample)
        # bench: Measure: End

        if actual == expected:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,61 @@
# bench: Unit Target: x[y, z] (Encrypted)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y, z]` with encrypted scalar indices."""

    def function_to_compile(x, y, z):
        return x[y, z]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 2))
    y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))
    z = hnp.EncryptedScalar(hnp.UnsignedInteger(1))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (
            np.random.randint(0, 2 ** 3, size=(4, 2)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        )
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y, "z": z},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    # Generate evaluation samples along with their expected outputs.
    inputs = []
    labels = []
    for _ in range(100):
        sample = [
            np.random.randint(0, 2 ** 3, size=(4, 2)),
            random.randint(0, (2 ** 2) - 1),
            random.randint(0, (2 ** 1) - 1),
        ]
        inputs.append(sample)
        labels.append(function_to_compile(*sample))

    correct = 0
    for sample, expected in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        actual = engine.run(*sample)
        # bench: Measure: End

        if actual == expected:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,55 @@
# bench: Unit Target: x[y] (Clear)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y]` with a clear scalar index `y`."""

    def function_to_compile(x, y):
        return x[y]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4,))
    y = hnp.ClearScalar(hnp.UnsignedInteger(2))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (np.random.randint(0, 2 ** 3, size=(4,)), random.randint(0, (2 ** 2) - 1))
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    # Generate evaluation samples along with their expected outputs.
    inputs = []
    labels = []
    for _ in range(100):
        sample = [
            np.random.randint(0, 2 ** 3, size=(4,)),
            random.randint(0, (2 ** 2) - 1),
        ]
        inputs.append(sample)
        labels.append(function_to_compile(*sample))

    correct = 0
    for sample, expected in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        actual = engine.run(*sample)
        # bench: Measure: End

        if actual == expected:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,55 @@
# bench: Unit Target: x[y] (Encrypted)

import random

import numpy as np

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    """Benchmark dynamic indexing `x[y]` with an encrypted scalar index `y`."""

    def function_to_compile(x, y):
        return x[y]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4,))
    y = hnp.EncryptedScalar(hnp.UnsignedInteger(2))

    # Random samples used to calibrate/trace the circuit.
    inputset = [
        (np.random.randint(0, 2 ** 3, size=(4,)), random.randint(0, (2 ** 2) - 1))
        for _ in range(32)
    ]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    # Generate evaluation samples along with their expected outputs.
    inputs = []
    labels = []
    for _ in range(100):
        sample = [
            np.random.randint(0, 2 ** 3, size=(4,)),
            random.randint(0, (2 ** 2) - 1),
        ]
        inputs.append(sample)
        labels.append(function_to_compile(*sample))

    correct = 0
    for sample, expected in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        actual = engine.run(*sample)
        # bench: Measure: End

        if actual == expected:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()