mirror of https://github.com/zama-ai/concrete.git, synced 2026-02-09 03:55:04 -05:00
committed by Benoit Chevallier
parent f1d28c0fad
commit 6edba1e10c
benchmarks/c_concatenate_x.py (Normal file, 49 lines)
@@ -0,0 +1,49 @@
# bench: Unit Target: np.concatenate((c, x))

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    # Clear constant concatenated in front of the encrypted input.
    c = np.arange(20).reshape((4, 5))

    def function_to_compile(x):
        return np.concatenate((c, x))

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))

    inputset = [(np.random.randint(0, 2 ** 3, size=(4, 5)),) for _ in range(128)]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 5))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison; a bare `==` on arrays is ambiguous
        # in a boolean context.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
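For reference, the concatenation behavior this benchmark targets can be checked in plain NumPy, independent of the compiled engine (a standalone sketch, not part of the committed file):

# Plain-NumPy sanity check: concatenating the clear (4, 5) constant with a
# (4, 5) input along the default axis 0 yields an (8, 5) result.
import numpy as np

c = np.arange(20).reshape((4, 5))
x = np.random.randint(0, 2 ** 3, size=(4, 5))
assert np.concatenate((c, x)).shape == (8, 5)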
benchmarks/c_matmul_x.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: np.matmul(c, x)

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    # Clear constant on the left-hand side of the product.
    c = np.arange(20, 30).reshape((5, 2))

    def function_to_compile(x):
        return np.matmul(c, x)

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))

    # Each entry is a 1-tuple of arguments; note the trailing comma.
    inputset = [(np.random.randint(0, 2 ** 3, size=(2, 4)),) for _ in range(128)]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(2, 4))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison instead of a bare `==`.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
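Each inputset entry is a tuple of call arguments, hence the trailing comma in the single-input case above. The shapes the benchmark exercises, checked in plain NumPy (a standalone sketch):

# c is (5, 2) and x is (2, 4), so np.matmul(c, x) is (5, 4).
import numpy as np

c = np.arange(20, 30).reshape((5, 2))
x = np.random.randint(0, 2 ** 3, size=(2, 4))
assert np.matmul(c, x).shape == (5, 4)

# A single-argument inputset entry is a 1-tuple:
entry = (np.random.randint(0, 2 ** 3, size=(2, 4)),)
assert isinstance(entry, tuple) and len(entry) == 1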
benchmarks/x_concatenate_c.py (Normal file, 49 lines)
@@ -0,0 +1,49 @@
# bench: Unit Target: np.concatenate((x, c))

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    # Clear constant appended after the encrypted input.
    c = np.arange(20).reshape((4, 5))

    def function_to_compile(x):
        return np.concatenate((x, c))

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))

    inputset = [(np.random.randint(0, 2 ** 3, size=(4, 5)),) for _ in range(128)]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 5))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison instead of a bare `==`.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_concatenate_y.py (Normal file, 53 lines)
@@ -0,0 +1,53 @@
# bench: Unit Target: np.concatenate((x, y))

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x, y):
        return np.concatenate((x, y))

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))
    y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))

    inputset = [
        (np.random.randint(0, 2 ** 3, size=(4, 5)), np.random.randint(0, 2 ** 3, size=(4, 5)))
        for _ in range(128)
    ]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(4, 5))
        sample_y = np.random.randint(0, 2 ** 3, size=(4, 5))

        inputs.append([sample_x, sample_y])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison instead of a bare `==`.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_matmul_c.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: np.matmul(x, c)

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    # Clear constant on the right-hand side of the product.
    c = np.arange(20).reshape((4, 5))

    def function_to_compile(x):
        return np.matmul(x, c)

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))

    # Each entry is a 1-tuple of arguments; note the trailing comma.
    inputset = [(np.random.randint(0, 2 ** 3, size=(2, 4)),) for _ in range(128)]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(2, 4))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison instead of a bare `==`.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_matmul_y.py (Normal file, 53 lines)
@@ -0,0 +1,53 @@
# bench: Unit Target: np.matmul(x, y)

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x, y):
        return np.matmul(x, y)

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))
    y = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(4, 5))

    inputset = [
        (np.random.randint(0, 2 ** 3, size=(2, 4)), np.random.randint(0, 2 ** 3, size=(4, 5)))
        for _ in range(128)
    ]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(2, 4))
        sample_y = np.random.randint(0, 2 ** 3, size=(4, 5))

        inputs.append([sample_x, sample_y])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x, "y": y},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison instead of a bare `==`.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
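A worst-case bound on the values this benchmark produces, worked out in plain Python: each element of the (2, 4) by (4, 5) product is a sum of 4 products of 3-bit values, so it needs at most 8 bits.

# 4 * 7 * 7 = 196 < 2 ** 8, so every output element fits in 8 bits.
import math

inner_dim = 4
max_val = 2 ** 3 - 1
worst_case = inner_dim * max_val * max_val
assert worst_case == 196
assert math.ceil(math.log2(worst_case + 1)) == 8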
benchmarks/x_negative.py (Normal file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: np.negative(x)

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return np.negative(x)

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(10, 6))

    inputset = [(np.random.randint(0, 2 ** 3, size=(10, 6)),) for _ in range(128)]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(10, 6))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison instead of a bare `==`.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
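Note that np.negative maps the unsigned 3-bit inputs into [-7, 0], so the reference labels are signed even though the declared input type is unsigned; a plain-NumPy illustration:

import numpy as np

sample = np.random.randint(0, 2 ** 3, size=(10, 6))
negated = np.negative(sample)
assert negated.min() >= -(2 ** 3 - 1) and negated.max() <= 0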
@@ -11,13 +11,13 @@ def main():
     def function_to_compile(x):
         return x + 42
 
-    x = hnp.EncryptedScalar(hnp.UnsignedInteger(3))
+    x = hnp.EncryptedScalar(hnp.UnsignedInteger(10))
 
     # bench: Measure: Compilation Time (ms)
     engine = hnp.compile_numpy_function(
         function_to_compile,
         {"x": x},
-        [(i,) for i in range(2 ** 3)],
+        [(i,) for i in range(2 ** 10)],
         compilation_configuration=BENCHMARK_CONFIGURATION,
     )
     # bench: Measure: End
@@ -25,7 +25,7 @@ def main():
     inputs = []
     labels = []
     for _ in range(4):
-        sample_x = random.randint(0, 2 ** 3 - 1)
+        sample_x = random.randint(0, 2 ** 10 - 1)
 
         inputs.append([sample_x])
         labels.append(function_to_compile(*inputs[-1]))
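The per-width variants added below cap their inputs at 2 ** max_precision - 1 - 42 so that x + 42 still fits the declared bit width; a quick arithmetic check of that bound:

# With x <= 2 ** p - 1 - 42, the result x + 42 never exceeds 2 ** p - 1.
for p in (8, 9, 10, 11, 12, 13, 14, 15, 16, 32):
    max_x = 2 ** p - 1 - 42
    assert max_x + 42 == 2 ** p - 1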
benchmarks/x_plus_42_10b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (10b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 10

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(i,) for i in range(2 ** max_precision - 42)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_11b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (11b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 11

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_12b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (12b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 12

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_13b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (13b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 13

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_14b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (14b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 14

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_15b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (15b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 15

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_16b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (16b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 16

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_32b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (32b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 32

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(random.randint(0, 2 ** max_precision - 1 - 42),) for _ in range(128)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_8b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (8b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 8

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(i,) for i in range(2 ** max_precision - 42)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x_plus_42_9b.py (Normal file, 50 lines)
@@ -0,0 +1,50 @@
# bench: Unit Target: x + 42 (9b)

import random

from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    max_precision = 9

    def function_to_compile(x):
        return x + 42

    x = hnp.EncryptedScalar(hnp.UnsignedInteger(max_precision))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        [(i,) for i in range(2 ** max_precision - 42)],
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = random.randint(0, 2 ** max_precision - 1 - 42)

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
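The 8b, 9b, and 10b variants enumerate the full valid input range for compilation, while 11b and up draw 128 random samples, presumably to keep the inputset size manageable as the range grows; the sizes involved:

# Exhaustive inputset sizes grow as 2 ** p - 42.
for p in (8, 9, 10, 11, 16, 32):
    print(p, 2 ** p - 42)  # 214, 470, 982, 2006, 65494, 4294967254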
benchmarks/x_reshape.py (Normal file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: np.reshape(x, some_shape)

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return np.reshape(x, (15, 4))

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(10, 6))

    inputset = [(np.random.randint(0, 2 ** 3, size=(10, 6)),) for _ in range(128)]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(10, 6))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison instead of a bare `==`.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
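np.reshape only rearranges how the 60 elements are viewed; both shapes used by the benchmark hold the same data, as a plain-NumPy check confirms:

import numpy as np

x = np.random.randint(0, 2 ** 3, size=(10, 6))
reshaped = np.reshape(x, (15, 4))
assert x.size == reshaped.size == 60
assert np.array_equal(reshaped.ravel(), x.ravel())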
benchmarks/x_tranpose.py (Normal file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: np.transpose(x)

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return np.transpose(x)

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(2, 4))

    inputset = [(np.random.randint(0, 2 ** 3, size=(2, 4)),) for _ in range(128)]

    inputs = []
    labels = []
    for _ in range(4):
        sample_x = np.random.randint(0, 2 ** 3, size=(2, 4))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        # Element-wise tensor comparison instead of a bare `==`.
        if np.array_equal(result_i, label_i):
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
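And the corresponding plain-NumPy behavior for the transpose benchmark, as a standalone sketch:

import numpy as np

x = np.random.randint(0, 2 ** 3, size=(2, 4))
assert np.transpose(x).shape == (4, 2)
assert np.transpose(x)[1, 0] == x[0, 1]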