Mirror of https://github.com/zama-ai/concrete.git, synced 2026-02-09 20:25:34 -05:00
feat(benchmarks): add constant indexing benchmarks
This commit adds nine benchmark scripts, one per constant-indexing pattern. Each follows the same template: compile the indexing function, then evaluate it on 100 random samples while measuring compilation time, evaluation time, and accuracy.
benchmarks/x-index-0-and-0.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[0, 0]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[0, 0]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3, 2))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3, 2)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3, 2))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
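The `# bench:` comments are directives for the repository's benchmark runner, which is not part of this diff. As a rough illustration of how such markers can be picked up from a script, here is a minimal sketch; the parser name and regex are hypothetical, not the runner's actual implementation:

import re

# Hypothetical directive parser -- the real runner that consumes these
# markers lives elsewhere in the repository and is not shown in this diff.
BENCH_DIRECTIVE = re.compile(
    r"#\s*bench:\s*(?P<kind>Unit Target|Measure|Alert):\s*(?P<body>.+)"
)

def extract_directives(source: str):
    """Yield (kind, body) pairs for every `# bench:` marker in a script."""
    for line in source.splitlines():
        match = BENCH_DIRECTIVE.search(line)
        if match:
            yield match.group("kind"), match.group("body").strip()

script = """
# bench: Unit Target: x[0, 0]
# bench: Measure: Compilation Time (ms)
# bench: Measure: End
# bench: Alert: Accuracy (%) != 100
"""
print(list(extract_directives(script)))
# [('Unit Target', 'x[0, 0]'), ('Measure', 'Compilation Time (ms)'),
#  ('Measure', 'End'), ('Alert', 'Accuracy (%) != 100')]

The remaining eight files differ from the one above only in the indexing expression and, where the expression needs two dimensions, the tensor shape.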
benchmarks/x-index-0.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[0]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[0]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3,))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x-index-1.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[1]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[1]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3,))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x-index-colon-2.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[:2]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[:2]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3,))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if np.array_equal(result_i, label_i):  # slice output is an array; compare elementwise
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
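Unlike the point-indexing targets above, `x[:2]` and the remaining slicing targets below return a tensor rather than a scalar. A slice comparison with `==` broadcasts to a boolean array, and `if` on that array raises "truth value of an array is ambiguous", so tensor results need an elementwise check such as `np.array_equal`. A quick cleartext illustration:

import numpy as np

x = np.array([3, 5, 7])

# A point index yields a scalar, so `==` gives a single bool:
print(x[1] == 5)  # True

# A slice yields an array; `==` broadcasts, and `if` on the result would
# raise -- hence the elementwise comparison:
print(np.array_equal(x[:2], np.array([3, 5])))  # True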
benchmarks/x-index-colon-and-1.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[:, 1]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[:, 1]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3, 2))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3, 2)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3, 2))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if np.array_equal(result_i, label_i):  # slice output is an array; compare elementwise
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x-index-colon.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[:]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[:]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3,))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if np.array_equal(result_i, label_i):  # slice output is an array; compare elementwise
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x-index-minus-1.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[-1]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[-1]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3,))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if result_i == label_i:
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x-index-one-colon.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[1:]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[1:]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3,))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if np.array_equal(result_i, label_i):  # slice output is an array; compare elementwise
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
benchmarks/x-reversed.py (new file, 48 lines)
@@ -0,0 +1,48 @@
# bench: Unit Target: x[::-1]

import numpy as np
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp


def main():
    def function_to_compile(x):
        return x[::-1]

    x = hnp.EncryptedTensor(hnp.UnsignedInteger(3), shape=(3,))

    inputset = [(np.random.randint(0, 2 ** 3, size=(3,)),) for _ in range(32)]

    # bench: Measure: Compilation Time (ms)
    engine = hnp.compile_numpy_function(
        function_to_compile,
        {"x": x},
        inputset,
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )
    # bench: Measure: End

    inputs = []
    labels = []
    for _ in range(100):
        sample_x = np.random.randint(0, 2 ** 3, size=(3,))

        inputs.append([sample_x])
        labels.append(function_to_compile(*inputs[-1]))

    correct = 0
    for input_i, label_i in zip(inputs, labels):
        # bench: Measure: Evaluation Time (ms)
        result_i = engine.run(*input_i)
        # bench: Measure: End

        if np.array_equal(result_i, label_i):  # slice output is an array; compare elementwise
            correct += 1

    # bench: Measure: Accuracy (%) = (correct / len(inputs)) * 100
    # bench: Alert: Accuracy (%) != 100


if __name__ == "__main__":
    main()
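Taken together, the nine files benchmark the constant-indexing patterns below. Their cleartext semantics on small numpy arrays, shown here purely for illustration (the benchmarks run the same expressions on encrypted tensors):

import numpy as np

x1 = np.arange(3)                # stands in for the shape-(3,) tensors
x2 = np.arange(6).reshape(3, 2)  # stands in for the shape-(3, 2) tensors

targets = {
    "x[0, 0]": x2[0, 0],
    "x[0]": x1[0],
    "x[1]": x1[1],
    "x[:2]": x1[:2],
    "x[:, 1]": x2[:, 1],
    "x[:]": x1[:],
    "x[-1]": x1[-1],
    "x[1:]": x1[1:],
    "x[::-1]": x1[::-1],
}
for expression, value in targets.items():
    print(f"{expression:>8} -> {value!r}")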