Mirror of https://github.com/zama-ai/concrete.git, synced 2026-02-08 11:35:02 -05:00
feat(compiler/tests): rounded tests, add rounded tlu + tensorized
@@ -1,113 +1,143 @@
import argparse
from functools import reduce

import numpy as np

from end_to_end_linalg_leveled_gen import P_ERROR


def min_max(from_p, signed):
    if signed:
        min_value = -(2 ** (from_p - 1))
        max_value = abs(min_value) - 1
    else:
        min_value = 0
        max_value = 2 ** from_p - 1
    return min_value, max_value
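
# Quick reference (illustrative, not part of the original file): min_max(4, False)
# returns (0, 15) and min_max(4, True) returns (-8, 7).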


def round(initial_val, p_start, p_end, signed=False):
    assert p_start > p_end
    val = initial_val
    p_delta = p_start - p_end
    # Bit that decides whether truncation rounds up (half of the dropped range).
    carry_mask = 1 << (p_delta - 1)
    # Reference result: add half of the dropped range, then truncate.
    new_val = val + 2 ** (p_delta - 1)
    if val & carry_mask != 0:
        val += carry_mask << 1
    assert val >> p_delta == new_val >> p_delta, f"{val} {new_val}"
    output = val >> p_delta
    if signed:
        # Rounding the maximum signed value can spill into the padding bit;
        # wrap it around to the signed minimum (two's-complement behavior).
        if output >= (1 << (p_end - 1)):
            output = -output
    min_value, max_value = min_max(p_end, signed)
    if not (min_value <= output <= max_value):
        print(f"# warning: overflow on padding for input initial:{initial_val} -> rounded:{val} -> reduced:{output}")
    return output
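
# Worked example (illustrative, not part of the original file): rounding from 8 bits
# down to 4 bits drops p_delta = 4 bits, so half of the dropped range is 8.
# round(100, 8, 4) == 6 because (100 + 8) >> 4 == 6, while round(104, 8, 4) == 7
# because bit 3 of 104 is set and (104 + 16) >> 4 == 7.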


def yaml_test(from_p, to_p, input_vals, output_vals, signed, with_tlu=False, with_shape=None):
    int_type = "esint" if signed else "eint"
    tlu_type = f"!FHE.{int_type}<{to_p}>"
    acc_type = f"!FHE.{int_type}<{from_p}>"
    if with_shape:
        shape_mlir = 'x'.join(map(str, with_shape))
        tlu_type = f"tensor<{shape_mlir} x {tlu_type}>"
        acc_type = f"tensor<{shape_mlir} x {acc_type}>"
    else:
        shape_mlir = "scalar"
    linalg = "Linalg" if with_shape else ""
    full_name = (
        f"{'signed' if signed else 'unsigned'}"
        f"_round_{'tlu' if with_tlu else 'alone'}"
        f"_{'tensorized_' if with_shape else ''}{shape_mlir}"
    )
    signed_yaml = "true" if signed else "false"
    print(f"""description: {full_name}_{from_p}to{to_p}bits""")
    if with_tlu:
        min_value, max_value = min_max(to_p, signed)
        # Identity table in two's-complement index order: non-negative entries first, then negatives.
        tlu = list(range(max_value + 1)) + list(range(min_value, 0))
        tlu_const_type = f"tensor<{len(tlu)} x i64>"
        print(f"""\
program: |
  func.func @main(%arg0: {acc_type}) -> {acc_type} {{
    %1 = \"FHE{linalg}.round\"(%arg0) : ({acc_type}) -> {tlu_type}
    %tlu = arith.constant dense<{tlu}> : {tlu_const_type}
    %2 = \"FHE{linalg}.apply_lookup_table\"(%1, %tlu) : ({tlu_type}, {tlu_const_type}) -> {acc_type}
    return %2: {acc_type}
  }}
""")
    else:
        print(f"""\
program: |
  func.func @main(%arg0: {acc_type}) -> {tlu_type} {{
    %1 = \"FHE{linalg}.round\"(%arg0) : ({acc_type}) -> {tlu_type}
    return %1: {tlu_type}
  }}
""")
    print(f"""\
p-error: {P_ERROR}
tests:""")
    _, acc_max_value = min_max(from_p, signed)
    tlu_min_value, _ = min_max(to_p, signed)
    for input_val, output_val in zip(input_vals, output_vals):
        if input_val == acc_max_value and with_tlu:
            # Rounding the maximum accumulator value sets the padding bit, so the TLU
            # output wraps; adjust the expected value (see the comments printed into the YAML).
            print("  # tlu with padding = 1")
            print("  # output is max_value + 1 <=> min_value + padding")
            if signed:
                # max_value + 1 <=> min_value + negacyclic
                output_val = -tlu_min_value
            else:
                output_val = 0
        if with_shape:
            # Broadcast the scalar case across the requested shape.
            flat_size = reduce(lambda x, y: x * y, with_shape)
            input_vals = [input_val] * flat_size
            output_vals = [output_val] * flat_size
            print(f"""\
  - inputs:
    - tensor: {input_vals}
      shape: {with_shape}
      signed: {signed_yaml}
    outputs:
    - tensor: {output_vals}
      shape: {with_shape}
      signed: {signed_yaml}
""")
        else:
            print(f"""\
  - inputs:
    - scalar: {input_val}
      signed: {signed_yaml}
    outputs:
    - scalar: {output_val}
      signed: {signed_yaml}
""")
    print("---")
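
# Sketch of the expected output (illustrative values, not taken from the original file):
# yaml_test(8, 4, [0], [0], False) should print a case whose description is
# "unsigned_round_alone_scalar_8to4bits" and whose program contains
#   %1 = "FHE.round"(%arg0) : (!FHE.eint<8>) -> !FHE.eint<4>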


def generate(args):
    print("# /!\\ DO NOT EDIT THIS FILE MANUALLY")
    print("# /!\\ THIS FILE HAS BEEN GENERATED")
    np.random.seed(0)
    # unsigned_unsigned
    for from_p in args.bitwidth:
        for to_p in range(2, from_p):
            max_value = (2 ** from_p) - 1

            # scalar
            print(f"description: unsigned_round_{from_p}to{to_p}bits")
            print("program: |")
            print(f"  func.func @main(%arg0: !FHE.eint<{from_p}>) -> !FHE.eint<{to_p}> {{")
            print(f"    %1 = \"FHE.round\"(%arg0) : (!FHE.eint<{from_p}>) -> !FHE.eint<{to_p}>")
            print(f"    return %1: !FHE.eint<{to_p}>")
            print("  }")
            print(f"p-error: {P_ERROR}")
            print("tests:")
            for i in range(8):
                val = np.random.randint(max_value)
                print("  - inputs:")
                print(f"    - scalar: {val}")
                print("    outputs:")
                print(f"    - scalar: {round(val, from_p, to_p)}")
            print("---")

            # tensor
            print(f"description: unsigned_round_2x2_{from_p}to{to_p}bits")
            print("program: |")
            print(f"  func.func @main(%arg0: tensor<2x2x!FHE.eint<{from_p}>>) -> tensor<2x2x!FHE.eint<{to_p}>> {{")
            print(f"    %1 = \"FHELinalg.round\"(%arg0) : (tensor<2x2x!FHE.eint<{from_p}>>) -> tensor<2x2x!FHE.eint<{to_p}>>")
            print(f"    return %1: tensor<2x2x!FHE.eint<{to_p}>>")
            print("  }")
            print(f"p-error: {P_ERROR}")
            print("tests:")
            for i in range(8):
                sample = [np.random.randint(max_value) for _ in range(2 * 2)]
                print("  - inputs:")
                print(f"    - tensor: {sample}")
                print("      shape: [2, 2]")
                print("    outputs:")
                print(f"    - tensor: {[round(value, from_p, to_p) for value in sample]}")
                print("      shape: [2, 2]")
            print("---")

    # signed_signed
    for from_p in args.bitwidth:
        for to_p in range(2, from_p):
            min_value = -(2 ** (from_p - 1))
            max_value = abs(min_value) - 1

            # scalar
            print(f"description: signed_round_from_{from_p}to{to_p}bits")
            print("program: |")
            print(f"  func.func @main(%arg0: !FHE.esint<{from_p}>) -> !FHE.esint<{to_p}> {{")
            print(f"    %1 = \"FHE.round\"(%arg0) : (!FHE.esint<{from_p}>) -> !FHE.esint<{to_p}>")
            print(f"    return %1: !FHE.esint<{to_p}>")
            print("  }")
            print(f"p-error: {P_ERROR}")
            print("tests:")
            for i in range(8):
                val = np.random.randint(min_value, max_value)
                print("  - inputs:")
                print(f"    - scalar: {val}")
                print("      signed: true")
                print("    outputs:")
                print(f"    - scalar: {round(val, from_p, to_p, True)}")
                print("      signed: true")
            print("---")

            # tensor
            print(f"description: signed_round_2x2_{from_p}to{to_p}bits")
            print("program: |")
            print(f"  func.func @main(%arg0: tensor<2x2x!FHE.esint<{from_p}>>) -> tensor<2x2x!FHE.esint<{to_p}>> {{")
            print(f"    %1 = \"FHELinalg.round\"(%arg0) : (tensor<2x2x!FHE.esint<{from_p}>>) -> tensor<2x2x!FHE.esint<{to_p}>>")
            print(f"    return %1: tensor<2x2x!FHE.esint<{to_p}>>")
            print("  }")
            print(f"p-error: {P_ERROR}")
            print("tests:")
            for i in range(8):
                sample = [np.random.randint(min_value, max_value) for _ in range(2 * 2)]
                print("  - inputs:")
                print(f"    - tensor: {sample}")
                print("      shape: [2, 2]")
                print("      signed: true")
                print("    outputs:")
                print(f"    - tensor: {[round(value, from_p, to_p, True) for value in sample]}")
                print("      shape: [2, 2]")
                print("      signed: true")
            print("---")

    domain = [
        (from_p, to_p, signed, with_tlu, with_shape)
        for from_p in args.acc_bitwidth
        for to_p in args.bitwidth
        for signed in (False, True)
        for with_tlu in (False, True)
        for with_shape in (None, [3], [2, 3], [1, 2, 3])
        if to_p < from_p
    ]
    for (from_p, to_p, signed, with_tlu, with_shape) in domain:
        min_value, max_value = min_max(from_p, signed)
        if with_shape:
            input_vals = list({min_value, 0, max_value})
        else:
            input_vals = [min_value, 0, max_value]
        output_vals = [
            round(val, from_p, to_p, signed)
            for val in input_vals
        ]
        yaml_test(from_p, to_p, input_vals, output_vals, signed, with_tlu=with_tlu, with_shape=with_shape)
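
# Rough size estimate (illustrative, not from the original file): with the argparse defaults
# further down ([1, 4, 6, 8] for --bitwidth and [8, 13, 16] for --acc-bitwidth), the domain
# comprehension above should enumerate 11 (from_p, to_p) pairs x 2 signedness x 2 TLU
# variants x 4 shapes = 176 yaml_test calls, on top of the 8 random scalar and 8 random
# 2x2 tensor cases emitted per (from_p, to_p) pair in each of the unsigned and signed loops.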

if __name__ == "__main__":
    CLI = argparse.ArgumentParser()
@@ -116,12 +146,19 @@ if __name__ == "__main__":
        help="Specify the list of bitwidths to generate",
        nargs="+",
        type=int,
        default=[1, 4, 6, 8],
    )
    CLI.add_argument(
        "--acc-bitwidth",
        help="Specify the list of accumulator bitwidths to generate",
        nargs="+",
        type=int,
        default=[8, 13, 16],
    )
    CLI.add_argument(
        "--minimal",
        help="Specify whether to generate minimal tests only",
        type=bool,
        default=False,
    )
    generate(CLI.parse_args())
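
# Typical invocation (assumed; the script's filename is not shown in this diff), redirecting
# the printed YAML to a file whose name is only an example:
#   python <this_generator>.py --bitwidth 4 6 --acc-bitwidth 8 13 > end_to_end_round.yaml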