Mirror of https://github.com/zama-ai/concrete.git (synced 2026-02-08 11:35:02 -05:00)
feat: support signed integers in python bindings
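This commit extends the compiler's Python bindings so that a `LambdaArgument` can carry signed scalars and signed tensors (`from_signed_scalar`, `from_tensor_i8/i16/i32/i64`, `is_signed`, `get_signed_scalar`, `get_signed_tensor_data`), and so that `ClientSupport`/`ClientParameters` use the input/output sign information to pick the right entry point. The sketch below is illustrative only and is not part of the commit; it assumes the `LambdaArgument` wrapper shown in this diff is importable from `concrete.compiler`.

```python
# Illustrative sketch only (not part of the commit). Assumes the LambdaArgument
# wrapper from this diff is exposed as concrete.compiler.LambdaArgument.
import numpy as np
from concrete.compiler import LambdaArgument

# Signed scalar round trip through the new entry points.
scalar_arg = LambdaArgument.from_signed_scalar(-5)
assert scalar_arg.is_scalar() and scalar_arg.is_signed()
assert scalar_arg.get_signed_scalar() == -5

# Signed tensor: data is passed flattened, together with its shape.
values = np.array([[-5, 3], [7, -1]], dtype=np.int8)
tensor_arg = LambdaArgument.from_tensor_i8(
    values.flatten().tolist(), list(values.shape)
)
assert tensor_arg.is_tensor() and tensor_arg.is_signed()
restored = np.array(
    tensor_arg.get_signed_tensor_data(), dtype=np.int64
).reshape(tensor_arg.get_tensor_shape())
```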
@@ -160,20 +160,36 @@ MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU32(
    std::vector<uint32_t> data, std::vector<int64_t> dimensions);
MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU64(
    std::vector<uint64_t> data, std::vector<int64_t> dimensions);
MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorI8(
    std::vector<int8_t> data, std::vector<int64_t> dimensions);
MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorI16(
    std::vector<int16_t> data, std::vector<int64_t> dimensions);
MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorI32(
    std::vector<int32_t> data, std::vector<int64_t> dimensions);
MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorI64(
    std::vector<int64_t> data, std::vector<int64_t> dimensions);
/// Create a lambdaArgument from a scalar
MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromScalar(uint64_t scalar);
MLIR_CAPI_EXPORTED lambdaArgument
lambdaArgumentFromSignedScalar(int64_t scalar);
/// Check if a lambdaArgument holds a tensor
MLIR_CAPI_EXPORTED bool lambdaArgumentIsTensor(lambdaArgument &lambda_arg);
/// Get tensor data from lambdaArgument
MLIR_CAPI_EXPORTED std::vector<uint64_t>
lambdaArgumentGetTensorData(lambdaArgument &lambda_arg);
MLIR_CAPI_EXPORTED std::vector<int64_t>
lambdaArgumentGetSignedTensorData(lambdaArgument &lambda_arg);
/// Get tensor dimensions from lambdaArgument
MLIR_CAPI_EXPORTED std::vector<int64_t>
lambdaArgumentGetTensorDimensions(lambdaArgument &lambda_arg);
/// Check if a lambdaArgument holds a scalar
MLIR_CAPI_EXPORTED bool lambdaArgumentIsScalar(lambdaArgument &lambda_arg);
/// Check if a lambdaArgument holds a signed value
MLIR_CAPI_EXPORTED bool lambdaArgumentIsSigned(lambdaArgument &lambda_arg);
/// Get scalar value from lambdaArgument
MLIR_CAPI_EXPORTED uint64_t lambdaArgumentGetScalar(lambdaArgument &lambda_arg);
MLIR_CAPI_EXPORTED int64_t
lambdaArgumentGetSignedScalar(lambdaArgument &lambda_arg);

/// Compile the textual representation of MLIR modules to a library.
MLIR_CAPI_EXPORTED std::string library(std::string libraryPath,
@@ -247,6 +247,19 @@ void mlir::concretelang::python::populateCompilerAPISubmodule(
             }
           }
           return result;
         })
      .def("input_signs",
           [](mlir::concretelang::ClientParameters &clientParameters) {
             std::vector<bool> result;
             for (auto input : clientParameters.inputs) {
               if (input.encryption.hasValue()) {
                 result.push_back(
                     input.encryption.getValue().encoding.isSigned);
               } else {
                 result.push_back(true);
               }
             }
             return result;
           });

  pybind11::class_<clientlib::KeySet>(m, "KeySet")
@@ -284,23 +297,40 @@ void mlir::concretelang::python::populateCompilerAPISubmodule(
      });

  pybind11::class_<lambdaArgument>(m, "LambdaArgument")
      .def_static("from_tensor_8",
      .def_static("from_tensor_u8",
                  [](std::vector<uint8_t> tensor, std::vector<int64_t> dims) {
                    return lambdaArgumentFromTensorU8(tensor, dims);
                  })
      .def_static("from_tensor_16",
      .def_static("from_tensor_u16",
                  [](std::vector<uint16_t> tensor, std::vector<int64_t> dims) {
                    return lambdaArgumentFromTensorU16(tensor, dims);
                  })
      .def_static("from_tensor_32",
      .def_static("from_tensor_u32",
                  [](std::vector<uint32_t> tensor, std::vector<int64_t> dims) {
                    return lambdaArgumentFromTensorU32(tensor, dims);
                  })
      .def_static("from_tensor_64",
      .def_static("from_tensor_u64",
                  [](std::vector<uint64_t> tensor, std::vector<int64_t> dims) {
                    return lambdaArgumentFromTensorU64(tensor, dims);
                  })
      .def_static("from_tensor_i8",
                  [](std::vector<int8_t> tensor, std::vector<int64_t> dims) {
                    return lambdaArgumentFromTensorI8(tensor, dims);
                  })
      .def_static("from_tensor_i16",
                  [](std::vector<int16_t> tensor, std::vector<int64_t> dims) {
                    return lambdaArgumentFromTensorI16(tensor, dims);
                  })
      .def_static("from_tensor_i32",
                  [](std::vector<int32_t> tensor, std::vector<int64_t> dims) {
                    return lambdaArgumentFromTensorI32(tensor, dims);
                  })
      .def_static("from_tensor_i64",
                  [](std::vector<int64_t> tensor, std::vector<int64_t> dims) {
                    return lambdaArgumentFromTensorI64(tensor, dims);
                  })
      .def_static("from_scalar", lambdaArgumentFromScalar)
      .def_static("from_signed_scalar", lambdaArgumentFromSignedScalar)
      .def("is_tensor",
           [](lambdaArgument &lambda_arg) {
             return lambdaArgumentIsTensor(lambda_arg);
@@ -309,6 +339,10 @@ void mlir::concretelang::python::populateCompilerAPISubmodule(
           [](lambdaArgument &lambda_arg) {
             return lambdaArgumentGetTensorData(lambda_arg);
           })
      .def("get_signed_tensor_data",
           [](lambdaArgument &lambda_arg) {
             return lambdaArgumentGetSignedTensorData(lambda_arg);
           })
      .def("get_tensor_shape",
           [](lambdaArgument &lambda_arg) {
             return lambdaArgumentGetTensorDimensions(lambda_arg);
@@ -317,7 +351,15 @@ void mlir::concretelang::python::populateCompilerAPISubmodule(
           [](lambdaArgument &lambda_arg) {
             return lambdaArgumentIsScalar(lambda_arg);
           })
      .def("get_scalar", [](lambdaArgument &lambda_arg) {
        return lambdaArgumentGetScalar(lambda_arg);
      .def("is_signed",
           [](lambdaArgument &lambda_arg) {
             return lambdaArgumentIsSigned(lambda_arg);
           })
      .def("get_scalar",
           [](lambdaArgument &lambda_arg) {
             return lambdaArgumentGetScalar(lambda_arg);
           })
      .def("get_signed_scalar", [](lambdaArgument &lambda_arg) {
        return lambdaArgumentGetSignedScalar(lambda_arg);
      });
}
@@ -286,11 +286,19 @@ MLIR_CAPI_EXPORTED bool lambdaArgumentIsTensor(lambdaArgument &lambda_arg) {
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<uint32_t>>>() ||
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<uint64_t>>>();
             mlir::concretelang::IntLambdaArgument<uint64_t>>>() ||
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<int8_t>>>() ||
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<int16_t>>>() ||
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<int32_t>>>() ||
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<int64_t>>>();
}

template <typename T>
MLIR_CAPI_EXPORTED std::vector<uint64_t> copyTensorLambdaArgumentTo64bitsvector(
template <typename T, typename R>
MLIR_CAPI_EXPORTED std::vector<R> copyTensorLambdaArgumentTo64bitsvector(
    mlir::concretelang::TensorLambdaArgument<
        mlir::concretelang::IntLambdaArgument<T>> *tensor) {
  auto numElements = tensor->getNumElements();
@@ -301,7 +309,7 @@ MLIR_CAPI_EXPORTED std::vector<uint64_t> copyTensorLambdaArgumentTo64bitsvector(
       << llvm::toString(std::move(numElements.takeError()));
    throw std::runtime_error(os.str());
  }
  std::vector<uint64_t> res;
  std::vector<R> res;
  res.reserve(*numElements);
  T *data = tensor->getValue();
  for (size_t i = 0; i < *numElements; i++) {
@@ -329,17 +337,52 @@ lambdaArgumentGetTensorData(lambdaArgument &lambda_arg) {
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint8_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector(arg);
    return copyTensorLambdaArgumentTo64bitsvector<uint8_t, uint64_t>(arg);
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint16_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector(arg);
    return copyTensorLambdaArgumentTo64bitsvector<uint16_t, uint64_t>(arg);
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint32_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector(arg);
    return copyTensorLambdaArgumentTo64bitsvector<uint32_t, uint64_t>(arg);
  }
  throw std::invalid_argument(
      "LambdaArgument isn't a tensor or has an unsupported bitwidth");
}

MLIR_CAPI_EXPORTED std::vector<int64_t>
lambdaArgumentGetSignedTensorData(lambdaArgument &lambda_arg) {
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<int64_t>>>()) {
    llvm::Expected<size_t> sizeOrErr = arg->getNumElements();
    if (!sizeOrErr) {
      std::string backingString;
      llvm::raw_string_ostream os(backingString);
      os << "Couldn't get size of tensor: "
         << llvm::toString(sizeOrErr.takeError());
      throw std::runtime_error(os.str());
    }
    std::vector<int64_t> data(arg->getValue(), arg->getValue() + *sizeOrErr);
    return data;
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<int8_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector<int8_t, int64_t>(arg);
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<int16_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector<int16_t, int64_t>(arg);
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<int32_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector<int32_t, int64_t>(arg);
  }
  throw std::invalid_argument(
      "LambdaArgument isn't a tensor or has an unsupported bitwidth");
@@ -367,13 +410,52 @@ lambdaArgumentGetTensorDimensions(lambdaArgument &lambda_arg) {
              mlir::concretelang::IntLambdaArgument<uint64_t>>>()) {
    return arg->getDimensions();
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<int8_t>>>()) {
    return arg->getDimensions();
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<int16_t>>>()) {
    return arg->getDimensions();
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<int32_t>>>()) {
    return arg->getDimensions();
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<int64_t>>>()) {
    return arg->getDimensions();
  }
  throw std::invalid_argument(
      "LambdaArgument isn't a tensor, should "
      "be a TensorLambdaArgument<IntLambdaArgument<uint64_t>>");
      "be a TensorLambdaArgument<IntLambdaArgument<(u)int{8,16,32,64}_t>>");
}

MLIR_CAPI_EXPORTED bool lambdaArgumentIsScalar(lambdaArgument &lambda_arg) {
  return lambda_arg.ptr->isa<mlir::concretelang::IntLambdaArgument<uint64_t>>();
  auto ptr = lambda_arg.ptr;
  return ptr->isa<mlir::concretelang::IntLambdaArgument<uint64_t>>() ||
         ptr->isa<mlir::concretelang::IntLambdaArgument<int64_t>>();
}

MLIR_CAPI_EXPORTED bool lambdaArgumentIsSigned(lambdaArgument &lambda_arg) {
  auto ptr = lambda_arg.ptr;
  return ptr->isa<mlir::concretelang::IntLambdaArgument<int8_t>>() ||
         ptr->isa<mlir::concretelang::IntLambdaArgument<int16_t>>() ||
         ptr->isa<mlir::concretelang::IntLambdaArgument<int32_t>>() ||
         ptr->isa<mlir::concretelang::IntLambdaArgument<int64_t>>() ||
         ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<int8_t>>>() ||
         ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<int16_t>>>() ||
         ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<int32_t>>>() ||
         ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<int64_t>>>();
  ;
}

MLIR_CAPI_EXPORTED uint64_t
@@ -388,6 +470,18 @@ lambdaArgumentGetScalar(lambdaArgument &lambda_arg) {
  return arg->getValue();
}

MLIR_CAPI_EXPORTED int64_t
lambdaArgumentGetSignedScalar(lambdaArgument &lambda_arg) {
  mlir::concretelang::IntLambdaArgument<int64_t> *arg =
      lambda_arg.ptr
          ->dyn_cast<mlir::concretelang::IntLambdaArgument<int64_t>>();
  if (arg == nullptr) {
    throw std::invalid_argument("LambdaArgument isn't a scalar, should "
                                "be an IntLambdaArgument<int64_t>");
  }
  return arg->getValue();
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU8(
    std::vector<uint8_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
@@ -396,6 +490,14 @@ MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU8(
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorI8(
    std::vector<int8_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
      std::make_shared<mlir::concretelang::TensorLambdaArgument<
          mlir::concretelang::IntLambdaArgument<int8_t>>>(data, dimensions)};
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU16(
    std::vector<uint16_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
@@ -404,6 +506,14 @@ MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU16(
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorI16(
    std::vector<int16_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
      std::make_shared<mlir::concretelang::TensorLambdaArgument<
          mlir::concretelang::IntLambdaArgument<int16_t>>>(data, dimensions)};
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU32(
    std::vector<uint32_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
@@ -412,6 +522,14 @@ MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU32(
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorI32(
    std::vector<int32_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
      std::make_shared<mlir::concretelang::TensorLambdaArgument<
          mlir::concretelang::IntLambdaArgument<int32_t>>>(data, dimensions)};
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU64(
    std::vector<uint64_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
@@ -420,9 +538,24 @@ MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU64(
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorI64(
    std::vector<int64_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
      std::make_shared<mlir::concretelang::TensorLambdaArgument<
          mlir::concretelang::IntLambdaArgument<int64_t>>>(data, dimensions)};
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromScalar(uint64_t scalar) {
  lambdaArgument scalar_arg{
      std::make_shared<mlir::concretelang::IntLambdaArgument<uint64_t>>(
          scalar)};
  return scalar_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument
lambdaArgumentFromSignedScalar(int64_t scalar) {
  lambdaArgument scalar_arg{
      std::make_shared<mlir::concretelang::IntLambdaArgument<int64_t>>(scalar)};
  return scalar_arg;
}
@@ -37,6 +37,14 @@ class ClientParameters(WrapperCpp):
            )
        super().__init__(client_parameters)

    def input_signs(self) -> List[bool]:
        """Return the sign information of inputs.

        Returns:
            List[bool]: list of booleans to indicate whether the inputs are signed or not
        """
        return self.cpp().input_signs()

    def output_signs(self) -> List[bool]:
        """Return the sign information of outputs.
@@ -112,7 +112,17 @@ class ClientSupport(WrapperCpp):
            )
        if not isinstance(keyset, KeySet):
            raise TypeError(f"keyset must be of type KeySet, not {type(keyset)}")
        lambda_arguments = [ClientSupport._create_lambda_argument(arg) for arg in args]

        signs = client_parameters.input_signs()
        if len(signs) != len(args):
            raise RuntimeError(
                f"function has arity {len(signs)} but is applied to too many arguments"
            )

        lambda_arguments = [
            ClientSupport._create_lambda_argument(arg, signed)
            for arg, signed in zip(args, signs)
        ]
        return PublicArguments.wrap(
            _ClientSupport.encrypt_arguments(
                client_parameters.cpp(),
@@ -155,24 +165,31 @@ class ClientSupport(WrapperCpp):
        output_signs = client_parameters.output_signs()
        assert len(output_signs) == 1

        is_signed = output_signs[0]
        is_signed = lambda_arg.is_signed()
        if lambda_arg.is_scalar():
            result = lambda_arg.get_scalar()
            return (
                result if not is_signed else int(np.array([result]).astype(np.int64)[0])
                lambda_arg.get_signed_scalar() if is_signed else lambda_arg.get_scalar()
            )

        if lambda_arg.is_tensor():
            shape = lambda_arg.get_tensor_shape()
            tensor = np.array(lambda_arg.get_tensor_data()).reshape(shape)
            return tensor if not is_signed else tensor.astype(np.int64)
            return np.array(
                lambda_arg.get_signed_tensor_data()
                if is_signed
                else lambda_arg.get_tensor_data(),
                dtype=(np.int64 if is_signed else np.uint64),
            ).reshape(lambda_arg.get_tensor_shape())

        raise RuntimeError("unknown return type")

    @staticmethod
    def _create_lambda_argument(value: Union[int, np.ndarray]) -> LambdaArgument:
    def _create_lambda_argument(
        value: Union[int, np.ndarray], signed: bool
    ) -> LambdaArgument:
        """Create a lambda argument holding either an int or tensor value.

        Args:
            value (Union[int, numpy.array]): value of the argument, either an int, or a numpy array
            signed (bool): whether the value is signed

        Raises:
            TypeError: if the values aren't in the expected range, or using a wrong type
@@ -180,6 +197,9 @@ class ClientSupport(WrapperCpp):
        Returns:
            LambdaArgument: lambda argument holding the appropriate value
        """

        # pylint: disable=too-many-return-statements,too-many-branches

        if not isinstance(value, ACCEPTED_TYPES):
            raise TypeError(
                "value of lambda argument must be either int, numpy.array or numpy.(u)int{8,16,32,64}"
@@ -192,8 +212,8 @@ class ClientSupport(WrapperCpp):
                raise TypeError(
                    "single integer must be in the range [-2**63, 2**64 - 1]"
                )
            if value < 0:
                value = int(np.int64(value).astype(np.uint64))
            if signed:
                return LambdaArgument.from_signed_scalar(value)
            return LambdaArgument.from_scalar(value)
        assert isinstance(value, np.ndarray)
        if value.dtype not in ACCEPTED_NUMPY_UINTS:
@@ -203,21 +223,39 @@ class ClientSupport(WrapperCpp):
            # extract the single element
            value = value.max()
            # should be a single uint here
            if signed:
                return LambdaArgument.from_signed_scalar(value)
            return LambdaArgument.from_scalar(value)
        if value.dtype in [np.uint8, np.int8]:
            return LambdaArgument.from_tensor_8(
                value.astype(np.uint8).flatten().tolist(), value.shape
        if value.dtype == np.uint8:
            return LambdaArgument.from_tensor_u8(
                value.flatten().tolist(), list(value.shape)
            )
        if value.dtype in [np.uint16, np.int16]:
            return LambdaArgument.from_tensor_16(
                value.astype(np.uint16).flatten().tolist(), value.shape
        if value.dtype == np.uint16:
            return LambdaArgument.from_tensor_u16(
                value.flatten().tolist(), list(value.shape)
            )
        if value.dtype in [np.uint32, np.int32]:
            return LambdaArgument.from_tensor_32(
                value.astype(np.uint32).flatten().tolist(), value.shape
        if value.dtype == np.uint32:
            return LambdaArgument.from_tensor_u32(
                value.flatten().tolist(), list(value.shape)
            )
        if value.dtype in [np.uint64, np.int64]:
            return LambdaArgument.from_tensor_64(
                value.astype(np.uint64).flatten().tolist(), value.shape
        if value.dtype == np.uint64:
            return LambdaArgument.from_tensor_u64(
                value.flatten().tolist(), list(value.shape)
            )
        if value.dtype == np.int8:
            return LambdaArgument.from_tensor_i8(
                value.flatten().tolist(), list(value.shape)
            )
        if value.dtype == np.int16:
            return LambdaArgument.from_tensor_i16(
                value.flatten().tolist(), list(value.shape)
            )
        if value.dtype == np.int32:
            return LambdaArgument.from_tensor_i32(
                value.flatten().tolist(), list(value.shape)
            )
        if value.dtype == np.int64:
            return LambdaArgument.from_tensor_i64(
                value.flatten().tolist(), list(value.shape)
            )
        raise TypeError("numpy.array must be of dtype (u)int{8,16,32,64}")
@@ -58,25 +58,31 @@ class LambdaArgument(WrapperCpp):
        """
        if not isinstance(scalar, ACCEPTED_INTS):
            raise TypeError(
                f"scalar must be of type int or numpy.(u)int, not {type(scalar)}"
                f"scalar must be of type int or numpy.int, not {type(scalar)}"
            )
        return LambdaArgument.wrap(_LambdaArgument.from_scalar(scalar))

    @staticmethod
    def from_tensor_8(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.
    def from_signed_scalar(scalar: int) -> "LambdaArgument":
        """Build a LambdaArgument containing the given scalar value.

        Args:
            data (List[int]): flattened tensor data
            shape (List[int]): shape of original tensor before flattening
            scalar (int or numpy.int): scalar value to embed in LambdaArgument

        Raises:
            TypeError: if scalar is not of type int or numpy.uint

        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_8(data, shape))
        if not isinstance(scalar, ACCEPTED_INTS):
            raise TypeError(
                f"scalar must be of type int or numpy.uint, not {type(scalar)}"
            )
        return LambdaArgument.wrap(_LambdaArgument.from_signed_scalar(scalar))

    @staticmethod
    def from_tensor_16(data: List[int], shape: List[int]) -> "LambdaArgument":
    def from_tensor_u8(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.

        Args:
@@ -86,10 +92,10 @@ class LambdaArgument(WrapperCpp):
        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_16(data, shape))
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_u8(data, shape))

    @staticmethod
    def from_tensor_32(data: List[int], shape: List[int]) -> "LambdaArgument":
    def from_tensor_u16(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.

        Args:
@@ -99,10 +105,10 @@ class LambdaArgument(WrapperCpp):
        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_32(data, shape))
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_u16(data, shape))

    @staticmethod
    def from_tensor_64(data: List[int], shape: List[int]) -> "LambdaArgument":
    def from_tensor_u32(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.

        Args:
@@ -112,7 +118,80 @@ class LambdaArgument(WrapperCpp):
        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_64(data, shape))
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_u32(data, shape))

    @staticmethod
    def from_tensor_u64(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.

        Args:
            data (List[int]): flattened tensor data
            shape (List[int]): shape of original tensor before flattening

        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_u64(data, shape))

    @staticmethod
    def from_tensor_i8(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.

        Args:
            data (List[int]): flattened tensor data
            shape (List[int]): shape of original tensor before flattening

        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_i8(data, shape))

    @staticmethod
    def from_tensor_i16(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.

        Args:
            data (List[int]): flattened tensor data
            shape (List[int]): shape of original tensor before flattening

        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_i16(data, shape))

    @staticmethod
    def from_tensor_i32(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.

        Args:
            data (List[int]): flattened tensor data
            shape (List[int]): shape of original tensor before flattening

        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_i32(data, shape))

    @staticmethod
    def from_tensor_i64(data: List[int], shape: List[int]) -> "LambdaArgument":
        """Build a LambdaArgument containing the given tensor.

        Args:
            data (List[int]): flattened tensor data
            shape (List[int]): shape of original tensor before flattening

        Returns:
            LambdaArgument
        """
        return LambdaArgument.wrap(_LambdaArgument.from_tensor_i64(data, shape))

    def is_signed(self) -> bool:
        """Check if the contained argument is signed.

        Returns:
            bool
        """
        return self.cpp().is_signed()

    def is_scalar(self) -> bool:
        """Check if the contained argument is a scalar.
@@ -130,6 +209,14 @@ class LambdaArgument(WrapperCpp):
        """
        return self.cpp().get_scalar()

    def get_signed_scalar(self) -> int:
        """Return the contained scalar value.

        Returns:
            int
        """
        return self.cpp().get_signed_scalar()

    def is_tensor(self) -> bool:
        """Check if the contained argument is a tensor.
@@ -153,3 +240,11 @@ class LambdaArgument(WrapperCpp):
            List[int]
        """
        return self.cpp().get_tensor_data()

    def get_signed_tensor_data(self) -> List[int]:
        """Return the contained flattened tensor data.

        Returns:
            List[int]
        """
        return self.cpp().get_signed_tensor_data()
@@ -17,7 +17,7 @@ from concrete.compiler import ClientSupport
)
def test_invalid_arg_type(garbage):
    with pytest.raises(TypeError):
        ClientSupport._create_lambda_argument(garbage)
        ClientSupport._create_lambda_argument(garbage, signed=False)


@pytest.mark.parametrize(
@@ -32,7 +32,7 @@ def test_invalid_arg_type(garbage):
)
def test_accepted_ints(value):
    try:
        arg = ClientSupport._create_lambda_argument(value)
        arg = ClientSupport._create_lambda_argument(value, signed=False)
    except Exception:
        pytest.fail(f"value of type {type(value)} should be supported")
    assert arg.is_scalar(), "should have been a scalar"
@@ -52,7 +52,7 @@ def test_accepted_ints(value):
def test_accepted_ndarray(dtype, maxvalue):
    value = np.array([0, 1, 2, maxvalue], dtype=dtype)
    try:
        arg = ClientSupport._create_lambda_argument(value)
        arg = ClientSupport._create_lambda_argument(value, signed=False)
    except Exception:
        pytest.fail(f"value of type {type(value)} should be supported")
@@ -69,7 +69,7 @@ def test_accepted_ndarray(dtype, maxvalue):
def test_accepted_array_as_scalar():
    value = np.array(7, dtype=np.uint16)
    try:
        arg = ClientSupport._create_lambda_argument(value)
        arg = ClientSupport._create_lambda_argument(value, signed=False)
    except Exception:
        pytest.fail(f"value of type {type(value)} should be supported")
    assert arg.is_scalar(), "should have been a scalar"
@@ -133,6 +133,28 @@ end_to_end_fixture = [
        np.array([63, 15, 14, 12]),
        id="add_eint_int_1D",
    ),
    pytest.param(
        """
        func.func @main(%arg0: !FHE.esint<7>) -> !FHE.esint<7> {
            %0 = "FHE.neg_eint"(%arg0): (!FHE.esint<7>) -> !FHE.esint<7>
            return %0: !FHE.esint<7>
        }
        """,
        (5,),
        -5,
        id="neg_eint_signed",
    ),
    pytest.param(
        """
        func.func @main(%arg0: tensor<2x!FHE.esint<7>>) -> tensor<2x!FHE.esint<7>> {
            %0 = "FHELinalg.neg_eint"(%arg0): (tensor<2x!FHE.esint<7>>) -> tensor<2x!FHE.esint<7>>
            return %0: tensor<2x!FHE.esint<7>>
        }
        """,
        (np.array([-5, 3]),),
        np.array([5, -3]),
        id="neg_eint_signed_2",
    ),
]

end_to_end_parallel_fixture = [
@@ -35,7 +35,9 @@ def test_esint_tensor(shape):
    register_dialects(ctx)
    eint = fhe.EncryptedSignedIntegerType.get(ctx, 3)
    tensor = RankedTensorType.get(shape, eint)
    assert tensor.__str__() == f"tensor<{'x'.join(map(str, shape))}x!FHE.esint<{3}>>"
    assert (
        tensor.__str__() == f"tensor<{'x'.join(map(str, shape))}x!FHE.esint<{3}>>"
    )


@pytest.mark.parametrize("width", [0])
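For context on the decrypt path touched above: before this commit, results always came back as unsigned 64-bit words, and the removed wrapper line `int(np.array([result]).astype(np.int64)[0])` reinterpreted them as two's complement; the new `get_signed_scalar` / `get_signed_tensor_data` entry points return signed values directly. The snippet below only illustrates that old numpy reinterpretation and is not part of the commit.

```python
import numpy as np

# Two's-complement reinterpretation the old wrapper relied on:
# the unsigned 64-bit word 2**64 - 5, read as int64, is -5.
raw = np.array([2**64 - 5], dtype=np.uint64)
assert int(raw.astype(np.int64)[0]) == -5
```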