refactor(compiler/tests): Use test fixtures instead of specialized programmatic test

+ fix checkedJit (segfault on compilation error caused by accessing the value of an expected in error state)
This commit is contained in:
Quentin Bourgerie
2022-02-24 14:58:47 +01:00
parent bb289b6191
commit e959f2f4d8
14 changed files with 860 additions and 673 deletions

View File

@@ -49,6 +49,7 @@ add_executable(
add_executable(
end_to_end_jit_fhe
end_to_end_jit_fhe.cc
EndToEndFixture.cpp
globals.cc
)
@@ -71,6 +72,7 @@ set_source_files_properties(
end_to_end_jit_fhe.cc
end_to_end_jit_fhelinalg.cc
end_to_end_jit_lambda.cc
EndToEndFixture.cpp
PROPERTIES COMPILE_FLAGS "-fno-rtti"
)

View File

@@ -0,0 +1,210 @@
#include "EndToEndFixture.h"
#include "concretelang/Support/CompilerEngine.h"
#include "concretelang/Support/Jit.h"
#include "concretelang/Support/JitCompilerEngine.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/YAMLTraits.h"
using mlir::concretelang::StreamStringError;
/// Builds an IntLambdaArgument of the concrete width requested by `desc`,
/// carrying `desc.value`. Returns an error for any width other than
/// 8, 16, 32 or 64 bits. Ownership of the returned argument passes to the
/// caller.
llvm::Expected<mlir::concretelang::LambdaArgument *>
scalarDescToLambdaArgument(ScalarDesc desc) {
  const uint64_t v = desc.value;
  if (desc.width == 8)
    return new mlir::concretelang::IntLambdaArgument<uint8_t>(v);
  if (desc.width == 16)
    return new mlir::concretelang::IntLambdaArgument<uint16_t>(v);
  if (desc.width == 32)
    return new mlir::concretelang::IntLambdaArgument<uint32_t>(v);
  if (desc.width == 64)
    return new mlir::concretelang::IntLambdaArgument<uint64_t>(v);
  return StreamStringError("unsupported width of scalar value: ") << desc.width;
}
/// Builds a TensorLambdaArgument whose element type matches `desc.width`,
/// copying (or, for 64-bit, reusing) the flattened values and the shape
/// from the description. Returns an error for unsupported widths.
/// Ownership of the returned argument passes to the caller.
llvm::Expected<mlir::concretelang::LambdaArgument *>
TensorDescriptionToLambdaArgument(TensorDescription desc) {
  switch (desc.width) {
  // fix: removed a stray empty statement (`case 8:;`) after the label
  case 8:
    return new mlir::concretelang::TensorLambdaArgument<
        mlir::concretelang::IntLambdaArgument<uint8_t>>(
        std::vector<uint8_t>(desc.values.begin(), desc.values.end()),
        desc.shape);
  case 16:
    return new mlir::concretelang::TensorLambdaArgument<
        mlir::concretelang::IntLambdaArgument<uint16_t>>(
        std::vector<uint16_t>(desc.values.begin(), desc.values.end()),
        desc.shape);
  case 32:
    return new mlir::concretelang::TensorLambdaArgument<
        mlir::concretelang::IntLambdaArgument<uint32_t>>(
        std::vector<uint32_t>(desc.values.begin(), desc.values.end()),
        desc.shape);
  case 64:
    // Values are already stored as uint64_t; no per-element conversion.
    return new mlir::concretelang::TensorLambdaArgument<
        mlir::concretelang::IntLambdaArgument<uint64_t>>(desc.values,
                                                         desc.shape);
  }
  return StreamStringError("unsupported width of tensor value: ") << desc.width;
}
/// Converts a ValueDescription into a LambdaArgument, dispatching on its
/// tag to the scalar or tensor conversion helper. Ownership of the
/// returned argument passes to the caller.
llvm::Expected<mlir::concretelang::LambdaArgument *>
valueDescriptionToLambdaArgument(ValueDescription desc) {
  if (desc.tag == ValueDescription::SCALAR)
    return scalarDescToLambdaArgument(desc.scalar);
  if (desc.tag == ValueDescription::TENSOR)
    return TensorDescriptionToLambdaArgument(desc.tensor);
  return StreamStringError("unsupported value description");
}
/// Checks that the scalar invocation result `res` equals the expected
/// scalar description `desc`. Returns a descriptive error on mismatch.
llvm::Error checkResult(ScalarDesc &desc,
                        mlir::concretelang::LambdaArgument *res) {
  auto res64 = res->dyn_cast<mlir::concretelang::IntLambdaArgument<uint64_t>>();
  if (res64 == nullptr) {
    return StreamStringError("invocation result is not a scalar");
  }
  if (desc.value != res64->getValue()) {
    // fix: leading space before "expected" so the message reads
    // "got <v> expected <v>" instead of "got <v>expected <v>",
    // consistent with the tensor error messages.
    return StreamStringError("unexpected result value: got ")
           << res64->getValue() << " expected " << desc.value;
  }
  return llvm::Error::success();
}
/// Checks that the tensor invocation result `res` matches the expected
/// tensor description `desc`: same shape (when a shape is declared) and
/// same element values. Returns a descriptive error on the first mismatch.
template <typename UINT>
llvm::Error
checkTensorResult(TensorDescription &desc,
                  mlir::concretelang::TensorLambdaArgument<
                      mlir::concretelang::IntLambdaArgument<UINT>> *res) {
  // fix: `res` comes from a dyn_cast at the call site and is null when the
  // result is not a tensor of the expected element type; report an error
  // instead of dereferencing a null pointer (segfault).
  if (res == nullptr) {
    return StreamStringError("invocation result is not a tensor of the "
                             "expected element type");
  }
  if (!desc.shape.empty()) {
    auto resShape = res->getDimensions();
    if (desc.shape.size() != resShape.size()) {
      return StreamStringError("size of shape differs, got ")
             << resShape.size() << " expected " << desc.shape.size();
    }
    for (size_t i = 0; i < desc.shape.size(); i++) {
      if ((uint64_t)resShape[i] != desc.shape[i]) {
        return StreamStringError("shape differs at pos ")
               << i << ", got " << resShape[i] << " expected " << desc.shape[i];
      }
    }
  }
  auto resValues = res->getValue();
  // getNumElements returns an Expected; propagate its error if any.
  auto numElts = res->getNumElements();
  if (!numElts) {
    return numElts.takeError();
  }
  if (desc.values.size() != *numElts) {
    return StreamStringError("size of result differs, got ")
           << *numElts << " expected " << desc.values.size();
  }
  for (size_t i = 0; i < *numElts; i++) {
    if (resValues[i] != desc.values[i]) {
      return StreamStringError("result value differ at pos(")
             << i << "), got " << resValues[i] << " expected "
             << desc.values[i];
    }
  }
  return llvm::Error::success();
}
/// Checks a tensor invocation result against `desc`, dispatching on the
/// declared element width to the concretely-typed tensor checker.
llvm::Error checkResult(TensorDescription &desc,
                        mlir::concretelang::LambdaArgument *res) {
  if (desc.width == 8)
    return checkTensorResult<uint8_t>(
        desc, res->dyn_cast<mlir::concretelang::TensorLambdaArgument<
                  mlir::concretelang::IntLambdaArgument<uint8_t>>>());
  if (desc.width == 16)
    return checkTensorResult<uint16_t>(
        desc, res->dyn_cast<mlir::concretelang::TensorLambdaArgument<
                  mlir::concretelang::IntLambdaArgument<uint16_t>>>());
  if (desc.width == 32)
    return checkTensorResult<uint32_t>(
        desc, res->dyn_cast<mlir::concretelang::TensorLambdaArgument<
                  mlir::concretelang::IntLambdaArgument<uint32_t>>>());
  if (desc.width == 64)
    return checkTensorResult<uint64_t>(
        desc, res->dyn_cast<mlir::concretelang::TensorLambdaArgument<
                  mlir::concretelang::IntLambdaArgument<uint64_t>>>());
  return StreamStringError("Unsupported width");
}
/// Checks an invocation result against a ValueDescription, dispatching to
/// the scalar or tensor checker according to the description's tag.
llvm::Error
checkResult(ValueDescription &desc,
            std::unique_ptr<mlir::concretelang::LambdaArgument> &res) {
  switch (desc.tag) {
  case ValueDescription::SCALAR:
    return checkResult(desc.scalar, res.get());
  case ValueDescription::TENSOR:
    return checkResult(desc.tensor, res.get());
  }
  // fix: previously `assert(false)` here let release (NDEBUG) builds fall
  // off the end of a non-void function — undefined behavior. Return an
  // explicit error instead.
  return StreamStringError("unsupported value description tag");
}
/// gtest parameter-name generator: a parameterized test instance is named
/// after the YAML `description` field of its EndToEndDesc parameter.
std::string printEndToEndDesc(const testing::TestParamInfo<EndToEndDesc> desc) {
  const EndToEndDesc &endToEnd = desc.param;
  return endToEnd.description;
}
// YAML mapping for ValueDescription. A value node is either
//   scalar: <int>            (optional `width`, default 64)
// or
//   tensor: [<ints>]         (optional `width`, default 64; required `shape`)
// The variant is chosen by inspecting which keys are present in the node.
template <> struct llvm::yaml::MappingTraits<ValueDescription> {
static void mapping(IO &io, ValueDescription &desc) {
// Peek at the node's keys to decide between the scalar and tensor forms.
auto keys = io.keys();
if (std::find(keys.begin(), keys.end(), "scalar") != keys.end()) {
io.mapRequired("scalar", desc.scalar.value);
io.mapOptional("width", desc.scalar.width, 64);
desc.tag = ValueDescription::SCALAR;
return;
}
if (std::find(keys.begin(), keys.end(), "tensor") != keys.end()) {
io.mapRequired("tensor", desc.tensor.values);
io.mapOptional("width", desc.tensor.width, 64);
io.mapRequired("shape", desc.tensor.shape);
desc.tag = ValueDescription::TENSOR;
return;
}
// Neither key present: the fixture node is malformed.
io.setError("Missing scalar or tensor key");
}
};
LLVM_YAML_IS_SEQUENCE_VECTOR(ValueDescription);
// YAML mapping for TestDescription: optional `inputs` and `outputs` lists
// of ValueDescription (either may be absent, e.g. a nullary function has
// no inputs).
template <> struct llvm::yaml::MappingTraits<TestDescription> {
static void mapping(IO &io, TestDescription &desc) {
io.mapOptional("inputs", desc.inputs);
io.mapOptional("outputs", desc.outputs);
}
};
LLVM_YAML_IS_SEQUENCE_VECTOR(TestDescription);
// YAML mapping for EndToEndDesc: each document requires a `description`
// (used as the gtest instance name), the MLIR `program` source, and a
// list of `tests`.
template <> struct llvm::yaml::MappingTraits<EndToEndDesc> {
static void mapping(IO &io, EndToEndDesc &desc) {
io.mapRequired("description", desc.description);
io.mapRequired("program", desc.program);
io.mapRequired("tests", desc.tests);
}
};
LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(EndToEndDesc);
/// Loads the list of end-to-end test descriptions from the YAML file at
/// `path`. Aborts via assert if the file cannot be read or parsed, so a
/// broken or missing fixture fails loudly instead of silently producing
/// an empty test list.
std::vector<EndToEndDesc> loadEndToEndDesc(std::string path) {
  std::ifstream file(path);
  // fix: an unopenable file used to read as empty content and silently
  // yield zero test descriptions.
  assert(file.good() && "cannot open end-to-end description file");
  std::string content((std::istreambuf_iterator<char>(file)),
                      (std::istreambuf_iterator<char>()));
  llvm::yaml::Input yin(content);
  // Parse the (multi-document) YAML file
  std::vector<EndToEndDesc> desc;
  yin >> desc;
  // Check for parse error
  if (yin.error())
    assert(false && "cannot parse doc");
  return desc;
}

View File

@@ -0,0 +1,63 @@
#ifndef END_TO_END_FIXTURE_H
#define END_TO_END_FIXTURE_H
#include <fstream>
#include <string>
#include <vector>
#include "end_to_end_jit_test.h"
typedef uint8_t ValueWidth;
// Description of a (multi-dimensional) tensor test value.
struct TensorDescription {
// Dimensions of the tensor; may be left empty (shape then not checked).
std::vector<int64_t> shape;
// Flattened element values (row-major), widened to 64 bits for storage.
std::vector<uint64_t> values;
// Element width in bits (8, 16, 32 or 64).
ValueWidth width;
};
// Description of a scalar test value.
struct ScalarDesc {
// The value, widened to 64 bits for storage regardless of `width`.
uint64_t value;
// Intended width in bits of the value (8, 16, 32 or 64).
ValueWidth width;
};
// Tagged union describing either a scalar or a tensor test value.
struct ValueDescription {
// Builds a scalar description from a native integer; the declared width
// is derived from the static type of `value` (sizeof * 8).
template <typename T> static ValueDescription get(T value) {
ValueDescription desc;
desc.tag = ValueDescription::SCALAR;
desc.scalar.value = (uint64_t)value;
desc.scalar.width = sizeof(value) * 8;
return desc;
}
// Discriminant selecting which of `scalar`/`tensor` is meaningful.
enum { SCALAR, TENSOR } tag;
ScalarDesc scalar;
TensorDescription tensor;
};
// One invocation of the compiled program: the arguments to pass and the
// expected result(s).
struct TestDescription {
std::vector<ValueDescription> inputs;
std::vector<ValueDescription> outputs;
};
// One end-to-end test case loaded from a YAML fixture: a human-readable
// description (used to name the gtest instance), the MLIR program source,
// and the invocations to run against it.
struct EndToEndDesc {
std::string description;
std::string program;
std::vector<TestDescription> tests;
};
llvm::Expected<mlir::concretelang::LambdaArgument *>
scalarDescToLambdaArgument(ScalarDesc desc);
llvm::Expected<mlir::concretelang::LambdaArgument *>
valueDescriptionToLambdaArgument(ValueDescription desc);
llvm::Error checkResult(ScalarDesc &desc,
mlir::concretelang::LambdaArgument *res);
llvm::Error
checkResult(ValueDescription &desc,
std::unique_ptr<mlir::concretelang::LambdaArgument> &res);
std::vector<EndToEndDesc> loadEndToEndDesc(std::string path);
std::string printEndToEndDesc(const testing::TestParamInfo<EndToEndDesc> desc);
#endif

View File

@@ -0,0 +1,113 @@
description: identity
program: |
func @main(%t: tensor<2x10x!FHE.eint<6>>) -> tensor<2x10x!FHE.eint<6>> {
return %t : tensor<2x10x!FHE.eint<6>>
}
tests:
- inputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
width: 8
outputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
---
description: extract
program: |
func @main(%t: tensor<2x10x!FHE.eint<6>>, %i: index, %j: index) ->
!FHE.eint<6> {
%c = tensor.extract %t[%i, %j] : tensor<2x10x!FHE.eint<6>>
return %c : !FHE.eint<6>
}
tests:
- inputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
width: 8
- scalar: 0
- scalar: 0
outputs:
- scalar: 63
- inputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
width: 8
- scalar: 0
- scalar: 9
outputs:
- scalar: 0
- inputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
width: 8
- scalar: 1
- scalar: 0
outputs:
- scalar: 0
- inputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
width: 8
- scalar: 1
- scalar: 9
outputs:
- scalar: 9
---
description: extract_slice
program: |
func @main(%t: tensor<2x10x!FHE.eint<6>>) -> tensor<1x5x!FHE.eint<6>> {
%r = tensor.extract_slice %t[1, 5][1, 5][1, 1] : tensor<2x10x!FHE.eint<6>> to tensor<1x5x!FHE.eint<6>>
return %r : tensor<1x5x!FHE.eint<6>>
}
tests:
- inputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
width: 8
outputs:
- tensor: [ 5, 6, 7, 8, 9]
shape: [1,5]
---
description: extract_slice_stride
program: |
func @main(%t: tensor<2x10x!FHE.eint<6>>) -> tensor<1x5x!FHE.eint<6>> {
%r = tensor.extract_slice %t[1, 0][1, 5][1, 2] : tensor<2x10x!FHE.eint<6>> to tensor<1x5x!FHE.eint<6>>
return %r : tensor<1x5x!FHE.eint<6>>
}
tests:
- inputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
width: 8
outputs:
- tensor: [0, 2, 4, 6, 8]
shape: [1,5]
---
description: insert_slice
program: |
func @main(%t0: tensor<2x10x!FHE.eint<6>>, %t1: tensor<2x2x!FHE.eint<6>>) -> tensor<2x10x!FHE.eint<6>> {
%r = tensor.insert_slice %t1 into %t0[0, 5][2, 2][1, 1] : tensor<2x2x!FHE.eint<6>> into tensor<2x10x!FHE.eint<6>>
return %r : tensor<2x10x!FHE.eint<6>>
}
tests:
- inputs:
- tensor: [63, 12, 7, 43, 52, 9, 26, 34, 22, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
shape: [2,10]
width: 8
- tensor: [31, 32,
33, 34]
shape: [2,2]
width: 8
outputs:
- tensor: [63, 12, 7, 43, 52, 31, 32, 34, 22, 0,
0, 1, 2, 3, 4, 33, 34, 7, 8, 9]
shape: [2,10]

View File

@@ -0,0 +1,273 @@
description: identity
program: |
func @main(%arg0: !FHE.eint<3>) -> !FHE.eint<3> {
return %arg0: !FHE.eint<3>
}
tests:
- inputs:
- scalar: 1
outputs:
- scalar: 1
---
description: zero_tensor
program: |
func @main() -> tensor<2x2x4x!FHE.eint<6>> {
%0 = "FHE.zero_tensor"() : () -> tensor<2x2x4x!FHE.eint<6>>
return %0 : tensor<2x2x4x!FHE.eint<6>>
}
tests:
- outputs:
- tensor: [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
shape: [2,2,4]
---
description: add_eint_int_cst
program: |
func @main(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
%0 = arith.constant 1 : i3
%1 = "FHE.add_eint_int"(%arg0, %0): (!FHE.eint<2>, i3) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 1
- inputs:
- scalar: 1
outputs:
- scalar: 2
- inputs:
- scalar: 2
outputs:
- scalar: 3
- inputs:
- scalar: 3
outputs:
- scalar: 4
---
description: add_eint_int_arg
program: |
func @main(%arg0: !FHE.eint<2>, %arg1: i3) -> !FHE.eint<2> {
%1 = "FHE.add_eint_int"(%arg0, %arg1): (!FHE.eint<2>, i3) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
tests:
- inputs:
- scalar: 0
- scalar: 1
outputs:
- scalar: 1
- inputs:
- scalar: 1
- scalar: 2
outputs:
- scalar: 3
---
description: sub_eint_int_cst
program: |
func @main(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
%0 = arith.constant 7 : i3
%1 = "FHE.sub_int_eint"(%0, %arg0): (i3, !FHE.eint<2>) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
tests:
- inputs:
- scalar: 1
outputs:
- scalar: 6
- inputs:
- scalar: 2
outputs:
- scalar: 5
- inputs:
- scalar: 3
outputs:
- scalar: 4
- inputs:
- scalar: 4
outputs:
- scalar: 3
---
description: sub_int_eint_arg
program: |
func @main(%arg0: i3, %arg1: !FHE.eint<2>) -> !FHE.eint<2> {
%1 = "FHE.sub_int_eint"(%arg0, %arg1): (i3, !FHE.eint<2>) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
tests:
- inputs:
- scalar: 2
- scalar: 2
outputs:
- scalar: 0
- inputs:
- scalar: 2
- scalar: 1
outputs:
- scalar: 1
- inputs:
- scalar: 7
- scalar: 2
outputs:
- scalar: 5
---
description: neg_eint
program: |
func @main(%arg0: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 255
- inputs:
- scalar: 4
outputs:
- scalar: 252
- inputs:
- scalar: 250
outputs:
- scalar: 6
---
description: neg_eint_3bits
program: |
func @main(%arg0: !FHE.eint<3>) -> !FHE.eint<3> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<3>) -> (!FHE.eint<3>)
return %1: !FHE.eint<3>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 15
- inputs:
- scalar: 4
outputs:
- scalar: 12
- inputs:
- scalar: 13
outputs:
- scalar: 3
---
description: mul_eint_int_cst
program: |
func @main(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
%0 = arith.constant 2 : i3
%1 = "FHE.mul_eint_int"(%arg0, %0): (!FHE.eint<2>, i3) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 2
- inputs:
- scalar: 2
outputs:
- scalar: 4
- inputs:
- scalar: 2
outputs:
- scalar: 4
---
description: mul_eint_int_arg
program: |
func @main(%arg0: !FHE.eint<2>, %arg1: i3) -> !FHE.eint<2> {
%1 = "FHE.mul_eint_int"(%arg0, %arg1): (!FHE.eint<2>, i3) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
tests:
- inputs:
- scalar: 0
- scalar: 2
outputs:
- scalar: 0
- inputs:
- scalar: 1
- scalar: 2
outputs:
- scalar: 2
- inputs:
- scalar: 2
- scalar: 2
outputs:
- scalar: 4
---
description: add_eint
program: |
func @main(%arg0: !FHE.eint<7>, %arg1: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.add_eint"(%arg0, %arg1): (!FHE.eint<7>, !FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
tests:
- inputs:
- scalar: 1
- scalar: 2
outputs:
- scalar: 3
- inputs:
- scalar: 4
- scalar: 5
outputs:
- scalar: 9
- inputs:
- scalar: 1
- scalar: 1
outputs:
- scalar: 2
---
description: apply_lookup_table_multiple_precision
program: |
func @main(%arg0: !FHE.eint<6>, %arg1: !FHE.eint<3>) -> !FHE.eint<6> {
%tlu_7 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63]> : tensor<64xi64>
%tlu_3 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>
%a = "FHE.apply_lookup_table"(%arg0, %tlu_7): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
%b = "FHE.apply_lookup_table"(%arg1, %tlu_3): (!FHE.eint<3>, tensor<8xi64>) -> (!FHE.eint<6>)
%a_plus_b = "FHE.add_eint"(%a, %b): (!FHE.eint<6>, !FHE.eint<6>) -> (!FHE.eint<6>)
return %a_plus_b: !FHE.eint<6>
}
tests:
- inputs:
- scalar: 23
- scalar: 7
outputs:
- scalar: 30
---
description: apply_lookup_table_random_func
program: |
func @main(%arg0: !FHE.eint<6>) -> !FHE.eint<6> {
%tlu = arith.constant dense<[16, 91, 16, 83, 80, 74, 21, 96, 1, 63, 49, 122, 76, 89, 74, 55, 109, 110, 103, 54, 105, 14, 66, 47, 52, 89, 7, 10, 73, 44, 119, 92, 25, 104, 123, 100, 108, 86, 29, 121, 118, 52, 107, 48, 34, 37, 13, 122, 107, 48, 74, 59, 96, 36, 50, 55, 120, 72, 27, 45, 12, 5, 96, 12]> : tensor<64xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
return %1: !FHE.eint<6>
}
tests:
- inputs:
- scalar: 5
outputs:
- scalar: 74
- inputs:
- scalar: 62
outputs:
- scalar: 96
- inputs:
- scalar: 0
outputs:
- scalar: 16
- inputs:
- scalar: 63
outputs:
- scalar: 12

View File

@@ -10,8 +10,7 @@
///////////////////////////////////////////////////////////////////////////////
TEST(ParallelizeAndRunFHE, add_eint_tree) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%arg0: !FHE.eint<7>, %arg1: !FHE.eint<7>, %arg2: !FHE.eint<7>, %arg3: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.add_eint"(%arg0, %arg1): (!FHE.eint<7>, !FHE.eint<7>) -> (!FHE.eint<7>)
%2 = "FHE.add_eint"(%arg0, %arg2): (!FHE.eint<7>, !FHE.eint<7>) -> (!FHE.eint<7>)
@@ -56,7 +55,7 @@ func @main(%arg0: !FHE.eint<7>, %arg1: !FHE.eint<7>, %arg2: !FHE.eint<7>, %arg3:
return %35: !FHE.eint<7>
}
)XXX",
"main", false, true);
"main", false, true);
ASSERT_EXPECTED_VALUE(lambda(1_u64, 2_u64, 3_u64, 4_u64), 150);
ASSERT_EXPECTED_VALUE(lambda(4_u64, 5_u64, 6_u64, 7_u64), 74);

View File

@@ -5,13 +5,13 @@
///////////////////////////////////////////////////////////////////////////////
TEST(End2EndJit_ClearTensor_1D, identity) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(
R"XXX(
checkedJit(lambda,
R"XXX(
func @main(%t: tensor<10xi64>) -> tensor<10xi64> {
return %t : tensor<10xi64>
}
)XXX",
"main", true);
"main", true);
uint64_t arg[]{0xFFFFFFFFFFFFFFFF,
0,
@@ -37,14 +37,13 @@ func @main(%t: tensor<10xi64>) -> tensor<10xi64> {
}
TEST(End2EndJit_ClearTensor_1D, extract_64) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10xi64>, %i: index) -> i64{
%c = tensor.extract %t[%i] : tensor<10xi64>
return %c : i64
}
)XXX",
"main", true);
"main", true);
uint64_t arg[]{0xFFFFFFFFFFFFFFFF,
0,
@@ -63,14 +62,13 @@ func @main(%t: tensor<10xi64>, %i: index) -> i64{
}
TEST(End2EndJit_ClearTensor_1D, extract_32) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10xi32>, %i: index) -> i32{
%c = tensor.extract %t[%i] : tensor<10xi32>
return %c : i32
}
)XXX",
"main", true);
"main", true);
uint32_t arg[]{0xFFFFFFFF, 0, 8978, 2587490, 90,
197864, 698735, 72132, 87474, 42};
@@ -82,14 +80,13 @@ func @main(%t: tensor<10xi32>, %i: index) -> i32{
TEST(End2EndJit_ClearTensor_1D, extract_16) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10xi16>, %i: index) -> i16{
%c = tensor.extract %t[%i] : tensor<10xi16>
return %c : i16
}
)XXX",
"main", true);
"main", true);
uint16_t arg[]{0xFFFF, 0, 59589, 47826, 16227,
63269, 36435, 52380, 7401, 13313};
@@ -101,14 +98,13 @@ func @main(%t: tensor<10xi16>, %i: index) -> i16{
TEST(End2EndJit_ClearTensor_1D, extract_8) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10xi8>, %i: index) -> i8{
%c = tensor.extract %t[%i] : tensor<10xi8>
return %c : i8
}
)XXX",
"main", true);
"main", true);
uint8_t arg[]{0xFF, 0, 120, 225, 14, 177, 131, 84, 174, 93};
@@ -119,14 +115,13 @@ func @main(%t: tensor<10xi8>, %i: index) -> i8{
TEST(End2EndJit_ClearTensor_1D, extract_5) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10xi5>, %i: index) -> i5{
%c = tensor.extract %t[%i] : tensor<10xi5>
return %c : i5
}
)XXX",
"main", true);
"main", true);
uint8_t arg[]{32, 0, 10, 25, 14, 25, 18, 28, 14, 7};
@@ -137,14 +132,13 @@ func @main(%t: tensor<10xi5>, %i: index) -> i5{
TEST(End2EndJit_ClearTensor_1D, extract_1) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10xi1>, %i: index) -> i1{
%c = tensor.extract %t[%i] : tensor<10xi1>
return %c : i1
}
)XXX",
"main", true);
"main", true);
uint8_t arg[]{0, 0, 1, 0, 1, 1, 0, 1, 1, 0};
@@ -191,13 +185,12 @@ const llvm::ArrayRef<int64_t> shape2D(dims, numDim);
TEST(End2EndJit_ClearTensor_2D, identity) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<2x10xi64>) -> tensor<2x10xi64> {
return %t : tensor<2x10xi64>
}
)XXX",
"main", true);
"main", true);
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint64_t>>
@@ -217,14 +210,13 @@ func @main(%t: tensor<2x10xi64>) -> tensor<2x10xi64> {
TEST(End2EndJit_ClearTensor_2D, extract) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<2x10xi64>, %i: index, %j: index) -> i64 {
%c = tensor.extract %t[%i, %j] : tensor<2x10xi64>
return %c : i64
}
)XXX",
"main", true);
"main", true);
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint64_t>>
@@ -241,14 +233,14 @@ func @main(%t: tensor<2x10xi64>, %i: index, %j: index) -> i64 {
TEST(End2EndJit_ClearTensor_2D, extract_slice) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
// mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(lambda, R"XXX(
func @main(%t: tensor<2x10xi64>) -> tensor<1x5xi64> {
%r = tensor.extract_slice %t[1, 5][1, 5][1, 1] : tensor<2x10xi64> to
tensor<1x5xi64> return %r : tensor<1x5xi64>
}
)XXX",
"main", true);
"main", true);
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint64_t>>
@@ -270,14 +262,13 @@ func @main(%t: tensor<2x10xi64>) -> tensor<1x5xi64> {
TEST(End2EndJit_ClearTensor_2D, extract_slice_stride) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<2x10xi64>) -> tensor<1x5xi64> {
%r = tensor.extract_slice %t[1, 0][1, 5][1, 2] : tensor<2x10xi64> to
tensor<1x5xi64> return %r : tensor<1x5xi64>
}
)XXX",
"main", true);
"main", true);
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint64_t>>
@@ -299,14 +290,13 @@ func @main(%t: tensor<2x10xi64>) -> tensor<1x5xi64> {
TEST(End2EndJit_ClearTensor_2D, insert_slice) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t0: tensor<2x10xi64>, %t1: tensor<2x2xi64>) -> tensor<2x10xi64> {
%r = tensor.insert_slice %t1 into %t0[0, 5][2, 2][1, 1] : tensor<2x2xi64>
into tensor<2x10xi64> return %r : tensor<2x10xi64>
}
)XXX",
"main", true);
"main", true);
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint64_t>>
@@ -382,13 +372,11 @@ TEST_P(ReturnTensorWithPrecision, return_tensor) {
<< " return %res : tensor<5x3x2xi" << precision << ">\n"
<< "}";
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(mlirProgram.str(), "main", true);
checkedJit(lambda, mlirProgram.str(), "main", true);
llvm::Expected<std::unique_ptr<mlir::concretelang::LambdaArgument>> res =
lambda.operator()<std::unique_ptr<mlir::concretelang::LambdaArgument>>(
{});
ASSERT_EXPECTED_SUCCESS(res);
bool status;

View File

@@ -8,8 +8,7 @@
const mlir::concretelang::V0FHEConstraint defaultV0Constraints{10, 7};
TEST(CompileAndRunDFR, start_stop) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func private @_dfr_stop()
func private @_dfr_start()
func @main() -> i64{
@@ -19,13 +18,12 @@ func @main() -> i64{
return %1 : i64
}
)XXX",
"main", true);
"main", true);
ASSERT_EXPECTED_VALUE(lambda(), 7);
}
TEST(CompileAndRunDFR, 0in1out_task) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
llvm.func @_dfr_await_future(!llvm.ptr<i64>) -> !llvm.ptr<ptr<i64>> attributes {sym_visibility = "private"}
llvm.func @_dfr_create_async_task(...) attributes {sym_visibility = "private"}
llvm.func @_dfr_stop()
@@ -56,13 +54,12 @@ TEST(CompileAndRunDFR, 0in1out_task) {
llvm.return
}
)XXX",
"main", true);
"main", true);
ASSERT_EXPECTED_VALUE(lambda(), 7);
}
TEST(CompileAndRunDFR, 1in1out_task) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
llvm.func @_dfr_await_future(!llvm.ptr<i64>) -> !llvm.ptr<ptr<i64>> attributes {sym_visibility = "private"}
llvm.func @_dfr_create_async_task(...) attributes {sym_visibility = "private"}
llvm.func @malloc(i64) -> !llvm.ptr<i8>
@@ -102,14 +99,13 @@ TEST(CompileAndRunDFR, 1in1out_task) {
llvm.return
}
)XXX",
"main", true);
"main", true);
ASSERT_EXPECTED_VALUE(lambda(5_u64), 7);
}
TEST(CompileAndRunDFR, 2in1out_task) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
llvm.func @_dfr_await_future(!llvm.ptr<i64>) -> !llvm.ptr<ptr<i64>> attributes {sym_visibility = "private"}
llvm.func @_dfr_create_async_task(...) attributes {sym_visibility = "private"}
llvm.func @malloc(i64) -> !llvm.ptr<i8>
@@ -158,14 +154,13 @@ TEST(CompileAndRunDFR, 2in1out_task) {
llvm.return
}
)XXX",
"main", true);
"main", true);
ASSERT_EXPECTED_VALUE(lambda(1_u64, 6_u64), 7);
}
TEST(CompileAndRunDFR, taskgraph) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
llvm.func @_dfr_await_future(!llvm.ptr<i64>) -> !llvm.ptr<ptr<i64>> attributes {sym_visibility = "private"}
llvm.func @_dfr_create_async_task(...) attributes {sym_visibility = "private"}
llvm.func @malloc(i64) -> !llvm.ptr<i8>
@@ -348,7 +343,7 @@ TEST(CompileAndRunDFR, taskgraph) {
llvm.return
}
)XXX",
"main", true);
"main", true);
ASSERT_EXPECTED_VALUE(lambda(1_u64, 2_u64, 3_u64), 54);
ASSERT_EXPECTED_VALUE(lambda(2_u64, 5_u64, 1_u64), 72);

View File

@@ -1,95 +1,7 @@
#include "end_to_end_jit_test.h"
///////////////////////////////////////////////////////////////////////////////
// 2D encrypted tensor ////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
const size_t numDim = 2;
const size_t dim0 = 2;
const size_t dim1 = 10;
const int64_t dims[numDim]{dim0, dim1};
static std::vector<uint8_t> tensor2D{
63, 12, 7, 43, 52, 9, 26, 34, 22, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
};
const llvm::ArrayRef<int64_t> shape2D(dims, numDim);
#define GET_2D(tensor, i, j) (tensor)[i * dims[1] + j]
#define TENSOR2D_GET(i, j) GET_2D(tensor2D, i, j)
TEST(End2EndJit_EncryptedTensor_2D, identity) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%t: tensor<2x10x!FHE.eint<6>>) -> tensor<2x10x!FHE.eint<6>> {
return %t : tensor<2x10x!FHE.eint<6>>
}
)XXX");
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint8_t>>
arg(tensor2D, shape2D);
llvm::Expected<std::vector<uint64_t>> res =
lambda.operator()<std::vector<uint64_t>>({&arg});
ASSERT_EXPECTED_SUCCESS(res);
ASSERT_EQ(res->size(), tensor2D.size());
for (size_t i = 0; i < tensor2D.size(); i++) {
EXPECT_EQ(tensor2D[i], (*res)[i]) << "result differ at pos " << i;
}
}
TEST(End2EndJit_EncryptedTensor_2D, extract) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%t: tensor<2x10x!FHE.eint<6>>, %i: index, %j: index) ->
!FHE.eint<6> {
%c = tensor.extract %t[%i, %j] : tensor<2x10x!FHE.eint<6>>
return %c : !FHE.eint<6>
}
)XXX");
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint8_t>>
arg(tensor2D, shape2D);
for (int64_t i = 0; i < dims[0]; i++) {
for (int64_t j = 0; j < dims[1]; j++) {
mlir::concretelang::IntLambdaArgument<size_t> argi(i);
mlir::concretelang::IntLambdaArgument<size_t> argj(j);
ASSERT_EXPECTED_VALUE(lambda({&arg, &argi, &argj}), TENSOR2D_GET(i, j));
}
}
}
TEST(End2EndJit_EncryptedTensor_2D, extract_slice) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%t: tensor<2x10x!FHE.eint<6>>) -> tensor<1x5x!FHE.eint<6>> {
%r = tensor.extract_slice %t[1, 5][1, 5][1, 1] : tensor<2x10x!FHE.eint<6>> to tensor<1x5x!FHE.eint<6>>
return %r : tensor<1x5x!FHE.eint<6>>
}
)XXX");
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint8_t>>
arg(tensor2D, shape2D);
llvm::Expected<std::vector<uint64_t>> res =
lambda.operator()<std::vector<uint64_t>>({&arg});
ASSERT_EXPECTED_SUCCESS(res);
ASSERT_EQ(res->size(), (size_t)1 * 5);
// Check the sub slice
for (size_t j = 0; j < 5; j++) {
// Get and assert the result
ASSERT_EQ((*res)[j], TENSOR2D_GET(1, j + 5));
}
}
TEST(End2EndJit_EncryptedTensor_2D, extract_slice_parametric_2x2) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<8x4x!FHE.eint<6>>, %y: index, %x: index) -> tensor<2x2x!FHE.eint<6>> {
%r = tensor.extract_slice %t[%y, %x][2, 2][1, 1] : tensor<8x4x!FHE.eint<6>> to tensor<2x2x!FHE.eint<6>>
return %r : tensor<2x2x!FHE.eint<6>>
@@ -129,7 +41,7 @@ func @main(%t: tensor<8x4x!FHE.eint<6>>, %y: index, %x: index) -> tensor<2x2x!FH
TEST(End2EndJit_EncryptedTensor_4D, extract_slice_parametric_2x2x2x2) {
constexpr int64_t dimSizes[4] = {8, 4, 5, 3};
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<8x4x5x3x!FHE.eint<6>>, %d0: index, %d1: index, %d2: index, %d3: index) -> tensor<2x2x2x2x!FHE.eint<6>> {
%r = tensor.extract_slice %t[%d0, %d1, %d2, %d3][2, 2, 2, 2][1, 1, 1, 1] : tensor<8x4x5x3x!FHE.eint<6>> to tensor<2x2x2x2x!FHE.eint<6>>
return %r : tensor<2x2x2x2x!FHE.eint<6>>
@@ -189,72 +101,3 @@ func @main(%t: tensor<8x4x5x3x!FHE.eint<6>>, %d0: index, %d1: index, %d2: index,
}
}
}
TEST(End2EndJit_EncryptedTensor_2D, extract_slice_stride) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%t: tensor<2x10x!FHE.eint<6>>) -> tensor<1x5x!FHE.eint<6>> {
%r = tensor.extract_slice %t[1, 0][1, 5][1, 2] : tensor<2x10x!FHE.eint<6>> to tensor<1x5x!FHE.eint<6>>
return %r : tensor<1x5x!FHE.eint<6>>
}
)XXX");
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint8_t>>
arg(tensor2D, shape2D);
llvm::Expected<std::vector<uint64_t>> res =
lambda.operator()<std::vector<uint64_t>>({&arg});
ASSERT_EXPECTED_SUCCESS(res);
ASSERT_EQ(res->size(), (size_t)1 * 5);
// Check the sub slice
for (size_t j = 0; j < 5; j++) {
// Get and assert the result
ASSERT_EQ((*res)[j], TENSOR2D_GET(1, j * 2));
}
}
TEST(End2EndJit_EncryptedTensor_2D, insert_slice) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%t0: tensor<2x10x!FHE.eint<6>>, %t1: tensor<2x2x!FHE.eint<6>>)
-> tensor<2x10x!FHE.eint<6>> {
%r = tensor.insert_slice %t1 into %t0[0, 5][2, 2][1, 1] : tensor<2x2x!FHE.eint<6>> into tensor<2x10x!FHE.eint<6>>
return %r : tensor<2x10x!FHE.eint<6>>
}
)XXX");
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint8_t>>
t0(tensor2D, shape2D);
int64_t t1Shape[] = {2, 2};
uint8_t t1Buffer[]{6, 9, 4, 0};
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<uint8_t>>
t1(t1Buffer, t1Shape);
llvm::Expected<std::vector<uint64_t>> res =
lambda.operator()<std::vector<uint64_t>>({&t0, &t1});
ASSERT_EXPECTED_SUCCESS(res);
ASSERT_EQ(res->size(), tensor2D.size());
// Check the sub slice
for (size_t i = 0; i < dim0; i++) {
for (size_t j = 0; j < dim1; j++) {
if (j < 5 || j >= 5 + 2) {
ASSERT_EQ(GET_2D(*res, i, j), TENSOR2D_GET(i, j))
<< "at indexes (" << i << "," << j << ")";
} else {
// Get and assert the result
ASSERT_EQ(GET_2D(*res, i, j), t1Buffer[i * 2 + j - 5])
<< "at indexes (" << i << "," << j << ")";
;
}
}
}
}

View File

@@ -3,311 +3,57 @@
#include <gtest/gtest.h>
#include <type_traits>
#include "end_to_end_jit_test.h"
#include "EndToEndFixture.h"
///////////////////////////////////////////////////////////////////////////////
// FHE types and operators //////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
class EndToEndJitTest : public testing::TestWithParam<EndToEndDesc> {};
// FHE.eint /////////////////////////////////////////////////////////////////
TEST_P(EndToEndJitTest, compile_and_run) {
EndToEndDesc desc = GetParam();
TEST(End2EndJit_FHE, identity) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<3>) -> !FHE.eint<3> {
return %arg0: !FHE.eint<3>
}
)XXX");
// Compile program
// mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(lambda, desc.program);
ASSERT_EXPECTED_VALUE(lambda(1_u64), 1);
ASSERT_EXPECTED_VALUE(lambda(4_u64), 4);
ASSERT_EXPECTED_VALUE(lambda(8_u64), 8);
}
// Prepare arguments
for (auto test : desc.tests) {
std::vector<mlir::concretelang::LambdaArgument *> inputArguments;
inputArguments.reserve(test.inputs.size());
for (auto input : test.inputs) {
auto arg = valueDescriptionToLambdaArgument(input);
ASSERT_EXPECTED_SUCCESS(arg);
inputArguments.push_back(arg.get());
}
// FHE.zero_tensor ////////////////////////////////////////////////////////////
// Call the lambda
auto res =
lambda.operator()<std::unique_ptr<mlir::concretelang::LambdaArgument>>(
llvm::ArrayRef<mlir::concretelang::LambdaArgument *>(
inputArguments));
ASSERT_EXPECTED_SUCCESS(res);
if (test.outputs.size() != 1) {
FAIL() << "Only one result function are supported.";
}
ASSERT_LLVM_ERROR(checkResult(test.outputs[0], res.get()));
TEST(End2EndJit_FHE, zero_tensor) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main() -> tensor<2x2x4x!FHE.eint<6>> {
%0 = "FHE.zero_tensor"() : () -> tensor<2x2x4x!FHE.eint<6>>
return %0 : tensor<2x2x4x!FHE.eint<6>>
}
)XXX");
llvm::Expected<std::unique_ptr<mlir::concretelang::LambdaArgument>> res =
lambda.operator()<std::unique_ptr<mlir::concretelang::LambdaArgument>>();
ASSERT_EXPECTED_SUCCESS(res);
mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<>> &resp =
(*res)
->cast<mlir::concretelang::TensorLambdaArgument<
mlir::concretelang::IntLambdaArgument<>>>();
ASSERT_EQ(resp.getDimensions().size(), (size_t)3);
ASSERT_EQ(resp.getDimensions().at(0), 2);
ASSERT_EQ(resp.getDimensions().at(1), 2);
ASSERT_EQ(resp.getDimensions().at(2), 4);
ASSERT_EXPECTED_VALUE(resp.getNumElements(), 2 * 2 * 4);
for (size_t i = 0; i < 2; i++) {
for (size_t j = 0; j < 2; j++) {
for (size_t k = 0; k < 4; k++) {
EXPECT_EQ(resp.getValue()[i * 8 + j * 4 + k], 0)
<< ", at pos(" << i << "," << j << "," << k << ")";
}
// Free arguments
for (auto arg : inputArguments) {
delete arg;
}
}
}
// FHE.add_eint_int /////////////////////////////////////////////////////////
#define INSTANTIATE_END_TO_END_JIT_TEST_SUITE_FROM_FILE(prefix, path) \
namespace prefix { \
auto valuesVector = loadEndToEndDesc(path); \
auto values = testing::ValuesIn<std::vector<EndToEndDesc>>(valuesVector); \
INSTANTIATE_TEST_SUITE_P(prefix, EndToEndJitTest, values, \
printEndToEndDesc); \
}
TEST(End2EndJit_FHE, add_eint_int_cst) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
%0 = arith.constant 1 : i3
%1 = "FHE.add_eint_int"(%arg0, %0): (!FHE.eint<2>, i3) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(0_u64), 1);
ASSERT_EXPECTED_VALUE(lambda(1_u64), 2);
ASSERT_EXPECTED_VALUE(lambda(2_u64), 3);
ASSERT_EXPECTED_VALUE(lambda(3_u64), 4);
}
TEST(End2EndJit_FHE, add_eint_int_arg) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<2>, %arg1: i3) -> !FHE.eint<2> {
%1 = "FHE.add_eint_int"(%arg0, %arg1): (!FHE.eint<2>, i3) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(0_u64, 1_u64), 1);
ASSERT_EXPECTED_VALUE(lambda(1_u64, 2_u64), 3);
}
// FHE.sub_int_eint /////////////////////////////////////////////////////////
TEST(End2EndJit_FHE, sub_int_eint_cst) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
%0 = arith.constant 7 : i3
%1 = "FHE.sub_int_eint"(%0, %arg0): (i3, !FHE.eint<2>) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(1_u64), 6);
ASSERT_EXPECTED_VALUE(lambda(2_u64), 5);
ASSERT_EXPECTED_VALUE(lambda(3_u64), 4);
ASSERT_EXPECTED_VALUE(lambda(4_u64), 3);
}
TEST(End2EndJit_FHE, sub_int_eint_arg) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: i3, %arg1: !FHE.eint<2>) -> !FHE.eint<2> {
%1 = "FHE.sub_int_eint"(%arg0, %arg1): (i3, !FHE.eint<2>) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(2_u64, 2_u64), 0);
ASSERT_EXPECTED_VALUE(lambda(2_u64, 1_u64), 1);
ASSERT_EXPECTED_VALUE(lambda(7_u64, 2_u64), 5);
}
// FHE.neg_eint /////////////////////////////////////////////////////////////
TEST(End2EndJit_FHE, neg_eint) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(0_u64), 0);
ASSERT_EXPECTED_VALUE(lambda(1_u64), 255);
ASSERT_EXPECTED_VALUE(lambda(4_u64), 252);
ASSERT_EXPECTED_VALUE(lambda(250_u64), 6);
}
TEST(End2EndJit_FHE, neg_eint_3bits) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<3>) -> !FHE.eint<3> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<3>) -> (!FHE.eint<3>)
return %1: !FHE.eint<3>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(0_u64), 0);
ASSERT_EXPECTED_VALUE(lambda(1_u64), 15);
ASSERT_EXPECTED_VALUE(lambda(4_u64), 12);
ASSERT_EXPECTED_VALUE(lambda(13_u64), 3);
}
// FHE.sub_int_eint /////////////////////////////////////////////////////////
TEST(End2EndJit_FHE, mul_eint_int_cst) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
%0 = arith.constant 2 : i3
%1 = "FHE.mul_eint_int"(%arg0, %0): (!FHE.eint<2>, i3) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(0_u64), 0);
ASSERT_EXPECTED_VALUE(lambda(1_u64), 2);
ASSERT_EXPECTED_VALUE(lambda(2_u64), 4);
}
TEST(End2EndJit_FHE, mul_eint_int_arg) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<2>, %arg1: i3) -> !FHE.eint<2> {
%1 = "FHE.mul_eint_int"(%arg0, %arg1): (!FHE.eint<2>, i3) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(0_u64, 2), 0);
ASSERT_EXPECTED_VALUE(lambda(1_u64, 2), 2);
ASSERT_EXPECTED_VALUE(lambda(2_u64, 2), 4);
}
// FHE.add_eint /////////////////////////////////////////////////////////////
TEST(End2EndJit_FHE, add_eint) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<7>, %arg1: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.add_eint"(%arg0, %arg1): (!FHE.eint<7>, !FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(1_u64, 2_u64), 3);
ASSERT_EXPECTED_VALUE(lambda(4_u64, 5_u64), 9);
ASSERT_EXPECTED_VALUE(lambda(1_u64, 1_u64), 2);
}
// Same as End2EndJit_FHE::add_eint above, but using
// `LambdaArgument` instances as arguments
TEST(End2EndJit_FHE, add_eint_lambda_argument) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<7>, %arg1: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.add_eint"(%arg0, %arg1): (!FHE.eint<7>, !FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
)XXX");
mlir::concretelang::IntLambdaArgument<> ila1(1);
mlir::concretelang::IntLambdaArgument<> ila2(2);
mlir::concretelang::IntLambdaArgument<> ila7(7);
mlir::concretelang::IntLambdaArgument<> ila9(9);
ASSERT_EXPECTED_VALUE(lambda({&ila1, &ila2}), 3);
ASSERT_EXPECTED_VALUE(lambda({&ila7, &ila9}), 16);
ASSERT_EXPECTED_VALUE(lambda({&ila1, &ila7}), 8);
ASSERT_EXPECTED_VALUE(lambda({&ila1, &ila9}), 10);
ASSERT_EXPECTED_VALUE(lambda({&ila2, &ila7}), 9);
}
// Same as End2EndJit_FHE::add_eint above, but using
// `LambdaArgument` instances as arguments and as a result type
TEST(End2EndJit_FHE, add_eint_lambda_argument_res) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<7>, %arg1: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.add_eint"(%arg0, %arg1): (!FHE.eint<7>, !FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
)XXX");
mlir::concretelang::IntLambdaArgument<> ila1(1);
mlir::concretelang::IntLambdaArgument<> ila2(2);
mlir::concretelang::IntLambdaArgument<> ila7(7);
mlir::concretelang::IntLambdaArgument<> ila9(9);
auto eval = [&](mlir::concretelang::IntLambdaArgument<> &arg0,
mlir::concretelang::IntLambdaArgument<> &arg1,
uint64_t expected) {
llvm::Expected<std::unique_ptr<mlir::concretelang::LambdaArgument>> res0 =
lambda.operator()<std::unique_ptr<mlir::concretelang::LambdaArgument>>(
{&arg0, &arg1});
ASSERT_EXPECTED_SUCCESS(res0);
ASSERT_TRUE((*res0)->isa<mlir::concretelang::IntLambdaArgument<>>());
ASSERT_EQ(
(*res0)->cast<mlir::concretelang::IntLambdaArgument<>>().getValue(),
expected);
};
eval(ila1, ila2, 3);
eval(ila7, ila9, 16);
eval(ila1, ila7, 8);
eval(ila1, ila9, 10);
eval(ila2, ila7, 9);
}
// Same as End2EndJit_FHE::neg_eint above, but using
// `LambdaArgument` instances as arguments
TEST(End2EndJit_FHE, neg_eint_lambda_argument) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
)XXX");
mlir::concretelang::IntLambdaArgument<> ila0(0);
mlir::concretelang::IntLambdaArgument<> ila2(2);
mlir::concretelang::IntLambdaArgument<> ila7(7);
mlir::concretelang::IntLambdaArgument<> ila150(150);
mlir::concretelang::IntLambdaArgument<> ila249(249);
ASSERT_EXPECTED_VALUE(lambda({&ila0}), 0);
ASSERT_EXPECTED_VALUE(lambda({&ila2}), 254);
ASSERT_EXPECTED_VALUE(lambda({&ila7}), 249);
ASSERT_EXPECTED_VALUE(lambda({&ila150}), 106);
ASSERT_EXPECTED_VALUE(lambda({&ila249}), 7);
}
// Same as End2EndJit_FHE::neg_eint above, but using
// `LambdaArgument` instances as arguments and as a result type
TEST(End2EndJit_FHE, neg_eint_lambda_argument_res) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
)XXX");
mlir::concretelang::IntLambdaArgument<> ila1(1);
mlir::concretelang::IntLambdaArgument<> ila2(2);
mlir::concretelang::IntLambdaArgument<> ila7(7);
mlir::concretelang::IntLambdaArgument<> ila9(9);
auto eval = [&](mlir::concretelang::IntLambdaArgument<> &arg0,
uint64_t expected) {
llvm::Expected<std::unique_ptr<mlir::concretelang::LambdaArgument>> res0 =
lambda.operator()<std::unique_ptr<mlir::concretelang::LambdaArgument>>(
{&arg0});
ASSERT_EXPECTED_SUCCESS(res0);
ASSERT_TRUE((*res0)->isa<mlir::concretelang::IntLambdaArgument<>>());
ASSERT_EQ(
(*res0)->cast<mlir::concretelang::IntLambdaArgument<>>().getValue(),
expected);
};
eval(ila1, 255);
eval(ila2, 254);
eval(ila7, 249);
eval(ila9, 247);
}
INSTANTIATE_END_TO_END_JIT_TEST_SUITE_FROM_FILE(
FHE, "tests/unittest/end_to_end_fhe.yaml")
INSTANTIATE_END_TO_END_JIT_TEST_SUITE_FROM_FILE(
EncryptedTensor, "tests/unittest/end_to_end_encrypted_tensor.yaml")
// FHE.apply_lookup_table /////////////////////////////////////////////////////
@@ -332,8 +78,7 @@ TEST_P(ApplyLookupTableWithPrecision, identity_func) {
<< "return %1: !FHE.eint<" << precision << ">\n"
<< "}\n";
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(mlirProgram.str());
checkedJit(lambda, mlirProgram.str());
if (precision >= 6) {
// This test often fails for this precision, so we need retries.
@@ -358,37 +103,4 @@ TEST_P(ApplyLookupTableWithPrecision, identity_func) {
for (uint64_t i = 0; i < sizeOfTLU; i++)
ASSERT_EXPECTED_VALUE(lambda(i), i);
}
}
INSTANTIATE_TEST_SUITE_P(End2EndJit_FHE, ApplyLookupTableWithPrecision,
::testing::Values(1, 2, 3, 4, 5, 6, 7));
TEST(End2EndJit_FHE, apply_lookup_table_multiple_precision) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<6>, %arg1: !FHE.eint<3>) -> !FHE.eint<6> {
%tlu_7 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63]> : tensor<64xi64>
%tlu_3 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>
%a = "FHE.apply_lookup_table"(%arg0, %tlu_7): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
%b = "FHE.apply_lookup_table"(%arg1, %tlu_3): (!FHE.eint<3>, tensor<8xi64>) -> (!FHE.eint<6>)
%a_plus_b = "FHE.add_eint"(%a, %b): (!FHE.eint<6>, !FHE.eint<6>) -> (!FHE.eint<6>)
return %a_plus_b: !FHE.eint<6>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(23_u64, 7_u64), 30);
}
TEST(End2EndJit_FHE, apply_lookup_table_random_func) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
func @main(%arg0: !FHE.eint<6>) -> !FHE.eint<6> {
%tlu = arith.constant dense<[16, 91, 16, 83, 80, 74, 21, 96, 1, 63, 49, 122, 76, 89, 74, 55, 109, 110, 103, 54, 105, 14, 66, 47, 52, 89, 7, 10, 73, 44, 119, 92, 25, 104, 123, 100, 108, 86, 29, 121, 118, 52, 107, 48, 34, 37, 13, 122, 107, 48, 74, 59, 96, 36, 50, 55, 120, 72, 27, 45, 12, 5, 96, 12]> : tensor<64xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
return %1: !FHE.eint<6>
}
)XXX");
ASSERT_EXPECTED_VALUE(lambda(5_u64), 74);
ASSERT_EXPECTED_VALUE(lambda(62_u64), 96);
ASSERT_EXPECTED_VALUE(lambda(0_u64), 16);
ASSERT_EXPECTED_VALUE(lambda(63_u64), 12);
}

View File

@@ -13,7 +13,7 @@ using tensorArgTy = Z::TensorLambdaArgument<Z::IntLambdaArgument<Elmt>>;
TEST(End2EndJit_FHELinalg, add_eint_int_term_to_term) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term addition of `%a0` with `%a1`
func @main(%a0: tensor<4x!FHE.eint<6>>, %a1: tensor<4xi7>) -> tensor<4x!FHE.eint<6>> {
%res = "FHELinalg.add_eint_int"(%a0, %a1) : (tensor<4x!FHE.eint<6>>, tensor<4xi7>) -> tensor<4x!FHE.eint<6>>
@@ -45,7 +45,7 @@ TEST(End2EndJit_FHELinalg, add_eint_int_term_to_term) {
// Same as add_eint_int_term_to_term test above, but returning a lambda argument
TEST(End2EndJit_FHELinalg, add_eint_int_term_to_term_ret_lambda_argument) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term addition of `%a0` with `%a1`
func @main(%a0: tensor<4x!FHE.eint<6>>, %a1: tensor<4xi7>) -> tensor<4x!FHE.eint<6>> {
%res = "FHELinalg.add_eint_int"(%a0, %a1) : (tensor<4x!FHE.eint<6>>, tensor<4xi7>) -> tensor<4x!FHE.eint<6>>
@@ -88,7 +88,7 @@ TEST(End2EndJit_FHELinalg, add_eint_int_term_to_term_ret_lambda_argument) {
TEST(End2EndJit_FHELinalg,
add_eint_int_term_to_term_ret_lambda_argument_multi_dim) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term addition of `%a0` with `%a1`
func @main(%a0: tensor<4x2x3x!FHE.eint<6>>, %a1: tensor<4x2x3xi7>) -> tensor<4x2x3x!FHE.eint<6>> {
%res = "FHELinalg.add_eint_int"(%a0, %a1) : (tensor<4x2x3x!FHE.eint<6>>, tensor<4x2x3xi7>) -> tensor<4x2x3x!FHE.eint<6>>
@@ -132,7 +132,7 @@ TEST(End2EndJit_FHELinalg,
TEST(End2EndJit_FHELinalg, add_eint_int_term_to_term_broadcast) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term addition of `%a0` with `%a1`
func @main(%a0: tensor<4x1x4x!FHE.eint<5>>, %a1: tensor<1x4x4xi6>) -> tensor<4x4x4x!FHE.eint<5>> {
%res = "FHELinalg.add_eint_int"(%a0, %a1) : (tensor<4x1x4x!FHE.eint<5>>, tensor<1x4x4xi6>) -> tensor<4x4x4x!FHE.eint<5>>
@@ -181,7 +181,7 @@ TEST(End2EndJit_FHELinalg, add_eint_int_term_to_term_broadcast) {
TEST(End2EndJit_FHELinalg, add_eint_int_matrix_column) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the addition of a 3x3 matrix of encrypted integers and a 3x1 matrix (a column) of encrypted integers.
//
// [1,2,3] [1] [2,3,4]
@@ -227,7 +227,7 @@ TEST(End2EndJit_FHELinalg, add_eint_int_matrix_column) {
}
TEST(End2EndJit_FHELinalg, add_eint_int_matrix_line) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the addition of a 3x3 matrix of encrypted integers and a 1x3 matrix (a line) of encrypted integers.
//
// [1,2,3] [2,4,6]
@@ -271,7 +271,7 @@ TEST(End2EndJit_FHELinalg, add_eint_int_matrix_line) {
}
TEST(End2EndJit_FHELinalg, add_eint_int_matrix_line_missing_dim) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Same behavior than the previous one, but as the dimension #2 of operand #2 is missing.
func @main(%a0: tensor<3x3x!FHE.eint<4>>, %a1: tensor<3xi5>) -> tensor<3x3x!FHE.eint<4>> {
%res = "FHELinalg.add_eint_int"(%a0, %a1) : (tensor<3x3x!FHE.eint<4>>, tensor<3xi5>) -> tensor<3x3x!FHE.eint<4>>
@@ -314,7 +314,7 @@ TEST(End2EndJit_FHELinalg, add_eint_int_matrix_line_missing_dim) {
TEST(End2EndJit_FHELinalg, add_eint_term_to_term) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term addition of `%a0` with `%a1`
func @main(%a0: tensor<4x!FHE.eint<6>>, %a1: tensor<4x!FHE.eint<6>>) -> tensor<4x!FHE.eint<6>> {
%res = "FHELinalg.add_eint"(%a0, %a1) : (tensor<4x!FHE.eint<6>>, tensor<4x!FHE.eint<6>>) -> tensor<4x!FHE.eint<6>>
@@ -348,7 +348,7 @@ TEST(End2EndJit_FHELinalg, add_eint_term_to_term) {
TEST(End2EndJit_FHELinalg, add_eint_term_to_term_broadcast) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term addition of `%a0` with `%a1`
func @main(%a0: tensor<4x1x4x!FHE.eint<5>>, %a1:
tensor<1x4x4x!FHE.eint<5>>) -> tensor<4x4x4x!FHE.eint<5>> {
@@ -399,7 +399,7 @@ TEST(End2EndJit_FHELinalg, add_eint_term_to_term_broadcast) {
TEST(End2EndJit_FHELinalg, add_eint_matrix_column) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the addition of a 3x3 matrix of encrypted integers and a 3x1 matrix (a column) of encrypted integers.
//
// [1,2,3] [1] [2,3,4]
@@ -447,7 +447,7 @@ TEST(End2EndJit_FHELinalg, add_eint_matrix_column) {
TEST(End2EndJit_FHELinalg, add_eint_matrix_line) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the addition of a 3x3 matrix of encrypted integers and a 1x3 matrix (a line) of encrypted integers.
//
// [1,2,3] [2,4,6]
@@ -494,7 +494,7 @@ TEST(End2EndJit_FHELinalg, add_eint_matrix_line) {
TEST(End2EndJit_FHELinalg, add_eint_matrix_line_missing_dim) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Same behavior than the previous one, but as the dimension #2 of operand #2 is missing.
func @main(%a0: tensor<3x3x!FHE.eint<4>>, %a1: tensor<3x!FHE.eint<4>>) -> tensor<3x3x!FHE.eint<4>> {
%res = "FHELinalg.add_eint"(%a0, %a1) : (tensor<3x3x!FHE.eint<4>>, tensor<3x!FHE.eint<4>>) -> tensor<3x3x!FHE.eint<4>>
@@ -533,7 +533,7 @@ TEST(End2EndJit_FHELinalg, add_eint_matrix_line_missing_dim) {
TEST(End2EndJit_FHELinalg, add_eint_tensor_dim_equals_1) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Broadcasting shouldn't happen when some dimensions are equals to 1
func @main(%arg0: tensor<3x1x2x!FHE.eint<5>>, %arg1: tensor<3x1x2x!FHE.eint<5>>) -> tensor<3x1x2x!FHE.eint<5>> {
%1 = "FHELinalg.add_eint"(%arg0, %arg1) : (tensor<3x1x2x!FHE.eint<5>>, tensor<3x1x2x!FHE.eint<5>>) -> tensor<3x1x2x!FHE.eint<5>>
@@ -580,7 +580,7 @@ TEST(End2EndJit_FHELinalg, add_eint_tensor_dim_equals_1) {
TEST(End2EndJit_FHELinalg, sub_int_eint_term_to_term) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term substraction of `%a0` with `%a1`
func @main(%a0: tensor<4xi5>, %a1: tensor<4x!FHE.eint<4>>) -> tensor<4x!FHE.eint<4>> {
%res = "FHELinalg.sub_int_eint"(%a0, %a1) : (tensor<4xi5>, tensor<4x!FHE.eint<4>>) -> tensor<4x!FHE.eint<4>>
@@ -611,7 +611,7 @@ TEST(End2EndJit_FHELinalg, sub_int_eint_term_to_term) {
TEST(End2EndJit_FHELinalg, sub_int_eint_term_to_term_broadcast) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term substraction of `%a0` with `%a1`, where dimensions equals to one are stretched.
func @main(%a0: tensor<4x1x4xi8>, %a1: tensor<1x4x4x!FHE.eint<7>>) -> tensor<4x4x4x!FHE.eint<7>> {
%res = "FHELinalg.sub_int_eint"(%a0, %a1) : (tensor<4x1x4xi8>, tensor<1x4x4x!FHE.eint<7>>) -> tensor<4x4x4x!FHE.eint<7>>
@@ -658,7 +658,7 @@ TEST(End2EndJit_FHELinalg, sub_int_eint_term_to_term_broadcast) {
TEST(End2EndJit_FHELinalg, sub_int_eint_matrix_column) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the substraction of a 3x3 matrix of integers and a 3x1 matrix (a column) of encrypted integers.
//
// [1,2,3] [1] [0,2,3]
@@ -707,7 +707,7 @@ TEST(End2EndJit_FHELinalg, sub_int_eint_matrix_column) {
TEST(End2EndJit_FHELinalg, sub_int_eint_matrix_line) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the substraction of a 3x3 matrix of integers and a 1x3 matrix (a line) of encrypted integers.
//
// [1,2,3] [0,0,0]
@@ -754,7 +754,7 @@ TEST(End2EndJit_FHELinalg, sub_int_eint_matrix_line) {
TEST(End2EndJit_FHELinalg, sub_int_eint_matrix_line_missing_dim) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Same behavior than the previous one, but as the dimension #2 of operand #2 is missing.
func @main(%a0: tensor<3x3xi5>, %a1: tensor<3x!FHE.eint<4>>) -> tensor<3x3x!FHE.eint<4>> {
%res = "FHELinalg.sub_int_eint"(%a0, %a1) : (tensor<3x3xi5>, tensor<3x!FHE.eint<4>>) -> tensor<3x3x!FHE.eint<4>>
@@ -797,7 +797,7 @@ TEST(End2EndJit_FHELinalg, sub_int_eint_matrix_line_missing_dim) {
TEST(End2EndJit_FHELinalg, mul_eint_int_term_to_term) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term multiplication of `%a0` with `%a1`
func @main(%a0: tensor<4x!FHE.eint<6>>, %a1: tensor<4xi7>) -> tensor<4x!FHE.eint<6>> {
%res = "FHELinalg.mul_eint_int"(%a0, %a1) : (tensor<4x!FHE.eint<6>>, tensor<4xi7>) -> tensor<4x!FHE.eint<6>>
@@ -828,7 +828,7 @@ TEST(End2EndJit_FHELinalg, mul_eint_int_term_to_term) {
TEST(End2EndJit_FHELinalg, mul_eint_int_term_to_term_broadcast) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the term to term multiplication of `%a0` with `%a1`, where dimensions equals to one are stretched.
func @main(%a0: tensor<4x1x4x!FHE.eint<6>>, %a1: tensor<1x4x4xi7>) -> tensor<4x4x4x!FHE.eint<6>> {
%res = "FHELinalg.mul_eint_int"(%a0, %a1) : (tensor<4x1x4x!FHE.eint<6>>, tensor<1x4x4xi7>) -> tensor<4x4x4x!FHE.eint<6>>
@@ -876,7 +876,7 @@ TEST(End2EndJit_FHELinalg, mul_eint_int_term_to_term_broadcast) {
TEST(End2EndJit_FHELinalg, mul_eint_int_matrix_column) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the multiplication of a 3x3 matrix of encrypted integers and a 3x1 matrix (a column) of integers.
//
// [1,2,3] [1] [1,2,3]
@@ -923,7 +923,7 @@ TEST(End2EndJit_FHELinalg, mul_eint_int_matrix_column) {
TEST(End2EndJit_FHELinalg, mul_eint_int_matrix_line) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the multiplication of a 3x3 matrix of encrypted integers and a 1x3 matrix (a line) of integers.
//
// [1,2,3] [2,4,6]
@@ -968,7 +968,7 @@ TEST(End2EndJit_FHELinalg, mul_eint_int_matrix_line) {
TEST(End2EndJit_FHELinalg, mul_eint_int_matrix_line_missing_dim) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Same behavior than the previous one, but as the dimension #2 of operand #2 is missing.
func @main(%a0: tensor<3x3x!FHE.eint<4>>, %a1: tensor<3xi5>) -> tensor<3x3x!FHE.eint<4>> {
%res = "FHELinalg.mul_eint_int"(%a0, %a1) : (tensor<3x3x!FHE.eint<4>>, tensor<3xi5>) -> tensor<3x3x!FHE.eint<4>>
@@ -1011,7 +1011,7 @@ TEST(End2EndJit_FHELinalg, mul_eint_int_matrix_line_missing_dim) {
TEST(End2EndJit_FHELinalg, apply_lookup_table) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the lookup of 3x3 matrix of encrypted indices of with 2 on a table of size 4=2² of clear integers.
//
// [0,1,2] [1,3,5]
@@ -1059,7 +1059,7 @@ TEST(End2EndJit_FHELinalg, apply_lookup_table) {
TEST(End2EndJit_FHELinalg, apply_multi_lookup_table) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the lookup of 3x3 matrix of encrypted indices of width 2 on a 3x3 matrix of tables of size 4=2² of clear integers.
func @main(%arg0: tensor<3x3x!FHE.eint<2>>, %arg1: tensor<3x3x4xi64>) -> tensor<3x3x!FHE.eint<2>> {
%1 = "FHELinalg.apply_multi_lookup_table"(%arg0, %arg1): (tensor<3x3x!FHE.eint<2>>, tensor<3x3x4xi64>) -> tensor<3x3x!FHE.eint<2>>
@@ -1108,7 +1108,7 @@ TEST(End2EndJit_FHELinalg, apply_multi_lookup_table) {
TEST(End2EndJit_FHELinalg, apply_multi_lookup_table_with_boradcast) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the lookup of 3x3 matrix of encrypted indices of width 2 on a vector of 3 tables of size 4=2² of clear integers.
func @main(%arg0: tensor<3x3x!FHE.eint<2>>, %arg1: tensor<3x4xi64>) -> tensor<3x3x!FHE.eint<2>> {
%1 = "FHELinalg.apply_multi_lookup_table"(%arg0, %arg1): (tensor<3x3x!FHE.eint<2>>, tensor<3x4xi64>) -> tensor<3x3x!FHE.eint<2>>
@@ -1160,7 +1160,7 @@ TEST(End2EndJit_FHELinalg, apply_multi_lookup_table_with_boradcast) {
TEST(End2EndJit_FHELinalg, apply_mapped_lookup_table_sequential) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the lookup of 3x3 matrix of encrypted indices of width 2 of a 3x3 matrix of tables of size 4=2² of clear integers.
func @main(%t: tensor<3x3x!FHE.eint<2>>, %luts: tensor<9x4xi64>, %map: tensor<3x3xindex>) -> tensor<3x3x!FHE.eint<2>> {
%1 = "FHELinalg.apply_mapped_lookup_table"(%t, %luts, %map) :
@@ -1208,7 +1208,7 @@ TEST(End2EndJit_FHELinalg, apply_mapped_lookup_table_sequential) {
TEST(End2EndJit_FHELinalg, apply_mapped_lookup_table_same_lut) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the lookup of 3x3 matrix of encrypted indices of width 2 of a 3x3 matrix of tables of size 4=2² of clear integers.
func @main(%t: tensor<3x3x!FHE.eint<2>>, %luts: tensor<9x4xi64>, %map: tensor<3x3xindex>) -> tensor<3x3x!FHE.eint<2>> {
%1 = "FHELinalg.apply_mapped_lookup_table"(%t, %luts, %map) :
@@ -1259,7 +1259,7 @@ TEST(End2EndJit_FHELinalg, apply_mapped_lookup_table_same_lut) {
///////////////////////////////////////////////////////////////////////////////
TEST(CompileAndRunTensorEncrypted, dot_eint_int_7) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%arg0: tensor<4x!FHE.eint<7>>,
%arg1: tensor<4xi8>) -> !FHE.eint<7>
{
@@ -1283,7 +1283,7 @@ func @main(%arg0: tensor<4x!FHE.eint<7>>,
TEST(End2EndJit_FHELinalg, neg_eint) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the negation of a 3x3 matrix of encrypted integers of width 2.
//
// ([0,1,2]) [0,7,6]
@@ -1331,7 +1331,7 @@ TEST(End2EndJit_FHELinalg, neg_eint) {
TEST(End2EndJit_FHELinalg, matmul_eint_int_2d_2d) {
namespace concretelang = mlir::concretelang;
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<3x2x!FHE.eint<7>> {
%y = arith.constant dense<
@@ -1389,7 +1389,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<3x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, matmul_eint_int_1d_2d) {
namespace concretelang = mlir::concretelang;
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x!FHE.eint<7>>) -> tensor<2x!FHE.eint<7>> {
%y = arith.constant dense<
@@ -1434,7 +1434,7 @@ func @main(%x: tensor<3x!FHE.eint<7>>) -> tensor<2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, matmul_eint_int_1d_3d) {
namespace concretelang = mlir::concretelang;
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x!FHE.eint<7>>) -> tensor<4x2x!FHE.eint<7>> {
%y = arith.constant dense<
@@ -1505,7 +1505,7 @@ func @main(%x: tensor<3x!FHE.eint<7>>) -> tensor<4x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, matmul_eint_int_2d_1d) {
namespace concretelang = mlir::concretelang;
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<3x!FHE.eint<7>> {
%y = arith.constant dense<
@@ -1554,7 +1554,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<3x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, matmul_eint_int_3d_1d) {
namespace concretelang = mlir::concretelang;
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<2x3x4x!FHE.eint<7>>) -> tensor<2x3x!FHE.eint<7>> {
%y = arith.constant dense<
@@ -1613,7 +1613,7 @@ func @main(%x: tensor<2x3x4x!FHE.eint<7>>) -> tensor<2x3x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, matmul_eint_int_3d_3d) {
namespace concretelang = mlir::concretelang;
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<2x3x4x!FHE.eint<7>>) -> tensor<2x3x2x!FHE.eint<7>> {
%y = arith.constant dense<
@@ -1696,7 +1696,7 @@ func @main(%x: tensor<2x3x4x!FHE.eint<7>>) -> tensor<2x3x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, matmul_eint_int_4d_3d) {
namespace concretelang = mlir::concretelang;
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<2x1x3x4x!FHE.eint<7>>) -> tensor<2x5x3x2x!FHE.eint<7>> {
%y = arith.constant dense<
@@ -1852,7 +1852,7 @@ func @main(%x: tensor<2x1x3x4x!FHE.eint<7>>) -> tensor<2x5x3x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, matmul_int_eint) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
// Returns the matrix multiplication of a 3x2 matrix of encrypted integers and a 2x3 matrix of integers.
// [ 1, 2, 3]
// [ 2, 3, 4]
@@ -1908,7 +1908,7 @@ TEST(End2EndJit_FHELinalg, matmul_int_eint) {
TEST(End2EndJit_FHELinalg, conv2d_simple_input44_kernel22) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%input: tensor<1x1x4x4x!FHE.eint<6>>, %weight: tensor<1x1x2x2xi7>) -> tensor<1x1x2x2x!FHE.eint<6>> {
%0 = "FHELinalg.conv2d"(%input, %weight){
strides = dense<[2,2]> : tensor<2xi64>, dilations = dense<[1,1]> : tensor<2xi64>, padding = dense<[0,0,0,0]> : tensor<4xi64>
@@ -1955,7 +1955,7 @@ TEST(End2EndJit_FHELinalg, conv2d_simple_input44_kernel22) {
TEST(End2EndJit_FHELinalg, conv2d_simple_input44_const_kernel22) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%input: tensor<1x1x4x4x!FHE.eint<6>>) -> tensor<1x1x2x2x!FHE.eint<6>> {
%weight = arith.constant dense<[[[[1, 2], [2, 1]]]]> : tensor<1x1x2x2xi7>
%0 = "FHELinalg.conv2d"(%input, %weight){
@@ -1996,7 +1996,7 @@ TEST(End2EndJit_FHELinalg, conv2d_simple_input44_const_kernel22) {
TEST(End2EndJit_FHELinalg, conv2d_simple_input44_kernel22_const_bias) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%input: tensor<1x1x4x4x!FHE.eint<6>>, %weight: tensor<1x1x2x2xi7>) -> tensor<1x1x2x2x!FHE.eint<6>> {
%bias = arith.constant dense<[1]> : tensor<1xi7>
%0 = "FHELinalg.conv2d"(%input, %weight, %bias){
@@ -2044,7 +2044,7 @@ TEST(End2EndJit_FHELinalg, conv2d_simple_input44_kernel22_const_bias) {
TEST(End2EndJit_FHELinalg, conv2d_batched_input44_kernel22) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%input: tensor<3x1x4x4x!FHE.eint<6>>, %weight: tensor<1x1x2x2xi7>) -> tensor<3x1x2x2x!FHE.eint<6>> {
%0 = "FHELinalg.conv2d"(%input, %weight){
strides = dense<[2,2]> : tensor<2xi64>, dilations = dense<[1,1]> : tensor<2xi64>, padding = dense<[0,0,0,0]> : tensor<4xi64>
@@ -2114,7 +2114,7 @@ TEST(End2EndJit_FHELinalg, conv2d_batched_input44_kernel22) {
TEST(End2EndJit_FHELinalg, conv2d_simple_input44_kernel2122) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%input: tensor<1x1x4x4x!FHE.eint<6>>, %weight: tensor<2x1x2x2xi7>) -> tensor<1x2x2x2x!FHE.eint<6>> {
%0 = "FHELinalg.conv2d"(%input, %weight){
strides = dense<[2,2]> : tensor<2xi64>, dilations = dense<[1,1]> : tensor<2xi64>, padding = dense<[0,0,0,0]> : tensor<4xi64>
@@ -2174,7 +2174,7 @@ TEST(End2EndJit_FHELinalg, conv2d_simple_input44_kernel2122) {
TEST(End2EndJit_FHELinalg, conv2d_simple_input1244_kernel1222) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%input: tensor<1x2x4x4x!FHE.eint<6>>, %weight: tensor<1x2x2x2xi7>) -> tensor<1x1x2x2x!FHE.eint<6>> {
%0 = "FHELinalg.conv2d"(%input, %weight){
strides = dense<[2,2]> : tensor<2xi64>, dilations = dense<[1,1]> : tensor<2xi64>, padding = dense<[0,0,0,0]> : tensor<4xi64>
@@ -2233,7 +2233,7 @@ TEST(End2EndJit_FHELinalg, conv2d_simple_input1244_kernel1222) {
TEST(End2EndJit_FHELinalg, conv2d_simple_input44_kernel22_dilation2) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%input: tensor<1x1x4x4x!FHE.eint<6>>, %weight: tensor<1x1x2x2xi7>) -> tensor<1x1x2x2x!FHE.eint<6>> {
%0 = "FHELinalg.conv2d"(%input, %weight){
strides = dense<[1,1]> : tensor<2xi64>, dilations = dense<[2,2]> : tensor<2xi64>, padding = dense<[0,0,0,0]> : tensor<4xi64>
@@ -2284,7 +2284,7 @@ TEST(End2EndJit_FHELinalg, conv2d_simple_input44_kernel22_dilation2) {
TEST(End2EndJit_Linalg, tensor_collapse_shape) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%a: tensor<2x2x4x!FHE.eint<6>>) -> tensor<2x8x!FHE.eint<6>> {
%0 = linalg.tensor_collapse_shape %a [[0],[1,2]] : tensor<2x2x4x!FHE.eint<6>> into tensor<2x8x!FHE.eint<6>>
return %0 : tensor<2x8x!FHE.eint<6>>
@@ -2334,7 +2334,7 @@ func @main(%a: tensor<2x2x4x!FHE.eint<6>>) -> tensor<2x8x!FHE.eint<6>> {
TEST(End2EndJit_Linalg, tensor_expand_shape) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%a: tensor<2x8x!FHE.eint<6>>) -> tensor<2x2x4x!FHE.eint<6>> {
%0 = linalg.tensor_expand_shape %a [[0],[1,2]] : tensor<2x8x!FHE.eint<6>> into tensor<2x2x4x!FHE.eint<6>>
return %0 : tensor<2x2x4x!FHE.eint<6>>
@@ -2389,7 +2389,7 @@ func @main(%a: tensor<2x8x!FHE.eint<6>>) -> tensor<2x2x4x!FHE.eint<6>> {
TEST(End2EndJit_FHELinalg, sum_empty) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<0x!FHE.eint<7>>) -> !FHE.eint<7> {
%0 = "FHELinalg.sum"(%x) : (tensor<0x!FHE.eint<7>>) -> !FHE.eint<7>
@@ -2413,7 +2413,7 @@ func @main(%x: tensor<0x!FHE.eint<7>>) -> !FHE.eint<7> {
TEST(End2EndJit_FHELinalg, sum_1D_no_axes) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<4x!FHE.eint<7>>) -> !FHE.eint<7> {
%0 = "FHELinalg.sum"(%x) : (tensor<4x!FHE.eint<7>>) -> !FHE.eint<7>
@@ -2438,7 +2438,7 @@ func @main(%x: tensor<4x!FHE.eint<7>>) -> !FHE.eint<7> {
TEST(End2EndJit_FHELinalg, sum_1D_axes_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<4x!FHE.eint<7>>) -> !FHE.eint<7> {
%0 = "FHELinalg.sum"(%x) { axes = [0] } : (tensor<4x!FHE.eint<7>>) -> !FHE.eint<7>
@@ -2463,7 +2463,7 @@ func @main(%x: tensor<4x!FHE.eint<7>>) -> !FHE.eint<7> {
TEST(End2EndJit_FHELinalg, sum_2D_no_axes) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> !FHE.eint<7> {
%0 = "FHELinalg.sum"(%x) : (tensor<3x4x!FHE.eint<7>>) -> !FHE.eint<7>
@@ -2492,7 +2492,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> !FHE.eint<7> {
TEST(End2EndJit_FHELinalg, sum_2D_axes_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<4x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0] } : (tensor<3x4x!FHE.eint<7>>) -> tensor<4x!FHE.eint<7>>
@@ -2533,7 +2533,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<4x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_2D_axes_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<3x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [1] } : (tensor<3x4x!FHE.eint<7>>) -> tensor<3x!FHE.eint<7>>
@@ -2574,7 +2574,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<3x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_2D_axes_0_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> !FHE.eint<7> {
%0 = "FHELinalg.sum"(%x) { axes = [0, 1] } : (tensor<3x4x!FHE.eint<7>>) -> !FHE.eint<7>
@@ -2603,7 +2603,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> !FHE.eint<7> {
TEST(End2EndJit_FHELinalg, sum_3D_no_axes) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> !FHE.eint<7> {
%0 = "FHELinalg.sum"(%x) : (tensor<3x4x2x!FHE.eint<7>>) -> !FHE.eint<7>
@@ -2647,7 +2647,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> !FHE.eint<7> {
TEST(End2EndJit_FHELinalg, sum_3D_axes_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<4x2x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0] } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<4x2x!FHE.eint<7>>
@@ -2712,7 +2712,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<4x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_axes_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x2x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [1] } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x2x!FHE.eint<7>>
@@ -2776,7 +2776,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_axes_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x4x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [2] } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x4x!FHE.eint<7>>
@@ -2840,7 +2840,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x4x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_axes_0_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<2x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0, 1] } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<2x!FHE.eint<7>>
@@ -2896,7 +2896,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_axes_1_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [1, 2] } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x!FHE.eint<7>>
@@ -2952,7 +2952,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_axes_0_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<4x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0, 2] } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<4x!FHE.eint<7>>
@@ -3008,7 +3008,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<4x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_axes_0_1_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> !FHE.eint<7> {
%0 = "FHELinalg.sum"(%x) { axes = [0, 1, 2] } : (tensor<3x4x2x!FHE.eint<7>>) -> !FHE.eint<7>
@@ -3052,7 +3052,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> !FHE.eint<7> {
TEST(End2EndJit_FHELinalg, sum_keep_dims_empty) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<0x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { keep_dims = true } : (tensor<0x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>>
@@ -3088,7 +3088,7 @@ func @main(%x: tensor<0x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_1D_keep_dims_no_axes) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<4x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { keep_dims = true } : (tensor<4x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>>
@@ -3125,7 +3125,7 @@ func @main(%x: tensor<4x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_1D_keep_dims_axes_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<4x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0], keep_dims = true } : (tensor<4x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>>
@@ -3162,7 +3162,7 @@ func @main(%x: tensor<4x!FHE.eint<7>>) -> tensor<1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_2D_keep_dims_no_axes) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<1x1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { keep_dims = true } : (tensor<3x4x!FHE.eint<7>>) -> tensor<1x1x!FHE.eint<7>>
@@ -3207,7 +3207,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<1x1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_2D_keep_dims_axes_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<1x4x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0], keep_dims = true } : (tensor<3x4x!FHE.eint<7>>) -> tensor<1x4x!FHE.eint<7>>
@@ -3254,7 +3254,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<1x4x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_2D_keep_dims_axes_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<3x1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [1], keep_dims = true } : (tensor<3x4x!FHE.eint<7>>) -> tensor<3x1x!FHE.eint<7>>
@@ -3303,7 +3303,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<3x1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_2D_keep_dims_axes_0_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<1x1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0, 1], keep_dims = true } : (tensor<3x4x!FHE.eint<7>>) -> tensor<1x1x!FHE.eint<7>>
@@ -3348,7 +3348,7 @@ func @main(%x: tensor<3x4x!FHE.eint<7>>) -> tensor<1x1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_keep_dims_no_axes) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { keep_dims = true } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x1x!FHE.eint<7>>
@@ -3411,7 +3411,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_keep_dims_axes_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x4x2x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0], keep_dims = true } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x4x2x!FHE.eint<7>>
@@ -3479,7 +3479,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x4x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_keep_dims_axes_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x1x2x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [1], keep_dims = true } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x1x2x!FHE.eint<7>>
@@ -3546,7 +3546,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x1x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_keep_dims_axes_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x4x1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [2], keep_dims = true } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x4x1x!FHE.eint<7>>
@@ -3613,7 +3613,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x4x1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_keep_dims_axes_0_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x2x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0, 1], keep_dims = true } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x2x!FHE.eint<7>>
@@ -3676,7 +3676,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x2x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_keep_dims_axes_1_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x1x1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [1, 2], keep_dims = true } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x1x1x!FHE.eint<7>>
@@ -3739,7 +3739,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<3x1x1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_keep_dims_axes_0_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x4x1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0, 2], keep_dims = true } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x4x1x!FHE.eint<7>>
@@ -3802,7 +3802,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x4x1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, sum_3D_keep_dims_axes_0_1_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x1x!FHE.eint<7>> {
%0 = "FHELinalg.sum"(%x) { axes = [0, 1, 2], keep_dims = true } : (tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x1x!FHE.eint<7>>
@@ -3865,7 +3865,7 @@ func @main(%x: tensor<3x4x2x!FHE.eint<7>>) -> tensor<1x1x1x!FHE.eint<7>> {
TEST(End2EndJit_FHELinalg, concat_1D_axis_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x!FHE.eint<7>>, %y: tensor<4x!FHE.eint<7>>) -> tensor<7x!FHE.eint<7>> {
%0 = "FHELinalg.concat"(%x, %y) { axis = 0 } : (tensor<3x!FHE.eint<7>>, tensor<4x!FHE.eint<7>>) -> tensor<7x!FHE.eint<7>>
return %0 : tensor<7x!FHE.eint<7>>
@@ -3907,7 +3907,7 @@ func @main(%x: tensor<3x!FHE.eint<7>>, %y: tensor<4x!FHE.eint<7>>) -> tensor<7x!
TEST(End2EndJit_FHELinalg, concat_2D_axis_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<2x3x!FHE.eint<7>>, %y: tensor<3x3x!FHE.eint<7>>) -> tensor<5x3x!FHE.eint<7>> {
%0 = "FHELinalg.concat"(%x, %y) { axis = 0 } : (tensor<2x3x!FHE.eint<7>>, tensor<3x3x!FHE.eint<7>>) -> tensor<5x3x!FHE.eint<7>>
return %0 : tensor<5x3x!FHE.eint<7>>
@@ -3962,7 +3962,7 @@ func @main(%x: tensor<2x3x!FHE.eint<7>>, %y: tensor<3x3x!FHE.eint<7>>) -> tensor
TEST(End2EndJit_FHELinalg, concat_2D_axis_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<3x2x!FHE.eint<7>>, %y: tensor<3x3x!FHE.eint<7>>) -> tensor<3x5x!FHE.eint<7>> {
%0 = "FHELinalg.concat"(%x, %y) { axis = 1 } : (tensor<3x2x!FHE.eint<7>>, tensor<3x3x!FHE.eint<7>>) -> tensor<3x5x!FHE.eint<7>>
return %0 : tensor<3x5x!FHE.eint<7>>
@@ -4020,7 +4020,7 @@ func @main(%x: tensor<3x2x!FHE.eint<7>>, %y: tensor<3x3x!FHE.eint<7>>) -> tensor
TEST(End2EndJit_FHELinalg, concat_3D_axis_0) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<2x4x3x!FHE.eint<7>>, %y: tensor<2x4x3x!FHE.eint<7>>) -> tensor<4x4x3x!FHE.eint<7>> {
%0 = "FHELinalg.concat"(%x, %y) { axis = 0 } : (tensor<2x4x3x!FHE.eint<7>>, tensor<2x4x3x!FHE.eint<7>>) -> tensor<4x4x3x!FHE.eint<7>>
return %0 : tensor<4x4x3x!FHE.eint<7>>
@@ -4120,7 +4120,7 @@ func @main(%x: tensor<2x4x3x!FHE.eint<7>>, %y: tensor<2x4x3x!FHE.eint<7>>) -> te
TEST(End2EndJit_FHELinalg, concat_3D_axis_1) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<2x4x3x!FHE.eint<7>>, %y: tensor<2x4x3x!FHE.eint<7>>) -> tensor<2x8x3x!FHE.eint<7>> {
%0 = "FHELinalg.concat"(%x, %y) { axis = 1 } : (tensor<2x4x3x!FHE.eint<7>>, tensor<2x4x3x!FHE.eint<7>>) -> tensor<2x8x3x!FHE.eint<7>>
return %0 : tensor<2x8x3x!FHE.eint<7>>
@@ -4216,7 +4216,7 @@ func @main(%x: tensor<2x4x3x!FHE.eint<7>>, %y: tensor<2x4x3x!FHE.eint<7>>) -> te
TEST(End2EndJit_FHELinalg, concat_3D_axis_2) {
namespace concretelang = mlir::concretelang;
concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%x: tensor<2x4x3x!FHE.eint<7>>, %y: tensor<2x4x3x!FHE.eint<7>>) -> tensor<2x4x6x!FHE.eint<7>> {
%0 = "FHELinalg.concat"(%x, %y) { axis = 2 } : (tensor<2x4x3x!FHE.eint<7>>, tensor<2x4x3x!FHE.eint<7>>) -> tensor<2x4x6x!FHE.eint<7>>
return %0 : tensor<2x4x6x!FHE.eint<7>>
@@ -4319,8 +4319,7 @@ TEST_P(TiledMatMulParametric, tiled_matmul_eint_int) {
<< " return %0 : tensor<8x2x!FHE.eint<6>>\n"
<< " }";
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(mlirProgram.str());
checkedJit(lambda, mlirProgram.str());
const size_t rowsA = 8;
const size_t colsA = 4;

View File

@@ -2,10 +2,8 @@
#include "end_to_end_jit_test.h"
using Lambda = mlir::concretelang::JitCompilerEngine::Lambda;
TEST(Lambda_check_param, int_to_void_missing_param) {
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%arg0: !FHE.eint<1>) {
return
}
@@ -15,7 +13,7 @@ TEST(Lambda_check_param, int_to_void_missing_param) {
TEST(Lambda_check_param, DISABLED_int_to_void_good) {
// DISABLED Note: it segfaults
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%arg0: !FHE.eint<1>) {
return
}
@@ -24,7 +22,7 @@ TEST(Lambda_check_param, DISABLED_int_to_void_good) {
}
TEST(Lambda_check_param, int_to_void_superfluous_param) {
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%arg0: !FHE.eint<1>) {
return
}
@@ -33,7 +31,7 @@ TEST(Lambda_check_param, int_to_void_superfluous_param) {
}
TEST(Lambda_check_param, scalar_parameters_number) {
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(
%arg0: !FHE.eint<1>, %arg1: !FHE.eint<1>,
%arg2: !FHE.eint<1>) -> !FHE.eint<1>
@@ -49,7 +47,7 @@ TEST(Lambda_check_param, scalar_parameters_number) {
}
TEST(Lambda_check_param, scalar_tensor_to_scalar_missing_param) {
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(
%arg0: !FHE.eint<1>, %arg1: tensor<2x!FHE.eint<1>>) -> !FHE.eint<1>
{
@@ -60,7 +58,7 @@ TEST(Lambda_check_param, scalar_tensor_to_scalar_missing_param) {
}
TEST(Lambda_check_param, scalar_tensor_to_scalar) {
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(
%arg0: !FHE.eint<1>, %arg1: tensor<2x!FHE.eint<1>>) -> !FHE.eint<1>
{
@@ -72,7 +70,7 @@ TEST(Lambda_check_param, scalar_tensor_to_scalar) {
}
TEST(Lambda_check_param, scalar_tensor_to_scalar_superfluous_param) {
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(
%arg0: !FHE.eint<1>, %arg1: tensor<2x!FHE.eint<1>>) -> !FHE.eint<1>
{
@@ -85,7 +83,7 @@ TEST(Lambda_check_param, scalar_tensor_to_scalar_superfluous_param) {
}
TEST(Lambda_check_param, scalar_tensor_to_tensor_good_number_param) {
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(
%arg0: !FHE.eint<1>, %arg1: tensor<2x!FHE.eint<1>>) -> tensor<2x!FHE.eint<1>>
{
@@ -99,7 +97,7 @@ TEST(Lambda_check_param, scalar_tensor_to_tensor_good_number_param) {
TEST(Lambda_check_param, DISABLED_check_parameters_scalar_too_big) {
// DISABLED Note: loss of precision without any warning or error.
Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%arg0: !FHE.eint<1>) -> !FHE.eint<1>
{
return %arg0: !FHE.eint<1>

View File

@@ -6,14 +6,13 @@
#include "end_to_end_jit_test.h"
TEST(CompileAndRunClear, add_u64) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%arg0: i64, %arg1: i64) -> i64 {
%1 = arith.addi %arg0, %arg1 : i64
return %1: i64
}
)XXX",
"main", true);
"main", true);
ASSERT_EXPECTED_VALUE(lambda(1_u64, 2_u64), (uint64_t)3);
ASSERT_EXPECTED_VALUE(lambda(4_u64, 5_u64), (uint64_t)9);
@@ -21,7 +20,7 @@ func @main(%arg0: i64, %arg1: i64) -> i64 {
}
TEST(CompileAndRunTensorEncrypted, extract_5) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10x!FHE.eint<5>>, %i: index) -> !FHE.eint<5>{
%c = tensor.extract %t[%i] : tensor<10x!FHE.eint<5>>
return %c : !FHE.eint<5>
@@ -35,7 +34,7 @@ func @main(%t: tensor<10x!FHE.eint<5>>, %i: index) -> !FHE.eint<5>{
}
TEST(CompileAndRunTensorEncrypted, extract_twice_and_add_5) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10x!FHE.eint<5>>, %i: index, %j: index) ->
!FHE.eint<5>{
%ti = tensor.extract %t[%i] : tensor<10x!FHE.eint<5>>
@@ -54,7 +53,7 @@ func @main(%t: tensor<10x!FHE.eint<5>>, %i: index, %j: index) ->
}
TEST(CompileAndRunTensorEncrypted, dim_5) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%t: tensor<10x!FHE.eint<5>>) -> index{
%c0 = arith.constant 0 : index
%c = tensor.dim %t, %c0 : tensor<10x!FHE.eint<5>>
@@ -67,7 +66,7 @@ func @main(%t: tensor<10x!FHE.eint<5>>) -> index{
}
TEST(CompileAndRunTensorEncrypted, from_elements_5) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%0: !FHE.eint<5>) -> tensor<1x!FHE.eint<5>> {
%t = tensor.from_elements %0 : tensor<1x!FHE.eint<5>>
return %t: tensor<1x!FHE.eint<5>>
@@ -85,7 +84,7 @@ func @main(%0: !FHE.eint<5>) -> tensor<1x!FHE.eint<5>> {
// Same as `CompileAndRunTensorEncrypted::from_elements_5 but with
// `LambdaArgument` instances as arguments and as a result type
TEST(CompileAndRunTensorEncrypted, from_elements_5_lambda_argument_res) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%0: !FHE.eint<5>) -> tensor<1x!FHE.eint<5>> {
%t = tensor.from_elements %0 : tensor<1x!FHE.eint<5>>
return %t: tensor<1x!FHE.eint<5>>
@@ -116,7 +115,7 @@ func @main(%0: !FHE.eint<5>) -> tensor<1x!FHE.eint<5>> {
}
TEST(CompileAndRunTensorEncrypted, in_out_tensor_with_op_5) {
mlir::concretelang::JitCompilerEngine::Lambda lambda = checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
func @main(%in: tensor<2x!FHE.eint<5>>) -> tensor<3x!FHE.eint<5>> {
%c_0 = arith.constant 0 : index
%c_1 = arith.constant 1 : index
@@ -147,8 +146,7 @@ func @main(%in: tensor<2x!FHE.eint<5>>) -> tensor<3x!FHE.eint<5>> {
// Test is failing since with the bufferization and the parallel options.
// DISABLED as is a bit artificial test, let's investigate later.
TEST(CompileAndRunTensorEncrypted, DISABLED_linalg_generic) {
mlir::concretelang::JitCompilerEngine::Lambda lambda =
checkedJit(R"XXX(
checkedJit(lambda, R"XXX(
#map0 = affine_map<(d0) -> (d0)>
#map1 = affine_map<(d0) -> (0)>
func @main(%arg0: tensor<2x!FHE.eint<7>>, %arg1: tensor<2xi8>, %acc:
@@ -167,7 +165,7 @@ func @main(%arg0: tensor<2x!FHE.eint<7>>, %arg1: tensor<2xi8>, %acc:
return %ret : !FHE.eint<7>
}
)XXX",
"main", true);
"main", true);
static uint8_t arg0[] = {2, 8};
static uint8_t arg1[] = {6, 8};

View File

@@ -12,8 +12,7 @@
#define ASSERT_LLVM_ERROR(err) \
if (err) { \
llvm::errs() << "error: " << std::move(err) << "\n"; \
ASSERT_TRUE(false); \
ASSERT_TRUE(false) << llvm::toString(err); \
}
// Checks that the value `val` is not in an error state. Returns
@@ -21,7 +20,6 @@
template <typename T>
static bool assert_expected_success(llvm::Expected<T> &val) {
if (!((bool)val)) {
llvm::errs() << llvm::toString(std::move(val.takeError())) << "\n";
return false;
}
@@ -46,8 +44,10 @@ static bool assert_expected_failure(llvm::Expected<T> &&val) {
// an error state.
#define ASSERT_EXPECTED_SUCCESS(val) \
do { \
if (!assert_expected_success(val)) \
GTEST_FATAL_FAILURE_("Expected<T> in error state"); \
if (!assert_expected_success(val)) { \
GTEST_FATAL_FAILURE_("Expected<T> in error state") \
<< llvm::toString(val.takeError()); \
} \
} while (0)
// Checks that the value `val` of type `llvm::Expected<T>` is in
@@ -121,10 +121,10 @@ getTestKeySetCachePtr() {
// Jit-compiles the function specified by `func` from `src` and
// returns the corresponding lambda. Any compilation errors are caught
// and result in abnormal termination.
template <typename F>
mlir::concretelang::JitCompilerEngine::Lambda internalCheckedJit(
F checkFunc, llvm::StringRef src, llvm::StringRef func = "main",
bool useDefaultFHEConstraints = false, bool autoParallelize = false) {
inline llvm::Expected<mlir::concretelang::JitCompilerEngine::Lambda>
internalCheckedJit(llvm::StringRef src, llvm::StringRef func = "main",
bool useDefaultFHEConstraints = false,
bool autoParallelize = false) {
mlir::concretelang::JitCompilerEngine engine;
@@ -139,12 +139,7 @@ mlir::concretelang::JitCompilerEngine::Lambda internalCheckedJit(
llvm::Expected<mlir::concretelang::JitCompilerEngine::Lambda> lambdaOrErr =
engine.buildLambda(src, func, getTestKeySetCache());
if (!lambdaOrErr) {
std::cout << llvm::toString(lambdaOrErr.takeError()) << std::endl;
}
checkFunc(lambdaOrErr);
return std::move(*lambdaOrErr);
return lambdaOrErr;
}
// Shorthands to create integer literals of a specific type
@@ -160,10 +155,9 @@ static inline uint64_t operator"" _u64(unsigned long long int v) { return v; }
// Wrapper around `internalCheckedJit` that causes
// `ASSERT_EXPECTED_SUCCESS` to use the file and line number of the
// caller instead of `internalCheckedJit`.
#define checkedJit(...) \
internalCheckedJit( \
[](llvm::Expected<mlir::concretelang::JitCompilerEngine::Lambda> \
&lambda) { ASSERT_EXPECTED_SUCCESS(lambda); }, \
__VA_ARGS__)
#define checkedJit(VARNAME, ...) \
auto VARNAMEOrErr = internalCheckedJit(__VA_ARGS__); \
ASSERT_EXPECTED_SUCCESS(VARNAMEOrErr); \
auto VARNAME = std::move(*VARNAMEOrErr);
#endif