mirror of
https://github.com/zama-ai/concrete.git
synced 2026-02-09 12:15:09 -05:00
fix: remove sub_int_eint folder
Folding `sub_int_eint` with a zero constant actually requires negating the remaining operand, which cannot be expressed via a standard folder (a folder may only return an existing value or attribute, not create a new operation). We therefore remove the folder, since keeping it can cause erroneous computation (it returned the operand unchanged, i.e. `0 - x` folded to `x` instead of `-x`).
This commit is contained in:
committed by
Quentin Bourgerie
parent
1354759884
commit
670af02112
@@ -153,8 +153,6 @@ def SubIntEintOp : FHE_Op<"sub_int_eint"> {
|
||||
let verifier = [{
|
||||
return ::mlir::concretelang::FHE::verifySubIntEintOp(*this);
|
||||
}];
|
||||
|
||||
let hasFolder = 1;
|
||||
}
|
||||
|
||||
def NegEintOp : FHE_Op<"neg_eint"> {
|
||||
|
||||
@@ -180,8 +180,6 @@ def SubIntEintOp : FHELinalg_Op<"sub_int_eint", [TensorBroadcastingRules, Tensor
|
||||
build($_builder, $_state, lhs.getType(), rhs, lhs);
|
||||
}]>
|
||||
];
|
||||
|
||||
let hasFolder = 1;
|
||||
}
|
||||
|
||||
def NegEintOp : FHELinalg_Op<"neg_eint", [TensorUnaryEint]> {
|
||||
|
||||
@@ -139,19 +139,6 @@ OpFoldResult AddEintIntOp::fold(ArrayRef<Attribute> operands) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Avoid subtraction with constant 0
|
||||
OpFoldResult SubIntEintOp::fold(ArrayRef<Attribute> operands) {
|
||||
assert(operands.size() == 2);
|
||||
auto toSub = operands[0].dyn_cast_or_null<mlir::IntegerAttr>();
|
||||
if (toSub != nullptr) {
|
||||
auto intToSub = toSub.getInt();
|
||||
if (intToSub == 0) {
|
||||
return getOperand(1);
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// Avoid multiplication with constant 1
|
||||
OpFoldResult MulEintIntOp::fold(ArrayRef<Attribute> operands) {
|
||||
assert(operands.size() == 2);
|
||||
|
||||
@@ -1655,20 +1655,6 @@ OpFoldResult AddEintIntOp::fold(ArrayRef<Attribute> operands) {
|
||||
return getOperand(0);
|
||||
}
|
||||
|
||||
// Avoid subtraction with constant tensor of 0s
|
||||
OpFoldResult SubIntEintOp::fold(ArrayRef<Attribute> operands) {
|
||||
assert(operands.size() == 2);
|
||||
auto toSub = operands[0].dyn_cast_or_null<mlir::DenseIntElementsAttr>();
|
||||
if (toSub == nullptr)
|
||||
return nullptr;
|
||||
for (int64_t i = 0; i < toSub.size(); i++) {
|
||||
llvm::APInt cst = toSub.getFlatValue<llvm::APInt>(i);
|
||||
if (cst != 0)
|
||||
return nullptr;
|
||||
}
|
||||
return getOperand(1);
|
||||
}
|
||||
|
||||
// Avoid multiplication with constant tensor of 1s
|
||||
OpFoldResult MulEintIntOp::fold(ArrayRef<Attribute> operands) {
|
||||
assert(operands.size() == 2);
|
||||
|
||||
@@ -9,15 +9,6 @@ func @add_eint_int(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
|
||||
return %1: !FHE.eint<2>
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @sub_int_eint(%arg0: !FHE.eint<2>) -> !FHE.eint<2>
|
||||
func @sub_int_eint(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
|
||||
// CHECK-NEXT: return %arg0 : !FHE.eint<2>
|
||||
|
||||
%0 = arith.constant 0 : i3
|
||||
%1 = "FHE.sub_int_eint"(%0, %arg0): (i3, !FHE.eint<2>) -> (!FHE.eint<2>)
|
||||
return %1: !FHE.eint<2>
|
||||
}
|
||||
|
||||
// CHECK-LABEL: func @mul_eint_int(%arg0: !FHE.eint<2>) -> !FHE.eint<2>
|
||||
func @mul_eint_int(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
|
||||
// CHECK-NEXT: return %arg0 : !FHE.eint<2>
|
||||
|
||||
@@ -27,33 +27,6 @@ func @add_eint_int_2D_broadcast(%a0: tensor<4x3x!FHE.eint<2>>) -> tensor<4x3x!FH
|
||||
return %1: tensor<4x3x!FHE.eint<2>>
|
||||
}
|
||||
|
||||
// CHECK: func @sub_int_eint_1D(%[[a0:.*]]: tensor<4x!FHE.eint<2>>) -> tensor<4x!FHE.eint<2>> {
|
||||
// CHECK-NEXT: return %[[a0]] : tensor<4x!FHE.eint<2>>
|
||||
// CHECK-NEXT: }
|
||||
func @sub_int_eint_1D(%a0: tensor<4x!FHE.eint<2>>) -> tensor<4x!FHE.eint<2>> {
|
||||
%a1 = arith.constant dense<[0, 0, 0, 0]> : tensor<4xi3>
|
||||
%1 = "FHELinalg.sub_int_eint"(%a1, %a0) : (tensor<4xi3>, tensor<4x!FHE.eint<2>>) -> tensor<4x!FHE.eint<2>>
|
||||
return %1: tensor<4x!FHE.eint<2>>
|
||||
}
|
||||
|
||||
// CHECK: func @sub_int_eint_1D_broadcast(%[[a0:.*]]: tensor<4x!FHE.eint<2>>) -> tensor<4x!FHE.eint<2>> {
|
||||
// CHECK-NEXT: return %[[a0]] : tensor<4x!FHE.eint<2>>
|
||||
// CHECK-NEXT: }
|
||||
func @sub_int_eint_1D_broadcast(%a0: tensor<4x!FHE.eint<2>>) -> tensor<4x!FHE.eint<2>> {
|
||||
%a1 = arith.constant dense<[0]> : tensor<1xi3>
|
||||
%1 = "FHELinalg.sub_int_eint"(%a1, %a0) : (tensor<1xi3>, tensor<4x!FHE.eint<2>>) -> tensor<4x!FHE.eint<2>>
|
||||
return %1: tensor<4x!FHE.eint<2>>
|
||||
}
|
||||
|
||||
// CHECK: func @sub_int_eint_2D_broadcast(%[[a0:.*]]: tensor<4x3x!FHE.eint<2>>) -> tensor<4x3x!FHE.eint<2>> {
|
||||
// CHECK-NEXT: return %[[a0]] : tensor<4x3x!FHE.eint<2>>
|
||||
// CHECK-NEXT: }
|
||||
func @sub_int_eint_2D_broadcast(%a0: tensor<4x3x!FHE.eint<2>>) -> tensor<4x3x!FHE.eint<2>> {
|
||||
%a1 = arith.constant dense<[[0]]> : tensor<1x1xi3>
|
||||
%1 = "FHELinalg.sub_int_eint"(%a1, %a0) : (tensor<1x1xi3>, tensor<4x3x!FHE.eint<2>>) -> tensor<4x3x!FHE.eint<2>>
|
||||
return %1: tensor<4x3x!FHE.eint<2>>
|
||||
}
|
||||
|
||||
// CHECK: func @mul_eint_int_1D(%[[a0:.*]]: tensor<4x!FHE.eint<2>>) -> tensor<4x!FHE.eint<2>> {
|
||||
// CHECK-NEXT: return %[[a0]] : tensor<4x!FHE.eint<2>>
|
||||
// CHECK-NEXT: }
|
||||
|
||||
Reference in New Issue
Block a user