Mirror of https://github.com/zama-ai/concrete.git (synced 2026-02-10 12:44:57 -05:00)
- No more Concrete ciphertext/plaintext types: they are represented using standard MLIR types (int/tensor).
- Technically, BConcrete was renamed to Concrete and the old Concrete dialect was removed.
- TFHE -> Concrete now takes into account the conversion of tensors of ciphertexts into tensors with an additional dimension (the LWE dimension); see the sketch below.
- Bufferization now works in Concrete.
- Old Concrete optimizations were moved to TFHE.
- Concrete is now the dialect that lowers to CAPI calls.
- TFHE -> Concrete now uses OpConversionPattern and is much cleaner in terms of type conversion.
- Disabled the batching tests, as something was off: the batchable operations are implemented in Concrete, but the batching pass runs on FHELinalg.
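The LWE-dimension expansion mentioned above can be pictured as a small type rewrite. Below is a minimal, hedged sketch (not the project's actual converter): it assumes a helper that, given the ranked tensor type of a batch of ciphertexts and an LWE dimension, produces the expanded i64 tensor type. The function name and the `lweDimension` parameter are illustrative only.

// Hedged sketch: a tensor of ciphertexts of shape <d0 x ... x dn> becomes a
// tensor of shape <d0 x ... x dn x (lweDimension + 1)> with i64 elements.
// The helper name and its parameters are assumptions, not project API.
#include "mlir/IR/BuiltinTypes.h"
#include "llvm/ADT/SmallVector.h"

static mlir::RankedTensorType
expandCiphertextTensor(mlir::RankedTensorType tensorTy, unsigned lweDimension) {
  // Copy the original shape and append the per-ciphertext dimension.
  llvm::SmallVector<int64_t> shape(tensorTy.getShape().begin(),
                                   tensorTy.getShape().end());
  shape.push_back(static_cast<int64_t>(lweDimension) + 1);
  auto i64 = mlir::IntegerType::get(tensorTy.getContext(), 64);
  return mlir::RankedTensorType::get(shape, i64);
}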
153 lines · 6.5 KiB · C++
// Part of the Concrete Compiler Project, under the BSD3 License with Zama
// Exceptions. See
// https://github.com/zama-ai/concrete-compiler-internal/blob/main/LICENSE.txt
// for license information.

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/Transforms/BufferUtils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"

#include "concretelang/Conversion/Tools.h"
#include "concretelang/Dialect/Concrete/IR/ConcreteDialect.h"
#include "concretelang/Dialect/Concrete/IR/ConcreteOps.h"
#include "concretelang/Dialect/Concrete/Transforms/BufferizableOpInterfaceImpl.h"
#include "concretelang/Dialect/Tracing/IR/TracingOps.h"
#include "concretelang/Support/CompilerEngine.h"
#include <mlir/IR/AffineExpr.h>
#include <mlir/IR/AffineMap.h>
#include <mlir/IR/BuiltinTypes.h>

using namespace mlir;
using namespace mlir::bufferization;
using namespace mlir::tensor;

namespace {

namespace Tracing = mlir::concretelang::Tracing;
namespace Concrete = mlir::concretelang::Concrete;
/// Bufferization external model shared by the Concrete "*_tensor" ops: it
/// rewrites a tensor-based TensorOp into its memref-based MemrefOp
/// counterpart, allocating an output buffer that is passed as the first
/// operand of the buffer op.
template <typename TensorOp, typename MemrefOp>
struct TensorToMemrefOp : public BufferizableOpInterface::ExternalModel<
                              TensorToMemrefOp<TensorOp, MemrefOp>, TensorOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  SmallVector<OpResult> getAliasingOpResult(Operation *op, OpOperand &opOperand,
                                            const AnalysisState &state) const {
    return {};
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const AnalysisState &state) const {
    return BufferRelation::None;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {

    auto loc = op->getLoc();
    auto castOp = cast<TensorOp>(op);

    auto resTensorType =
        castOp.result().getType().template cast<mlir::TensorType>();

    // Allocate the output buffer with the same shape and element type as the
    // result tensor.
    auto outMemrefType = MemRefType::get(resTensorType.getShape(),
                                         resTensorType.getElementType());
    auto outMemref = options.createAlloc(rewriter, loc, outMemrefType, {});
    if (mlir::failed(outMemref)) {
      return mlir::failure();
    }

    // The first operand is the result
    mlir::SmallVector<mlir::Value, 3> operands{
        *outMemref,
    };
    // Forward the remaining operands, replacing tensor operands by their
    // buffers.
    for (auto &operand : op->getOpOperands()) {
      if (!operand.get().getType().isa<mlir::RankedTensorType>()) {
        operands.push_back(operand.get());
      } else {
        operands.push_back(
            bufferization::getBuffer(rewriter, operand.get(), options));
      }
    }

    rewriter.create<MemrefOp>(loc, mlir::TypeRange{}, operands, op->getAttrs());

    replaceOpWithBufferizedValues(rewriter, op, *outMemref);

    return success();
  }
};

} // namespace

void mlir::concretelang::Concrete::
    registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx,
                            Concrete::ConcreteDialect *dialect) {
    // add_lwe_tensor => add_lwe_buffer
    Concrete::AddLweTensorOp::attachInterface<
        TensorToMemrefOp<Concrete::AddLweTensorOp, Concrete::AddLweBufferOp>>(
        *ctx);
    // add_plaintext_lwe_tensor => add_plaintext_lwe_buffer
    Concrete::AddPlaintextLweTensorOp::attachInterface<TensorToMemrefOp<
        Concrete::AddPlaintextLweTensorOp, Concrete::AddPlaintextLweBufferOp>>(
        *ctx);
    // mul_cleartext_lwe_tensor => mul_cleartext_lwe_buffer
    Concrete::MulCleartextLweTensorOp::attachInterface<TensorToMemrefOp<
        Concrete::MulCleartextLweTensorOp, Concrete::MulCleartextLweBufferOp>>(
        *ctx);
    // negate_lwe_tensor => negate_lwe_buffer
    Concrete::NegateLweTensorOp::attachInterface<TensorToMemrefOp<
        Concrete::NegateLweTensorOp, Concrete::NegateLweBufferOp>>(*ctx);
    // keyswitch_lwe_tensor => keyswitch_lwe_buffer
    Concrete::KeySwitchLweTensorOp::attachInterface<TensorToMemrefOp<
        Concrete::KeySwitchLweTensorOp, Concrete::KeySwitchLweBufferOp>>(*ctx);
    // bootstrap_lwe_tensor => bootstrap_lwe_buffer
    Concrete::BootstrapLweTensorOp::attachInterface<TensorToMemrefOp<
        Concrete::BootstrapLweTensorOp, Concrete::BootstrapLweBufferOp>>(*ctx);
    // batched_keyswitch_lwe_tensor => batched_keyswitch_lwe_buffer
    Concrete::BatchedKeySwitchLweTensorOp::attachInterface<
        TensorToMemrefOp<Concrete::BatchedKeySwitchLweTensorOp,
                         Concrete::BatchedKeySwitchLweBufferOp>>(*ctx);
    // batched_bootstrap_lwe_tensor => batched_bootstrap_lwe_buffer
    Concrete::BatchedBootstrapLweTensorOp::attachInterface<
        TensorToMemrefOp<Concrete::BatchedBootstrapLweTensorOp,
                         Concrete::BatchedBootstrapLweBufferOp>>(*ctx);
    // wop_pbs_crt_lwe_tensor => wop_pbs_crt_lwe_buffer
    Concrete::WopPBSCRTLweTensorOp::attachInterface<TensorToMemrefOp<
        Concrete::WopPBSCRTLweTensorOp, Concrete::WopPBSCRTLweBufferOp>>(*ctx);
    // encode_plaintext_with_crt_tensor => encode_plaintext_with_crt_buffer
    Concrete::EncodePlaintextWithCrtTensorOp::attachInterface<
        TensorToMemrefOp<Concrete::EncodePlaintextWithCrtTensorOp,
                         Concrete::EncodePlaintextWithCrtBufferOp>>(*ctx);
    // encode_expand_lut_for_bootstrap_tensor =>
    // encode_expand_lut_for_bootstrap_buffer
    Concrete::EncodeExpandLutForBootstrapTensorOp::attachInterface<
        TensorToMemrefOp<Concrete::EncodeExpandLutForBootstrapTensorOp,
                         Concrete::EncodeExpandLutForBootstrapBufferOp>>(*ctx);
    // encode_expand_lut_for_woppbs_tensor =>
    // encode_expand_lut_for_woppbs_buffer
    Concrete::EncodeExpandLutForWopPBSTensorOp::attachInterface<
        TensorToMemrefOp<Concrete::EncodeExpandLutForWopPBSTensorOp,
                         Concrete::EncodeExpandLutForWopPBSBufferOp>>(*ctx);
  });
}
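For context, here is a hedged sketch of how a driver might use registerBufferizableOpInterfaceExternalModels together with MLIR's one-shot bufferization. Only the registration function and its header come from the file above; the driver function name, the allowUnknownOps setting, and the exact bufferization entry point are assumptions and may differ across MLIR versions.

// Hedged usage sketch, not part of the file above.
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"

#include "concretelang/Dialect/Concrete/Transforms/BufferizableOpInterfaceImpl.h"

mlir::LogicalResult bufferizeModule(mlir::ModuleOp module) {
  // Make the tensor->memref external models visible to the bufferization
  // analysis before it runs.
  mlir::DialectRegistry registry;
  mlir::concretelang::Concrete::registerBufferizableOpInterfaceExternalModels(
      registry);
  module.getContext()->appendDialectRegistry(registry);

  mlir::bufferization::OneShotBufferizationOptions options;
  options.allowUnknownOps = true; // assumed setting, for illustration only
  return mlir::bufferization::runOneShotBufferize(module, options);
}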