This commit rebases the compiler onto commit 465ee9bfb26d from
llvm-project, with the following locally maintained patches on top:
* 5d8669d669ee: Fix the element alignment (size) for memrefCopy
* 4239163ea337: fix: Do not fold the memref.subview if the offset are
!= 0 and strides != 1
* 72c5decfcc21: remove github stuff from llvm
* 8d0ce8f9eca1: Support arbitrary element types in named operations
via attributes
* 94f64805c38c: Copy attributes of scf.for on bufferization and make
it an allocation hoisting barrier
Main upstream changes from llvm-project that required modification of
concretecompiler:
* Switch to C++17
* Various changes in the interfaces for linalg named operations
* Transition from `llvm::Optional` to `std::optional` (illustrated in
  the sketch after this list)
* Use of enums instead of string values for iterator types in linalg
* Changed default naming convention of getter methods in
ODS-generated operation classes from `some_value()` to
`getSomeValue()`
* Renaming of Arithmetic dialect to Arith
* Refactoring of side effect interfaces (i.e., renaming from
`NoSideEffect` to `Pure`)
* Re-design of the data flow analysis framework
* Refactoring of build targets for Python bindings
* Refactoring of array attributes with integer values
* Renaming of `linalg.init_tensor` to `tensor.empty`
* Emission of `linalg.map` operations in the bufferization of the
  Tensor dialect, requiring another linalg conversion pass and
  registration of the bufferization op interfaces for linalg
  operations
* Refactoring of the one-shot bufferizer
* Necessity to run the expand-strided-metadata, affine-to-std and
  finalize-memref-to-llvm passes before conversion to the LLVM
  dialect (see the pipeline sketch after this list)
* Renaming of `BlockAndValueMapping` to `IRMapping`
* Changes in the build function of `LLVM::CallOp`
* Refactoring of the construction of `llvm::ArrayRef` and
  `llvm::MutableArrayRef` (direct invocation of the constructor
  instead of builder functions in some cases)
* New naming conventions for generated SSA values, requiring a
  rewrite of some check tests
* Refactoring of `mlir::LLVM::lookupOrCreateMallocFn()`
* Interface changes in generated type parsers
* New dependencies of the runtime on mlir_float16_utils and
  MLIRSparseTensorRuntime
* Overhaul of MLIR-c, deleting `mlir-c/Registration.h`
* Deletion of library MLIRLinalgToSPIRV
* Deletion of library MLIRLinalgAnalysis
* Deletion of library MLIRMemRefUtils
* Deletion of library MLIRQuantTransforms
* Deletion of library MLIRVectorToROCDL
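
As a rough, non-authoritative illustration of two of the changes above,
the sketch below shows the `llvm::Optional`-to-`std::optional` /
getter-name migration and a pipeline fragment with the additional
lowering passes now required before the conversion to the LLVM dialect.
The pass-creation helpers are assumed to be available under these names
at the rebased MLIR revision; the operation and accessor in the first
comment are hypothetical.

#include <mlir/Conversion/Passes.h>
#include <mlir/Dialect/MemRef/Transforms/Passes.h>
#include <mlir/Pass/PassManager.h>

// Accessor migration on a hypothetical ODS-generated operation:
//   before the rebase:  llvm::Optional<mlir::Value> v = op.some_value();
//   after the rebase:   std::optional<mlir::Value> v = op.getSomeValue();

// Lowering passes that now have to run before the LLVM dialect conversion.
static void addPreLLVMLoweringPasses(mlir::PassManager &pm) {
  pm.addPass(mlir::memref::createExpandStridedMetadataPass());
  pm.addPass(mlir::createLowerAffinePass()); // former affine-to-std
  pm.addPass(mlir::createFinalizeMemRefToLLVMConversionPass());
}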
// Part of the Concrete Compiler Project, under the BSD3 License with Zama
// Exceptions. See
// https://github.com/zama-ai/concrete-compiler-internal/blob/main/LICENSE.txt
// for license information.

#include <concretelang/Conversion/Passes.h>
#include <concretelang/Support/LinalgExtras.h>
#include <mlir/Dialect/Linalg/IR/Linalg.h>
#include <mlir/Transforms/GreedyPatternRewriteDriver.h>

namespace {
// Pass lowering `linalg.generic` operations on tensors directly to loops.
struct LinalgGenericOpWithTensorsToLoopsPass
    : public LinalgGenericOpWithTensorsToLoopsBase<
          LinalgGenericOpWithTensorsToLoopsPass> {
  LinalgGenericOpWithTensorsToLoopsPass() = delete;
  LinalgGenericOpWithTensorsToLoopsPass(bool parallelizeLoops)
      : parallelizeLoops(parallelizeLoops){};
  void runOnOperation() final;

private:
  bool parallelizeLoops;
};
} // namespace

// Rewrite pattern converting a single `linalg.generic` operation on tensors
// into a nest of loops of type `LoopType`.
template <typename LoopType>
class LinalgRewritePattern
    : public mlir::OpRewritePattern<mlir::linalg::GenericOp> {
public:
  using OpRewritePattern<mlir::linalg::GenericOp>::OpRewritePattern;

  LinalgRewritePattern(::mlir::MLIRContext *context, bool parallelizeLoops,
                       mlir::PatternBenefit benefit = 0)
      : ::mlir::OpRewritePattern<mlir::linalg::GenericOp>(context, benefit),
        parallelizeLoops(parallelizeLoops) {}

  mlir::LogicalResult
  matchAndRewrite(mlir::linalg::GenericOp linalgOp,
                  mlir::PatternRewriter &rewriter) const override {
    mlir::FailureOr<mlir::linalg::LinalgLoops> loops =
        mlir::concretelang::linalgextras::linalgTensorOpToLoopsImpl<LoopType>(
            rewriter, linalgOp, parallelizeLoops);

    if (((mlir::LogicalResult)loops).failed() || loops->size() == 0)
      return mlir::failure();

    // Replace the linalg operation with the result of the outermost loop.
    rewriter.replaceOp(linalgOp, loops.value()[0]->getResult(0));

    return mlir::success();
  };

private:
  bool parallelizeLoops;
};

void LinalgGenericOpWithTensorsToLoopsPass::runOnOperation() {
  auto op = this->getOperation();

  mlir::RewritePatternSet patterns(&getContext());
  patterns.insert<LinalgRewritePattern<mlir::scf::ForOp>>(&getContext(),
                                                          parallelizeLoops);
  (void)applyPatternsAndFoldGreedily(op, std::move(patterns));
}

namespace mlir {
namespace concretelang {
std::unique_ptr<OperationPass<ModuleOp>>
createLinalgGenericOpWithTensorsToLoopsPass(bool parallelizeLoops) {
  return std::make_unique<LinalgGenericOpWithTensorsToLoopsPass>(
      parallelizeLoops);
}
} // namespace concretelang
} // namespace mlir
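
For reference, a minimal sketch (not taken from the repository) of how
the pass defined in this file could be added to a pass pipeline; the
surrounding pipeline setup and the helper function name are assumptions.

#include <concretelang/Conversion/Passes.h>
#include <mlir/Pass/PassManager.h>

// Lower linalg.generic ops on tensors to loops using the pass defined
// above; the parallelize flag is forwarded to the pass.
static void addTensorLoopLoweringPass(mlir::PassManager &pm,
                                      bool parallelize) {
  pm.addPass(
      mlir::concretelang::createLinalgGenericOpWithTensorsToLoopsPass(
          parallelize));
}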