concrete/compilers/concrete-compiler/compiler/lib/Bindings/Python/CompilerEngine.cpp
Andi Drebes c8c969773e Rebase onto llvm-project 465ee9bfb26d with local changes
This commit rebases the compiler onto commit 465ee9bfb26d from
llvm-project with locally maintained patches on top, i.e.:

  * 5d8669d669ee: Fix the element alignment (size) for memrefCopy
  * 4239163ea337: fix: Do not fold the memref.subview if the offset are
                  != 0 and strides != 1
  * 72c5decfcc21: remove github stuff from llvm
  * 8d0ce8f9eca1: Support arbitrary element types in named operations
                  via attributes
  * 94f64805c38c: Copy attributes of scf.for on bufferization and make
                  it an allocation hoisting barrier

Main upstream changes from llvm-project that required modification of
concretecompiler:

  * Switch to C++17
  * Various changes in the interfaces for linalg named operations
  * Transition from `llvm::Optional` to `std::optional` (see the
    sketch after this commit message)
  * Use of enums instead of string values for iterator types in linalg
  * Changed default naming convention of getter methods in
    ODS-generated operation classes from `some_value()` to
    `getSomeValue()`
  * Renaming of Arithmetic dialect to Arith
  * Refactoring of side effect interfaces (i.e., renaming from
    `NoSideEffect` to `Pure`)
  * Re-design of the data flow analysis framework
  * Refactoring of build targets for Python bindings
  * Refactoring of array attributes with integer values
  * Renaming of `linalg.init_tensor` to `tensor.empty`
  * Emission of `linalg.map` operations during bufferization of the
    Tensor dialect, requiring an additional linalg conversion pass and
    registration of the bufferization op interfaces for linalg
    operations
  * Refactoring of the one-shot bufferizer
  * Necessity to run the expand-strided-metadata, affine-to-std and
    finalize-memref-to-llvm passes before conversion to the LLVM
    dialect
  * Renaming of `BlockAndValueMapping` to `IRMapping`
  * Changes in the build function of `LLVM::CallOp`
  * Refactoring of the construction of `llvm::ArrayRef` and
    `llvm::MutableArrayRef` (direct invocation of constructor instead
    of builder functions for some cases)
  * New naming conventions for generated SSA values requiring rewrite
    of some check tests
  * Refactoring of `mlir::LLVM::lookupOrCreateMallocFn()`
  * Interface changes in generated type parsers
  * New runtime dependencies on mlir_float16_utils and
    MLIRSparseTensorRuntime
  * Overhaul of the MLIR C API, deleting `mlir-c/Registration.h`
  * Deletion of library MLIRLinalgToSPIRV
  * Deletion of library MLIRLinalgAnalysis
  * Deletion of library MLIRMemRefUtils
  * Deletion of library MLIRQuantTransforms
  * Deletion of library MLIRVectorToROCDL
2023-03-09 17:47:16 +01:00
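
To give a flavour of these migrations, here is a minimal, hypothetical sketch
of the `llvm::Optional` to `std::optional` transition and of the ODS getter
renaming; `toOptionalPath` and `ExampleOp` are illustrative names only and do
not appear in the compiler.

  #include <optional>
  #include <string>

  // Before the rebase this helper would have used llvm::Optional and
  // llvm::None; after the switch to C++17 the same logic is written with
  // the standard library, exactly as jit_support() does below.
  std::optional<std::string> toOptionalPath(const std::string &path) {
    return path.empty() ? std::nullopt : std::optional<std::string>(path);
  }

  // The getter renaming is analogous: for a hypothetical ODS-defined
  // ExampleOp, a pre-rebase accessor op.some_value() becomes
  // op.getSomeValue() in the regenerated operation classes.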


// Part of the Concrete Compiler Project, under the BSD3 License with Zama
// Exceptions. See
// https://github.com/zama-ai/concrete-compiler-internal/blob/main/LICENSE.txt
// for license information.
#include "llvm/ADT/SmallString.h"
#include "concretelang/Bindings/Python/CompilerEngine.h"
#include "concretelang/ClientLib/KeySetCache.h"
#include "concretelang/ClientLib/Serializers.h"
#include "concretelang/Runtime/DFRuntime.hpp"
#include "concretelang/Support/CompilerEngine.h"
#include "concretelang/Support/JITSupport.h"
#include "concretelang/Support/Jit.h"
#define GET_OR_THROW_LLVM_EXPECTED(VARNAME, EXPECTED)                         \
  auto VARNAME = EXPECTED;                                                     \
  if (auto err = VARNAME.takeError()) {                                        \
    throw std::runtime_error(llvm::toString(std::move(err)));                  \
  }

// JIT Support bindings ///////////////////////////////////////////////////////
MLIR_CAPI_EXPORTED JITSupport_Py jit_support(std::string runtimeLibPath) {
  auto opt = runtimeLibPath.empty()
                 ? std::nullopt
                 : std::optional<std::string>(runtimeLibPath);
  return JITSupport_Py{mlir::concretelang::JITSupport(opt)};
}

MLIR_CAPI_EXPORTED std::unique_ptr<mlir::concretelang::JitCompilationResult>
jit_compile(JITSupport_Py support, const char *module,
            mlir::concretelang::CompilationOptions options) {
  GET_OR_THROW_LLVM_EXPECTED(compilationResult,
                             support.support.compile(module, options));
  return std::move(*compilationResult);
}

MLIR_CAPI_EXPORTED mlir::concretelang::ClientParameters
jit_load_client_parameters(JITSupport_Py support,
                           mlir::concretelang::JitCompilationResult &result) {
  GET_OR_THROW_LLVM_EXPECTED(clientParameters,
                             support.support.loadClientParameters(result));
  return *clientParameters;
}

MLIR_CAPI_EXPORTED mlir::concretelang::CompilationFeedback
jit_load_compilation_feedback(
    JITSupport_Py support, mlir::concretelang::JitCompilationResult &result) {
  GET_OR_THROW_LLVM_EXPECTED(compilationFeedback,
                             support.support.loadCompilationFeedback(result));
  return *compilationFeedback;
}

MLIR_CAPI_EXPORTED std::shared_ptr<mlir::concretelang::JITLambda>
jit_load_server_lambda(JITSupport_Py support,
                       mlir::concretelang::JitCompilationResult &result) {
  GET_OR_THROW_LLVM_EXPECTED(serverLambda,
                             support.support.loadServerLambda(result));
  return *serverLambda;
}

MLIR_CAPI_EXPORTED std::unique_ptr<concretelang::clientlib::PublicResult>
jit_server_call(JITSupport_Py support, mlir::concretelang::JITLambda &lambda,
                concretelang::clientlib::PublicArguments &args,
                concretelang::clientlib::EvaluationKeys &evaluationKeys) {
  GET_OR_THROW_LLVM_EXPECTED(publicResult, lambda.call(args, evaluationKeys));
  return std::move(*publicResult);
}

// Library Support bindings ///////////////////////////////////////////////////
MLIR_CAPI_EXPORTED LibrarySupport_Py
library_support(const char *outputPath, const char *runtimeLibraryPath,
                bool generateSharedLib, bool generateStaticLib,
                bool generateClientParameters, bool generateCompilationFeedback,
                bool generateCppHeader) {
  return LibrarySupport_Py{mlir::concretelang::LibrarySupport(
      outputPath, runtimeLibraryPath, generateSharedLib, generateStaticLib,
      generateClientParameters, generateCompilationFeedback,
      generateCppHeader)};
}

MLIR_CAPI_EXPORTED std::unique_ptr<mlir::concretelang::LibraryCompilationResult>
library_compile(LibrarySupport_Py support, const char *module,
                mlir::concretelang::CompilationOptions options) {
  GET_OR_THROW_LLVM_EXPECTED(compilationResult,
                             support.support.compile(module, options));
  return std::move(*compilationResult);
}

MLIR_CAPI_EXPORTED mlir::concretelang::ClientParameters
library_load_client_parameters(
    LibrarySupport_Py support,
    mlir::concretelang::LibraryCompilationResult &result) {
  GET_OR_THROW_LLVM_EXPECTED(clientParameters,
                             support.support.loadClientParameters(result));
  return *clientParameters;
}

MLIR_CAPI_EXPORTED mlir::concretelang::CompilationFeedback
library_load_compilation_feedback(
    LibrarySupport_Py support,
    mlir::concretelang::LibraryCompilationResult &result) {
  GET_OR_THROW_LLVM_EXPECTED(compilationFeedback,
                             support.support.loadCompilationFeedback(result));
  return *compilationFeedback;
}

MLIR_CAPI_EXPORTED concretelang::serverlib::ServerLambda
library_load_server_lambda(
    LibrarySupport_Py support,
    mlir::concretelang::LibraryCompilationResult &result) {
  GET_OR_THROW_LLVM_EXPECTED(serverLambda,
                             support.support.loadServerLambda(result));
  return *serverLambda;
}

MLIR_CAPI_EXPORTED std::unique_ptr<concretelang::clientlib::PublicResult>
library_server_call(LibrarySupport_Py support,
                    concretelang::serverlib::ServerLambda lambda,
                    concretelang::clientlib::PublicArguments &args,
                    concretelang::clientlib::EvaluationKeys &evaluationKeys) {
  GET_OR_THROW_LLVM_EXPECTED(
      publicResult, support.support.serverCall(lambda, args, evaluationKeys));
  return std::move(*publicResult);
}

MLIR_CAPI_EXPORTED std::string
library_get_shared_lib_path(LibrarySupport_Py support) {
  return support.support.getSharedLibPath();
}

MLIR_CAPI_EXPORTED std::string
library_get_client_parameters_path(LibrarySupport_Py support) {
  return support.support.getClientParametersPath();
}

// Client Support bindings ///////////////////////////////////////////////////
MLIR_CAPI_EXPORTED std::unique_ptr<concretelang::clientlib::KeySet>
key_set(concretelang::clientlib::ClientParameters clientParameters,
        std::optional<concretelang::clientlib::KeySetCache> cache) {
  GET_OR_THROW_LLVM_EXPECTED(
      ks, (mlir::concretelang::LambdaSupport<int, int>::keySet(clientParameters,
                                                               cache)));
  return std::move(*ks);
}

MLIR_CAPI_EXPORTED std::unique_ptr<concretelang::clientlib::PublicArguments>
encrypt_arguments(concretelang::clientlib::ClientParameters clientParameters,
                  concretelang::clientlib::KeySet &keySet,
                  llvm::ArrayRef<mlir::concretelang::LambdaArgument *> args) {
  GET_OR_THROW_LLVM_EXPECTED(
      publicArguments,
      (mlir::concretelang::LambdaSupport<int, int>::exportArguments(
          clientParameters, keySet, args)));
  return std::move(*publicArguments);
}

MLIR_CAPI_EXPORTED lambdaArgument
decrypt_result(concretelang::clientlib::KeySet &keySet,
               concretelang::clientlib::PublicResult &publicResult) {
  GET_OR_THROW_LLVM_EXPECTED(
      result, mlir::concretelang::typedResult<
                  std::unique_ptr<mlir::concretelang::LambdaArgument>>(
                  keySet, publicResult));
  lambdaArgument result_{std::move(*result)};
  return result_;
}

MLIR_CAPI_EXPORTED std::unique_ptr<concretelang::clientlib::PublicArguments>
publicArgumentsUnserialize(
    mlir::concretelang::ClientParameters &clientParameters,
    const std::string &buffer) {
  std::stringstream istream(buffer);
  auto argsOrError = concretelang::clientlib::PublicArguments::unserialize(
      clientParameters, istream);
  if (!argsOrError) {
    throw std::runtime_error(argsOrError.error().mesg);
  }
  return std::move(argsOrError.value());
}

MLIR_CAPI_EXPORTED std::string publicArgumentsSerialize(
    concretelang::clientlib::PublicArguments &publicArguments) {
  std::ostringstream buffer(std::ios::binary);
  auto voidOrError = publicArguments.serialize(buffer);
  if (!voidOrError) {
    throw std::runtime_error(voidOrError.error().mesg);
  }
  return buffer.str();
}

MLIR_CAPI_EXPORTED std::unique_ptr<concretelang::clientlib::PublicResult>
publicResultUnserialize(mlir::concretelang::ClientParameters &clientParameters,
                        const std::string &buffer) {
  std::stringstream istream(buffer);
  auto publicResultOrError = concretelang::clientlib::PublicResult::unserialize(
      clientParameters, istream);
  if (!publicResultOrError) {
    throw std::runtime_error(publicResultOrError.error().mesg);
  }
  return std::move(publicResultOrError.value());
}

MLIR_CAPI_EXPORTED std::string
publicResultSerialize(concretelang::clientlib::PublicResult &publicResult) {
  std::ostringstream buffer(std::ios::binary);
  auto voidOrError = publicResult.serialize(buffer);
  if (!voidOrError) {
    throw std::runtime_error(voidOrError.error().mesg);
  }
  return buffer.str();
}

MLIR_CAPI_EXPORTED concretelang::clientlib::EvaluationKeys
evaluationKeysUnserialize(const std::string &buffer) {
  std::stringstream istream(buffer);
  concretelang::clientlib::EvaluationKeys evaluationKeys =
      concretelang::clientlib::readEvaluationKeys(istream);
  if (istream.fail()) {
    throw std::runtime_error("Cannot read evaluation keys");
  }
  return evaluationKeys;
}

MLIR_CAPI_EXPORTED std::string evaluationKeysSerialize(
    concretelang::clientlib::EvaluationKeys &evaluationKeys) {
  std::ostringstream buffer(std::ios::binary);
  concretelang::clientlib::operator<<(buffer, evaluationKeys);
  return buffer.str();
}

MLIR_CAPI_EXPORTED mlir::concretelang::ClientParameters
clientParametersUnserialize(const std::string &json) {
  GET_OR_THROW_LLVM_EXPECTED(
      clientParams,
      llvm::json::parse<mlir::concretelang::ClientParameters>(json));
  return clientParams.get();
}

MLIR_CAPI_EXPORTED std::string
clientParametersSerialize(mlir::concretelang::ClientParameters &params) {
  llvm::json::Value value(params);
  std::string jsonParams;
  llvm::raw_string_ostream buffer(jsonParams);
  buffer << value;
  return jsonParams;
}

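// Dataflow parallelization controls //////////////////////////////////////////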
MLIR_CAPI_EXPORTED void terminateDataflowParallelization() { _dfr_terminate(); }

MLIR_CAPI_EXPORTED void initDataflowParallelization() {
  mlir::concretelang::dfr::_dfr_set_required(true);
}

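// Parses the given MLIR module, runs it through the compiler's ROUND_TRIP
// target and returns the resulting textual IR; throws if parsing fails.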
MLIR_CAPI_EXPORTED std::string roundTrip(const char *module) {
  std::shared_ptr<mlir::concretelang::CompilationContext> ccx =
      mlir::concretelang::CompilationContext::createShared();
  mlir::concretelang::CompilerEngine ce{ccx};
  std::string backingString;
  llvm::raw_string_ostream os(backingString);
  llvm::Expected<mlir::concretelang::CompilerEngine::CompilationResult>
      retOrErr = ce.compile(
          module, mlir::concretelang::CompilerEngine::Target::ROUND_TRIP);
  if (!retOrErr) {
    os << "MLIR parsing failed: " << llvm::toString(retOrErr.takeError());
    throw std::runtime_error(os.str());
  }
  retOrErr->mlirModuleRef->get().print(os);
  return os.str();
}

MLIR_CAPI_EXPORTED bool lambdaArgumentIsTensor(lambdaArgument &lambda_arg) {
  return lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<uint8_t>>>() ||
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<uint16_t>>>() ||
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<uint32_t>>>() ||
         lambda_arg.ptr->isa<mlir::concretelang::TensorLambdaArgument<
             mlir::concretelang::IntLambdaArgument<uint64_t>>>();
}

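// Widens the elements of a TensorLambdaArgument with integer element type T
// into a std::vector<uint64_t>; throws if the number of elements cannot be
// determined.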
template <typename T>
MLIR_CAPI_EXPORTED std::vector<uint64_t> copyTensorLambdaArgumentTo64bitsvector(
    mlir::concretelang::TensorLambdaArgument<
        mlir::concretelang::IntLambdaArgument<T>> *tensor) {
  auto numElements = tensor->getNumElements();
  if (!numElements) {
    std::string backingString;
    llvm::raw_string_ostream os(backingString);
    os << "Couldn't get size of tensor: "
       << llvm::toString(std::move(numElements.takeError()));
    throw std::runtime_error(os.str());
  }
  std::vector<uint64_t> res;
  res.reserve(*numElements);
  T *data = tensor->getValue();
  for (size_t i = 0; i < *numElements; i++) {
    res.push_back(data[i]);
  }
  return res;
}

MLIR_CAPI_EXPORTED std::vector<uint64_t>
lambdaArgumentGetTensorData(lambdaArgument &lambda_arg) {
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint64_t>>>()) {
    llvm::Expected<size_t> sizeOrErr = arg->getNumElements();
    if (!sizeOrErr) {
      std::string backingString;
      llvm::raw_string_ostream os(backingString);
      os << "Couldn't get size of tensor: "
         << llvm::toString(sizeOrErr.takeError());
      throw std::runtime_error(os.str());
    }
    std::vector<uint64_t> data(arg->getValue(), arg->getValue() + *sizeOrErr);
    return data;
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint8_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector(arg);
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint16_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector(arg);
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint32_t>>>()) {
    return copyTensorLambdaArgumentTo64bitsvector(arg);
  }
  throw std::invalid_argument(
      "LambdaArgument isn't a tensor or has an unsupported bitwidth");
}

MLIR_CAPI_EXPORTED std::vector<int64_t>
lambdaArgumentGetTensorDimensions(lambdaArgument &lambda_arg) {
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint8_t>>>()) {
    return arg->getDimensions();
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint16_t>>>()) {
    return arg->getDimensions();
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint32_t>>>()) {
    return arg->getDimensions();
  }
  if (auto arg =
          lambda_arg.ptr->dyn_cast<mlir::concretelang::TensorLambdaArgument<
              mlir::concretelang::IntLambdaArgument<uint64_t>>>()) {
    return arg->getDimensions();
  }
  throw std::invalid_argument(
      "LambdaArgument isn't a tensor, should "
      "be a TensorLambdaArgument<IntLambdaArgument<uint64_t>>");
}

MLIR_CAPI_EXPORTED bool lambdaArgumentIsScalar(lambdaArgument &lambda_arg) {
  return lambda_arg.ptr->isa<mlir::concretelang::IntLambdaArgument<uint64_t>>();
}

MLIR_CAPI_EXPORTED uint64_t
lambdaArgumentGetScalar(lambdaArgument &lambda_arg) {
  mlir::concretelang::IntLambdaArgument<uint64_t> *arg =
      lambda_arg.ptr
          ->dyn_cast<mlir::concretelang::IntLambdaArgument<uint64_t>>();
  if (arg == nullptr) {
    throw std::invalid_argument("LambdaArgument isn't a scalar, should "
                                "be an IntLambdaArgument<uint64_t>");
  }
  return arg->getValue();
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU8(
    std::vector<uint8_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
      std::make_shared<mlir::concretelang::TensorLambdaArgument<
          mlir::concretelang::IntLambdaArgument<uint8_t>>>(data, dimensions)};
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU16(
    std::vector<uint16_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
      std::make_shared<mlir::concretelang::TensorLambdaArgument<
          mlir::concretelang::IntLambdaArgument<uint16_t>>>(data, dimensions)};
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU32(
    std::vector<uint32_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
      std::make_shared<mlir::concretelang::TensorLambdaArgument<
          mlir::concretelang::IntLambdaArgument<uint32_t>>>(data, dimensions)};
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromTensorU64(
    std::vector<uint64_t> data, std::vector<int64_t> dimensions) {
  lambdaArgument tensor_arg{
      std::make_shared<mlir::concretelang::TensorLambdaArgument<
          mlir::concretelang::IntLambdaArgument<uint64_t>>>(data, dimensions)};
  return tensor_arg;
}

MLIR_CAPI_EXPORTED lambdaArgument lambdaArgumentFromScalar(uint64_t scalar) {
  lambdaArgument scalar_arg{
      std::make_shared<mlir::concretelang::IntLambdaArgument<uint64_t>>(
          scalar)};
  return scalar_arg;
}