mirror of
https://github.com/zama-ai/concrete.git
synced 2026-02-13 06:04:58 -05:00
This refactoring commit restructures the compilation pipeline of
`zamacompiler`, such that it is possible to enter and exit the
pipeline at different points, effectively defining the level of
abstraction at the input and the required level of abstraction for the
output.
The entry point is specified using the `--entry-dialect`
argument. Valid choices are:
`--entry-dialect=hlfhe`: Source contains HLFHE operations
`--entry-dialect=midlfhe`: Source contains MidLFHE operations
`--entry-dialect=lowlfhe`: Source contains LowLFHE operations
`--entry-dialect=std`: Source does not contain any FHE Operations
`--entry-dialect=llvm`: Source is in LLVM dialect
The exit point is defined by an action, specified using --action.
`--action=roundtrip`:
Parse the source file to in-memory representation and immediately
dump as text without any processing
`--action=dump-midlfhe`:
Lower source to MidLFHE and dump result as text
`--action=dump-lowlfhe`:
Lower source to LowLFHE and dump result as text
`--action=dump-std`:
Lower source to only standard MLIR dialects (i.e., all FHE
operations have already been lowered)
`--action=dump-llvm-dialect`:
Lower source to MLIR's LLVM dialect (i.e., the LLVM dialect, not
LLVM IR)
`--action=dump-llvm-ir`:
Lower source to plain LLVM IR (i.e., not the LLVM dialect, but
actual LLVM IR)
`--action=dump-optimized-llvm-ir`:
Lower source to plain LLVM IR (i.e., not the LLVM dialect, but
actual LLVM IR), pass the result through the LLVM optimizer and
print the result.
`--action=dump-jit-invoke`:
Execute the full lowering pipeline to optimized LLVM IR, JIT
compile the result, invoke the function specified in
`--jit-funcname` with the parameters from `--jit-args` and print
the function's return value.
111 lines · 3.7 KiB · C++
#ifndef COMPILER_JIT_H
|
|
#define COMPILER_JIT_H
|
|
|
|
#include <mlir/ExecutionEngine/ExecutionEngine.h>
|
|
#include <mlir/IR/BuiltinOps.h>
|
|
#include <mlir/Support/LogicalResult.h>
|
|
|
|
#include <zamalang/Support/KeySet.h>
|
|
|
|
namespace mlir {
|
|
namespace zamalang {
|
|
mlir::LogicalResult
|
|
runJit(mlir::ModuleOp module, llvm::StringRef func,
|
|
llvm::ArrayRef<uint64_t> funcArgs, mlir::zamalang::KeySet &keySet,
|
|
std::function<llvm::Error(llvm::Module *)> optPipeline,
|
|
llvm::raw_ostream &os);
|
|
|
|
/// JITLambda is a tool to JIT compile an mlir module and to invoke a function
|
|
/// of the module.
|
|
class JITLambda {
|
|
public:
|
|
class Argument {
|
|
public:
|
|
Argument(KeySet &keySet);
|
|
~Argument();
|
|
|
|
// Create lambda Argument that use the given KeySet to perform encryption
|
|
// and decryption operations.
|
|
static llvm::Expected<std::unique_ptr<Argument>> create(KeySet &keySet);
|
|
|
|
// Set a scalar argument at the given pos as a uint64_t.
|
|
llvm::Error setArg(size_t pos, uint64_t arg);
|
|
|
|
// Set a argument at the given pos as a tensor of int64.
|
|
llvm::Error setArg(size_t pos, uint64_t *data, size_t size) {
|
|
return setArg(pos, 64, (void *)data, size);
|
|
}
|
|
|
|
// Set a argument at the given pos as a tensor of int32.
|
|
llvm::Error setArg(size_t pos, uint32_t *data, size_t size) {
|
|
return setArg(pos, 32, (void *)data, size);
|
|
}
|
|
|
|
// Set a argument at the given pos as a tensor of int32.
|
|
llvm::Error setArg(size_t pos, uint16_t *data, size_t size) {
|
|
return setArg(pos, 16, (void *)data, size);
|
|
}
|
|
|
|
// Set a tensor argument at the given pos as a uint64_t.
|
|
llvm::Error setArg(size_t pos, uint8_t *data, size_t size) {
|
|
return setArg(pos, 8, (void *)data, size);
|
|
}
|
|
|
|
// Get the result at the given pos as an uint64_t.
|
|
llvm::Error getResult(size_t pos, uint64_t &res);
|
|
|
|
// Fill the result.
|
|
llvm::Error getResult(size_t pos, uint64_t *res, size_t size);
|
|
|
|
private:
|
|
llvm::Error setArg(size_t pos, size_t width, void *data, size_t size);
|
|
|
|
friend JITLambda;
|
|
// Store the pointer on inputs values and outputs values
|
|
std::vector<void *> rawArg;
|
|
// Store the values of inputs
|
|
std::vector<void *> inputs;
|
|
// Store the values of outputs
|
|
std::vector<void *> outputs;
|
|
// Store the input gates description and the offset of the argument.
|
|
std::vector<std::tuple<CircuitGate, size_t /*offet*/>> inputGates;
|
|
// Store the outputs gates description and the offset of the argument.
|
|
std::vector<std::tuple<CircuitGate, size_t /*offet*/>> outputGates;
|
|
// Store allocated lwe ciphertexts (for free)
|
|
std::vector<LweCiphertext_u64 *> allocatedCiphertexts;
|
|
// Store buffers of ciphertexts
|
|
std::vector<LweCiphertext_u64 **> ciphertextBuffers;
|
|
|
|
KeySet &keySet;
|
|
};
|
|
JITLambda(mlir::LLVM::LLVMFunctionType type, llvm::StringRef name)
|
|
: type(type), name(name){};
|
|
|
|
/// create a JITLambda that point to the function name of the given module.
|
|
static llvm::Expected<std::unique_ptr<JITLambda>>
|
|
create(llvm::StringRef name, mlir::ModuleOp &module,
|
|
llvm::function_ref<llvm::Error(llvm::Module *)> optPipeline);
|
|
|
|
/// invokeRaw execute the jit lambda with a list of Argument, the last one is
|
|
/// used to store the result of the computation.
|
|
/// Example:
|
|
/// uin64_t arg0 = 1;
|
|
/// uin64_t res;
|
|
/// llvm::SmallVector<void *> args{&arg1, &res};
|
|
/// lambda.invokeRaw(args);
|
|
llvm::Error invokeRaw(llvm::MutableArrayRef<void *> args);
|
|
|
|
/// invoke the jit lambda with the Argument.
|
|
llvm::Error invoke(Argument &args);
|
|
|
|
private:
|
|
mlir::LLVM::LLVMFunctionType type;
|
|
llvm::StringRef name;
|
|
std::unique_ptr<mlir::ExecutionEngine> engine;
|
|
};
|
|
|
|
} // namespace zamalang
|
|
} // namespace mlir
|
|
|
|
#endif // COMPILER_JIT_H
|