feat(python): support loop parallelization

Remove the hard checks on parallelization support: compilation with
parallelization options enabled is now allowed unconditionally, whether
or not support for parallel execution was built in.
Author: youben11
Date: 2022-04-04 10:26:24 +01:00
Committed by: Ayoub Benaissa
parent 01f6a334ff
commit 690b4f75c5
3 changed files with 46 additions and 23 deletions
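
To make the user-visible effect concrete, here is a minimal sketch of driving the Python bindings after this change: loop parallelization is simply requested on the compilation options and compilation proceeds, whatever the build configuration. The class and method names and the MLIR program are taken from the diff below; the import path and the standalone-script framing are assumptions.

# Minimal sketch, not part of the commit; import path is assumed.
from concrete.compiler import CompilationOptions, JITSupport

mlir_input = """
func @main(%arg0: tensor<4x!FHE.eint<7>>, %arg1: tensor<4xi8>) -> !FHE.eint<7>
{
  %ret = "FHELinalg.dot_eint_int"(%arg0, %arg1) :
    (tensor<4x!FHE.eint<7>>, tensor<4xi8>) -> !FHE.eint<7>
  return %ret : !FHE.eint<7>
}
"""

options = CompilationOptions.new("main")
options.set_loop_parallelize(True)

engine = JITSupport.new()
# Previously this call raised on packages built without parallelization
# support; it is now attempted unconditionally.
compilation_result = engine.compile(mlir_input, options)
client_parameters = engine.load_client_parameters(compilation_result)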


@@ -1,9 +1,5 @@
 set(LLVM_OPTIONAL_SOURCES CompilerEngine.cpp)
-if(CONCRETELANG_PARALLEL_EXECUTION_ENABLED)
-  add_compile_options(-DCONCRETELANG_PARALLEL_EXECUTION_ENABLED)
-endif()
 add_mlir_public_c_api_library(CONCRETELANGCAPISupport
   CompilerEngine.cpp


@@ -30,13 +30,6 @@ MLIR_CAPI_EXPORTED JITSupport_C jit_support(std::string runtimeLibPath) {
 std::unique_ptr<mlir::concretelang::JitCompilationResult>
 jit_compile(JITSupport_C support, const char *module,
             mlir::concretelang::CompilationOptions options) {
-#ifndef CONCRETELANG_PARALLEL_EXECUTION_ENABLED
-  if (options.autoParallelize || options.loopParallelize ||
-      options.dataflowParallelize) {
-    throw std::runtime_error(
-        "This package was built without parallelization support");
-  }
-#endif
   GET_OR_THROW_LLVM_EXPECTED(compilationResult,
                              support.support.compile(module, options));
   return std::move(*compilationResult);
@@ -75,13 +68,6 @@ library_support(const char *outputPath, const char *runtimeLibraryPath) {
 std::unique_ptr<mlir::concretelang::LibraryCompilationResult>
 library_compile(LibrarySupport_C support, const char *module,
                 mlir::concretelang::CompilationOptions options) {
-#ifndef CONCRETELANG_PARALLEL_EXECUTION_ENABLED
-  if (options.autoParallelize || options.loopParallelize ||
-      options.dataflowParallelize) {
-    throw std::runtime_error(
-        "This package was built without parallelization support");
-  }
-#endif
   GET_OR_THROW_LLVM_EXPECTED(compilationResult,
                              support.support.compile(module, options));
   return std::move(*compilationResult);
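
Both C API entry points above lose the same #ifndef guard, so requesting any of autoParallelize, loopParallelize or dataflowParallelize no longer throws on builds made without CONCRETELANG_PARALLEL_EXECUTION_ENABLED. The sketch below shows the change as seen from a hypothetical Python caller; the try/except framing and the import path are assumptions, while the option setters, the MLIR program and the error message come from the diff.

# Hypothetical caller, illustrating the old failure mode this commit removes.
from concrete.compiler import CompilationOptions, JITSupport

mlir_input = """
func @main(%arg0: !FHE.eint<7>, %arg1: i8) -> !FHE.eint<7> {
  %1 = "FHE.add_eint_int"(%arg0, %arg1): (!FHE.eint<7>, i8) -> (!FHE.eint<7>)
  return %1: !FHE.eint<7>
}
"""

options = CompilationOptions.new("main")
options.set_auto_parallelize(True)

engine = JITSupport.new()
try:
    engine.compile(mlir_input, options)
except RuntimeError as err:
    # Before this commit, non-parallel builds failed here with
    # "This package was built without parallelization support".
    # After this commit that guard is gone, so this branch can no longer
    # be reached from that check.
    print(err)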


@@ -11,9 +11,9 @@ KEY_SET_CACHE_PATH = os.path.join(tempfile.gettempdir(), "KeySetCache")
 keyset_cache = KeySetCache.new(KEY_SET_CACHE_PATH)
 
 
-def compile_and_run(engine, mlir_input, args, expected_result):
-    options = CompilationOptions.new("main")
-    options.set_auto_parallelize(True)
+def compile_and_run(
+    engine, mlir_input, args, expected_result, options=CompilationOptions.new("main")
+):
     compilation_result = engine.compile(mlir_input, options)
     # Client
     client_parameters = engine.load_client_parameters(compilation_result)
@@ -65,6 +65,47 @@ def compile_and_run(engine, mlir_input, args, expected_result):
         ),
     ],
 )
-def test_compile_and_run_parallel(mlir_input, args, expected_result):
+def test_compile_and_run_auto_parallelize(mlir_input, args, expected_result):
     engine = JITSupport.new()
-    compile_and_run(engine, mlir_input, args, expected_result)
+    options = CompilationOptions.new("main")
+    options.set_auto_parallelize(True)
+    compile_and_run(engine, mlir_input, args, expected_result, options=options)
+
+
+@pytest.mark.parametrize(
+    "mlir_input, args, expected_result",
+    [
+        pytest.param(
+            """
+func @main(%arg0: !FHE.eint<7>, %arg1: i8) -> !FHE.eint<7> {
+  %1 = "FHE.add_eint_int"(%arg0, %arg1): (!FHE.eint<7>, i8) -> (!FHE.eint<7>)
+  return %1: !FHE.eint<7>
+}
+            """,
+            (5, 7),
+            12,
+            id="add_eint_int",
+        ),
+        pytest.param(
+            """
+func @main(%arg0: tensor<4x!FHE.eint<7>>, %arg1: tensor<4xi8>) -> !FHE.eint<7>
+{
+  %ret = "FHELinalg.dot_eint_int"(%arg0, %arg1) :
+    (tensor<4x!FHE.eint<7>>, tensor<4xi8>) -> !FHE.eint<7>
+  return %ret : !FHE.eint<7>
+}
+            """,
+            (
+                np.array([1, 2, 3, 4], dtype=np.uint8),
+                np.array([4, 3, 2, 1], dtype=np.uint8),
+            ),
+            20,
+            id="dot_eint_int_uint8",
+        ),
+    ],
+)
+def test_compile_and_run_loop_parallelize(mlir_input, args, expected_result):
+    engine = JITSupport.new()
+    options = CompilationOptions.new("main")
+    options.set_loop_parallelize(True)
+    compile_and_run(engine, mlir_input, args, expected_result, options=options)
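
After this refactor, sequential and parallel tests share a single helper: callers that pass no options get the plain "main" compilation options, while the parallel tests opt in explicitly. Below is a short sketch of both calling conventions, assuming the same module scope as the test file above; mlir_input, args and expected_result stand in for the parametrized fixtures and are not defined here.

# Sketch only; assumes the helper and fixtures from the test file above.
engine = JITSupport.new()

# Default options: plain sequential compilation, as before this commit.
compile_and_run(engine, mlir_input, args, expected_result)

# Explicit options: each test opts in to the parallelization it exercises.
options = CompilationOptions.new("main")
options.set_loop_parallelize(True)
compile_and_run(engine, mlir_input, args, expected_result, options=options)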