diff --git a/compiler/Makefile b/compiler/Makefile
index 93c5d94ab..839f9276f 100644
--- a/compiler/Makefile
+++ b/compiler/Makefile
@@ -295,7 +295,7 @@ $(BENCHMARK_CPU_DIR)/%.yaml: tests/end_to_end_fixture/%_gen.py
 $(BENCHMARK_CPU_DIR):
 	mkdir -p $@
 
-generate-cpu-benchmarks: $(BENCHMARK_CPU_DIR) $(BENCHMARK_CPU_DIR)/end_to_end_linalg_apply_lookup_table.yaml $(BENCHMARK_CPU_DIR)/end_to_end_apply_lookup_table.yaml
+generate-cpu-benchmarks: $(BENCHMARK_CPU_DIR) $(BENCHMARK_CPU_DIR)/end_to_end_linalg_apply_lookup_table.yaml $(BENCHMARK_CPU_DIR)/end_to_end_apply_lookup_table.yaml $(BENCHMARK_CPU_DIR)/end_to_end_leveled.yaml
 
 build-benchmarks: build-initialized
 	cmake --build $(BUILD_DIR) --target end_to_end_benchmark
diff --git a/compiler/tests/end_to_end_benchmarks/end_to_end_benchmark.cpp b/compiler/tests/end_to_end_benchmarks/end_to_end_benchmark.cpp
index af7f2d453..a83e538e9 100644
--- a/compiler/tests/end_to_end_benchmarks/end_to_end_benchmark.cpp
+++ b/compiler/tests/end_to_end_benchmarks/end_to_end_benchmark.cpp
@@ -98,7 +98,8 @@ static void BM_Evaluate(benchmark::State &state, EndToEndDesc description,
 }
 
 static int registerEndToEndTestFromFile(std::string prefix, std::string path,
-                                        size_t stackSizeRequirement = 0) {
+                                        size_t stackSizeRequirement = 0,
+                                        bool only_evaluate = false) {
   auto registe = [&](std::string optionsName,
                      mlir::concretelang::CompilationOptions options) {
     llvm::for_each(loadEndToEndDesc(path), [&](EndToEndDesc &description) {
@@ -110,22 +111,24 @@ static int registerEndToEndTestFromFile(std::string prefix, std::string path,
           << description.description;
         return s.str();
       };
-      benchmark::RegisterBenchmark(
-          benchName("Compile").c_str(), [=](::benchmark::State &st) {
-            BM_Compile(st, description, support, options);
-          });
-      benchmark::RegisterBenchmark(
-          benchName("KeyGen").c_str(), [=](::benchmark::State &st) {
-            BM_KeyGen(st, description, support, options);
-          });
-      benchmark::RegisterBenchmark(
-          benchName("ExportArguments").c_str(), [=](::benchmark::State &st) {
-            BM_ExportArguments(st, description, support, options);
-          });
       benchmark::RegisterBenchmark(
           benchName("Evaluate").c_str(), [=](::benchmark::State &st) {
             BM_Evaluate(st, description, support, options);
           });
+      if (!only_evaluate) {
+        benchmark::RegisterBenchmark(
+            benchName("Compile").c_str(), [=](::benchmark::State &st) {
+              BM_Compile(st, description, support, options);
+            });
+        benchmark::RegisterBenchmark(
+            benchName("KeyGen").c_str(), [=](::benchmark::State &st) {
+              BM_KeyGen(st, description, support, options);
+            });
+        benchmark::RegisterBenchmark(
+            benchName("ExportArguments").c_str(), [=](::benchmark::State &st) {
+              BM_ExportArguments(st, description, support, options);
+            });
+      }
       return;
     });
   };
@@ -146,11 +149,28 @@ static int registerEndToEndTestFromFile(std::string prefix, std::string path,
   return 1;
 }
 
-auto _ = {registerEndToEndTestFromFile(
-              "FHELinalg", "tests/end_to_end_fixture/benchmarks_cpu/"
-                           "end_to_end_apply_lookup_table.yaml"),
-          registerEndToEndTestFromFile(
-              "FHELinalgTLU", "tests/end_to_end_fixture/benchmarks_cpu/"
-                              "end_to_end_linalg_apply_lookup_table.yaml")};
+auto stackSizeRequirement = 0;
+auto _ = {
+    registerEndToEndTestFromFile("FHELinalgLeveled",
+                                 "tests/end_to_end_fixture/benchmarks_cpu/"
+                                 "end_to_end_leveled.yaml",
+                                 stackSizeRequirement,
+                                 /* only_evaluate = */ false),
+    // For the lookup table benchmarks we only benchmark the keygen time on
+    // the simple lookup table case, to avoid
+    // benchmarking the same keygen several times, as it takes time
+    registerEndToEndTestFromFile("FHELinalg",
+                                 "tests/end_to_end_fixture/benchmarks_cpu/"
+                                 "end_to_end_apply_lookup_table.yaml",
+                                 stackSizeRequirement,
+                                 /* only_evaluate = */ false),
+    // So for the other lookup table benchmarks we only measure the evaluation
+    // times
+    registerEndToEndTestFromFile("FHELinalgTLU",
+                                 "tests/end_to_end_fixture/benchmarks_cpu/"
+                                 "end_to_end_linalg_apply_lookup_table.yaml",
+                                 stackSizeRequirement,
+                                 /* only_evaluate = */ true),
+};
 
 BENCHMARK_MAIN();