refactor(tests): Refactor test tools to make it easier to run tests with description files and different compilation options

This commit is contained in:
Quentin Bourgerie
2022-12-01 20:10:40 +01:00
parent 1f7878b961
commit 07c1e8347e
15 changed files with 524 additions and 10587 deletions

View File

@@ -235,22 +235,31 @@ run-rust-tests: rust-bindings
## end-to-end-tests
build-end-to-end-tests: build-end-to-end-jit-test build-end-to-end-jit-fhe build-end-to-end-jit-encrypted-tensor build-end-to-end-jit-fhelinalg build-end-to-end-jit-lambda
FIXTURE_CPU_DIR=tests/end_to_end_fixture/tests_cpu
run-end-to-end-tests: build-end-to-end-tests
find $(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests -name end_to_end* $(FIND_EXECUTABLE_ARG) -type f | xargs -n1 ./run_test_bin.sh
$(FIXTURE_CPU_DIR)/%.yaml: tests/end_to_end_fixture/%_gen.py
mkdir -p $(FIXTURE_CPU_DIR)
$(Python3_EXECUTABLE) $< > $@
$(FIXTURE_CPU_DIR)/bug_report.yaml:
unzip -o $(FIXTURE_CPU_DIR)/bug_report.zip -d $(FIXTURE_CPU_DIR)
generate-cpu-tests: $(FIXTURE_CPU_DIR)/end_to_end_leveled.yaml $(FIXTURE_CPU_DIR)/end_to_end_apply_lookup_table.yaml $(FIXTURE_CPU_DIR)/end_to_end_linalg_apply_lookup_table.yaml $(FIXTURE_CPU_DIR)/bug_report.yaml
build-end-to-end-tests: build-end-to-end-jit-test build-end-to-end-test build-end-to-end-jit-encrypted-tensor build-end-to-end-jit-fhelinalg build-end-to-end-jit-lambda
run-end-to-end-tests: build-end-to-end-tests generate-cpu-tests
$(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests/end_to_end_jit_test
$(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests/end_to_end_jit_encrypted_tensor
$(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests/end_to_end_jit_fhelinalg
$(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests/end_to_end_jit_lambda
$(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests/end_to_end_test --loop-parallelize --jit $(FIXTURE_CPU_DIR)/*.yaml
build-end-to-end-jit-test: build-initialized
cmake --build $(BUILD_DIR) --target end_to_end_jit_test
generate-end-to-end-tests:
$(Python3_EXECUTABLE) ./tests/end_to_end_fixture/end_to_end_linalg_apply_lookup_table_gen.py \
--n-lut 2 --n-ct 4 \
> ./tests/end_to_end_fixture/end_to_end_linalg_2_apply_lookup_table.yaml
unzip -o tests/end_to_end_fixture/bug_report.zip -d tests/end_to_end_fixture/
build-end-to-end-jit-fhe: build-initialized generate-end-to-end-tests
cmake --build $(BUILD_DIR) --target end_to_end_jit_fhe
build-end-to-end-test: build-initialized
cmake --build $(BUILD_DIR) --target end_to_end_test
build-end-to-end-jit-encrypted-tensor: build-initialized
cmake --build $(BUILD_DIR) --target end_to_end_jit_encrypted_tensor
@@ -272,24 +281,26 @@ run-end-to-end-dataflow-tests: build-end-to-end-dataflow-tests
$(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests/end_to_end_jit_auto_parallelization
$(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests/end_to_end_jit_distributed
## GPU tests
build-end-to-end-gpu-tests: build-initialized
cmake --build $(BUILD_DIR) --target end_to_end_gpu_test
run-end-to-end-gpu-tests: build-end-to-end-gpu-tests
$(BUILD_DIR)/tools/concretelang/tests/end_to_end_tests/end_to_end_gpu_test
# benchmark
generate-benchmarks:
$(Python3_EXECUTABLE) ./tests/end_to_end_fixture/end_to_end_linalg_apply_lookup_table_gen.py > tests/end_to_end_fixture/end_to_end_linalg_apply_lookup_table.yaml
$(Python3_EXECUTABLE) ./tests/end_to_end_fixture/end_to_end_linalg_leveled_gen.py > tests/end_to_end_fixture/end_to_end_linalg_leveled.yaml
BENCHMARK_CPU_DIR=tests/end_to_end_fixture/benchmarks_cpu
$(BENCHMARK_CPU_DIR)/end_to_end_linalg_apply_lookup_table.yaml: tests/end_to_end_fixture/end_to_end_linalg_apply_lookup_table_gen.py
$(Python3_EXECUTABLE) $< --n-ct 64 128 1024 > $@
$(BENCHMARK_CPU_DIR)/%.yaml: tests/end_to_end_fixture/%_gen.py
mkdir -p $(FIXTURE_CPU_DIR)
$(Python3_EXECUTABLE) $< > $@
$(BENCHMARK_CPU_DIR):
mkdir -p $@
generate-cpu-benchmarks: $(BENCHMARK_CPU_DIR) $(BENCHMARK_CPU_DIR)/end_to_end_linalg_apply_lookup_table.yaml $(BENCHMARK_CPU_DIR)/end_to_end_apply_lookup_table.yaml
build-benchmarks: build-initialized
cmake --build $(BUILD_DIR) --target end_to_end_benchmark
run-benchmarks: build-benchmarks generate-benchmarks
run-benchmarks: build-benchmarks generate-cpu-benchmarks
$(BUILD_DIR)/bin/end_to_end_benchmark --benchmark_out=benchmarks_results.json --benchmark_out_format=json
build-mlbench: build-initialized

View File

@@ -146,20 +146,11 @@ static int registerEndToEndTestFromFile(std::string prefix, std::string path,
return 1;
}
auto _ = {
registerEndToEndTestFromFile(
"FHE", "tests/end_to_end_fixture/end_to_end_fhe.yaml"),
registerEndToEndTestFromFile(
"EncryptedTensor",
"tests/end_to_end_fixture/end_to_end_encrypted_tensor.yaml"),
registerEndToEndTestFromFile(
"FHELinalg", "tests/end_to_end_fixture/end_to_end_fhelinalg.yaml"),
registerEndToEndTestFromFile(
"FHELinalgTLU",
"tests/end_to_end_fixture/end_to_end_linalg_apply_lookup_table.yaml"),
registerEndToEndTestFromFile(
"FHELinalgLeveled",
"tests/end_to_end_fixture/end_to_end_linalg_leveled.yaml"),
};
auto _ = {registerEndToEndTestFromFile(
"FHELinalg", "tests/end_to_end_fixture/benchmarks_cpu/"
"end_to_end_apply_lookup_table.yaml"),
registerEndToEndTestFromFile(
"FHELinalgTLU", "tests/end_to_end_fixture/benchmarks_cpu/"
"end_to_end_linalg_apply_lookup_table.yaml")};
BENCHMARK_MAIN();

File diff suppressed because one or more lines are too long

View File

@@ -7,15 +7,14 @@ MAX_PRECISION = 16
def main():
print("# /!\ DO NOT EDIT MANUALLY THIS FILE MANUALLY")
print("# /!\ THIS FILE HAS BEEN GENERATED THANKS THE end_to_end_levelled_gen.py scripts")
print("# /!\ THIS FILE HAS BEEN GENERATED")
np.random.seed(0)
for p in range(MIN_PRECISON, MAX_PRECISION+1):
if p != 1:
print("---")
max_value = (2 ** p) - 1
random_lut = np.random.randint(max_value+1, size=2**p)
# identity_apply_lookup_table
print("description: identity_apply_lookup_table_{0}bits".format(p))
print("description: apply_lookup_table_{0}bits".format(p))
print("program: |")
print(
" func.func @main(%arg0: !FHE.eint<{0}>) -> !FHE.eint<{0}> {{".format(p))

View File

@@ -1,320 +0,0 @@
# TODO: Rewrite/Remove
# The FHE.neg_eint op doesn't come with a well-defined semantics as FHE.eint
# has an undefined behavior for under/overflow.
# For now we keep it, knowing the compiler/optimizer behavior, but that could
# break at any time.
description: neg_eint
program: |
func.func @main(%arg0: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 255
- inputs:
- scalar: 4
outputs:
- scalar: 252
- inputs:
- scalar: 250
outputs:
- scalar: 6
---
# TODO: Rewrite/Remove
# The FHE.neg_eint op doesn't come with a well-defined semantics as FHE.eint
# has an undefined behavior for under/overflow.
# For now we keep it, knowing the compiler/optimizer behavior, but that could
# break at any time.
description: neg_eint_16bits
program: |
func.func @main(%arg0: !FHE.eint<16>) -> !FHE.eint<16> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<16>) -> (!FHE.eint<16>)
return %1: !FHE.eint<16>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 131071
- inputs:
- scalar: 131071
outputs:
- scalar: 1
---
description: neg_eint_3bits
program: |
func.func @main(%arg0: !FHE.eint<3>) -> !FHE.eint<3> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<3>) -> (!FHE.eint<3>)
return %1: !FHE.eint<3>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 15
- inputs:
- scalar: 4
outputs:
- scalar: 12
- inputs:
- scalar: 13
outputs:
- scalar: 3
---
description: apply_lookup_table_1_bits
program: |
func.func @main(%arg0: !FHE.eint<1>) -> !FHE.eint<1> {
%tlu = arith.constant dense<[0, 1]> : tensor<2xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<1>, tensor<2xi64>) -> (!FHE.eint<1>)
return %1: !FHE.eint<1>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 1
---
description: apply_lookup_table_2_bits
program: |
func.func @main(%arg0: !FHE.eint<2>) -> !FHE.eint<2> {
%tlu = arith.constant dense<[0, 1, 2, 3]> : tensor<4xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<2>, tensor<4xi64>) -> (!FHE.eint<2>)
return %1: !FHE.eint<2>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 2
outputs:
- scalar: 2
- inputs:
- scalar: 3
outputs:
- scalar: 3
---
description: apply_lookup_table_3_bits
program: |
func.func @main(%arg0: !FHE.eint<3>) -> !FHE.eint<3> {
%tlu = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<3>, tensor<8xi64>) -> (!FHE.eint<3>)
return %1: !FHE.eint<3>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 7
outputs:
- scalar: 7
- inputs:
- scalar: 2
outputs:
- scalar: 2
---
description: apply_lookup_table_4_bits
program: |
func.func @main(%arg0: !FHE.eint<4>) -> !FHE.eint<4> {
%tlu = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]> : tensor<16xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<4>, tensor<16xi64>) -> (!FHE.eint<4>)
return %1: !FHE.eint<4>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 15
outputs:
- scalar: 15
- inputs:
- scalar: 12
outputs:
- scalar: 12
test-error-rates:
- global-p-error: 0.05
nb-repetition: 100
- global-p-error: 0.01
nb-repetition: 100
- global-p-error: 0.001
nb-repetition: 100
---
description: apply_lookup_table_5_bits
program: |
func.func @main(%arg0: !FHE.eint<5>) -> !FHE.eint<5> {
%tlu = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]> : tensor<32xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<5>, tensor<32xi64>) -> (!FHE.eint<5>)
return %1: !FHE.eint<5>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 31
outputs:
- scalar: 31
- inputs:
- scalar: 23
outputs:
- scalar: 23
---
description: apply_lookup_table_6_bits
program: |
func.func @main(%arg0: !FHE.eint<6>) -> !FHE.eint<6> {
%tlu = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63]> : tensor<64xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
return %1: !FHE.eint<6>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 63
outputs:
- scalar: 63
- inputs:
- scalar: 59
outputs:
- scalar: 59
---
description: apply_lookup_table_7_bits
program: |
func.func @main(%arg0: !FHE.eint<7>) -> !FHE.eint<7> {
%tlu = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127]> : tensor<128xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<7>, tensor<128xi64>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 127
outputs:
- scalar: 127
- inputs:
- scalar: 96
outputs:
- scalar: 96
---
description: apply_lookup_table_8_bits
program: |
func.func @main(%arg0: !FHE.eint<8>) -> !FHE.eint<8> {
%tlu = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]> : tensor<256xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<8>, tensor<256xi64>) -> (!FHE.eint<8>)
return %1: !FHE.eint<8>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 255
outputs:
- scalar: 255
- inputs:
- scalar: 96
outputs:
- scalar: 96
---
description: apply_lookup_table_multiple_precision
program: |
func.func @main(%arg0: !FHE.eint<6>, %arg1: !FHE.eint<3>) -> !FHE.eint<6> {
%tlu_7 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63]> : tensor<64xi64>
%tlu_3 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>
%a = "FHE.apply_lookup_table"(%arg0, %tlu_7): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
%b = "FHE.apply_lookup_table"(%arg1, %tlu_3): (!FHE.eint<3>, tensor<8xi64>) -> (!FHE.eint<6>)
%a_plus_b = "FHE.add_eint"(%a, %b): (!FHE.eint<6>, !FHE.eint<6>) -> (!FHE.eint<6>)
return %a_plus_b: !FHE.eint<6>
}
tests:
- inputs:
- scalar: 23
- scalar: 7
outputs:
- scalar: 30
---
description: apply_lookup_table_random_func
program: |
func.func @main(%arg0: !FHE.eint<6>) -> !FHE.eint<6> {
%tlu = arith.constant dense<[16, 91, 16, 83, 80, 74, 21, 96, 1, 63, 49, 122, 76, 89, 74, 55, 109, 110, 103, 54, 105, 14, 66, 47, 52, 89, 7, 10, 73, 44, 119, 92, 25, 104, 123, 100, 108, 86, 29, 121, 118, 52, 107, 48, 34, 37, 13, 122, 107, 48, 74, 59, 96, 36, 50, 55, 120, 72, 27, 45, 12, 5, 96, 12]> : tensor<64xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
return %1: !FHE.eint<6>
}
tests:
- inputs:
- scalar: 5
outputs:
- scalar: 74
- inputs:
- scalar: 62
outputs:
- scalar: 96
- inputs:
- scalar: 0
outputs:
- scalar: 16
- inputs:
- scalar: 63
outputs:
- scalar: 12
---
# https://github.com/zama-ai/concrete-compiler-internal/issues/809
description: bug_809
program: |
func.func @main(%arg0: !FHE.eint<6>) -> !FHE.eint<6> {
%cst = arith.constant dense<[1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]> : tensor<64xi64>
%0 = "FHE.apply_lookup_table"(%arg0, %cst) : (!FHE.eint<6>, tensor<64xi64>) -> !FHE.eint<6>
%cst_0 = arith.constant dense<[0, 10, 20, 30, 40, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73]> : tensor<64xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %cst_0) : (!FHE.eint<6>, tensor<64xi64>) -> !FHE.eint<6>
%2 = "FHE.add_eint"(%0, %1) : (!FHE.eint<6>, !FHE.eint<6>) -> !FHE.eint<6>
return %2 : !FHE.eint<6>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 1
- inputs:
- scalar: 5
outputs:
- scalar: 15
- inputs:
- scalar: 62
outputs:
- scalar: 72
- inputs:
- scalar: 63
outputs:
- scalar: 74

File diff suppressed because it is too large Load Diff

View File

@@ -8,55 +8,55 @@ def generate(args):
print("# /!\ THIS FILE HAS BEEN GENERATED")
np.random.seed(0)
for n_ct in args.n_ct:
for p in range(args.min_bitwidth, args.max_bitwidth+1):
max_value = (2 ** p) - 1
random_lut = np.random.randint(max_value+1, size=2**p)
# identity_apply_lookup_table
print(f"description: apply_lookup_table_{p}bits_{n_ct}ct")
print("program: |")
print(
f" func.func @main(%0: tensor<{n_ct}x!FHE.eint<{p}>>) -> tensor<{n_ct}x!FHE.eint<{p}>> {{")
print(f" %tlu = arith.constant dense<[{','.join(map(str, random_lut))}]> : tensor<{2**p}xi64>")
for i in range(0, args.n_lut):
print(f" %{i+1} = \"FHELinalg.apply_lookup_table\"(%{i}, %tlu):")
print(f" (tensor<{n_ct}x!FHE.eint<{p}>>, tensor<{2**p}xi64>) -> (tensor<{n_ct}x!FHE.eint<{p}>>)")
print(f" return %{args.n_lut}: tensor<{n_ct}x!FHE.eint<{p}>>")
print(" }")
random_input = np.random.randint(max_value+1, size=n_ct)
print("tests:")
print(" - inputs:")
print(f" - tensor: [{','.join(map(str, random_input))}]")
print(f" shape: [{n_ct}]")
outputs = random_input
for i in range(0, args.n_lut):
outputs = [random_lut[v] for v in outputs]
print(" outputs:")
print(f" - tensor: [{','.join(map(str, outputs))}]")
print(f" shape: [{n_ct}]")
print("---")
for p in args.bitwidth:
for n_lut in args.n_lut:
max_value = (2 ** p) - 1
random_lut = np.random.randint(max_value+1, size=2**p)
# identity_apply_lookup_table
print(f"description: apply_lookup_table_{p}bits_{n_ct}ct_{n_lut}layer")
print("program: |")
print(
f" func.func @main(%0: tensor<{n_ct}x!FHE.eint<{p}>>) -> tensor<{n_ct}x!FHE.eint<{p}>> {{")
print(f" %tlu = arith.constant dense<[{','.join(map(str, random_lut))}]> : tensor<{2**p}xi64>")
for i in range(0, n_lut):
print(f" %{i+1} = \"FHELinalg.apply_lookup_table\"(%{i}, %tlu):")
print(f" (tensor<{n_ct}x!FHE.eint<{p}>>, tensor<{2**p}xi64>) -> (tensor<{n_ct}x!FHE.eint<{p}>>)")
print(f" return %{n_lut}: tensor<{n_ct}x!FHE.eint<{p}>>")
print(" }")
random_input = np.random.randint(max_value+1, size=n_ct)
print("tests:")
print(" - inputs:")
print(f" - tensor: [{','.join(map(str, random_input))}]")
print(f" shape: [{n_ct}]")
outputs = random_input
for i in range(0, n_lut):
outputs = [random_lut[v] for v in outputs]
print(" outputs:")
print(f" - tensor: [{','.join(map(str, outputs))}]")
print(f" shape: [{n_ct}]")
print("---")
CLI = argparse.ArgumentParser()
CLI.add_argument(
"--min-bitwidth",
"--bitwidth",
help="Specify the list of bitwidth to generate",
nargs="+",
type=int,
default=1,
)
CLI.add_argument(
"--max-bitwidth",
type=int,
default=16,
default=list(range(1,16)),
)
CLI.add_argument(
"--n-ct",
help="Specify the tensor sizes to generate",
nargs="+",
type=int,
default=[1, 64, 128, 1024],
default=[4],
)
CLI.add_argument(
"--n-lut",
help="Specify the number of FHELinalg.apply_lookup_table layers to generate",
nargs="+",
type=int,
default=1,
default=[1,2],
)
generate(CLI.parse_args())

View File

@@ -0,0 +1,148 @@
# TODO: Rewrite/Remove
# The FHE.neg_eint op doesn't come with a well-defined semantics as FHE.eint
# has an undefined behavior for under/overflow.
# For now we keep it, knowing the compiler/optimizer behavior, but that could
# break at any time.
description: neg_eint
program: |
func.func @main(%arg0: !FHE.eint<7>) -> !FHE.eint<7> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<7>) -> (!FHE.eint<7>)
return %1: !FHE.eint<7>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 255
- inputs:
- scalar: 4
outputs:
- scalar: 252
- inputs:
- scalar: 250
outputs:
- scalar: 6
---
# TODO: Rewrite/Remove
# The FHE.neg_eint op doesn't come with a well-defined semantics as FHE.eint
# has an undefined behavior for under/overflow.
# For now we keep it, knowing the compiler/optimizer behavior, but that could
# break at any time.
description: neg_eint_16bits
program: |
func.func @main(%arg0: !FHE.eint<16>) -> !FHE.eint<16> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<16>) -> (!FHE.eint<16>)
return %1: !FHE.eint<16>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 131071
- inputs:
- scalar: 131071
outputs:
- scalar: 1
---
description: neg_eint_3bits
program: |
func.func @main(%arg0: !FHE.eint<3>) -> !FHE.eint<3> {
%1 = "FHE.neg_eint"(%arg0): (!FHE.eint<3>) -> (!FHE.eint<3>)
return %1: !FHE.eint<3>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 0
- inputs:
- scalar: 1
outputs:
- scalar: 15
- inputs:
- scalar: 4
outputs:
- scalar: 12
- inputs:
- scalar: 13
outputs:
- scalar: 3
---
description: apply_lookup_table_multiple_precision
program: |
func.func @main(%arg0: !FHE.eint<6>, %arg1: !FHE.eint<3>) -> !FHE.eint<6> {
%tlu_7 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63]> : tensor<64xi64>
%tlu_3 = arith.constant dense<[0, 1, 2, 3, 4, 5, 6, 7]> : tensor<8xi64>
%a = "FHE.apply_lookup_table"(%arg0, %tlu_7): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
%b = "FHE.apply_lookup_table"(%arg1, %tlu_3): (!FHE.eint<3>, tensor<8xi64>) -> (!FHE.eint<6>)
%a_plus_b = "FHE.add_eint"(%a, %b): (!FHE.eint<6>, !FHE.eint<6>) -> (!FHE.eint<6>)
return %a_plus_b: !FHE.eint<6>
}
tests:
- inputs:
- scalar: 23
- scalar: 7
outputs:
- scalar: 30
---
description: apply_lookup_table_random_func
program: |
func.func @main(%arg0: !FHE.eint<6>) -> !FHE.eint<6> {
%tlu = arith.constant dense<[16, 91, 16, 83, 80, 74, 21, 96, 1, 63, 49, 122, 76, 89, 74, 55, 109, 110, 103, 54, 105, 14, 66, 47, 52, 89, 7, 10, 73, 44, 119, 92, 25, 104, 123, 100, 108, 86, 29, 121, 118, 52, 107, 48, 34, 37, 13, 122, 107, 48, 74, 59, 96, 36, 50, 55, 120, 72, 27, 45, 12, 5, 96, 12]> : tensor<64xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %tlu): (!FHE.eint<6>, tensor<64xi64>) -> (!FHE.eint<6>)
return %1: !FHE.eint<6>
}
tests:
- inputs:
- scalar: 5
outputs:
- scalar: 74
- inputs:
- scalar: 62
outputs:
- scalar: 96
- inputs:
- scalar: 0
outputs:
- scalar: 16
- inputs:
- scalar: 63
outputs:
- scalar: 12
---
# https://github.com/zama-ai/concrete-compiler-internal/issues/809
description: bug_809
program: |
func.func @main(%arg0: !FHE.eint<6>) -> !FHE.eint<6> {
%cst = arith.constant dense<[1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1]> : tensor<64xi64>
%0 = "FHE.apply_lookup_table"(%arg0, %cst) : (!FHE.eint<6>, tensor<64xi64>) -> !FHE.eint<6>
%cst_0 = arith.constant dense<[0, 10, 20, 30, 40, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73]> : tensor<64xi64>
%1 = "FHE.apply_lookup_table"(%arg0, %cst_0) : (!FHE.eint<6>, tensor<64xi64>) -> !FHE.eint<6>
%2 = "FHE.add_eint"(%0, %1) : (!FHE.eint<6>, !FHE.eint<6>) -> !FHE.eint<6>
return %2 : !FHE.eint<6>
}
tests:
- inputs:
- scalar: 0
outputs:
- scalar: 1
- inputs:
- scalar: 5
outputs:
- scalar: 15
- inputs:
- scalar: 62
outputs:
- scalar: 72
- inputs:
- scalar: 63
outputs:
- scalar: 74

View File

@@ -24,7 +24,7 @@ add_concretecompiler_unittest(end_to_end_jit_test end_to_end_jit_test.cc globals
add_concretecompiler_unittest(end_to_end_jit_encrypted_tensor end_to_end_jit_encrypted_tensor.cc globals.cc)
add_concretecompiler_unittest(end_to_end_jit_fhe end_to_end_jit_fhe.cc globals.cc)
add_concretecompiler_unittest(end_to_end_test end_to_end_test.cc globals.cc)
add_concretecompiler_unittest(end_to_end_jit_fhelinalg end_to_end_jit_fhelinalg.cc globals.cc)

View File

@@ -1,274 +0,0 @@
#include <cstdint>
#include <gtest/gtest.h>
#include <type_traits>
#include "concretelang/Support/CompilationFeedback.h"
#include "concretelang/Support/JITSupport.h"
#include "concretelang/Support/LibrarySupport.h"
#include "end_to_end_fixture/EndToEndFixture.h"
#include "end_to_end_jit_test.h"
#include "tests_tools/GtestEnvironment.h"
#include "tests_tools/keySetCache.h"
#define CHECK_OR_ERROR(val) \
{ \
if (!bool(val)) { \
return StreamStringError(llvm::toString(std::move(val.takeError())) + \
"\nInvalid '" #val "'"); \
} \
}
using mlir::concretelang::StreamStringError;
typedef std::pair<EndToEndDesc, mlir::concretelang::CompilationOptions>
TestParam;
template <typename LambdaSupport>
void compile_and_run(EndToEndDesc desc,
mlir::concretelang::CompilationOptions options,
LambdaSupport support) {
if (desc.v0Constraint.hasValue()) {
options.v0FHEConstraints = *desc.v0Constraint;
}
if (desc.v0Parameter.hasValue()) {
options.v0Parameter = *desc.v0Parameter;
}
if (desc.largeIntegerParameter.hasValue()) {
options.largeIntegerParameter = *desc.largeIntegerParameter;
}
if (desc.test_error_rates.empty()) {
compile_and_run_for_config(desc, support, options, llvm::None);
} else {
for (auto test_error_rate : desc.test_error_rates) {
options.optimizerConfig.global_p_error = test_error_rate.global_p_error;
options.optimizerConfig.p_error = test_error_rate.global_p_error;
compile_and_run_for_config(desc, support, options, test_error_rate);
}
}
}
template <typename LambdaSupport>
void compile_and_run_for_config(EndToEndDesc desc, LambdaSupport support,
mlir::concretelang::CompilationOptions options,
llvm::Optional<TestErrorRate> test_error_rate) {
/* 1 - Compile the program */
auto compilationResult = support.compile(desc.program, options);
ASSERT_EXPECTED_SUCCESS(compilationResult);
/* 2 - Load the client parameters and build the keySet */
auto clientParameters = support.loadClientParameters(**compilationResult);
ASSERT_EXPECTED_SUCCESS(clientParameters);
auto keySet = support.keySet(*clientParameters, getTestKeySetCache());
ASSERT_EXPECTED_SUCCESS(keySet);
auto evaluationKeys = (*keySet)->evaluationKeys();
/* 3 - Load the server lambda */
auto serverLambda = support.loadServerLambda(**compilationResult);
ASSERT_EXPECTED_SUCCESS(serverLambda);
// Just test that we can load the compilation feedback
auto feedback = support.loadCompilationFeedback(**compilationResult);
ASSERT_EXPECTED_SUCCESS(feedback);
assert_all_test_entries(desc, test_error_rate, support, keySet,
evaluationKeys, clientParameters, serverLambda);
}
template <typename LambdaSupport, typename KeySet, typename EvaluationKeys,
typename ClientParameters, typename ServerLambda>
llvm::Error run_once_1_test_entry_once(TestDescription &test,
LambdaSupport &support, KeySet &keySet,
EvaluationKeys &evaluationKeys,
ClientParameters &clientParameters,
ServerLambda &serverLambda) {
std::vector<const mlir::concretelang::LambdaArgument *> inputArguments;
inputArguments.reserve(test.inputs.size());
for (auto &input : test.inputs) {
inputArguments.push_back(&input.getValue());
}
/* 4 - Create the public arguments */
auto publicArguments =
support.exportArguments(*clientParameters, **keySet, inputArguments);
CHECK_OR_ERROR(publicArguments);
/* 5 - Call the server lambda */
auto publicResult =
support.serverCall(*serverLambda, **publicArguments, evaluationKeys);
CHECK_OR_ERROR(publicResult);
/* 6 - Decrypt the public result */
auto result = mlir::concretelang::typedResult<
std::unique_ptr<mlir::concretelang::LambdaArgument>>(**keySet,
**publicResult);
/* 7 - Check result */
CHECK_OR_ERROR(result);
auto error = checkResult(test.outputs[0], **result);
return error;
}
template <typename LambdaSupport, typename KeySet, typename EvaluationKeys,
typename ClientParameters, typename ServerLambda>
void assert_all_test_entries(EndToEndDesc &desc,
llvm::Optional<TestErrorRate> &opt_test_error_rate,
LambdaSupport &support, KeySet &keySet,
EvaluationKeys &evaluationKeys,
ClientParameters &clientParameters,
ServerLambda &serverLambda) {
auto run = [&](TestDescription &test) {
return run_once_1_test_entry_once(test, support, keySet, evaluationKeys,
clientParameters, serverLambda);
};
if (!opt_test_error_rate.has_value()) {
for (auto test : desc.tests) {
ASSERT_LLVM_ERROR(run(test));
}
return;
}
auto test_error_rate = opt_test_error_rate.value();
ASSERT_LE(desc.tests.size(), test_error_rate.nb_repetition);
int nb_error = 0;
for (size_t i = 0; i < test_error_rate.nb_repetition; i++) {
auto test = desc.tests[i % desc.tests.size()];
auto error = run(test);
if (error) {
nb_error += 1;
DISCARD_LLVM_ERROR(error);
}
}
double maximum_errors = test_error_rate.too_high_error_count_threshold();
// std::cout << "n_rep " << maximum_errors << " p_error " <<
// test_error_rate.p_error << " maximum_errors " << maximum_errors << "\n";
ASSERT_LE(nb_error, maximum_errors) << "Empirical error rate is too high";
}
std::string printEndToEndDesc(const testing::TestParamInfo<TestParam> desc) {
const auto options = desc.param.second;
std::ostringstream opt;
if (options.loopParallelize)
opt << "_loop";
if (options.dataflowParallelize)
opt << "_dataflow";
if (options.emitGPUOps)
opt << "_gpu";
std::ostringstream name;
name << desc.param.first.description;
auto ostr = opt.str();
if (ostr.size() == 0) {
ostr = "_default";
}
name << ostr;
return name.str();
}
// Macro to define an end-to-end TestSuite that runs tests using the
// LambdaSupport according to an EndToEndDesc
#define INSTANTIATE_END_TO_END_COMPILE_AND_RUN(TestSuite, lambdaSupport) \
TEST_P(TestSuite, compile_and_run) { \
auto param = GetParam(); \
compile_and_run(std::get<0>(param), std::get<1>(param), lambdaSupport); \
}
std::vector<TestParam>
testParam(std::vector<EndToEndDesc> descs,
std::vector<mlir::concretelang::CompilationOptions> options) {
std::vector<TestParam> params;
for (auto opt : options) {
std::transform(descs.begin(), descs.end(), std::back_inserter(params),
[&](auto d) { return TestParam(d, opt); });
}
return params;
}
#define INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE(prefix, suite, options, \
lambdasupport, path) \
namespace prefix##suite { \
auto descs = loadEndToEndDesc(path); \
std::vector<TestParam> params = testParam(descs, options); \
auto values = testing::ValuesIn<std::vector<TestParam>>(params); \
INSTANTIATE_TEST_SUITE_P(prefix, suite, values, printEndToEndDesc); \
}
#define INSTANTIATE_END_TO_END_TEST_SUITE_FROM_ALL_TEST_FILES(suite, options, \
lambdasupport) \
\
class suite : public testing::TestWithParam<TestParam> {}; \
INSTANTIATE_END_TO_END_COMPILE_AND_RUN(suite, lambdasupport) \
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE( \
ClearTensor, suite, options, lambdasupport, \
"tests/end_to_end_fixture/end_to_end_clear_tensor.yaml") \
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE( \
FHE, suite, options, lambdasupport, \
"tests/end_to_end_fixture/end_to_end_fhe.yaml") \
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE( \
EncryptedTensor, suite, options, lambdasupport, \
"tests/end_to_end_fixture/end_to_end_encrypted_tensor.yaml") \
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE( \
FHELinalg, suite, options, lambdasupport, \
"tests/end_to_end_fixture/end_to_end_fhelinalg.yaml") \
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE( \
FHELeveledOps, suite, options, lambdasupport, \
"tests/end_to_end_fixture/end_to_end_leveled.yaml") \
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE( \
FHEApplyLookupTable, suite, options, lambdasupport, \
"tests/end_to_end_fixture/end_to_end_apply_lookup_table.yaml") \
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE( \
FHELinalgLookupTable, suite, options, lambdasupport, \
"tests/end_to_end_fixture/end_to_end_linalg_2_apply_lookup_table.yaml") \
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_FILE( \
BugReport000, suite, options, lambdasupport, \
"tests/end_to_end_fixture/bug_report.yaml")
mlir::concretelang::CompilationOptions defaultOptions() {
mlir::concretelang::CompilationOptions o("main");
return o;
}
mlir::concretelang::CompilationOptions loopOptions() {
mlir::concretelang::CompilationOptions o("main");
o.loopParallelize = true;
return o;
}
mlir::concretelang::CompilationOptions dataflowOptions() {
mlir::concretelang::CompilationOptions o("main");
o.dataflowParallelize = true;
return o;
}
mlir::concretelang::CompilationOptions gpuOptions() {
mlir::concretelang::CompilationOptions o("main");
o.emitGPUOps = true;
o.optimizerConfig.display = true;
return o;
}
/// Instantiate the test suite for Jit
// JIT runs only exercise the default options.
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_ALL_TEST_FILES(
    JitTest, {defaultOptions()}, mlir::concretelang::JITSupport())

// All option sets exercised by the Library suite; the dataflow and GPU
// variants are only compiled in when the corresponding feature macro is
// defined at build time.
std::vector<mlir::concretelang::CompilationOptions> allOptions{
    defaultOptions(),
    loopOptions(),
#ifdef CONCRETELANG_DATAFLOW_EXECUTION_ENABLED
    dataflowOptions(),
#endif
#ifdef CONCRETELANG_CUDA_SUPPORT
    gpuOptions(),
#endif
};

/// Instantiate the test suite for Library
// Compilation artifacts go under /tmp, suffixed by the test description.
// NOTE(review): `param` is presumably bound inside the macro expansion
// (per-test parameter) — confirm against the macro definitions.
INSTANTIATE_END_TO_END_TEST_SUITE_FROM_ALL_TEST_FILES(
    LibraryTest, allOptions,
    mlir::concretelang::LibrarySupport("/tmp/end_to_end_test_" +
                                       param.first.description))

View File

@@ -0,0 +1,297 @@
#include <cstdint>
#include <filesystem>
#include <gtest/gtest.h>
#include <type_traits>
#include "concretelang/Support/CompilationFeedback.h"
#include "concretelang/Support/JITSupport.h"
#include "concretelang/Support/LibrarySupport.h"
#include "end_to_end_fixture/EndToEndFixture.h"
#include "end_to_end_jit_test.h"
#include "tests_tools/GtestEnvironment.h"
#include "tests_tools/keySetCache.h"
/// @brief EndToEndTest is a template that allows testing for one program for a
/// TestDescription using a LambdaSupport.
///
/// SetUp() compiles the program, builds the keyset and exports the encrypted
/// arguments; TestBody() then either checks the exact decrypted result once
/// (no error rate configured) or runs the program repeatedly and checks the
/// empirical error count against the configured threshold.
template <typename LambdaSupport> class EndToEndTest : public ::testing::Test {
public:
  /// @param program   Program under test.
  /// @param desc      One test case: inputs and expected outputs.
  /// @param errorRate When set, run the statistical error-rate check and
  ///                  override the optimizer p_error settings accordingly.
  /// @param support   Lambda support used to compile and run (JIT/Library).
  /// @param options   Compilation options for this run.
  explicit EndToEndTest(std::string program, TestDescription desc,
                        llvm::Optional<TestErrorRate> errorRate,
                        LambdaSupport support,
                        mlir::concretelang::CompilationOptions options)
      : program(program), desc(desc), errorRate(errorRate), support(support),
        options(options) {
    if (errorRate.hasValue()) {
      // BUGFIX: inside the constructor body the parameter `options` shadows
      // the member of the same name, so the previous code mutated the dead
      // parameter copy *after* the member had been initialized — the p_error
      // override never reached the compilation. Write through `this->`.
      this->options.optimizerConfig.global_p_error = errorRate->global_p_error;
      // NOTE(review): p_error is set from global_p_error, as in the original
      // code — confirm it should not be a distinct per-op value.
      this->options.optimizerConfig.p_error = errorRate->global_p_error;
    }
  }

  /// Compile the program, build the keyset and export the public arguments.
  void SetUp() override {
    /* Compile the program */
    auto expectCompilationResult = support.compile(program, options);
    ASSERT_EXPECTED_SUCCESS(expectCompilationResult);

    /* Load the client parameters */
    auto expectClientParameters =
        support.loadClientParameters(**expectCompilationResult);
    ASSERT_EXPECTED_SUCCESS(expectClientParameters);
    clientParameters = *expectClientParameters;

    /* Build the keyset (cached across tests via getTestKeySetCache) */
    auto expectKeySet = support.keySet(clientParameters, getTestKeySetCache());
    ASSERT_EXPECTED_SUCCESS(expectKeySet);
    keySet = std::move(*expectKeySet);

    /* Load the server lambda */
    auto expectServerLambda =
        support.loadServerLambda(**expectCompilationResult);
    ASSERT_EXPECTED_SUCCESS(expectServerLambda);
    serverLambda = *expectServerLambda;

    /* Create the public argument */
    std::vector<const mlir::concretelang::LambdaArgument *> inputArguments;
    inputArguments.reserve(desc.inputs.size());
    for (auto &input : desc.inputs) {
      inputArguments.push_back(&input.getValue());
    }
    auto expectPublicArguments =
        support.exportArguments(clientParameters, *keySet, inputArguments);
    ASSERT_EXPECTED_SUCCESS(expectPublicArguments);
    publicArguments = std::move(*expectPublicArguments);
  }

  /// Dispatch to the exact-result or error-rate check.
  void TestBody() override {
    if (!errorRate.hasValue()) {
      testOnce();
    } else {
      testErrorRate();
    }
  }

  /// Run the server lambda once, decrypt and compare against the expected
  /// output exactly.
  void testOnce() {
    auto evaluationKeys = keySet->evaluationKeys();
    /* Call the server lambda */
    auto publicResult =
        support.serverCall(serverLambda, *publicArguments, evaluationKeys);
    ASSERT_EXPECTED_SUCCESS(publicResult);
    /* Decrypt the public result */
    auto result = mlir::concretelang::typedResult<
        std::unique_ptr<mlir::concretelang::LambdaArgument>>(*keySet,
                                                             **publicResult);
    ASSERT_EXPECTED_SUCCESS(result);
    /* Check result */
    // For now we support just one result
    assert(desc.outputs.size() == 1);
    ASSERT_LLVM_ERROR(checkResult(desc.outputs[0], **result));
  }

  /// Run the server lambda nb_repetition times and assert that the number of
  /// wrong results stays below the threshold derived from the target p_error.
  void testErrorRate() {
    auto evaluationKeys = keySet->evaluationKeys();
    auto nbError = 0;
    for (size_t i = 0; i < errorRate->nb_repetition; i++) {
      /* Call the server lambda */
      auto publicResult =
          support.serverCall(serverLambda, *publicArguments, evaluationKeys);
      ASSERT_EXPECTED_SUCCESS(publicResult);
      /* Decrypt the public result */
      auto result = mlir::concretelang::typedResult<
          std::unique_ptr<mlir::concretelang::LambdaArgument>>(*keySet,
                                                               **publicResult);
      ASSERT_EXPECTED_SUCCESS(result);
      /* Check result */
      // For now we support just one result
      assert(desc.outputs.size() == 1);
      // A mismatch here is an expected statistical event, not a test failure:
      // count it and discard the error.
      auto err = checkResult(desc.outputs[0], **result);
      if (err) {
        nbError++;
        DISCARD_LLVM_ERROR(err);
      }
    }
    double threshold = errorRate->too_high_error_count_threshold();
    std::cout << "n_rep " << errorRate->nb_repetition << " p_error "
              << errorRate->global_p_error << " maximum_errors " << threshold
              << "\n";
    ASSERT_LE(nbError, threshold) << "Empirical error rate is too high";
  }

private:
  std::string program;
  TestDescription desc;
  llvm::Optional<TestErrorRate> errorRate;
  LambdaSupport support;
  mlir::concretelang::CompilationOptions options;

  // Initialized by the SetUp
  typename LambdaSupport::lambda serverLambda;
  mlir::concretelang::ClientParameters clientParameters;
  std::unique_ptr<concretelang::clientlib::KeySet> keySet;
  std::unique_ptr<concretelang::clientlib::PublicArguments> publicArguments;
};
/// Build a unique test name "<flags>.<description>.<testNum>", where <flags>
/// encodes the enabled compilation options ("default" when none are set).
std::string getTestName(EndToEndDesc desc,
                        mlir::concretelang::CompilationOptions options,
                        int testNum) {
  std::ostringstream name;
  if (options.loopParallelize)
    name << "_loop";
  if (options.dataflowParallelize)
    name << "_dataflow";
  if (options.emitGPUOps)
    name << "_gpu";
  if (name.str().empty())
    name << "_default";
  name << "." << desc.description << "." << testNum;
  // Drop the leading '_' separator.
  return name.str().substr(1);
}
/// Register one gtest case that compiles and runs `program` against `test`:
/// with the Library support when `libpath` is non-empty (artifacts prefixed
/// by it), otherwise with the JIT support.
void registerEndToEnd(std::string suiteName, std::string testName,
                      std::string valueName, std::string libpath,
                      std::string program, TestDescription test,
                      llvm::Optional<TestErrorRate> errorRate,
                      mlir::concretelang::CompilationOptions options) {
  // TODO: Get file and line from yaml
  const char *sourceFile = __FILE__;
  const int sourceLine = __LINE__;
  if (!libpath.empty()) {
    ::testing::RegisterTest(
        suiteName.c_str(), testName.c_str(), nullptr, valueName.c_str(),
        sourceFile, sourceLine,
        [=]() -> EndToEndTest<mlir::concretelang::LibrarySupport> * {
          return new EndToEndTest<mlir::concretelang::LibrarySupport>(
              program, test, errorRate,
              mlir::concretelang::LibrarySupport(libpath), options);
        });
  } else {
    ::testing::RegisterTest(
        suiteName.c_str(), testName.c_str(), nullptr, valueName.c_str(),
        sourceFile, sourceLine,
        [=]() -> EndToEndTest<mlir::concretelang::JITSupport> * {
          return new EndToEndTest<mlir::concretelang::JITSupport>(
              program, test, errorRate, mlir::concretelang::JITSupport(),
              options);
        });
  }
}
/// Register all gtest cases of one description: one case per TestDescription,
/// multiplied by each configured error rate when present.
/// @param suiteName Name of the gtest suite the cases are registered under.
/// @param libpath   Library artifact prefix; empty selects the JIT support.
/// @param desc      End-to-end description (program, tests, constraints).
/// @param options   Compilation options, possibly overridden by the
///                  description's v0 constraint.
void registerEndToEnd(std::string suiteName, std::string libpath,
                      EndToEndDesc desc,
                      mlir::concretelang::CompilationOptions options) {
  if (desc.v0Constraint.hasValue()) {
    options.v0FHEConstraints = desc.v0Constraint;
  }
  // Loop-invariant: the artifact path (library mode) or empty (JIT mode).
  auto artifactPath = libpath.empty() ? libpath : libpath + desc.description;
  auto i = 0;
  // Iterate by const reference: TestDescription holds the test's inputs and
  // outputs and is needlessly expensive to copy per iteration.
  for (const auto &test : desc.tests) {
    auto valueName = std::to_string(i);
    auto testName = getTestName(desc, options, i);
    if (desc.test_error_rates.empty()) {
      // No error rates configured: single exact-result check.
      registerEndToEnd(suiteName, testName, valueName, artifactPath,
                       desc.program, test, llvm::None, options);
    } else {
      // One statistical check per configured error rate.
      auto j = 0;
      for (const auto &rate : desc.test_error_rates) {
        auto rateName = testName + "_rate" + std::to_string(j);
        registerEndToEnd(suiteName, rateName, valueName, artifactPath,
                         desc.program, test, rate, options);
        j++;
      }
    }
    i++;
  }
}
/// @brief Register a suite of end to end test
/// @param suiteName The name of the suite.
/// @param libpath Library artifact prefix; empty selects the JIT support.
/// @param descriptions A vector of description of tests to register.
/// @param options The compilation options.
void registerEndToEndSuite(std::string suiteName, std::string libpath,
                           std::vector<EndToEndDesc> descriptions,
                           mlir::concretelang::CompilationOptions options) {
  for (const auto &description : descriptions) {
    registerEndToEnd(suiteName, libpath, description, options);
  }
}
namespace path = llvm::sys::path;
/// Entry point: parses gtest and compiler command-line options, registers one
/// gtest suite per YAML description file, then runs all registered tests.
/// Returns 1 on invalid option combinations, otherwise gtest's exit status.
int main(int argc, char **argv) {
  // Parse google test options, update argc and argv by removing gtest options
  ::testing::InitGoogleTest(&argc, argv);

  // Main command line options
  llvm::cl::ResetCommandLineParser();
  llvm::cl::list<std::string> descriptionFiles(
      llvm::cl::Positional, llvm::cl::desc("<End to end description Files>"),
      llvm::cl::OneOrMore);

  // Compilation options
  llvm::cl::opt<bool> loopParallelize(
      "loop-parallelize",
      llvm::cl::desc(
          "Set the loopParallelize compilation options to run the tests"),
      llvm::cl::init(false));
  // BUGFIX: help text previously said "loopParallelize" (copy-paste).
  llvm::cl::opt<bool> dataflowParallelize(
      "dataflow-parallelize",
      llvm::cl::desc(
          "Set the dataflowParallelize compilation options to run the tests"),
      llvm::cl::init(false));
  llvm::cl::opt<bool> emitGPUOps(
      "emit-gpu-ops",
      llvm::cl::desc("Set the emitGPUOps compilation options to run the tests"),
      llvm::cl::init(false));

  // Optimizer options
  llvm::cl::opt<bool> optimizerDisplay(
      "optimizer-display",
      llvm::cl::desc("Set the optimizerConfig.display compilation options to "
                     "run the tests"),
      llvm::cl::init(false));

  // JIT or Library support
  // BUGFIX: closing parenthesis was missing in the help text.
  llvm::cl::opt<bool> jit(
      "jit",
      llvm::cl::desc("Use JIT support to run the tests (default, overwritten "
                     "if --library is set)"),
      llvm::cl::init(true));
  llvm::cl::opt<std::string> library(
      "library",
      llvm::cl::desc("Use library support to run the tests and specify the "
                     "prefix for compilation artifacts"),
      llvm::cl::init<std::string>(""));

  llvm::cl::ParseCommandLineOptions(argc, argv);

  // Validate the support selection once, before registering anything
  // (loop-invariant: libpath and jit do not change per description file).
  auto libpath = library.getValue();
  if (libpath.empty() && !jit.getValue()) {
    llvm::errs()
        << "You must specify the library path or use jit to run the test\n";
    return 1;
  }

  // Build compilation options
  mlir::concretelang::CompilationOptions compilationOptions("main");
  compilationOptions.loopParallelize = loopParallelize.getValue();
  compilationOptions.dataflowParallelize = dataflowParallelize.getValue();
  compilationOptions.emitGPUOps = emitGPUOps.getValue();
  compilationOptions.optimizerConfig.display = optimizerDisplay.getValue();

  // Register one suite per description file, suffixed by the support kind.
  for (auto descFile : descriptionFiles) {
    auto desc = loadEndToEndDesc(descFile);
    auto suiteName = path::stem(descFile).str();
    if (libpath.empty()) {
      suiteName = suiteName + ".jit";
    } else {
      suiteName = suiteName + ".library";
    }
    registerEndToEndSuite(suiteName, libpath, desc, compilationOptions);
  }
  return RUN_ALL_TESTS();
}