From ef74f6b5f676df981665271b8ebd841e80244dff Mon Sep 17 00:00:00 2001
From: Benoit Chevallier-Mames
Date: Wed, 5 Jan 2022 19:49:02 +0100
Subject: [PATCH] chore: Numpy, not NumPy.

---
 docs/dev/explanation/compilation.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/dev/explanation/compilation.md b/docs/dev/explanation/compilation.md
index 7219bc01b..ef2a23143 100644
--- a/docs/dev/explanation/compilation.md
+++ b/docs/dev/explanation/compilation.md
@@ -17,7 +17,7 @@ import concrete.numpy as hnp
 def f(x, y):
     return (2 * x) + y
 
-# Create a NumPy FHE Compiler
+# Create a Numpy FHE Compiler
 compiler = hnp.NPFHECompiler(f, {"x": "encrypted", "y": "encrypted"})
 
 # Compile an FHE Circuit using an inputset
@@ -57,7 +57,7 @@ Here is the visual representation of the pipeline:
 
 Compiling a torch Module is pretty straightforward.
 
-The torch Module is first converted to a NumPy equivalent we call `NumpyModule` if all the layers in the torch Module are supported.
+The torch Module is first converted to a Numpy equivalent we call `NumpyModule` if all the layers in the torch Module are supported.
 Then the module is quantized post-training to be compatible with our compiler which only works on integers.
 The post training quantization uses the provided dataset for calibration.
 