diff --git a/concrete/quantization/quantized_array.py b/concrete/quantization/quantized_array.py
index 5c4830207..8285106a1 100644
--- a/concrete/quantization/quantized_array.py
+++ b/concrete/quantization/quantized_array.py
@@ -41,6 +41,11 @@ class QuantizedArray:
 
         if rmax - rmin < STABILITY_CONST:
             # In this case there is a single unique value to quantize
+
+            # If is_signed is True, we need to set the offset back to 0.
+            # Signed quantization does not make sense for a single value.
+            self.offset = 0
+
             # This value could be multiplied with inputs at some point in the model
             # Since zero points need to be integers, if this value is a small float (ex: 0.01)
             # it will be quantized to 0 with a 0 zero-point, thus becoming useless in multiplication
diff --git a/concrete/torch/compile.py b/concrete/torch/compile.py
index a9883f369..b00459b54 100644
--- a/concrete/torch/compile.py
+++ b/concrete/torch/compile.py
@@ -78,7 +78,7 @@ def compile_torch_model(
     )
 
     # Quantize with post-training static method, to have a model with integer weights
-    post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_model)
+    post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_model, is_signed=True)
     quantized_module = post_training_quant.quantize_module(numpy_inputset_as_single_array)
 
     # Quantize input
diff --git a/docs/user/howto/compiling_torch_model.md b/docs/user/howto/compiling_torch_model.md
index 52d06692d..55c3690f7 100644
--- a/docs/user/howto/compiling_torch_model.md
+++ b/docs/user/howto/compiling_torch_model.md
@@ -9,7 +9,6 @@ A simple command can compile a torch model to its FHE counterpart. This process
 ```python
 from torch import nn
 import torch
-torch.manual_seed(0)
 
 class LogisticRegression(nn.Module):
     """LogisticRegression with Torch"""
diff --git a/tests/quantization/test_quantized_module.py b/tests/quantization/test_quantized_module.py
index 821091bc0..0725f156b 100644
--- a/tests/quantization/test_quantized_module.py
+++ b/tests/quantization/test_quantized_module.py
@@ -80,7 +80,11 @@ N_BITS_ATOL_TUPLE_LIST = [
         pytest.param(FC, (100, 32 * 32 * 3)),
     ],
 )
-def test_quantized_linear(model, input_shape, n_bits, atol, seed_torch):
+@pytest.mark.parametrize(
+    "is_signed",
+    [pytest.param(False), pytest.param(True)],
+)
+def test_quantized_linear(model, input_shape, n_bits, atol, is_signed, seed_torch):
     """Test the quantized module with a post-training static quantization.
 
     With n_bits>>0 we expect the results of the quantized module
@@ -97,7 +101,9 @@ def test_quantized_linear(model, input_shape, n_bits, atol, seed_torch):
     # Predict with real model
     numpy_prediction = numpy_fc_model(numpy_input)
     # Quantize with post-training static method
-    post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_fc_model)
+    post_training_quant = PostTrainingAffineQuantization(
+        n_bits, numpy_fc_model, is_signed=is_signed
+    )
     quantized_model = post_training_quant.quantize_module(numpy_input)
     # Quantize input
     q_input = QuantizedArray(n_bits, numpy_input)
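
For context on what `is_signed` changes, below is a minimal, self-contained sketch of affine quantization with a sign offset, written against the behavior visible in this diff. The function `affine_quantize` and the `STABILITY_CONST` value are hypothetical illustrations, not the library's `QuantizedArray` or `PostTrainingAffineQuantization` implementation.

```python
import numpy as np

STABILITY_CONST = 1e-6  # hypothetical stand-in for the library's constant


def affine_quantize(x, n_bits, is_signed=False):
    """Minimal affine quantization sketch (illustrative, not the library code).

    Unsigned mode maps x into [0, 2**n_bits - 1]; signed mode shifts that
    range by offset = 2**(n_bits - 1) into [-2**(n_bits-1), 2**(n_bits-1) - 1].
    """
    rmin, rmax = float(x.min()), float(x.max())
    offset = 2 ** (n_bits - 1) if is_signed else 0

    if rmax - rmin < STABILITY_CONST:
        # Single unique value: a signed range is meaningless here, so the
        # offset goes back to 0 -- the behavior the first hunk adds.
        offset = 0
        scale = rmax if rmax != 0 else 1.0
        zero_point = 0
    else:
        scale = (rmax - rmin) / (2 ** n_bits - 1)
        zero_point = int(round(-rmin / scale))

    q = np.round(x / scale).astype(np.int64) + zero_point - offset
    return q, scale, zero_point, offset


# With 7 bits, the same inputs land in [0, 127] unsigned and [-64, 63] signed.
x = np.array([-0.8, -0.1, 0.3, 0.9])
print(affine_quantize(x, n_bits=7, is_signed=False)[0])  # [  0  53  82 127]
print(affine_quantize(x, n_bits=7, is_signed=True)[0])   # [-64 -11  18  63]
```

Under this reading, the `self.offset = 0` added in the first hunk keeps degenerate single-value arrays quantizing the same way in signed and unsigned mode, while `is_signed=True` in `compile.py` and the new `is_signed` parametrization in the test exercise both halves of the shifted range.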