feat: use signed weights in compile_torch_model

jfrery
2021-12-20 11:45:20 +01:00
committed by Jordan Fréry
parent 57bbb8feab
commit 65be1b0818
4 changed files with 14 additions and 4 deletions


@@ -41,6 +41,11 @@ class QuantizedArray:
if rmax - rmin < STABILITY_CONST:
    # In this case there is a single unique value to quantize.
    # If is_signed is True, we need to set the offset back to 0:
    # signed quantization does not make sense for a single value.
    self.offset = 0
    # This value could be multiplied with inputs at some point in the model.
    # Since zero points need to be integers, if this value is a small float (e.g. 0.01)
    # it will be quantized to 0 with a 0 zero-point, thus becoming useless in multiplication.
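For context on this edge case, here is a minimal, self-contained sketch of the affine scheme it lives in. Only the offset reset mirrors the hunk above; the value of `STABILITY_CONST`, the scale chosen for a lone value, and the clipping are assumptions for illustration, not the library's implementation.

```python
import numpy

STABILITY_CONST = 10**-6  # assumed threshold, standing in for the library's constant


def affine_quantize(values, n_bits, is_signed=False):
    """Toy affine quantizer reproducing the single-value edge case above."""
    rmin, rmax = float(values.min()), float(values.max())
    # Signed ranges are shifted down by an offset of 2**(n_bits - 1)
    offset = 2 ** (n_bits - 1) if is_signed else 0
    if rmax - rmin < STABILITY_CONST:
        # Single unique value: signed quantization makes no sense, so the offset
        # goes back to 0, and the value itself serves as the scale so that a
        # small constant (e.g. 0.01) does not round to a useless 0 code
        scale = rmax if rmax != 0 else 1.0
        return numpy.round(values / scale).astype(numpy.int64), scale, 0
    scale = (rmax - rmin) / (2**n_bits - 1)
    zero_point = int(round(rmin / scale)) + offset
    qvalues = numpy.round(values / scale).astype(numpy.int64) - zero_point
    # Clip to [0, 2**n_bits - 1] unsigned, [-2**(n_bits-1), 2**(n_bits-1) - 1] signed
    return numpy.clip(qvalues, -offset, 2**n_bits - 1 - offset), scale, zero_point


def affine_dequantize(qvalues, scale, zero_point):
    """Map integer codes back to (approximate) floats."""
    return scale * (qvalues + zero_point)
```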


@@ -78,7 +78,7 @@ def compile_torch_model(
)
# Quantize with post-training static method, to have a model with integer weights
post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_model)
post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_model, is_signed=True)
quantized_module = post_training_quant.quantize_module(numpy_inputset_as_single_array)
# Quantize input
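To see what `is_signed=True` changes for weights, reusing the `affine_quantize` sketch above (illustrative only, not `PostTrainingAffineQuantization`'s internals): a weight tensor straddling zero now gets codes centered on 0 rather than an all-positive range.

```python
import numpy

weights = numpy.array([-0.5, -0.1, 0.0, 0.2, 0.5])

q_u, _, zp_u = affine_quantize(weights, n_bits=3, is_signed=False)
q_s, _, zp_s = affine_quantize(weights, n_bits=3, is_signed=True)

print(q_u, zp_u)  # [0 3 4 5 7] -4 : codes span [0, 7]
print(q_s, zp_s)  # [-4 -1  0  1  3] 0 : codes span [-4, 3]
```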


@@ -9,7 +9,6 @@ A simple command can compile a torch model to its FHE counterpart. This process
```python
from torch import nn
import torch
torch.manual_seed(0)
class LogisticRegression(nn.Module):
"""LogisticRegression with Torch"""


@@ -80,7 +80,11 @@ N_BITS_ATOL_TUPLE_LIST = [
pytest.param(FC, (100, 32 * 32 * 3)),
],
)
def test_quantized_linear(model, input_shape, n_bits, atol, seed_torch):
@pytest.mark.parametrize(
    "is_signed",
    [pytest.param(False), pytest.param(True)],
)
def test_quantized_linear(model, input_shape, n_bits, atol, is_signed, seed_torch):
"""Test the quantized module with a post-training static quantization.
With n_bits>>0 we expect the results of the quantized module
@@ -97,7 +101,9 @@ def test_quantized_linear(model, input_shape, n_bits, atol, seed_torch):
# Predict with real model
numpy_prediction = numpy_fc_model(numpy_input)
# Quantize with post-training static method
post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_fc_model)
post_training_quant = PostTrainingAffineQuantization(
    n_bits, numpy_fc_model, is_signed=is_signed
)
quantized_model = post_training_quant.quantize_module(numpy_input)
# Quantize input
q_input = QuantizedArray(n_bits, numpy_input)
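Finally, a toy version of what the parametrized test asserts, again using the `affine_quantize`/`affine_dequantize` sketch rather than the real quantized module: with `n_bits` large, the round trip should match the floats for both values of `is_signed`.

```python
import numpy

rng = numpy.random.default_rng(0)
values = rng.normal(size=(100,))

for is_signed in (False, True):
    qvalues, scale, zero_point = affine_quantize(values, n_bits=16, is_signed=is_signed)
    dequantized = affine_dequantize(qvalues, scale, zero_point)
    # With 16 bits the round-trip error is at most a few quantization steps
    assert numpy.allclose(dequantized, values, atol=1e-3)
```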