Mirror of https://github.com/zama-ai/concrete.git, synced 2026-02-08 19:44:57 -05:00
feat: use signed weights in compile_torch_model
@@ -41,6 +41,11 @@ class QuantizedArray:
         if rmax - rmin < STABILITY_CONST:
             # In this case there is a single unique value to quantize
+
+            # If is_signed is True, we need to set the offset back to 0.
+            # Signed quantization does not make sense for a single value.
+            self.offset = 0
+
             # This value could be multiplied with inputs at some point in the model
             # Since zero points need to be integers, if this value is a small float (ex: 0.01)
             # it will be quantized to 0 with a 0 zero-point, thus becoming useless in multiplication
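To make the edge case concrete, here is a self-contained sketch of signed/unsigned affine quantization with the same single-value handling; `affine_quantize` and the value of `STABILITY_CONST` are illustrative stand-ins, not the library's actual code:

```python
import numpy as np

STABILITY_CONST = 1e-6  # assumed value; the diff does not show the real constant


def affine_quantize(values: np.ndarray, n_bits: int, is_signed: bool = False):
    """Illustrative affine quantization; returns (q_values, scale, zero_point)."""
    rmin, rmax = float(values.min()), float(values.max())

    # Signed integers shift the representable range by half, e.g. [-8, 7] for 4 bits
    offset = 2 ** (n_bits - 1) if is_signed else 0

    if rmax - rmin < STABILITY_CONST:
        # Single unique value: signed quantization does not make sense,
        # so the offset goes back to 0 (the behavior added in the hunk above)
        offset = 0
        # Keep a usable scale so a small value (e.g. 0.01) does not quantize
        # to 0 with a 0 zero-point and vanish in later multiplications
        scale = rmax if rmax != 0 else 1.0
        zero_point = 0
    else:
        scale = (rmax - rmin) / (2**n_bits - 1)
        zero_point = int(round(-rmin / scale)) - offset

    q_values = (np.round(values / scale) + zero_point).astype(np.int64)
    return q_values, scale, zero_point


# 4-bit signed quantization lands in [-8, 7]; dequantize with scale * (q - zero_point)
q, scale, zp = affine_quantize(np.array([-0.4, -0.15, 0.1, 0.25]), n_bits=4, is_signed=True)
```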
@@ -78,7 +78,7 @@ def compile_torch_model(
     )
 
     # Quantize with post-training static method, to have a model with integer weights
-    post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_model)
+    post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_model, is_signed=True)
     quantized_module = post_training_quant.quantize_module(numpy_inputset_as_single_array)
 
     # Quantize input
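For intuition on what `is_signed=True` changes: the scale is the same either way and only the integer range shifts, so roughly zero-mean weights map onto a symmetric range. A quick check reusing the `affine_quantize` sketch above (illustrative, not the library's API):

```python
import numpy as np

rng = np.random.default_rng(0)
weights = rng.normal(0.0, 0.3, size=16)  # roughly zero-mean, like typical linear weights

q_u, scale_u, zp_u = affine_quantize(weights, n_bits=4, is_signed=False)
q_s, scale_s, zp_s = affine_quantize(weights, n_bits=4, is_signed=True)

assert scale_u == scale_s      # same resolution either way
assert (q_s == q_u - 8).all()  # signed range is the unsigned one shifted by 2**(n_bits - 1)
# Both round-trip to the same floats: scale * (q - zero_point) is unchanged
assert np.allclose(scale_u * (q_u - zp_u), scale_s * (q_s - zp_s))
```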
@@ -9,7 +9,6 @@ A simple command can compile a torch model to its FHE counterpart. This process
 ```python
 from torch import nn
 import torch
-torch.manual_seed(0)
 
 class LogisticRegression(nn.Module):
     """LogisticRegression with Torch"""
@@ -80,7 +80,11 @@ N_BITS_ATOL_TUPLE_LIST = [
         pytest.param(FC, (100, 32 * 32 * 3)),
     ],
 )
-def test_quantized_linear(model, input_shape, n_bits, atol, seed_torch):
+@pytest.mark.parametrize(
+    "is_signed",
+    [pytest.param(False), pytest.param(True)],
+)
+def test_quantized_linear(model, input_shape, n_bits, atol, is_signed, seed_torch):
     """Test the quantized module with a post-training static quantization.
 
     With n_bits>>0 we expect the results of the quantized module
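Stacked `@pytest.mark.parametrize` decorators multiply out, so every existing (model, input_shape, n_bits, atol) case now runs once with `is_signed=False` and once with `is_signed=True`. A minimal standalone illustration (hypothetical test, not the repo's):

```python
import pytest


@pytest.mark.parametrize("n_bits", [2, 8])
@pytest.mark.parametrize("is_signed", [pytest.param(False), pytest.param(True)])
def test_cross_product(n_bits, is_signed):
    # pytest generates 4 cases: (2, False), (2, True), (8, False), (8, True)
    assert isinstance(is_signed, bool) and n_bits in (2, 8)
```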
@@ -97,7 +101,9 @@ def test_quantized_linear(model, input_shape, n_bits, atol, seed_torch):
     # Predict with real model
     numpy_prediction = numpy_fc_model(numpy_input)
     # Quantize with post-training static method
-    post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_fc_model)
+    post_training_quant = PostTrainingAffineQuantization(
+        n_bits, numpy_fc_model, is_signed=is_signed
+    )
     quantized_model = post_training_quant.quantize_module(numpy_input)
     # Quantize input
     q_input = QuantizedArray(n_bits, numpy_input)
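The test's premise, that the quantized pipeline tracks the float one within `atol` once `n_bits` is large, can be sanity-checked against the `affine_quantize` sketch from earlier (again illustrative, not the repo's `QuantizedArray`):

```python
import numpy as np

x = np.linspace(-1.0, 1.0, 101)
for is_signed in (False, True):
    q, scale, zp = affine_quantize(x, n_bits=16, is_signed=is_signed)
    dequantized = scale * (q - zp)
    # With n_bits >> 0 the round-trip error is at most scale / 2, far below atol here
    assert np.allclose(dequantized, x, atol=1e-4)
```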