feat: add quantization utilities

This commit is contained in:
jfrery
2021-10-27 18:50:34 +02:00
committed by jfrery
parent 5af498cb60
commit c5952cd09f
10 changed files with 478 additions and 20 deletions

View File

@@ -0,0 +1,42 @@
"""Tests for the quantized activation functions."""
import numpy
import pytest
from concrete.quantization import QuantizedArray, QuantizedSigmoid
# (n_bits, absolute tolerance) pairs: fewer bits -> looser dequantization tolerance.
N_BITS_ATOL_TUPLE_LIST = [
    (32, 1e-2),
    (28, 1e-2),
    (20, 1e-2),
    (16, 1e-1),
    (8, 1.0),
    (4, 1.0),
]


@pytest.mark.parametrize(
    "n_bits, atol",
    [pytest.param(n_bits, atol) for n_bits, atol in N_BITS_ATOL_TUPLE_LIST],
)
@pytest.mark.parametrize(
    "quant_activation, values",
    [pytest.param(QuantizedSigmoid, numpy.random.uniform(size=(10, 40, 20)))],
)
def test_activations(quant_activation, values, n_bits, atol):
    """Check range and accuracy of a quantized activation function.

    The quantized outputs must fit in the unsigned n_bits range, and the
    dequantized outputs must be close to the calibrated float outputs.
    """
    q_inputs = QuantizedArray(n_bits, values)
    q_activation = quant_activation(n_bits)
    q_activation.calibrate(values)
    expected_output = q_activation.q_out.values
    q_output = q_activation(q_inputs)
    # All quantized values must lie in [0, 2**n_bits - 1].
    assert numpy.min(q_output.qvalues) >= 0
    assert numpy.max(q_output.qvalues) <= 2 ** n_bits - 1
    # Dequantized values must approximate the calibrated float outputs.
    assert numpy.isclose(q_output.dequant(), expected_output, atol=atol).all()

View File

@@ -0,0 +1,53 @@
"""Tests for the quantized array/tensors."""
import numpy
import pytest
from concrete.quantization import QuantizedArray
# (n_bits, absolute tolerance) pairs: fewer bits -> looser round-trip tolerance.
N_BITS_ATOL_TUPLE_LIST = [
    (32, 1e-2),
    (28, 1e-2),
    (20, 1e-2),
    (16, 1e-1),
    (8, 1.0),
    (4, 1.0),
]


@pytest.mark.parametrize(
    "n_bits, atol",
    [pytest.param(n_bits, atol) for n_bits, atol in N_BITS_ATOL_TUPLE_LIST],
)
@pytest.mark.parametrize("values", [pytest.param(numpy.random.randn(2000))])
def test_quant_dequant_update(values, n_bits, atol):
    """Round-trip (quant -> dequant) and update checks for QuantizedArray."""
    q_arr = QuantizedArray(n_bits, values)
    qvalues = q_arr.quant()
    # Quantized values must lie in [0, 2**n_bits - 1].
    assert numpy.min(qvalues) >= 0
    assert numpy.max(qvalues) <= 2 ** n_bits - 1
    # Dequantization must approximately recover the original float values.
    assert numpy.isclose(q_arr.dequant(), values, atol=atol).all()
    # Exercise the update methods with differently-shaped data.
    new_values = numpy.array([0.3, 0.5, -1.2, -3.4])
    new_qvalues_ = q_arr.update_values(new_values)
    # The stored qvalues must have taken the new shape.
    assert new_qvalues_.shape != qvalues.shape
    new_qvalues = numpy.array([1, 4, 7, 29])
    new_values_updated = q_arr.update_qvalues(new_qvalues)
    # Each update must have visibly changed the stored data.
    assert not numpy.array_equal(new_qvalues, new_qvalues_)
    assert not numpy.array_equal(new_values, new_values_updated)
    # __call__ must return the current qvalues.
    assert numpy.array_equal(q_arr(), new_qvalues)

View File

@@ -0,0 +1,58 @@
"""Tests for the quantized layers."""
import numpy
import pytest
from concrete.quantization import QuantizedArray, QuantizedLinear
# QuantizedLinear is numerically unstable with n_bits > 23.
N_BITS_LIST = [20, 16, 8, 4]


@pytest.mark.parametrize(
    "n_bits",
    [pytest.param(n_bits) for n_bits in N_BITS_LIST],
)
@pytest.mark.parametrize(
    "n_examples, n_features, n_neurons",
    [
        pytest.param(20, 500, 30),
        pytest.param(200, 300, 50),
        pytest.param(10000, 100, 1),
        pytest.param(10, 20, 1),
    ],
)
def test_quantized_linear(n_examples, n_features, n_neurons, n_bits):
    """Test the quantized linear layer on numpy arrays.

    With n_bits >> 0 we expect the dequantized results of the quantized
    linear layer to match a standard (float) linear layer. The layer is
    checked both with and without a bias term.
    """
    inputs = numpy.random.uniform(size=(n_examples, n_features))
    q_inputs = QuantizedArray(n_bits, inputs)
    # Shape of weights: (n_neurons, n_features).
    # (Fixed: the original comment wrongly said (n_examples, n_features, n_neurons).)
    weights = numpy.random.uniform(size=(n_neurons, n_features))
    q_weights = QuantizedArray(n_bits, weights)
    bias = numpy.random.uniform(size=(n_neurons,))
    q_bias = QuantizedArray(n_bits, bias)
    # Run the identical calibrate/compare check with and without the bias,
    # preserving the exact constructor call shapes of the original test.
    for extra_args in ((q_bias,), ()):
        q_linear = QuantizedLinear(n_bits, q_weights, *extra_args)
        # Calibrate the quantized layer on the float inputs.
        q_linear.calibrate(inputs)
        expected_outputs = q_linear.q_out.values
        actual_output = q_linear(q_inputs).dequant()
        assert numpy.isclose(expected_outputs, actual_output, rtol=10 ** -1).all()

View File

@@ -64,19 +64,54 @@ class FC(nn.Module):
"model, input_shape",
[
pytest.param(FC, (100, 32 * 32 * 3)),
pytest.param(CNN, (100, 3, 32, 32), marks=pytest.mark.xfail(strict=True)),
],
)
def test_torch_to_numpy(model, input_shape):
    """Test that a torch model and its NumpyModule conversion agree.

    Also checks that the numpy model gives correct results on a second,
    different input, i.e. that inter-layer dynamics (quantized input and
    activations) do not depend on the first input seen.
    """
    # Define the torch model
    torch_fc_model = model()
    # Create random input
    torch_input_1 = torch.randn(input_shape)
    # Predict with torch model
    torch_predictions = torch_fc_model(torch_input_1).detach().numpy()
    # Create corresponding numpy model
    numpy_fc_model = NumpyModule(torch_fc_model)
    # Torch input to numpy
    numpy_input_1 = torch_input_1.detach().numpy()
    # Predict with numpy model
    numpy_predictions = numpy_fc_model(numpy_input_1)
    # Test: the output of the numpy model has the same shape as the torch model's.
    assert numpy_predictions.shape == torch_predictions.shape
    # Test: predictions from the numpy model are the same as the torch model's.
    # Fixed: rtol was written "10 - 3" (== 7), which made this check vacuous.
    assert numpy.isclose(torch_predictions, numpy_predictions, rtol=10 ** -3).all()
    # Test: dynamics between layers work (quantized input and activations)
    torch_input_2 = torch.randn(input_shape)
    # Make sure both inputs are different
    assert (torch_input_1 != torch_input_2).any()
    # Predict with torch
    torch_predictions = torch_fc_model(torch_input_2).detach().numpy()
    # Torch input to numpy
    numpy_input_2 = torch_input_2.detach().numpy()
    # Numpy predictions using the previous model
    numpy_predictions = numpy_fc_model(numpy_input_2)
    assert numpy.isclose(torch_predictions, numpy_predictions, rtol=10 ** -3).all()
@pytest.mark.parametrize(
    "model, incompatible_layer",
    [pytest.param(CNN, "Conv2d")],
)
def test_raises(model, incompatible_layer):
    """Check that converting a model with an unsupported layer raises ValueError."""
    import re  # local import: this file's top-of-file import block is outside this hunk

    torch_incompatible_model = model()
    expected_errmsg = (
        f"The following module is currently not implemented: {incompatible_layer}. "
        f"Please stick to the available torch modules: "
        f"{', '.join(sorted(module.__name__ for module in NumpyModule.IMPLEMENTED_MODULES))}."
    )
    # pytest.raises treats `match` as a regex; escape the message so
    # metacharacters such as '.' are matched literally.
    with pytest.raises(ValueError, match=re.escape(expected_errmsg)):
        NumpyModule(torch_incompatible_model)