From 8f6d524c0f4595bb99a8452ec8b446f020fbe6df Mon Sep 17 00:00:00 2001
From: jfrery
Date: Fri, 7 Jan 2022 09:55:44 +0100
Subject: [PATCH] feat: add ref issue 7b

---
 concrete/quantization/post_training.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/concrete/quantization/post_training.py b/concrete/quantization/post_training.py
index 10a61ce5a..05febde1c 100644
--- a/concrete/quantization/post_training.py
+++ b/concrete/quantization/post_training.py
@@ -95,7 +95,7 @@ class PostTrainingAffineQuantization:
                 # compared to its quantized counterpart.
                 # Since this is the last layer and mostly used for classification,
                 # this does not have much impact.
-                # Put back 7 bits when 100% at 7b is achieved.
+                # TODO: Put back 7 bits when 100% at 7b is achieved (see issue #1332).
                 q_layer = QuantizedLinear(6, q_weights, q_bias)
             else:
                 q_layer = QuantizedLinear(self.n_bits, q_weights, q_bias)
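
Context for the trade-off the patched comment documents: a minimal sketch of affine (scale/zero-point) quantization, the scheme that the PostTrainingAffineQuantization class name suggests. The helpers affine_quantize and affine_dequantize below are hypothetical and not the library's API; they only illustrate why capping the last layer at 6 bits instead of 7 coarsens the quantization grid and increases de-quantization error.

import numpy as np

def affine_quantize(values: np.ndarray, n_bits: int):
    """Quantize float values to unsigned integers on n_bits (illustrative only).

    scale and zero_point map the float range [min, max] onto [0, 2**n_bits - 1];
    fewer bits means a coarser grid, hence a larger rounding error when the
    quantized layer is compared to its float counterpart.
    """
    q_min, q_max = 0, 2**n_bits - 1
    v_min, v_max = float(values.min()), float(values.max())
    scale = (v_max - v_min) / (q_max - q_min) if v_max > v_min else 1.0
    zero_point = round(q_min - v_min / scale)
    q_values = np.clip(np.round(values / scale) + zero_point, q_min, q_max).astype(np.int64)
    return q_values, scale, zero_point

def affine_dequantize(q_values: np.ndarray, scale: float, zero_point: int):
    """Map quantized integers back to approximate float values."""
    return scale * (q_values.astype(np.float64) - zero_point)

# Rough usage: the reconstruction error shrinks as n_bits grows, which is why
# issue #1332 tracks restoring 7 bits for the last layer once 100% accuracy
# at 7 bits is achieved.
weights = np.random.randn(16, 8)
for bits in (6, 7):
    q, s, z = affine_quantize(weights, bits)
    err = np.abs(weights - affine_dequantize(q, s, z)).max()
    print(f"{bits} bits -> max de-quantization error {err:.4f}")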