fix: put_back_round

Put rounding back in the quantizers: round with numpy.rint before clipping,
instead of letting astype(int) silently truncate the fractional part.
@@ -55,7 +55,9 @@ class QuantizedActivation(ABC):
         assert self.q_out is not None
         qoutput_activation = qoutput_activation / self.q_out.scale + self.q_out.zero_point
-        qoutput_activation = (qoutput_activation).clip(0, 2 ** self.q_out.n_bits - 1).astype(int)
+        qoutput_activation = (
+            numpy.rint(qoutput_activation).clip(0, 2 ** self.q_out.n_bits - 1).astype(int)
+        )
 
         # TODO find a better way to do the following (see issue #832)
         q_out = copy.copy(self.q_out)
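Without the rounding step, astype(int) drops the fractional part (truncation toward zero), which biases every quantized activation downward by up to a full quantization step. A minimal sketch of the difference, with made-up scale, zero_point and n_bits values standing in for the real self.q_out parameters:

    import numpy

    # Made-up quantization parameters, for illustration only.
    scale, zero_point, n_bits = 0.05, 3, 7

    activation = numpy.array([0.12, 0.34, 1.99])
    q = activation / scale + zero_point                            # [5.4, 9.8, 42.8]

    # Before the fix: astype(int) truncates toward zero.
    truncated = q.clip(0, 2 ** n_bits - 1).astype(int)             # [5, 9, 42]

    # After the fix: numpy.rint rounds to the nearest integer first.
    rounded = numpy.rint(q).clip(0, 2 ** n_bits - 1).astype(int)   # [5, 10, 43]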
@@ -59,7 +59,7 @@ class QuantizedArray:
         qvalues = self.values / scale + zero_point
 
         qvalues = (
-            qvalues.round()
+            numpy.rint(qvalues)
             .clip(-self.offset, 2 ** (self.n_bits) - 1 - self.offset)
             .astype(int)  # Careful this can be very large with high number of bits
         )
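Here the old code already rounded: ndarray.round() and numpy.rint are numerically equivalent for whole-number rounding (both round halves to the nearest even integer), so this hunk is about using one consistent numpy ufunc across the quantizers rather than fixing a truncation bug. A quick illustrative check:

    import numpy

    x = numpy.array([0.5, 1.5, 2.5, -0.5, 3.7])
    assert (x.round() == numpy.rint(x)).all()   # both give [0., 2., 2., -0., 4.]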
@@ -100,8 +100,7 @@ class QuantizedArray:
         """
 
         self.qvalues = (
-            (self.values / self.scale + self.zero_point)
-            .round()
+            numpy.rint(self.values / self.scale + self.zero_point)
             .clip(-self.offset, 2 ** (self.n_bits) - 1 - self.offset)
             .astype(int)
         )
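With a nonzero offset the same pattern maps into a signed integer range: for n_bits = 8 and offset = 2 ** (n_bits - 1) = 128, the clip bounds become [-128, 127]. A standalone sketch of the quantize/dequantize round trip, with assumed parameter values (not the class's actual constructor):

    import numpy

    n_bits, offset = 8, 128        # assumed: offset = 2 ** (n_bits - 1) for a signed range
    scale, zero_point = 0.1, 0

    values = numpy.array([-3.14, 0.0, 2.718])
    qvalues = (
        numpy.rint(values / scale + zero_point)
        .clip(-offset, 2 ** n_bits - 1 - offset)   # i.e. [-128, 127]
        .astype(int)
    )                                              # [-31, 0, 27]

    # Dequantization recovers the inputs to within scale / 2 = 0.05:
    dequant = (qvalues - zero_point) * scale       # [-3.1, 0.0, 2.7]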
@@ -96,7 +96,7 @@ class QuantizedLinear:
         numpy_q_out = m_matmul * numpy_q_out
         numpy_q_out = self.q_out.zero_point + bias_part + numpy_q_out
 
-        numpy_q_out = numpy_q_out.clip(0, 2 ** self.q_out.n_bits - 1).astype(int)
+        numpy_q_out = numpy.rint(numpy_q_out).clip(0, 2 ** self.q_out.n_bits - 1).astype(int)
 
         # TODO find a more intuitive way to do the following (see issue #832)
         # We should be able to reuse q_out quantization parameters
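In the linear layer the integer accumulator is rescaled by m_matmul (typically the combined input/weight/output scale factor) before the output zero point is added, so the value being clipped is fractional and rounding matters again. A rough sketch with invented numbers, bias term omitted for brevity:

    import numpy

    n_bits, out_zero_point = 7, 10
    m_matmul = 0.0123                      # assumed: (s_input * s_weight) / s_output
    acc = numpy.array([812, 2047, 5000])   # made-up integer matmul accumulator

    q_out = m_matmul * acc + out_zero_point                          # [19.99, 35.18, 71.5]
    q_out = numpy.rint(q_out).clip(0, 2 ** n_bits - 1).astype(int)   # [20, 35, 72]
    # Plain .astype(int), as before the fix, would have given [19, 35, 71].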