refactor: remove ml related functionality, small bug fixes
Makefile
@@ -112,16 +112,8 @@ mypy_ns:
mypy_test:
	find ./tests/ -name "*.py" | xargs poetry run mypy --ignore-missing-imports

.PHONY: mypy_concrete_benchmark # Run mypy on concrete benchmark files
mypy_concrete_benchmark:
	find ./benchmarks/concrete/ -name "*.py" | xargs poetry run mypy --ignore-missing-imports

.PHONY: mypy_ml_benchmark # Run mypy on ml benchmark files
mypy_ml_benchmark:
	find ./benchmarks/ml/ -name "*.py" | xargs poetry run mypy --ignore-missing-imports

.PHONY: mypy_benchmark # Run mypy on benchmark files
mypy_benchmark: mypy_concrete_benchmark mypy_ml_benchmark
	find ./benchmarks/ -name "*.py" | xargs poetry run mypy --ignore-missing-imports

.PHONY: mypy_script # Run mypy on scripts
mypy_script:
@@ -216,17 +208,10 @@ finalize_nb:
pytest_nb:
	find docs -name "*.ipynb" | grep -v _build | grep -v .ipynb_checkpoints | xargs poetry run pytest -Wignore --nbmake

.PHONY: concrete_benchmark # Launch concrete benchmarks
concrete_benchmark:
.PHONY: benchmark # Launch concrete benchmarks
benchmark:
	rm -rf progress.json && \
	for script in benchmarks/concrete/*.py; do \
		poetry run python $$script; \
	done

.PHONY: ml_benchmark # Launch ml benchmarks
ml_benchmark:
	rm -rf progress.json && \
	for script in benchmarks/ml/*.py; do \
	for script in benchmarks/*.py; do \
		poetry run python $$script; \
	done
@@ -1,16 +0,0 @@
import concrete.numpy as hnp
from concrete.numpy import compile as compile_

# This is only for benchmarks to speed up compilation times
# pylint: disable=protected-access
compile_._COMPILE_FHE_INSECURE_KEY_CACHE_DIR = "/tmp/keycache"
# pylint: enable=protected-access

BENCHMARK_CONFIGURATION = hnp.CompilationConfiguration(
    check_every_input_in_inputset=True,
    dump_artifacts_on_unexpected_failures=True,
    enable_topological_optimizations=True,
    enable_unsafe_features=True,
    treat_warnings_as_errors=True,
    use_insecure_key_cache=True,
)
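
For context, the benchmark scripts below consume this object by passing it to a quantized module's compile step; a minimal sketch (mirroring the glm.py usage further down, where q_glm, q_test_data and q_sample are defined):

    engine = q_glm.compile(
        q_test_data,
        BENCHMARK_CONFIGURATION,
        show_mlir=False,
    )
    q_pred = engine.run(q_sample)  # q_sample: one quantized input row as uint8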
@@ -1,282 +0,0 @@
from copy import deepcopy
from typing import Any, Dict

import numpy as np
import progress
from common import BENCHMARK_CONFIGURATION
from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.decomposition import PCA
from sklearn.linear_model import PoissonRegressor
from sklearn.metrics import mean_poisson_deviance
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
    FunctionTransformer,
    KBinsDiscretizer,
    OneHotEncoder,
    StandardScaler,
)
from tqdm import tqdm

from concrete.quantization import QuantizedArray, QuantizedLinear, QuantizedModule
from concrete.quantization.quantized_activations import QuantizedActivation


class QuantizedExp(QuantizedActivation):
    """
    Quantized Exponential function

    This class will build a quantized lookup table for the exp function
    applied to input calibration data
    """

    def calibrate(self, x: np.ndarray):
        self.q_out = QuantizedArray(self.n_bits, np.exp(x))

    def __call__(self, q_input: QuantizedArray) -> QuantizedArray:
        """Process the forward pass of the exponential.

        Args:
            q_input (QuantizedArray): Quantized input.

        Returns:
            q_out (QuantizedArray): Quantized output.
        """

        quant_exp = np.exp(self.dequant_input(q_input))

        q_out = self.quant_output(quant_exp)
        return q_out
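
# Note (a sketch, not part of the original file): with n_bits of quantized
# input there are only 2 ** n_bits possible values of q_input.qvalues, so the
# dequantize -> np.exp -> requantize chain above can be tabulated once,
# e.g. (illustrative names):
#     table = [exp_act.quant_output(np.exp(v)) for v in all_dequantized_levels]
# and evaluated in FHE as a single table lookup, which is the "quantized
# lookup table" the docstring refers to.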


class QuantizedGLM(QuantizedModule):
    """
    Quantized Generalized Linear Model

    Building on top of QuantizedModule, this class will chain together a linear transformation
    and an inverse-link function
    """

    def __init__(self, n_bits, sklearn_model, calibration_data) -> None:
        self.n_bits = n_bits

        # We need to calibrate to a sufficiently low number of bits
        # so that the output of the Linear layer (w . x + b)
        # does not exceed 7 bits
        self.q_calibration_data = QuantizedArray(self.n_bits, calibration_data)

        # Quantize the weights and create the quantized linear layer
        q_weights = QuantizedArray(self.n_bits, np.expand_dims(sklearn_model.coef_, 1))
        q_bias = QuantizedArray(self.n_bits, sklearn_model.intercept_)
        q_layer = QuantizedLinear(self.n_bits, q_weights, q_bias)

        # Store quantized layers
        quant_layers_dict: Dict[str, Any] = {}

        # Calibrate the linear layer and obtain calibration_data for the next layers
        calibration_data = self._calibrate_and_store_layers_activation(
            "linear", q_layer, calibration_data, quant_layers_dict
        )

        # Add the inverse-link for inference.
        # This function needs to be quantized since it's computed in FHE.
        # However, we can use 7 bits of output since, in this case,
        # the result of the inverse-link is not processed by any further layers.
        # Seven bits is the maximum precision but this could be lowered to improve speed
        # at the possible expense of higher deviance of the regressor
        q_exp = QuantizedExp(n_bits=7)

        # Now calibrate the inverse-link function with the linear layer's output data
        calibration_data = self._calibrate_and_store_layers_activation(
            "invlink", q_exp, calibration_data, quant_layers_dict
        )

        # Finally construct our Module using the quantized layers
        super().__init__(quant_layers_dict)

    def _calibrate_and_store_layers_activation(
        self, name, q_function, calibration_data, quant_layers_dict
    ):
        # Calibrate the output of the layer
        q_function.calibrate(calibration_data)
        # Store the learned quantized layer
        quant_layers_dict[name] = q_function
        # Create new calibration data (output of the previous layer)
        q_calibration_data = QuantizedArray(self.n_bits, calibration_data)
        # Dequantize to have the value in clear and ready for next calibration
        return q_function(q_calibration_data).dequant()

    def quantize_input(self, x):
        q_input_arr = deepcopy(self.q_calibration_data)
        q_input_arr.update_values(x)
        return q_input_arr


def score_estimator(y_pred, y_gt, gt_weight):
    """Score an estimator on the test set."""

    y_pred = np.squeeze(y_pred)
    # Ignore non-positive predictions, as they are invalid for
    # the Poisson deviance. We want to issue a warning if for some reason
    # (e.g. FHE noise, bad quantization, user error), the regressor predictions are negative

    # Find all strictly positive values
    mask = y_pred > 0
    # If any non-positive values are found, issue a warning
    if (~mask).any():
        n_masked, n_samples = (~mask).sum(), mask.shape[0]
        print(
            "WARNING: Estimator yields invalid, non-positive predictions "
            f"for {n_masked} samples out of {n_samples}. These predictions "
            "are ignored when computing the Poisson deviance."
        )

    # Compute the Poisson Deviance for all valid values
    dev = mean_poisson_deviance(
        y_gt[mask],
        y_pred[mask],
        sample_weight=gt_weight[mask],
    )
    print(f"mean Poisson deviance: {dev}")
    return dev
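
# For reference (a sketch of the math, not part of the original file):
# mean_poisson_deviance computes the weighted mean Poisson deviance
#     D(y, y_hat) = (1 / sum_i w_i) * sum_i w_i * 2 * (y_i * log(y_i / y_hat_i) - y_i + y_hat_i)
# which is undefined for y_hat_i <= 0, hence the masking of non-positive
# predictions above before calling it.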


def score_sklearn_estimator(estimator, df_test):
    """A wrapper to score a sklearn pipeline on a dataframe"""
    return score_estimator(estimator.predict(df_test), df_test["Frequency"], df_test["Exposure"])


def score_concrete_glm_estimator(poisson_glm_pca, q_glm, df_test):
    """A wrapper to score QuantizedGLM on a dataframe, transforming the dataframe using
    a sklearn pipeline
    """
    test_data = poisson_glm_pca["pca"].transform(poisson_glm_pca["preprocessor"].transform(df_test))
    q_test_data = q_glm.quantize_input(test_data)
    y_pred = q_glm.forward_and_dequant(q_test_data)
    return score_estimator(y_pred, df_test["Frequency"], df_test["Exposure"])


@progress.track([{"id": "glm", "name": "Generalized Linear Model"}])
def main():
    """
    This is our main benchmark function. It gets a dataset, trains a GLM model,
    then trains a GLM model on PCA reduced features, a QuantizedGLM model
    and finally compiles the QuantizedGLM to FHE. All models are evaluated and Poisson deviance
    is computed to determine the increase in deviance from quantization and to verify
    that the FHE compiled model achieves the same deviance as the quantized model in the 'clear'
    """

    df, _ = fetch_openml(
        data_id=41214, as_frame=True, cache=True, data_home="~/.cache/sklean", return_X_y=True
    )
    df = df.head(50000)

    df["Frequency"] = df["ClaimNb"] / df["Exposure"]

    log_scale_transformer = make_pipeline(
        FunctionTransformer(np.log, validate=False), StandardScaler()
    )

    linear_model_preprocessor = ColumnTransformer(
        [
            ("passthrough_numeric", "passthrough", ["BonusMalus"]),
            ("binned_numeric", KBinsDiscretizer(n_bins=10), ["VehAge", "DrivAge"]),
            ("log_scaled_numeric", log_scale_transformer, ["Density"]),
            (
                "onehot_categorical",
                OneHotEncoder(sparse=False),
                ["VehBrand", "VehPower", "VehGas", "Region", "Area"],
            ),
        ],
        remainder="drop",
    )

    df_train, df_test = train_test_split(df, test_size=0.2, random_state=0)
    df_calib, df_test = train_test_split(df_test, test_size=100, random_state=0)

    poisson_glm = Pipeline(
        [
            ("preprocessor", linear_model_preprocessor),
            ("regressor", PoissonRegressor(alpha=1e-12, max_iter=300)),
        ]
    )

    poisson_glm_pca = Pipeline(
        [
            ("preprocessor", linear_model_preprocessor),
            ("pca", PCA(n_components=15, whiten=True)),
            ("regressor", PoissonRegressor(alpha=1e-12, max_iter=300)),
        ]
    )

    poisson_glm.fit(df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"])

    poisson_glm_pca.fit(
        df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"]
    )

    # Let's check what prediction performance we lose due to PCA
    print("PoissonRegressor evaluation:")
    _ = score_sklearn_estimator(poisson_glm, df_test)
    print("PoissonRegressor+PCA evaluation:")
    _ = score_sklearn_estimator(poisson_glm_pca, df_test)

    # Now, get calibration data from the held out set
    calib_data = poisson_glm_pca["pca"].transform(
        poisson_glm_pca["preprocessor"].transform(df_calib)
    )

    # Let's see how performance decreases with bit-depth.
    # This is just a test of our quantized model, not in FHE
    for n_bits in [28, 16, 6, 5, 4, 3, 2]:
        q_glm = QuantizedGLM(n_bits, poisson_glm_pca["regressor"], calib_data)
        print(f"{n_bits}b Quantized PoissonRegressor evaluation:")
        score_concrete_glm_estimator(poisson_glm_pca, q_glm, df_test)

    q_glm = QuantizedGLM(2, poisson_glm_pca["regressor"], calib_data)
    dev_pca_quantized = score_concrete_glm_estimator(poisson_glm_pca, q_glm, df_test)
    test_data = poisson_glm_pca["pca"].transform(poisson_glm_pca["preprocessor"].transform(df_test))
    q_test_data = q_glm.quantize_input(test_data)

    engine = q_glm.compile(
        q_test_data,
        BENCHMARK_CONFIGURATION,
        show_mlir=False,
    )

    y_pred_fhe = np.zeros((test_data.shape[0],), np.float32)
    for i, test_sample in enumerate(tqdm(q_test_data.qvalues)):
        with progress.measure(id="evaluation-time-ms", label="Evaluation Time (ms)"):
            q_sample = np.expand_dims(test_sample, 1).transpose([1, 0]).astype(np.uint8)
            q_pred_fhe = engine.run(q_sample)
        y_pred_fhe[i] = q_glm.dequantize_output(q_pred_fhe)

    dev_pca_quantized_fhe = score_estimator(y_pred_fhe, df_test["Frequency"], df_test["Exposure"])

    if dev_pca_quantized_fhe > 0.001:
        difference = abs(dev_pca_quantized - dev_pca_quantized_fhe) * 100 / dev_pca_quantized_fhe
    else:
        difference = 0

    print(f"Quantized deviance: {dev_pca_quantized}")
    progress.measure(
        id="non-homomorphic-loss",
        label="Non Homomorphic Loss",
        value=dev_pca_quantized,
    )

    print(f"FHE Quantized deviance: {dev_pca_quantized_fhe}")
    progress.measure(
        id="homomorphic-loss",
        label="Homomorphic Loss",
        value=dev_pca_quantized_fhe,
    )

    print(f"Percentage difference: {difference}%")
    progress.measure(
        id="relative-loss-difference-percent",
        label="Relative Loss Difference (%)",
        value=difference,
        alert=(">", 7.5),
    )
@@ -1,191 +0,0 @@
from copy import deepcopy
from typing import Any, Dict

import numpy as np
import progress
from common import BENCHMARK_CONFIGURATION
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from tqdm import tqdm

from concrete.quantization import QuantizedArray, QuantizedLinear, QuantizedModule


class QuantizedLinearRegression(QuantizedModule):
    """
    Quantized Generalized Linear Model
    Building on top of QuantizedModule, implement a quantized linear transformation (w.x + b)
    """

    @staticmethod
    def from_sklearn(sklearn_model, calibration_data):
        """Create a Quantized Linear Regression initialized from a sklearn trained model"""
        weights = np.expand_dims(sklearn_model.coef_, 1)
        bias = sklearn_model.intercept_
        # Quantize with 6 bits for input data, 1 for weights, 1 for the bias and 6 for the output
        return QuantizedLinearRegression(6, 1, 1, 6, weights, bias, calibration_data)

    def __init__(self, q_bits, w_bits, b_bits, out_bits, weights, bias, calibration_data) -> None:
        """
        Create the linear regression with different quantization bit precisions:

        Quantization Parameters - Number of bits:
            q_bits (int): bits for input data, ensuring that the number of bits of
                the w . x + b operation does not exceed 7 for the calibration data
            w_bits (int): bits for weights: in the case of a univariate regression this
                can be 1
            b_bits (int): bits for bias (this is a single value so a single bit is enough)
            out_bits (int): bits for the result of the linear transformation (w.x + b).
                In our case since the result of the linear transformation is
                directly decrypted we can use the maximum of 7 bits

        Other parameters:
            weights: a numpy nd-array of weights (Nxd) where d is the data dimensionality
            bias: a numpy scalar
            calibration_data: a numpy nd-array of data (Nxd)
        """
        self.n_bits = out_bits

        # We need to calibrate to a sufficiently low number of bits
        # so that the output of the Linear layer (w . x + b)
        # does not exceed 7 bits
        self.q_calibration_data = QuantizedArray(q_bits, calibration_data)

        # Quantize the weights and create the quantized linear layer
        q_weights = QuantizedArray(w_bits, weights)
        q_bias = QuantizedArray(b_bits, bias)
        q_layer = QuantizedLinear(out_bits, q_weights, q_bias)

        # Store quantized layers
        quant_layers_dict: Dict[str, Any] = {}

        # Calibrate the linear layer and obtain calibration_data for the next layers
        calibration_data = self._calibrate_and_store_layers_activation(
            "linear", q_layer, calibration_data, quant_layers_dict
        )

        # Finally construct our Module using the quantized layers
        super().__init__(quant_layers_dict)

    def _calibrate_and_store_layers_activation(
        self, name, q_function, calibration_data, quant_layers_dict
    ):
        """
        This function calibrates a layer of a quantized module (e.g. linear, inverse-link,
        activation, etc) by looking at the input data, then computes the output of the quantized
        version of the layer to be used as input to the following layers
        """

        # Calibrate the output of the layer
        q_function.calibrate(calibration_data)
        # Store the learned quantized layer
        quant_layers_dict[name] = q_function
        # Create new calibration data (output of the previous layer)
        q_calibration_data = QuantizedArray(self.n_bits, calibration_data)
        # Dequantize to have the value in clear and ready for next calibration
        return q_function(q_calibration_data).dequant()

    def quantize_input(self, x):
        """Quantize an input set with the quantization parameters determined from calibration"""
        q_input_arr = deepcopy(self.q_calibration_data)
        q_input_arr.update_values(x)
        return q_input_arr


@progress.track([{"id": "linear-regression", "name": "Linear Regression"}])
def main():
    """
    Our linear regression benchmark. Use some synthetic data to train a regression model,
    then fit a model with sklearn. We quantize the sklearn model and compile it to FHE.
    We compute the training loss for the quantized and FHE models and compare them. We also
    predict on a test set and compare FHE results to predictions from the quantized model
    """

    X, y, _ = make_regression(
        n_samples=200, n_features=1, n_targets=1, bias=5.0, noise=30.0, random_state=42, coef=True
    )

    # Split it into train/test and sort the sets for nicer visualization
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)

    sidx = np.argsort(np.squeeze(x_train))
    x_train = x_train[sidx, :]
    y_train = y_train[sidx]

    sidx = np.argsort(np.squeeze(x_test))
    x_test = x_test[sidx, :]
    y_test = y_test[sidx]

    # Train a linear regression with sklearn and predict on the test data
    linreg = LinearRegression()
    linreg.fit(x_train, y_train)

    # Calibrate the model for quantization using both training and test data
    calib_data = X  # np.vstack((x_train, x_test))
    q_linreg = QuantizedLinearRegression.from_sklearn(linreg, calib_data)

    # Compile the quantized model to FHE
    engine = q_linreg.compile(
        q_linreg.quantize_input(calib_data),
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )

    # Measure test error using the clear-sklearn, the clear-quantized and the FHE quantized model
    # as R^2 coefficient for the test data

    # First, predict using the sklearn classifier
    y_pred = linreg.predict(x_test)

    # Now that the model is quantized, predict on the test set
    x_test_q = q_linreg.quantize_input(x_test)
    q_y_pred = q_linreg.forward_and_dequant(x_test_q)

    # Now predict using the FHE quantized model on the testing set
    y_test_pred_fhe = np.zeros_like(x_test)

    for i, x_i in enumerate(tqdm(x_test_q.qvalues)):
        q_sample = np.expand_dims(x_i, 1).transpose([1, 0]).astype(np.uint8)
        with progress.measure(id="evaluation-time-ms", label="Evaluation Time (ms)"):
            q_pred_fhe = engine.run(q_sample)
        y_test_pred_fhe[i] = q_linreg.dequantize_output(q_pred_fhe)

    # Measure the error for the three versions of the classifier
    sklearn_r2 = r2_score(y_pred, y_test)
    non_homomorphic_test_error = r2_score(q_y_pred, y_test)
    homomorphic_test_error = r2_score(y_test_pred_fhe, y_test)

    # Measure the error of the FHE quantized model w.r.t the clear quantized model
    difference = (
        abs(homomorphic_test_error - non_homomorphic_test_error) * 100 / non_homomorphic_test_error
    )

    print(f"Sklearn R^2: {sklearn_r2:.4f}")
    progress.measure(
        id="sklearn-r2",
        label="Sklearn R^2",
        value=sklearn_r2,
    )

    print(f"Non Homomorphic R^2: {non_homomorphic_test_error:.4f}")
    progress.measure(
        id="non-homomorphic-r2",
        label="Non Homomorphic R^2",
        value=non_homomorphic_test_error,
    )

    print(f"Homomorphic R^2: {homomorphic_test_error:.4f}")
    progress.measure(
        id="homomorphic-r2",
        label="Homomorphic R^2",
        value=homomorphic_test_error,
    )

    print(f"Relative Loss Difference (%): {difference:.2f}%")
    progress.measure(
        id="relative-loss-difference-percent",
        label="Relative Loss Difference (%)",
        value=difference,
        alert=(">", 7.5),
    )
@@ -1,230 +0,0 @@
from copy import deepcopy
from typing import Any, Dict

import numpy as np
import progress
from common import BENCHMARK_CONFIGURATION
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from tqdm import tqdm

from concrete.quantization import QuantizedArray, QuantizedLinear, QuantizedModule, QuantizedSigmoid


class QuantizedLogisticRegression(QuantizedModule):
    """
    Quantized Logistic Regression
    Building on top of QuantizedModule, this class will chain together a linear transformation
    and an inverse-link function, in this case the logistic function
    """

    @staticmethod
    def from_sklearn(sklearn_model, calibration_data):
        """Create a Quantized Logistic Regression initialized from a sklearn trained model"""
        if sklearn_model.coef_.ndim == 1:
            weights = np.expand_dims(sklearn_model.coef_, 1)
        else:
            weights = sklearn_model.coef_.transpose()

        bias = sklearn_model.intercept_

        # In our case we have two data dimensions, so the weights precision needs to be 2 bits,
        # as for now we need the quantized values to be greater than zero for weights.
        # Thus, to ensure a maximum of 7 bits in the output of the linear transformation, we choose
        # 4 bits for the data and the minimum of 1 for the bias
        return QuantizedLogisticRegression(4, 2, 1, 6, weights, bias, calibration_data)
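
        # A rough worked check of the bit budget above (a sketch, not part of
        # the original file, assuming unsigned quantized values): with q_bits=4
        # the inputs are at most 2**4 - 1 = 15, with w_bits=2 the weights are at
        # most 2**2 - 1 = 3, and there are d=2 features, so
        #     max(w . x) <= 2 * 15 * 3 = 90 < 2**7 - 1 = 127,
        # i.e. the linear output stays within the 7-bit limit mentioned above.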

    def __init__(self, q_bits, w_bits, b_bits, out_bits, weights, bias, calibration_data) -> None:
        """
        Create the Logistic regression with different quantization bit precisions:

        Quantization Parameters - Number of bits:
            q_bits (int): bits for input data, ensuring that the number of bits of
                the w . x + b operation does not exceed 7 for the calibration data
            w_bits (int): bits for weights: in the case of a univariate regression this
                can be 1
            b_bits (int): bits for bias (this is a single value so a single bit is enough)
            out_bits (int): bits for the result of the linear transformation (w.x + b).
                In the case of Logistic Regression the result of the linear
                transformation is input to a univariate inverse-link function, so
                this value can be 7

        Other parameters:
            weights: a numpy nd-array of weights (Nxd) where d is the data dimensionality
            bias: a numpy scalar
            calibration_data: a numpy nd-array of data (Nxd)
        """
        self.n_bits = out_bits

        # We need to calibrate to a sufficiently low number of bits
        # so that the output of the Linear layer (w . x + b)
        # does not exceed 7 bits
        self.q_calibration_data = QuantizedArray(q_bits, calibration_data)

        # Quantize the weights and create the quantized linear layer
        q_weights = QuantizedArray(w_bits, weights)
        q_bias = QuantizedArray(b_bits, bias)
        q_layer = QuantizedLinear(out_bits, q_weights, q_bias)

        # Store quantized layers
        quant_layers_dict: Dict[str, Any] = {}

        # Calibrate the linear layer and obtain calibration_data for the next layers
        calibration_data = self._calibrate_and_store_layers_activation(
            "linear", q_layer, calibration_data, quant_layers_dict
        )

        # Add the inverse-link for inference.
        # This needs to be quantized since it's computed in FHE,
        # but we can use 7 bits of output since, in this case,
        # the result of the inverse-link is not processed by any further layers.
        # Seven bits is the maximum precision but this could be lowered to improve speed
        # at the possible expense of higher deviance of the regressor
        q_logit = QuantizedSigmoid(n_bits=7)

        # Now calibrate the inverse-link function with the linear layer's output data
        calibration_data = self._calibrate_and_store_layers_activation(
            "invlink", q_logit, calibration_data, quant_layers_dict
        )

        # Finally construct our Module using the quantized layers
        super().__init__(quant_layers_dict)

    def _calibrate_and_store_layers_activation(
        self, name, q_function, calibration_data, quant_layers_dict
    ):
        """
        This function calibrates a layer of a quantized module (e.g. linear, inverse-link,
        activation, etc) by looking at the input data, then computes the output of the quantized
        version of the layer to be used as input to the following layers
        """

        # Calibrate the output of the layer
        q_function.calibrate(calibration_data)
        # Store the learned quantized layer
        quant_layers_dict[name] = q_function
        # Create new calibration data (output of the previous layer)
        q_calibration_data = QuantizedArray(self.n_bits, calibration_data)
        # Dequantize to have the value in clear and ready for next calibration
        return q_function(q_calibration_data).dequant()

    def quantize_input(self, x):
        q_input_arr = deepcopy(self.q_calibration_data)
        q_input_arr.update_values(x)
        return q_input_arr


@progress.track([{"id": "logistic-regression", "name": "Logistic Regression"}])
def main():
    """Main benchmark function: generate some synthetic data for two class classification,
    split train-test, train a sklearn classifier, calibrate and quantize it on the whole dataset
    then compile it to FHE. Test the three versions of the classifier on the test set and
    report accuracy"""

    # Generate some data with a fixed seed
    X, y = make_classification(
        n_features=2,
        n_redundant=0,
        n_informative=2,
        random_state=2,
        n_clusters_per_class=1,
        n_samples=100,
    )

    # Scale the data randomly, fixing seeds for reproducibility
    rng = RandomState(2)
    X += 2 * rng.uniform(size=X.shape)

    # Split it into train/test
    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)

    # Train a logistic regression with sklearn on the training set
    logreg = LogisticRegression()
    logreg.fit(x_train, y_train)

    # Calibrate the model for quantization using both training and test data
    calib_data = X
    q_logreg = QuantizedLogisticRegression.from_sklearn(logreg, calib_data)

    # Now, we can compile our model to FHE, taking as possible input set all of our dataset
    engine = q_logreg.compile(
        q_logreg.quantize_input(X),
        compilation_configuration=BENCHMARK_CONFIGURATION,
    )

    # Start classifier evaluation

    # Test the original classifier
    y_pred_test = np.asarray(logreg.predict(x_test))

    # Now that the model is quantized, predict on the test set
    x_test_q = q_logreg.quantize_input(x_test)
    q_y_score_test = q_logreg.forward_and_dequant(x_test_q)
    q_y_pred_test = (q_y_score_test > 0.5).astype(np.int32)

    non_homomorphic_correct = 0
    homomorphic_correct = 0

    # Track the samples that are wrongly classified due to quantization issues
    q_wrong_predictions = np.zeros((0, 2), dtype=X.dtype)

    # Predict the FHE quantized classifier probabilities on the test set.
    # Compute FHE quantized accuracy, clear-quantized accuracy and
    # keep track of samples wrongly classified due to quantization
    for i, x_i in enumerate(tqdm(x_test_q.qvalues)):
        y_i = y_test[i]

        fhe_in_sample = np.expand_dims(x_i, 1).transpose([1, 0]).astype(np.uint8)

        with progress.measure(id="evaluation-time-ms", label="Evaluation Time (ms)"):
            q_pred_fhe = engine.run(fhe_in_sample)

        y_score_fhe = q_logreg.dequantize_output(q_pred_fhe)
        homomorphic_prediction = (y_score_fhe > 0.5).astype(np.int32)

        non_homomorphic_prediction = q_y_pred_test[i]
        if non_homomorphic_prediction == y_i:
            non_homomorphic_correct += 1
        elif y_pred_test[i] == y_i:
            # If this was a correct prediction with the clear-sklearn classifier
            q_wrong_predictions = np.vstack((q_wrong_predictions, x_test[i, :]))

        if homomorphic_prediction == y_i:
            homomorphic_correct += 1

    # Aggregate accuracies for all the versions of the classifier
    sklearn_acc = np.sum(y_pred_test == y_test) / len(y_test) * 100
    non_homomorphic_accuracy = (non_homomorphic_correct / len(y_test)) * 100
    homomorphic_accuracy = (homomorphic_correct / len(y_test)) * 100
    difference = abs(homomorphic_accuracy - non_homomorphic_accuracy)

    print(f"Sklearn Accuracy (%): {sklearn_acc:.4f}")
    progress.measure(
        id="sklearn-accuracy-percent",
        label="Sklearn Accuracy (%)",
        value=sklearn_acc,
    )

    print(f"Non Homomorphic Accuracy (%): {non_homomorphic_accuracy:.4f}")
    progress.measure(
        id="non-homomorphic-accuracy-percent",
        label="Non Homomorphic Accuracy (%)",
        value=non_homomorphic_accuracy,
    )

    print(f"Homomorphic Accuracy (%): {homomorphic_accuracy:.4f}")
    progress.measure(
        id="homomorphic-accuracy-percent",
        label="Homomorphic Accuracy (%)",
        value=homomorphic_accuracy,
    )

    print(f"Relative Accuracy Difference (%): {difference:.2f}%")
    progress.measure(
        id="relative-accuracy-difference-percent",
        label="Relative Accuracy Difference (%)",
        value=difference,
        alert=(">", 2.0),
    )
@@ -1,7 +1,7 @@
import random

import numpy as np
import progress
import py_progress_tracker as progress
from common import BENCHMARK_CONFIGURATION

import concrete.numpy as hnp
@@ -9,7 +9,6 @@ from typing import Any, Callable, Dict, Optional, Union

import networkx as nx
from loguru import logger
from PIL import Image

from ..debugging import assert_true, draw_graph, format_operation_graph
from ..operator_graph import OPGraph
@@ -27,7 +26,7 @@ class CompilationArtifacts:
    source_code_of_the_function_to_compile: Optional[str]
    parameters_of_the_function_to_compile: Dict[str, str]

    drawings_of_operation_graphs: Dict[str, Image.Image]
    drawings_of_operation_graphs: Dict[str, str]
    textual_representations_of_operation_graphs: Dict[str, str]

    final_operation_graph: Optional[OPGraph]
@@ -1,6 +0,0 @@
"""Modules for quantization."""
from .post_training import PostTrainingAffineQuantization
from .quantized_activations import QuantizedReLU6, QuantizedSigmoid
from .quantized_array import QuantizedArray
from .quantized_layers import QuantizedLinear
from .quantized_module import QuantizedModule
@@ -1,123 +0,0 @@
"""Post Training Quantization methods."""

import numpy
from torch import nn

from ..torch import NumpyModule
from .quantized_activations import QuantizedReLU6, QuantizedSigmoid
from .quantized_array import QuantizedArray
from .quantized_layers import QuantizedLinear
from .quantized_module import QuantizedModule


class PostTrainingAffineQuantization:
    """Post-training Affine Quantization."""

    IMPLEMENTED_MODULES = {nn.Linear, nn.Sigmoid, nn.ReLU6}

    quant_layers_dict: dict
    n_bits: int
    quant_params: dict
    numpy_model: NumpyModule
    is_signed: bool

    def __init__(self, n_bits: int, numpy_model: NumpyModule, is_signed: bool = False):
        """Create the quantized version of a numpy module.

        Args:
            n_bits (int): Number of bits to quantize the model. Currently this
                n_bits will be used for all activations/inputs/weights
            numpy_model (NumpyModule): Model in numpy.
            is_signed: Whether the weights of the layers can be signed.
                Currently, only the weights can be signed.

        Returns:
            QuantizedModule: A quantized version of the numpy model.
        """
        self.quant_layers_dict = {}
        self.n_bits = n_bits
        self.quant_params = {}
        self.numpy_model = numpy_model
        self.is_signed = is_signed

    def quantize_module(self, calibration_data: numpy.ndarray) -> QuantizedModule:
        """Quantize numpy module.

        Following https://arxiv.org/abs/1712.05877 guidelines.

        Args:
            calibration_data (numpy.ndarray): Data that will be used to compute the bounds,
                scales and zero point values for every quantized object.

        Returns:
            QuantizedModule: Quantized numpy module
        """
        # First transform all parameters to their quantized version
        self._quantize_params()
        # Quantize and calibrate each output layer/activation
        self._quantize_layers(calibration_data=calibration_data)
        # Create quantized module from self.quant_layers_dict
        return QuantizedModule(self.quant_layers_dict)

    def _quantize_params(self):
        """Transform all floating point parameters to integers."""

        for name, params in self.numpy_model.numpy_module_dict.items():
            self.quant_params[name] = QuantizedArray(self.n_bits, params, self.is_signed)

    def _calibrate_layers_activation(self, name, q_function, calibration_data):
        # Calibrate the output of the layer
        q_function.calibrate(calibration_data)
        # Store the learned quantized layer
        self.quant_layers_dict[name] = q_function
        # Create new calibration data (output of the previous layer)
        q_calibration_data = QuantizedArray(self.n_bits, calibration_data)
        # Dequantize to have the value in clear and ready for next calibration
        return q_function(q_calibration_data).dequant()

    def _quantize_layers(self, calibration_data: numpy.ndarray):
        """Compute all parameters for the static post-training quantization.

        Does a forward pass over a batch of data and computes all
        quantization parameters for activations and layers.
        """
        for name, layer in self.numpy_model.torch_model.named_children():

            if isinstance(layer, nn.Linear):
                # Create a QuantizedLinear layer
                q_weights = self.quant_params[f"{name}.weight"]
                q_bias = self.quant_params[f"{name}.bias"]
                # Check if layer is the last layer of the model
                if name == list(self.numpy_model.torch_model.named_children())[-1][0]:
                    # If last layer, we can use 7 bits (maximum allowed) of precision.
                    # However, 6 bits is currently used to allow 100% FHE precision
                    # compared to its quantized counterpart.
                    # Since this is the last layer and mostly used for classification,
                    # this does not have much impact.
                    # TODO: Put back 7 bits when 100% at 7b is achieved (see issue #1332).
                    q_layer = QuantizedLinear(numpy.maximum(6, self.n_bits), q_weights, q_bias)
                else:
                    q_layer = QuantizedLinear(self.n_bits, q_weights, q_bias)
                # Calibrate and get new calibration_data for next layer/activation
                calibration_data = self._calibrate_layers_activation(
                    name, q_layer, calibration_data
                )
            elif isinstance(layer, nn.Sigmoid):
                # Create a new quantized layer (based on type(layer))
                q_sigmoid = QuantizedSigmoid(n_bits=self.n_bits)
                calibration_data = self._calibrate_layers_activation(
                    name, q_sigmoid, calibration_data
                )
            elif isinstance(layer, nn.ReLU6):
                # Create a new quantized layer (based on type(layer))
                q_relu = QuantizedReLU6(n_bits=self.n_bits)
                calibration_data = self._calibrate_layers_activation(name, q_relu, calibration_data)
            else:  # pragma: no cover
                # If we find a layer that has not been implemented we throw an error
                hf_m_names = sorted(module.__name__ for module in self.IMPLEMENTED_MODULES)
                raise ValueError(
                    f"The following module is currently not implemented: {type(layer).__name__}. "
                    f"Please stick to the available quantized modules: "
                    f"{', '.join(hf_m_names)}."
                )
@@ -1,113 +0,0 @@
"""Quantized activation functions."""
import copy
from abc import ABC, abstractmethod
from typing import Optional

import numpy

from .quantized_array import QuantizedArray


class QuantizedActivation(ABC):
    """Base class for quantized activation function."""

    q_out: Optional[QuantizedArray]

    def __init__(self, n_bits) -> None:
        self.n_bits = n_bits
        self.q_out = None

    @abstractmethod
    def __call__(self, q_input: QuantizedArray) -> QuantizedArray:
        """Execute the forward pass."""

    @abstractmethod
    def calibrate(self, x: numpy.ndarray) -> None:
        """Create corresponding QuantizedArray for the output of the activation function.

        Args:
            x (numpy.ndarray): Inputs.
        """

    @staticmethod
    def dequant_input(q_input: QuantizedArray) -> numpy.ndarray:
        """Dequantize the input of the activation function.

        Args:
            q_input (QuantizedArray): Quantized array for the inputs

        Returns:
            numpy.ndarray: Return dequantized input in a numpy array
        """

        # TODO remove this + (-x) when issue #721 is fixed
        return (q_input.qvalues + (-q_input.zero_point)) * q_input.scale

    def quant_output(self, qoutput_activation: numpy.ndarray) -> QuantizedArray:
        """Quantize the output of the activation function.

        Args:
            qoutput_activation (numpy.ndarray): Output of the activation function.

        Returns:
            QuantizedArray: Quantized output.
        """
        assert self.q_out is not None

        qoutput_activation = qoutput_activation / self.q_out.scale + self.q_out.zero_point
        qoutput_activation = (
            numpy.rint(qoutput_activation).clip(0, 2 ** self.q_out.n_bits - 1).astype(int)
        )

        # TODO find a better way to do the following (see issue #832)
        q_out = copy.copy(self.q_out)
        q_out.update_qvalues(qoutput_activation)
        return q_out


class QuantizedSigmoid(QuantizedActivation):
    """Quantized sigmoid activation function."""

    def calibrate(self, x: numpy.ndarray):
        self.q_out = QuantizedArray(self.n_bits, 1 / (1 + numpy.exp(-x)))

    def __call__(self, q_input: QuantizedArray) -> QuantizedArray:
        """Process the forward pass of the quantized sigmoid.

        Args:
            q_input (QuantizedArray): Quantized input.

        Returns:
            q_out (QuantizedArray): Quantized output.
        """

        quant_sigmoid = self.dequant_input(q_input)
        quant_sigmoid = 1 + numpy.exp(-quant_sigmoid)
        quant_sigmoid = 1 / quant_sigmoid

        q_out = self.quant_output(quant_sigmoid)
        return q_out


class QuantizedReLU6(QuantizedActivation):
    """Quantized ReLU6 activation function."""

    def calibrate(self, x: numpy.ndarray):
        x = numpy.minimum(numpy.maximum(0, x), 6)
        self.q_out = QuantizedArray(self.n_bits, x)

    def __call__(self, q_input: QuantizedArray) -> QuantizedArray:
        """Process the forward pass of the quantized ReLU6.

        Args:
            q_input (QuantizedArray): Quantized input.

        Returns:
            q_out (QuantizedArray): Quantized output.
        """

        quant_relu6 = self.dequant_input(q_input)
        quant_relu6 = numpy.minimum(numpy.maximum(0, quant_relu6), 6)

        q_out = self.quant_output(quant_relu6)
        return q_out
@@ -1,136 +0,0 @@
"""Quantization utilities for a numpy array/tensor."""
from copy import deepcopy
from typing import Optional

import numpy

STABILITY_CONST = 10 ** -6


class QuantizedArray:
    """Abstraction of quantized array."""

    def __init__(self, n_bits: int, values: numpy.ndarray, is_signed=False):
        """Quantize an array.

        See https://arxiv.org/abs/1712.05877.

        Args:
            values (numpy.ndarray): Values to be quantized.
            n_bits (int): The number of bits to use for quantization.
            is_signed (bool): Whether the quantization can be on signed integers.
        """

        self.offset = 0
        if is_signed:
            self.offset = 2 ** (n_bits - 1)
        self.values = values
        self.n_bits = n_bits
        self.is_signed = is_signed
        self.scale, self.zero_point, self.qvalues = self.compute_quantization_parameters()
        self.n_features = 1 if len(values.shape) <= 1 else values.shape[1]

    def __call__(self) -> Optional[numpy.ndarray]:
        return self.qvalues

    def compute_quantization_parameters(self):
        """Compute the quantization parameters."""
        # Small constant needed for stability
        rmax = numpy.max(self.values)
        rmin = numpy.min(self.values)

        if rmax - rmin < STABILITY_CONST:
            # In this case there is a single unique value to quantize

            # If is_signed is True, we need to set the offset back to 0.
            # Signed quantization does not make sense for a single value.
            self.offset = 0

            # This value could be multiplied with inputs at some point in the model.
            # Since zero points need to be integers, if this value is a small float (ex: 0.01)
            # it will be quantized to 0 with a 0 zero-point, thus becoming useless in multiplication

            if numpy.abs(rmax) < STABILITY_CONST:
                # If the value is a 0 we cannot do it since the scale would become 0 as well
                # resulting in division by 0
                scale = 1
                # Ideally we should get rid of round here but it is risky
                # regarding the FHE compilation.
                # Indeed, the zero_point value for the weights has to be an integer
                # for the compilation to work.
                zero_point = numpy.round(-rmin)
            else:
                # If the value is not a 0 we can tweak the scale factor so that
                # the value quantizes to 2^b - 1, the highest possible quantized value

                # TODO: should we quantize it to the value of 1 whatever the number of bits
                # in order to save some precision bits?
                scale = rmax / (2 ** self.n_bits - 1)
                zero_point = 0
        else:
            scale = (rmax - rmin) / (2 ** self.n_bits - 1) if rmax != rmin else 1.0

            zero_point = numpy.round(
                (rmax * (-self.offset) - (rmin * (2 ** self.n_bits - 1 - self.offset)))
                / (rmax - rmin)
            ).astype(int)

        # Compute quantized values and store
        qvalues = self.values / scale + zero_point

        qvalues = (
            numpy.rint(qvalues)
            .clip(-self.offset, 2 ** (self.n_bits) - 1 - self.offset)
            .astype(int)  # Careful this can be very large with high number of bits
        )

        return scale, zero_point, qvalues

    def update_values(self, values: numpy.ndarray) -> Optional[numpy.ndarray]:
        """Update values to get their corresponding qvalues using the related quantized parameters.

        Args:
            values (numpy.ndarray): Values to replace self.values

        Returns:
            qvalues (numpy.ndarray): Corresponding qvalues
        """
        self.values = deepcopy(values)
        self.quant()
        return self.qvalues

    def update_qvalues(self, qvalues: numpy.ndarray) -> Optional[numpy.ndarray]:
        """Update qvalues to get their corresponding values using the related quantized parameters.

        Args:
            qvalues (numpy.ndarray): Values to replace self.qvalues

        Returns:
            values (numpy.ndarray): Corresponding values
        """
        self.qvalues = deepcopy(qvalues)
        self.dequant()
        return self.values

    def quant(self) -> Optional[numpy.ndarray]:
        """Quantize self.values.

        Returns:
            numpy.ndarray: Quantized values.
        """

        self.qvalues = (
            numpy.rint(self.values / self.scale + self.zero_point)
            .clip(-self.offset, 2 ** (self.n_bits) - 1 - self.offset)
            .astype(int)
        )
        return self.qvalues

    def dequant(self) -> numpy.ndarray:
        """Dequantize self.qvalues.

        Returns:
            numpy.ndarray: Dequantized values.
        """
        self.values = self.scale * (self.qvalues - self.zero_point)
        return self.values
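
A worked example of the affine scheme above (a sketch, not from the original repo; numbers assume the default unsigned path, offset = 0):

    import numpy

    q = QuantizedArray(3, numpy.array([0.0, 0.25, 0.5, 0.75, 1.0]))
    # scale = (rmax - rmin) / (2**3 - 1) = 1/7, zero_point = 0
    # q.qvalues == numpy.rint(q.values * 7) -> [0, 2, 4, 5, 7]
    # q.dequant() -> [0.0, 0.286, 0.571, 0.714, 1.0], i.e. an error of at most scale / 2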
@@ -1,107 +0,0 @@
"""Quantized layers."""
import copy
from typing import Optional

import numpy

from .quantized_array import QuantizedArray


class QuantizedLinear:
    """Fully connected quantized layer."""

    q_out: Optional[QuantizedArray]

    def __init__(
        self, n_bits: int, q_weights: QuantizedArray, q_bias: Optional[QuantizedArray] = None
    ):
        """Implement the forward pass of a quantized linear layer.

        Note: QuantizedLinear seems to become unstable when n_bits > 23.

        Args:
            n_bits (int): Maximum number of bits for the output.
            q_weights (QuantizedArray): Quantized weights (n_features, n_neurons).
            q_bias (QuantizedArray, optional): Quantized bias (1, n_neurons). Defaults to None.
        """
        self.q_weights = q_weights
        self.q_bias = q_bias
        self.n_bits = n_bits

        if self.q_bias is None:
            self.q_bias = QuantizedArray(n_bits, numpy.zeros(self.q_weights.values.shape[-1]))
        self.q_out = None

    def calibrate(self, x: numpy.ndarray):
        """Create corresponding QuantizedArray for the output of QuantizedLinear.

        Args:
            x (numpy.ndarray): Inputs.
        """
        assert self.q_bias is not None
        self.q_out = QuantizedArray(self.n_bits, (x @ self.q_weights.values) + self.q_bias.values)

    def __call__(self, q_input: QuantizedArray) -> QuantizedArray:
        """Process the forward pass of the quantized linear layer.

        Note: in standard quantization, floats are problematic as quantization
        targets integer-only hardware. However in FHE, we can create a table lookup
        to bypass this problem. Thus we leave the floats as is.

        Args:
            q_input (QuantizedArray): Quantized input.

        Returns:
            q_out_ (QuantizedArray): Quantized output.
        """
        # Satisfy mypy.
        assert self.q_out is not None
        assert self.q_bias is not None

        # The following MatMul is done with integers, and thus, does not use any PBS.
        # Only the final conversion to float is done with a PBS, which can actually
        # be merged with the PBS of the following activation.
        # State-of-the-art quantization methods assume the following results in an
        # int32 accumulator.

        # Here we follow Eq.7 in https://arxiv.org/abs/1712.05877 to split the core computation
        # from the zero points and scales.

        p = self.q_weights.qvalues.shape[0]

        # Core matmul operation in full integers with a shape change (INTEGERS)
        matmul = q_input.qvalues @ self.q_weights.qvalues

        # Sum operation in full integers resulting in large integers (INTEGERS)
        # [WORKAROUND #995] numpy.sum can't currently be done in our framework
        # sum_input = self.q_weights.zero_point * numpy.sum(q_input.qvalues, axis=1, keepdims=True)
        # Hack because we can't do numpy.sum(axis..., keepdims...)
        const_ones = numpy.ones(shape=(q_input.n_features, 1), dtype=int)
        sum_input = self.q_weights.zero_point * (q_input.qvalues @ const_ones)

        # Last part that has to be done in FHE; the rest must go in a PBS.
        # Forced fusing using .astype(numpy.float32)
        numpy_q_out = (matmul + (numpy.negative(sum_input))).astype(numpy.float32)

        # sum_weights is a constant
        sum_weights = q_input.zero_point * numpy.sum(self.q_weights.qvalues, axis=0, keepdims=True)

        # Quantization scales and zero points (FLOATS involved)
        # This is going to be compiled with a PBS (along with the following activation function)
        m_matmul = (q_input.scale * self.q_weights.scale) / (self.q_out.scale)
        bias_part = (
            self.q_bias.scale / self.q_out.scale * (self.q_bias.qvalues - self.q_bias.zero_point)
        )
        final_term = p * q_input.zero_point * self.q_weights.zero_point

        numpy_q_out = numpy_q_out + final_term + (numpy.negative(sum_weights))
        numpy_q_out = m_matmul * numpy_q_out
        numpy_q_out = self.q_out.zero_point + bias_part + numpy_q_out

        numpy_q_out = numpy.rint(numpy_q_out).clip(0, 2 ** self.q_out.n_bits - 1).astype(int)

        # TODO find a more intuitive way to do the following (see issue #832)
        # We should be able to reuse q_out quantization parameters
        # easily to get a new QuantizedArray
        q_out_ = copy.copy(self.q_out)
        q_out_.update_qvalues(numpy_q_out)

        return q_out_
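
A minimal numpy check of the Eq.7 split above (a sketch, independent of FHE and of this class): with x = s_x * (q_x - z_x) and w = s_w * (q_w - z_w), the plain float product decomposes exactly into the matmul / sum_input / sum_weights / final_term pieces used in __call__:

    import numpy

    s_x, z_x = 0.05, 3
    s_w, z_w = 0.10, 2
    q_x = numpy.array([[4, 7, 1]])      # quantized input, shape (1, p)
    q_w = numpy.array([[5], [2], [6]])  # quantized weights, shape (p, 1)
    p = q_w.shape[0]

    direct = (s_x * (q_x - z_x)) @ (s_w * (q_w - z_w))
    split = s_x * s_w * (q_x @ q_w - z_w * q_x.sum() - z_x * q_w.sum() + p * z_x * z_w)
    assert numpy.allclose(direct, split)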
@@ -1,128 +0,0 @@
"""QuantizedModule API."""
import copy
from typing import Optional, Union

import numpy

from ..common.compilation.artifacts import CompilationArtifacts
from ..common.compilation.configuration import CompilationConfiguration
from ..common.fhe_circuit import FHECircuit
from ..numpy.np_fhe_compiler import NPFHECompiler
from .quantized_array import QuantizedArray


class QuantizedModule:
    """Inference for a quantized model."""

    quant_layers_dict: dict
    _mode: str
    q_input: Optional[QuantizedArray]
    forward_fhe: Union[None, FHECircuit]

    def __init__(self, quant_layers_dict: dict):
        self.quant_layers_dict = copy.deepcopy(quant_layers_dict)
        self.compiled = False
        self.forward_fhe = None
        self.q_input = None

    def __call__(self, x: QuantizedArray):
        return self.forward(x)

    def forward(self, q_x: Union[numpy.ndarray, QuantizedArray]) -> numpy.ndarray:
        """Forward pass with numpy function only.

        Args:
            q_x (Union[numpy.ndarray, QuantizedArray]): QuantizedArray containing the inputs
                or a numpy.array containing the q_values. In the latter case, the stored
                input parameters are used: (q_input.scale, q_input.zero_point).

        Returns:
            (numpy.ndarray): Predictions of the quantized model
        """
        # The following "if not" is important for compilation, as the tracer
        # needs to fall into this statement (tracing).
        # If q_x is a numpy array then we reuse the self.q_input parameters
        # computed during calibration.
        # Later we might want to only allow numpy.array inputs
        if not isinstance(q_x, QuantizedArray):
            assert self.q_input is not None
            self.q_input.update_qvalues(q_x)
            q_x = self.q_input

        for _, layer in self.quant_layers_dict.items():
            q_x = layer(q_x)

        # mypy compliance
        assert isinstance(q_x, QuantizedArray)

        return q_x.qvalues

    def forward_and_dequant(self, q_x: Union[numpy.ndarray, QuantizedArray]) -> numpy.ndarray:
        """Forward pass with numpy function only plus dequantization.

        Args:
            q_x (Union[numpy.ndarray, QuantizedArray]): QuantizedArray containing the inputs
                or a numpy.array containing the q_values. In the latter case, the stored
                input parameters are used: (q_input.scale, q_input.zero_point).

        Returns:
            (numpy.ndarray): Predictions of the quantized model
        """
        q_out = self.forward(q_x)
        return self.dequantize_output(q_out)

    def dequantize_output(self, qvalues: numpy.ndarray) -> numpy.ndarray:
        """Take the last layer q_out and use its dequant function.

        Args:
            qvalues (numpy.ndarray): Quantized values of the last layer.

        Returns:
            numpy.ndarray: Dequantized values of the last layer.
        """
        last_layer = list(self.quant_layers_dict.values())[-1]
        real_values = last_layer.q_out.update_qvalues(qvalues)
        return real_values

    def compile(
        self,
        q_input: QuantizedArray,
        compilation_configuration: Optional[CompilationConfiguration] = None,
        compilation_artifacts: Optional[CompilationArtifacts] = None,
        show_mlir: bool = False,
    ) -> FHECircuit:
        """Compile the forward function of the module.

        Args:
            q_input (QuantizedArray): Needed for tracing and building the boundaries.
            compilation_configuration (Optional[CompilationConfiguration]): Configuration object
                to use during compilation
            compilation_artifacts (Optional[CompilationArtifacts]): Artifacts object to fill
                during compilation
            show_mlir (bool, optional): if set, the MLIR produced by the converter and which is
                going to be sent to the compiler backend is shown on the screen, e.g., for
                debugging or demo. Defaults to False.

        Returns:
            FHECircuit: the compiled FHECircuit.
        """

        self.q_input = copy.deepcopy(q_input)
        compiler = NPFHECompiler(
            self.forward,
            {
                "q_x": "encrypted",
            },
            compilation_configuration,
            compilation_artifacts,
        )
        self.forward_fhe = compiler.compile_on_inputset(
            (numpy.expand_dims(arr, 0) for arr in self.q_input.qvalues), show_mlir
        )

        return self.forward_fhe
@@ -1,2 +0,0 @@
"""Modules for torch to numpy conversion."""
from .numpy_module import NumpyModule
@@ -1,90 +0,0 @@
"""torch compilation function."""

from typing import Iterable, Optional, Union

import numpy
import torch

from ..common.compilation import CompilationArtifacts, CompilationConfiguration
from ..quantization import PostTrainingAffineQuantization, QuantizedArray, QuantizedModule
from . import NumpyModule

TorchDataset = Iterable[torch.Tensor]
NPDataset = Iterable[numpy.ndarray]


def convert_torch_tensor_or_numpy_array_to_numpy_array(
    torch_tensor_or_numpy_array: Union[torch.Tensor, numpy.ndarray]
) -> numpy.ndarray:
    """Convert a torch tensor or a numpy array to a numpy array.

    Args:
        torch_tensor_or_numpy_array (Union[torch.Tensor, numpy.ndarray]): the value that is either
            a torch tensor or a numpy array.

    Returns:
        numpy.ndarray: the value converted to a numpy array.
    """
    return (
        torch_tensor_or_numpy_array
        if isinstance(torch_tensor_or_numpy_array, numpy.ndarray)
        else torch_tensor_or_numpy_array.cpu().numpy()
    )


def compile_torch_model(
    torch_model: torch.nn.Module,
    torch_inputset: Union[TorchDataset, NPDataset],
    compilation_configuration: Optional[CompilationConfiguration] = None,
    compilation_artifacts: Optional[CompilationArtifacts] = None,
    show_mlir: bool = False,
    n_bits=7,
) -> QuantizedModule:
    """Take a model in torch, turn it to numpy, transform weights to integer.

    Later, we'll compile the integer model.

    Args:
        torch_model (torch.nn.Module): the model to quantize,
        torch_inputset (Union[TorchDataset, NPDataset]): the inputset, can contain either torch
            tensors or numpy.ndarray, only datasets with a single input are supported for now.
        function_parameters_encrypted_status (Dict[str, Union[str, EncryptedStatus]]): a dict with
            the name of the parameter and its encrypted status
        compilation_configuration (CompilationConfiguration): Configuration object to use
            during compilation
        compilation_artifacts (CompilationArtifacts): Artifacts object to fill
            during compilation
        show_mlir (bool): if set, the MLIR produced by the converter and which is going
            to be sent to the compiler backend is shown on the screen, e.g., for debugging or demo
        n_bits: the number of bits for the quantization

    Returns:
        QuantizedModule: The resulting compiled QuantizedModule.
    """

    # Create corresponding numpy model
    numpy_model = NumpyModule(torch_model)

    # Torch input to numpy
    numpy_inputset_as_single_array = numpy.concatenate(
        tuple(
            numpy.expand_dims(convert_torch_tensor_or_numpy_array_to_numpy_array(input_), 0)
            for input_ in torch_inputset
        )
    )

    # Quantize with post-training static method, to have a model with integer weights
    post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_model, is_signed=True)
    quantized_module = post_training_quant.quantize_module(numpy_inputset_as_single_array)

    # Quantize input
    quantized_numpy_inputset = QuantizedArray(n_bits, numpy_inputset_as_single_array)

    quantized_module.compile(
        quantized_numpy_inputset,
        compilation_configuration,
        compilation_artifacts,
        show_mlir,
    )

    return quantized_module
@@ -1,73 +0,0 @@
"""A torch to numpy module."""
import numpy
from torch import nn


class NumpyModule:
    """General interface to transform a torch.nn.Module to a numpy module."""

    IMPLEMENTED_MODULES = {nn.Linear, nn.Sigmoid, nn.ReLU6}

    def __init__(self, torch_model: nn.Module):
        """Initialize our numpy module.

        Current constraint: all objects used in the forward have to be defined in the
        __init__() of the torch.nn.Module and follow the exact same order (i.e. each
        linear layer must have one variable defined in the right order). This constraint
        will disappear when TorchScript is in place. (issue #818)

        Args:
            torch_model (nn.Module): A fully trained torch model along with its parameters.
        """
        self.torch_model = torch_model
        self.check_compatibility()
        self.convert_to_numpy()

    def check_compatibility(self):
        """Check the compatibility of all layers in the torch model."""

        for _, layer in self.torch_model.named_children():
            if (layer_type := type(layer)) not in self.IMPLEMENTED_MODULES:
                raise ValueError(
                    f"The following module is currently not implemented: {layer_type.__name__}. "
                    f"Please stick to the available torch modules: "
                    f"{', '.join(sorted(module.__name__ for module in self.IMPLEMENTED_MODULES))}."
                )
        return True

    def convert_to_numpy(self):
        """Transform all parameters from torch tensors to numpy arrays."""
        self.numpy_module_dict = {}

        for name, weights in self.torch_model.state_dict().items():
            params = weights.detach().numpy()
            self.numpy_module_dict[name] = params.T if "weight" in name else params

    def __call__(self, x: numpy.ndarray):
        """Return the function to be compiled."""
        return self.forward(x)

    def forward(self, x: numpy.ndarray) -> numpy.ndarray:
        """Apply a forward pass with numpy functions only.

        Args:
            x (numpy.ndarray): Input to be processed in the forward pass.

        Returns:
            x (numpy.ndarray): Processed input.
        """
        for name, layer in self.torch_model.named_children():
            if isinstance(layer, nn.Linear):
                # Apply a matmul product and add the bias.
                x = (
                    x @ self.numpy_module_dict[f"{name}.weight"]
                    + self.numpy_module_dict[f"{name}.bias"]
                )
            elif isinstance(layer, nn.Sigmoid):
                x = 1 / (1 + numpy.exp(-x))
            elif isinstance(layer, nn.ReLU6):
                x = numpy.minimum(numpy.maximum(0, x), 6)
        return x
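A short, hedged usage sketch of this class; the torch model below is an illustrative stand-in, and only `NumpyModule` itself comes from this file:

```python
import numpy
import torch
from torch import nn

# Illustrative model built only from the implemented modules (Linear, Sigmoid, ReLU6)
torch_model = nn.Sequential(nn.Linear(14, 1), nn.Sigmoid())
numpy_model = NumpyModule(torch_model)

x = numpy.random.uniform(-1, 1, size=(10, 14))
numpy_out = numpy_model(x)                            # numpy-only forward pass
torch_out = torch_model(torch.from_numpy(x).float())  # should match closely
```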
@@ -8,13 +8,11 @@
loguru              0.5.3     MIT License
matplotlib          3.5.1     Python Software Foundation License
networkx            2.6.3     BSD License
numpy               1.22.0    BSD License
numpy               1.22.1    BSD License
packaging           21.3      Apache Software License; BSD License
pygraphviz          1.7       BSD License
pyparsing           3.0.6     MIT License
python-dateutil     2.8.2     Apache Software License; BSD License
setuptools-scm      6.3.2     MIT License
setuptools-scm      6.4.1     MIT License
six                 1.16.0    MIT License
tomli               1.2.3     MIT License
torch               1.10.1    BSD License
typing-extensions   4.0.1     Python Software Foundation License
@@ -53,22 +53,6 @@ Here is the visual representation of the pipeline:

![figure](../_static/compilation-pipeline/compilation_process.svg)

## Overview of the torch compilation process

Compiling a torch Module is straightforward.

The torch Module is first converted to a numpy equivalent we call `NumpyModule`, provided all the layers in the torch Module are supported.

Then the module is quantized post-training to be compatible with our compiler, which only works on integers. The post-training quantization uses the provided dataset for calibration.

The dataset is then quantized to be usable for compilation with the QuantizedModule.

The QuantizedModule is compiled, yielding an executable FHECircuit; a sketch of these steps in code follows the figure below.

Here is the visual representation of the different steps:

![figure](../_static/compilation-pipeline/torch_to_numpy_with_fhe.svg)

## Tracing

Given a Python function `f` such as this one,
@@ -45,13 +45,3 @@ In this section, we will discuss the module structure of **concrete-numpy** brie
- np_inputset_helpers: utilities for inputsets
- np_mlir_converter: utilities for MLIR conversion
- tracing: tracing of numpy functions
- quantization: tools to quantize networks
  - post_training: post training quantization
  - quantized_activations: management of quantization in activations
  - quantized_array: utilities for quantization
  - quantized_layers: management of quantization of neural network layers
  - quantized_module: main API for quantization
- torch: torch compilation and conversion
  - compile: compilation of a torch module, including quantization
  - numpy_module: conversion tools to turn a torch module into a numpy function

@@ -1,632 +0,0 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Decision Tree Classifier"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Trees are a popular class of algorithm in Machine Learning. In this notebook we build a simple Decision Tree Classifier using `scikit-learn` to show that they can be executed homomorphically using Concrete Numpy.\n",
    "\n",
    "State-of-the-art classifiers are generally a bit more complex than a single decision tree, but here we wanted to demonstrate FHE decision trees, so results may not compete with the best models out there.\n",
    "\n",
    "Converting a tree working over quantized data to its FHE equivalent takes only a few lines of code thanks to Concrete Numpy.\n",
    "\n",
    "Let's dive in!"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## The Use Case"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The use case is a spam classification task from OpenML, which you can find here: https://www.openml.org/d/44\n",
    "\n",
    "Some pre-extracted features (like some word frequencies) are provided, as well as a class, `0` for a normal e-mail and `1` for spam, for 4601 samples.\n",
    "\n",
    "Let's first get the dataset."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(4601, 57)\n",
      "(4601,)\n",
      "Number of features: 57\n"
     ]
    }
   ],
   "source": [
    "import numpy\n",
    "from sklearn.datasets import fetch_openml\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "features, classes = fetch_openml(data_id=44, as_frame=False, cache=True, return_X_y=True)\n",
    "classes = classes.astype(numpy.int64)\n",
    "\n",
    "print(features.shape)\n",
    "print(classes.shape)\n",
    "\n",
    "num_features = features.shape[1]\n",
    "print(f\"Number of features: {num_features}\")\n",
    "\n",
    "x_train, x_test, y_train, y_test = train_test_split(\n",
    "    features,\n",
    "    classes,\n",
    "    test_size=0.15,\n",
    "    random_state=42,\n",
    ")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We first train a decision tree on the dataset as is and see what performance we can get."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Depth: 29\n",
      "Mean accuracy: 0.91027496382055\n",
      "Number of test samples: 691\n",
      "Number of spams in test samples: 304\n",
      "True Negative (legit mail well classified) rate: 0.9328165374677002\n",
      "False Positive (legit mail classified as spam) rate: 0.06718346253229975\n",
      "False Negative (spam mail classified as legit) rate: 0.11842105263157894\n",
      "True Positive (spam well classified) rate: 0.881578947368421\n"
     ]
    }
   ],
   "source": [
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "\n",
    "clear_clf = DecisionTreeClassifier()\n",
    "clear_clf = clear_clf.fit(x_train, y_train)\n",
    "\n",
    "print(f\"Depth: {clear_clf.get_depth()}\")\n",
    "\n",
    "preds = clear_clf.predict(x_test)\n",
    "\n",
    "mean_accuracy = numpy.mean(preds == y_test)\n",
    "print(f\"Mean accuracy: {mean_accuracy}\")\n",
    "\n",
    "true_negative, false_positive, false_negative, true_positive = confusion_matrix(\n",
    "    y_test, preds, normalize=\"true\"\n",
    ").ravel()\n",
    "\n",
    "num_samples = len(y_test)\n",
    "num_spam = sum(y_test)\n",
    "\n",
    "print(f\"Number of test samples: {num_samples}\")\n",
    "print(f\"Number of spams in test samples: {num_spam}\")\n",
    "\n",
    "print(f\"True Negative (legit mail well classified) rate: {true_negative}\")\n",
    "print(f\"False Positive (legit mail classified as spam) rate: {false_positive}\")\n",
    "print(f\"False Negative (spam mail classified as legit) rate: {false_negative}\")\n",
    "print(f\"True Positive (spam well classified) rate: {true_positive}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now quantize the features to train the tree directly on quantized data; this will make the trained tree FHE-friendly by default, which is a nice bonus, and it also lets us see how the two trees compare to each other.\n",
    "\n",
    "The choice here is to compute the quantization parameters over the training set. We use 6 bits for each feature individually, as the Concrete Numpy precision for PBSes is better with 6 bits of precision."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ 0 0 6 0 3 5 0 0 0 2 0 19 0 0 0 0 0 0 3 0 0 0 0 0\n",
      " 4 4 0 7 3 0 0 0 2 0 0 4 0 0 0 0 0 0 0 0 0 0 0 0\n",
      " 0 1 0 0 0 0 0 0 1]\n",
      "[ 0 0 0 0 6 0 0 0 0 0 0 10 0 0 0 0 0 0 4 0 7 0 0 0\n",
      " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      " 0 0 0 0 0 0 0 0 0]\n"
     ]
    }
   ],
   "source": [
    "from concrete.quantization import QuantizedArray\n",
    "\n",
    "# Quantize the training and test samples accordingly\n",
    "q_x_train = numpy.zeros_like(x_train, dtype=numpy.int64)\n",
    "q_x_test = numpy.zeros_like(x_test, dtype=numpy.int64)\n",
    "for feature_idx in range(num_features):\n",
    "    q_x_train[:, feature_idx] = QuantizedArray(6, x_train[:, feature_idx]).qvalues\n",
    "    q_x_test[:, feature_idx] = QuantizedArray(6, x_test[:, feature_idx]).qvalues\n",
    "\n",
    "print(q_x_train[0])\n",
    "print(q_x_test[-1])\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "So far so good; we can now train a DecisionTreeClassifier on the quantized dataset."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Depth: 7\n",
      "Mean accuracy: 0.8813314037626628\n",
      "Number of test samples: 691\n",
      "Number of spams in test samples: 304\n",
      "True Negative (legit mail well classified) rate: 0.9276485788113695\n",
      "False Positive (legit mail classified as spam) rate: 0.07235142118863049\n",
      "False Negative (spam mail classified as legit) rate: 0.17763157894736842\n",
      "True Positive (spam well classified) rate: 0.8223684210526315\n"
     ]
    }
   ],
   "source": [
    "# We limit the depth to have reasonable FHE runtimes, but deep trees can still compile properly!\n",
    "clf = DecisionTreeClassifier(max_depth=7)\n",
    "clf = clf.fit(q_x_train, y_train)\n",
    "\n",
    "print(f\"Depth: {clf.get_depth()}\")\n",
    "\n",
    "preds = clf.predict(q_x_test)\n",
    "\n",
    "mean_accuracy = numpy.mean(preds == y_test)\n",
    "print(f\"Mean accuracy: {mean_accuracy}\")\n",
    "\n",
    "true_negative, false_positive, false_negative, true_positive = confusion_matrix(\n",
    "    y_test, preds, normalize=\"true\"\n",
    ").ravel()\n",
    "\n",
    "num_samples = len(y_test)\n",
    "num_spam = sum(y_test)\n",
    "\n",
    "print(f\"Number of test samples: {num_samples}\")\n",
    "print(f\"Number of spams in test samples: {num_spam}\")\n",
    "\n",
    "print(f\"True Negative (legit mail well classified) rate: {true_negative}\")\n",
    "print(f\"False Positive (legit mail classified as spam) rate: {false_positive}\")\n",
    "print(f\"False Negative (spam mail classified as legit) rate: {false_negative}\")\n",
    "print(f\"True Positive (spam well classified) rate: {true_positive}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This simple classifier achieves about a 7% false positive (legit mail classified as spam) rate and about an 18% false negative (spam mail classified as legit) rate. In a more common setting, not shown in this tutorial, we would use gradient boosting to assemble several small classifiers into a single one that would be more effective.\n",
    "\n",
    "We can see that the accuracy is relatively similar to the tree trained in the clear, despite the quantization (to be FHE compatible) and the smaller depth that allows for faster FHE computations. The main difference is a higher False Negative rate (spam mail classified as legit).\n",
    "\n",
    "The point here is not to beat the state-of-the-art methods for spam detection, but rather to show that, given a certain tree classifier, we can run it homomorphically."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Homomorphic Trees"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before we can do that, we need to convert the tree to a form that is easy to run homomorphically.\n",
    "\n",
    "The Hummingbird paper from Microsoft (https://scnakandala.github.io/papers/TR_2020_Hummingbird.pdf and https://github.com/microsoft/hummingbird) gives a method to convert tree evaluation to tensor operations, which we support in Concrete Numpy.\n",
    "\n",
    "The next few cells implement the functions necessary for the conversion. They are deliberately left unoptimized so that they remain readable.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# First, an sklearn import we need\n",
    "from sklearn.tree import _tree"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_hummingbird_tensor_a(tree_, features, internal_nodes):\n",
    "    \"\"\"Create Hummingbird tensor A.\"\"\"\n",
    "    a = numpy.zeros((len(features), len(internal_nodes)), dtype=numpy.int64)\n",
    "    for i in range(a.shape[0]):\n",
    "        for j in range(a.shape[1]):\n",
    "            a[i, j] = tree_.feature[internal_nodes[j]] == features[i]\n",
    "\n",
    "    return a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_hummingbird_tensor_b(tree_, internal_nodes, is_integer_tree=False):\n",
    "    \"\"\"Create Hummingbird tensor B.\"\"\"\n",
    "    b = numpy.array([tree_.threshold[int_node] for int_node in internal_nodes])\n",
    "\n",
    "    return b.astype(numpy.int64) if is_integer_tree else b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_subtree_nodes_set_per_node(\n",
    "    all_nodes, leaf_nodes, is_left_child_of: dict, is_right_child_of: dict\n",
    "):\n",
    "    \"\"\"Create subtrees nodes set for each node in the tree.\"\"\"\n",
    "    left_subtree_nodes_per_node = {node: set() for node in all_nodes}\n",
    "    right_subtree_nodes_per_node = {node: set() for node in all_nodes}\n",
    "\n",
    "    current_nodes = {node: None for node in leaf_nodes}\n",
    "    while current_nodes:\n",
    "        next_nodes = {}\n",
    "        for node in current_nodes:\n",
    "            parent_as_left_child = is_left_child_of.get(node, None)\n",
    "            if parent_as_left_child is not None:\n",
    "                left_subtree = left_subtree_nodes_per_node[parent_as_left_child]\n",
    "                left_subtree.add(node)\n",
    "                left_subtree.update(left_subtree_nodes_per_node[node])\n",
    "                left_subtree.update(right_subtree_nodes_per_node[node])\n",
    "                next_nodes.update({parent_as_left_child: None})\n",
    "\n",
    "            parent_as_right_child = is_right_child_of.get(node, None)\n",
    "            if parent_as_right_child is not None:\n",
    "                right_subtree = right_subtree_nodes_per_node[parent_as_right_child]\n",
    "                right_subtree.add(node)\n",
    "                right_subtree.update(left_subtree_nodes_per_node[node])\n",
    "                right_subtree.update(right_subtree_nodes_per_node[node])\n",
    "                next_nodes.update({parent_as_right_child: None})\n",
    "\n",
    "        current_nodes = next_nodes\n",
    "\n",
    "    return left_subtree_nodes_per_node, right_subtree_nodes_per_node"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_hummingbird_tensor_c(\n",
    "    all_nodes, internal_nodes, leaf_nodes, is_left_child_of: dict, is_right_child_of: dict\n",
    "):\n",
    "    \"\"\"Create Hummingbird tensor C.\"\"\"\n",
    "    left_subtree_nodes_per_node, right_subtree_nodes_per_node = create_subtree_nodes_set_per_node(\n",
    "        all_nodes, leaf_nodes, is_left_child_of, is_right_child_of\n",
    "    )\n",
    "\n",
    "    c = numpy.zeros((len(internal_nodes), len(leaf_nodes)), dtype=numpy.int64)\n",
    "\n",
    "    for i in range(c.shape[0]):\n",
    "        for j in range(c.shape[1]):\n",
    "            if leaf_nodes[j] in right_subtree_nodes_per_node[internal_nodes[i]]:\n",
    "                c[i, j] = -1\n",
    "            elif leaf_nodes[j] in left_subtree_nodes_per_node[internal_nodes[i]]:\n",
    "                c[i, j] = 1\n",
    "\n",
    "    return c"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_hummingbird_tensor_d(leaf_nodes, is_left_child_of, is_right_child_of):\n",
    "    \"\"\"Create Hummingbird tensor D.\"\"\"\n",
    "    d = numpy.zeros((len(leaf_nodes)), dtype=numpy.int64)\n",
    "    for k in range(d.shape[0]):\n",
    "        current_node = leaf_nodes[k]\n",
    "        num_left_children = 0\n",
    "        while True:\n",
    "            if (parent_as_left_child := is_left_child_of.get(current_node, None)) is not None:\n",
    "                num_left_children += 1\n",
    "                current_node = parent_as_left_child\n",
    "            elif (parent_as_right_child := is_right_child_of.get(current_node, None)) is not None:\n",
    "                current_node = parent_as_right_child\n",
    "            else:\n",
    "                break\n",
    "        d[k] = num_left_children\n",
    "\n",
    "    return d"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_hummingbird_tensor_e(tree_, leaf_nodes, classes):\n",
    "    \"\"\"Create Hummingbird tensor E.\"\"\"\n",
    "    e = numpy.zeros((len(leaf_nodes), len(classes)), dtype=numpy.int64)\n",
    "    for i in range(e.shape[0]):\n",
    "        leaf_node = leaf_nodes[i]\n",
    "        assert tree_.feature[leaf_node] == _tree.TREE_UNDEFINED  # Sanity check\n",
    "        for j in range(e.shape[1]):\n",
    "            value = None\n",
    "            if tree_.n_outputs == 1:\n",
    "                value = tree_.value[leaf_node][0]\n",
    "            else:\n",
    "                value = tree_.value[leaf_node].T[0]\n",
    "            class_name = numpy.argmax(value)\n",
    "            e[i, j] = class_name == j\n",
    "\n",
    "    return e"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def tree_to_numpy(tree, num_features, classes):\n",
    "    \"\"\"Convert an sklearn tree to its Hummingbird tensor equivalent.\"\"\"\n",
    "    tree_ = tree.tree_\n",
    "\n",
    "    number_of_nodes = tree_.node_count\n",
    "    all_nodes = list(range(number_of_nodes))\n",
    "    internal_nodes = [\n",
    "        node_idx\n",
    "        for node_idx, feature in enumerate(tree_.feature)\n",
    "        if feature != _tree.TREE_UNDEFINED\n",
    "    ]\n",
    "    leaf_nodes = [\n",
    "        node_idx\n",
    "        for node_idx, feature in enumerate(tree_.feature)\n",
    "        if feature == _tree.TREE_UNDEFINED\n",
    "    ]\n",
    "\n",
    "    features = list(range(num_features))\n",
    "\n",
    "    a = create_hummingbird_tensor_a(tree_, features, internal_nodes)\n",
    "\n",
    "    b = create_hummingbird_tensor_b(tree_, internal_nodes, is_integer_tree=True)\n",
    "\n",
    "    is_left_child_of = {\n",
    "        left_child: parent\n",
    "        for parent, left_child in enumerate(tree_.children_left)\n",
    "        if left_child != _tree.TREE_UNDEFINED\n",
    "    }\n",
    "    is_right_child_of = {\n",
    "        right_child: parent\n",
    "        for parent, right_child in enumerate(tree_.children_right)\n",
    "        if right_child != _tree.TREE_UNDEFINED\n",
    "    }\n",
    "\n",
    "    c = create_hummingbird_tensor_c(\n",
    "        all_nodes, internal_nodes, leaf_nodes, is_left_child_of, is_right_child_of\n",
    "    )\n",
    "\n",
    "    d = create_hummingbird_tensor_d(leaf_nodes, is_left_child_of, is_right_child_of)\n",
    "\n",
    "    e = create_hummingbird_tensor_e(tree_, leaf_nodes, classes)\n",
    "\n",
    "    def tree_predict(inputs):\n",
    "        t = inputs @ a\n",
    "        t = t <= b\n",
    "        t = t @ c\n",
    "        t = t == d\n",
    "        r = t @ e\n",
    "        return r\n",
    "\n",
    "    return tree_predict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# We can finally convert our tree!\n",
    "tree_predict = tree_to_numpy(clf, num_features, classes=[0, 1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Results are identical: True\n"
     ]
    }
   ],
   "source": [
    "# Let's see if it works as expected\n",
    "tensor_predictions = tree_predict(q_x_test)\n",
    "tensor_predictions = numpy.argmax(tensor_predictions, axis=1)\n",
    "\n",
    "tree_predictions = clf.predict(q_x_test)\n",
    "\n",
    "print(f\"Results are identical: {numpy.array_equal(tensor_predictions, tree_predictions)}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We now have a tensor equivalent of our `DecisionTreeClassifier`, pretty neat, isn't it?\n",
    "\n",
    "The last step is compiling the tensor equivalent to FHE using Concrete Numpy, and it's nearly as easy as 1, 2, 3.\n",
    "\n",
    "We use the training input data as well as some synthetic data to calibrate the circuit during compilation."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "import concrete.numpy as hnp\n",
    "\n",
    "compiler = hnp.NPFHECompiler(tree_predict, {\"inputs\": \"encrypted\"})\n",
    "fhe_tree = compiler.compile_on_inputset((sample for sample in q_x_train))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "And now we can start running the tree homomorphically!"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 10/10 [05:01<00:00, 30.17s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Same predictions of FHE compared to clear: 10/10 (1.0)\n",
      "FHE evaluation #1 took 30.765692999993917 s\n",
      "FHE evaluation #2 took 30.604038099998434 s\n",
      "FHE evaluation #3 took 30.70741419999831 s\n",
      "FHE evaluation #4 took 30.64609560000099 s\n",
      "FHE evaluation #5 took 29.945520399996894 s\n",
      "FHE evaluation #6 took 30.155333900002006 s\n",
      "FHE evaluation #7 took 29.776400299997476 s\n",
      "FHE evaluation #8 took 30.12118709999777 s\n",
      "FHE evaluation #9 took 29.526597299998684 s\n",
      "FHE evaluation #10 took 29.392055899996194 s\n",
      "Mean FHE evaluation time: 30.16403357999807\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "from time import perf_counter\n",
    "\n",
    "num_runs = 10\n",
    "fhe_preds = []\n",
    "clear_preds = []\n",
    "fhe_eval_times = []\n",
    "for i in tqdm(range(num_runs)):\n",
    "    start = perf_counter()\n",
    "    fhe_pred = fhe_tree.run(q_x_test[i].astype(numpy.uint8))\n",
    "    stop = perf_counter()\n",
    "    fhe_eval_times.append(stop - start)\n",
    "    fhe_pred = numpy.argmax(fhe_pred)\n",
    "    fhe_preds.append(fhe_pred)\n",
    "    clear_pred = clf.predict(numpy.expand_dims(q_x_test[i], axis=0))\n",
    "    clear_pred = clear_pred[0]\n",
    "    clear_preds.append(clear_pred)\n",
    "\n",
    "fhe_preds = numpy.array(fhe_preds)\n",
    "clear_preds = numpy.array(clear_preds)\n",
    "\n",
    "same_preds = fhe_preds == clear_preds\n",
    "n_same_preds = sum(same_preds)\n",
    "print(\n",
    "    f\"Same predictions of FHE compared to clear: {n_same_preds}/{num_runs} \"\n",
    "    f\"({numpy.mean(same_preds)})\"\n",
    ")\n",
    "for idx, eval_time in enumerate(fhe_eval_times, 1):\n",
    "    print(f\"FHE evaluation #{idx} took {eval_time} s\")\n",
    "\n",
    "print(f\"Mean FHE evaluation time: {numpy.mean(fhe_eval_times)}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Conclusion"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In this notebook we showed how to quantize a dataset to train a tree directly on integer data so that it is FHE-friendly. We saw that, despite quantization and its smaller depth, the quantized tree's classification capabilities were close to those of a tree trained on the original real-valued dataset.\n",
    "\n",
    "We then used the Hummingbird paper's algorithm to transform a tree evaluation into a few tensor operations, which can be compiled by Concrete Numpy to an FHE circuit.\n",
    "\n",
    "Finally, we ran the compiled circuit on a few samples (because inference times are a bit high) to show that the clear and FHE computations were the same."
   ]
  }
 ],
 "metadata": {
  "execution": {
   "timeout": 10800
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -1,11 +0,0 @@
Advanced examples
=================

.. toctree::
    :maxdepth: 1

    FullyConnectedNeuralNetwork.ipynb
    LinearRegression.ipynb
    LogisticRegression.ipynb
    PoissonRegression.ipynb
    DecisionTreeClassifier.ipynb
@@ -107,4 +107,3 @@ Today, we cannot simulate a client / server API in python, but it is for very so

- [Working With Floating Points Tutorial](../tutorial/working_with_floating_points.md)
- [Table Lookup Tutorial](../tutorial/table_lookup.md)
- [Compiling a torch model](../howto/compiling_torch_model.md)
@@ -7,5 +7,4 @@ Getting Started
    intro.md
    installing.md
    compiling_and_executing.md
    ../howto/compiling_torch_model.md
    benchmarks.md
@@ -14,7 +14,6 @@ With **Concrete Numpy**, data scientists can implement machine learning models u
**Concrete Numpy** is made of several parts:
- an entry API, which is the main function of the so-called **Concrete frontend**; it takes programs made from a subset of numpy and converts them to an FHE program;
- the **Concrete compiler**, which is called by the frontend and turns an MLIR program into an FHE program, on top of the **Concrete Library**, which contains the core cryptographic APIs for computing with FHE;
- some ML tools, in an early version, allowing for example to turn some torch programs into numpy, and then to use the main API stack to finally get an FHE program.

In a further release, **Concrete Numpy** will be divided into a **Concrete Framework** package, containing the compiler, the core lib and the frontend(s), and a **Concrete ML** package, which will contain the ML tools, built on top of the **Concrete Framework**. The names of these packages are subject to change.
@@ -36,5 +35,3 @@ The main _current_ limits are:
- **Concrete** only supports unsigned integers
- **Concrete** needs integers to fit in a maximum of 7 bits
- **Concrete** computations are exact (except with a very small probability) for computations on 6 bits or less, and exact with a probability close to 90% for 7-bit computations

To overcome the above limitations, Concrete has a [popular quantization](../explanation/quantization.md) method built into the framework that allows mapping floating point values to integers. We can [use this approach](../howto/use_quantization.md) to run models in FHE. Lastly, we give hints to the user on how to [reduce the precision](../howto/reduce_needed_precision.md) of a model to make it work in Concrete.
@@ -10,7 +10,7 @@ However, one still has to consider that FHE is slow, as compared to the vanilla

### Multiplying by constants

In the scheme used in **Concrete Numpy**, namely [TFHE](https://tfhe.github.io/tfhe/), multiplication by constants is only defined for integer constants. Notably, one can't multiply by floats. As float multiplication is very common in data science (think of the weights of dense layers, for example), this could be a problem, but quantization comes to our rescue. See [this](quantization.md) section for more details.
In the scheme used in **Concrete Numpy**, namely [TFHE](https://tfhe.github.io/tfhe/), multiplication by constants is only defined for integer constants. Notably, one can't multiply by floats. As float multiplication is very common in data science (think of the weights of dense layers, for example), this could be a problem, but quantization comes to our rescue. See the [Quantization](https://docs.preprod.zama.ai/concrete-ml/main/user/explanation/quantization.html) section of the Concrete ML documentation for more details.
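To make this constant restriction tangible, here is a hedged toy illustration (pure Python, not the library API) of how a float constant can be folded into integer arithmetic, which is the essence of what quantization does here:

```python
# Approximate y = 1.3 * x while multiplying only by an integer constant.
# The scale and descaling step are illustrative; in practice the descaling
# is absorbed into the quantization parameters or a table lookup.
SCALE = 10
C_INT = 13            # the float constant 1.3, encoded as 13 with scale 10

x = 4
y_scaled = C_INT * x  # integer-only multiplication: 52
y = y_scaled / SCALE  # descaled result: 5.2
```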

### Achieving computations of non-linear functions
@@ -30,5 +30,4 @@ As we explained, we wanted to focus first on cryptographic challenges. Performan

### Currently restricted to 7-bit computations

For the moment, we can only perform computations with 7 bits or less. Furthermore, the exactness of computations is only ensured for 6 bits or less; for 7 bits, the computations are exact with a probability close to 90%. Of course, we are working on increasing this limit, and on making the probability of a wrong computation as close to 0% as possible. Don't hesitate to look at our [quantization](quantization.md) section to learn how to use smaller integers.

For the moment, we can only perform computations with 7 bits or less. Furthermore, the exactness of computations is only ensured for 6 bits or less; for 7 bits, the computations are exact with a probability close to 90%. Of course, we are working on increasing this limit, and on making the probability of a wrong computation as close to 0% as possible. Don't hesitate to look at the [Quantization](https://docs.preprod.zama.ai/concrete-ml/main/user/explanation/quantization.html) section of the Concrete ML documentation to learn how to use smaller integers.
@@ -11,17 +11,3 @@ for example) and faster production execution (with distribution over a set of ma
- **more complete benchmarks**: we will have an extended benchmark, containing lots of functions that you may want to compile; then, we will measure the framework's progress by tracking the number of successfully compiled functions over time. Also, this public benchmark will be a way for other competing frameworks or technologies to compare fairly with us, in terms of functionality or performance
- **client/server APIs**: today, the `run` function performs the key generation, the encryption, the inference and the decryption, to allow machine learning practitioners to test both the performance and the accuracy of FHE-friendly models. Soon, we are going to have separate APIs to perform the steps one by one, and thus a full client / server API
- **serialization**: we are going to add several utils to serialize ciphertexts or keys

## Regarding machine learning

We will continue to consider our `NPFHECompiler` class (compilation of numpy programs) as the main entry point for **Concrete Numpy**. In the future, we may move all ML tools currently present in **Concrete Numpy** to a new, to-be-named, ML-specific package.

Our plans to extend machine learning support in the future are:

- **extend support for torch**: having more layers and more complex `forward` patterns, and also having ready-to-use neural networks and neural network blocks that are compatible with FHE
- **support for other ML frameworks**: we will provide FHE-compatible model architectures for classical ML models which will be trainable with popular frameworks such as sklearn. Tools for quantization-aware training and FHE-compatible algorithms are also in our plans.

If you are looking for a specific new feature, you can drop a message to <hello@zama.ai>.
@@ -6,5 +6,4 @@ Explanations

    what_is_fhe.md
    fhe_and_framework_limits.md
    quantization.md
    future_features.md
@@ -1,47 +0,0 @@
# Quantization

```{note}
From [Wikipedia](https://en.wikipedia.org/wiki/Quantization):

> Quantization is the process of constraining an input from a continuous or otherwise large set of values (such as the real numbers) to a discrete set (such as the integers).
```

## Why is it needed?

Modern computing has long been using data types that use 32 or 64 bits (be that integers or floating point numbers), or even bigger data types. However, due to the costly nature of FHE computations (see [the limits of FHE](fhe_and_framework_limits.md)), using such types with FHE is impractical (or plain impossible) if computations are to execute in a reasonable amount of time.

## The gist of quantization

The basic idea of quantization is to take a range of values represented by a _large_ data type and represent it with a _smaller_ data type. This means some accuracy in the number's representation is lost, but in a lot of cases it is possible to adapt computations so that they still give meaningful results while using significantly fewer bits for the data involved.

## Quantization in practice

Let's first define some notation. Let $ [\alpha, \beta ] $ be the range of our value to quantize, where $ \alpha $ is the minimum and $ \beta $ is the maximum.

To quantize a range of floating point values (in $ \mathbb{R} $) to unsigned integer values (in $ \mathbb{N} $), we first need to choose the data type that is going to be used. **Concrete Library**, the library used in **Concrete Numpy**, is currently limited to 7-bit unsigned integers, so we'll use that for the example. Knowing that, for a value in the range $ [\alpha, \beta ] $, we can compute the `scale` $ S $ of the quantization:

$$ S = \frac{\beta - \alpha}{2^n - 1} $$

where $ n $ is the number of bits (here 7). In practice, the quantization scale is then $ S = \frac{\beta - \alpha}{127} $. This means the gap between consecutive representable values cannot be smaller than $ S $, so there can be a substantial loss of precision. Every interval of length $ S $ will be represented by a value within the range $ [0..127] $.

The other important parameter of this quantization scheme is the `zero point` $ Z $. This essentially maps the floating point value 0 to a specific integer. Doing this allows us to have an asymmetric quantization where the resulting integer lies in the unsigned integer realm, $ \mathbb{N} $:

$$ Z = \mathtt{round} \left(- \frac{\alpha}{S} \right) $$
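
A minimal numpy sketch of these two formulas, assuming the 7-bit setting described above (the function names here are illustrative, not part of the library API):

```python
import numpy

def quantization_parameters(alpha: float, beta: float, n_bits: int = 7):
    """Compute the scale S and zero point Z for the range [alpha, beta]."""
    scale = (beta - alpha) / (2**n_bits - 1)
    zero_point = int(numpy.round(-alpha / scale))
    return scale, zero_point

def quantize(values, scale, zero_point, n_bits: int = 7):
    """Map floating point values to unsigned integers in [0, 2**n_bits - 1]."""
    qvalues = numpy.round(values / scale) + zero_point
    return numpy.clip(qvalues, 0, 2**n_bits - 1).astype(numpy.int64)

# Example over the range [-2, 2]
s, z = quantization_parameters(-2.0, 2.0)      # S ~= 0.0315, Z == 64
quantize(numpy.array([-2.0, 0.0, 2.0]), s, z)  # -> array([  0,  64, 127])
```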

There is more mathematics involved in how computations change when replacing floating point values by integers for a fully connected or a convolution layer. The IntelLabs distiller quantization documentation goes into a [detailed explanation](https://intellabs.github.io/distiller/algo_quantization.html) of the maths used to quantize values and how to keep computations consistent.

Regarding quantization and FHE compilation, it is important to understand the difference between two modes:

1. the quantization is done before the compilation; notably, the quantization is completely controlled by the user, and can be done by any means, including by using third party frameworks
2. the quantization is done during the compilation (inside our framework), with much less control by the user.

For the moment, only the second method is available in **Concrete Numpy**, but we plan to make the first method available in a further release, since it should give more freedom and better results to the user.

We detail the use of quantization within **Concrete Numpy** [here](../howto/use_quantization.md).

## Resources

- IntelLabs distiller explanation of quantization: [Distiller documentation](https://intellabs.github.io/distiller/algo_quantization.html)
- Lei Mao's blog on quantization: [Quantization for Neural Networks](https://leimao.github.io/article/Neural-Networks-Quantization/)
- Google paper on Neural Network quantization and integer-only inference: [Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference](https://arxiv.org/abs/1712.05877)
@@ -1,67 +0,0 @@
# Compiling a Torch Model

**Concrete Numpy** allows you to compile a torch model to its FHE counterpart.

A simple command can compile a torch model to its FHE counterpart. This process executes most of the concepts described in the documentation on [how to use quantization](use_quantization.md) and triggers the compilation to be able to run the model over homomorphically encrypted data.

```python
from torch import nn
import torch

class LogisticRegression(nn.Module):
    """LogisticRegression with Torch"""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(in_features=14, out_features=1)
        self.sigmoid1 = nn.Sigmoid()

    def forward(self, x):
        """Forward pass."""
        out = self.fc1(x)
        out = self.sigmoid1(out)
        return out

torch_model = LogisticRegression()
```

```{warning}
Note that the architecture of the neural network passed to be compiled must respect some hard constraints given by FHE. Please read our [detailed documentation](../howto/reduce_needed_precision.md) on these limitations.
```

Once your model is trained, you can simply call the `compile_torch_model` function to execute the compilation.

<!--pytest-codeblocks:cont-->
```python
from concrete.torch.compile import compile_torch_model
import numpy

torch_input = torch.randn(100, 14)
quantized_numpy_module = compile_torch_model(
    torch_model,  # our model
    torch_input,  # a representative inputset to be used for both quantization and compilation
    n_bits=2,
)
```

You can then call `quantized_numpy_module.forward_fhe.run()` to run the FHE inference.

Now your model is ready to infer in FHE settings.

<!--pytest-codeblocks:cont-->
```python
enc_x = numpy.array([numpy.random.randn(14)]).astype(numpy.uint8)  # An example that is going to be encrypted, and used for homomorphic inference.
fhe_prediction = quantized_numpy_module.forward_fhe.run(enc_x)
```

`fhe_prediction` contains the clear quantized output. The user can now dequantize the output to get the actual floating point prediction as follows:

<!--pytest-codeblocks:cont-->
```python
clear_output = quantized_numpy_module.dequantize_output(
    numpy.array(fhe_prediction, dtype=numpy.float32)
)
```

If you want to see more compilation examples, you can check out the [Fully Connected Neural Network](../advanced_examples/FullyConnectedNeuralNetwork.ipynb).
@@ -6,8 +6,6 @@ How To

    numpy_support.md
    printing_and_drawing.md
    compiling_torch_model.md
    use_quantization.md
    reduce_needed_precision.md
    debug_support_submit_issues.md
    faq.md
@@ -61,16 +61,10 @@ The input contains 28x28x8 = 6272 bits of information. In practice you could sti

This shows how adapting your data can allow you to use models that may require smaller data types (i.e. use less precision) to perform their computations.

```{note}
Binarizing here is an extreme case of quantization, which is introduced [here](../explanation/quantization.md). You can also find further resources on the linked page.
```
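
As a hedged illustration of that extreme case (the threshold is an arbitrary example, not something prescribed by the framework):

```python
import numpy

# 1-bit quantization: every pixel intensity collapses to 0 or 1
x = numpy.random.uniform(0, 1, size=(28, 28))
x_bin = (x > 0.5).astype(numpy.int64)
```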

### Model accuracy when quantizing for FHE

Quantization and binarization increase inference speed, reduce model byte-size, and are required to run computations in FHE. However, quantization and, especially, binarization induce a loss in the accuracy of the model, since its representation power is diminished. Choosing quantization parameters carefully can alleviate the accuracy loss while still allowing compilation to FHE.

This is illustrated in both the advanced examples [Linear Regression](../advanced_examples/LinearRegression.ipynb) and [Logistic Regression](../advanced_examples/LogisticRegression.ipynb).

The end result has a granularity/imprecision linked to the data types used and, for the Quantized Logistic Regression, to the lattice used to evaluate the logistic model.

## Limitations for FHE-friendly neural networks
@@ -1,159 +0,0 @@
# Using Quantization in **Concrete Numpy**

In this section we detail some usage of [quantization](../explanation/quantization.md) as implemented in **Concrete**.

## Quantization Basics

**Concrete Numpy** implements some basic concepts of quantization. Its most basic purpose is to convert floating point values to integers. We can apply such a conversion using `QuantizedArray`, available in `concrete.quantization`.

`QuantizedArray` takes 2 arguments:
- `n_bits`, which defines the precision of the quantization. Currently, `n_bits` is limited to 7, due to some **Concrete Library** limits.
- `values`, which will be converted to integers.

```python
from concrete.quantization import QuantizedArray
import numpy
numpy.random.seed(0)
A = numpy.random.uniform(-2, 2, 10)
# array([ 0.19525402,  0.86075747,  0.4110535,   0.17953273, -0.3053808,
#         0.58357645, -0.24965115,  1.567092,    1.85465104, -0.46623392])
q_A = QuantizedArray(7, A)
q_A.qvalues
# array([ 37,  73,  48,  36,   9,
#         58,  12, 112, 127,   0])
# The quantized integer values from A.
q_A.scale
# 0.018274684777173276, the scale S.
q_A.zero_point
# 26, the zero point Z.
q_A.dequant()
# array([ 0.20102153,  0.85891018,  0.40204307,  0.18274685, -0.31066964,
#         0.58478991, -0.25584559,  1.57162289,  1.84574316, -0.4751418 ])
# The dequantized values.
```

## Neural networks in the Quantized Realm

Neural networks are implemented with a diverse set of operations, such as convolution, linear transformations, activation functions and element-wise operations. When working with quantized values, these operations cannot be carried out the same way as for floating point values. With quantization, it is necessary to re-scale the input and output values of each operation to fit in the quantization domain.

Re-scaling raw input values to the quantized domain implies that we need to make use of floating point operations. In the FHE setting, where we only work with integers, this could be a problem, but luckily the FHE implementation behind **Concrete Numpy** provides a workaround. We essentially make use of a [table lookup](../tutorial/table_lookup.md), which is later translated into a [PBS](https://whitepaper.zama.ai).

Of course, having a PBS for every quantized addition isn't recommended for computational cost reasons. Also, **Concrete Numpy** allows PBS only for univariate operations (i.e. matrix multiplication can't be done in a PBS). Therefore, our quantized modules split the computation between floating point values and unsigned integers, as is currently done in `concrete.quantization.QuantizedLinear`.

The above operations are all implemented in **Concrete Numpy** and transparent to the user via our Quantized Modules; a rough sketch of the underlying arithmetic follows.
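
To make that split concrete, here is a hedged numpy sketch of an affine-quantized linear layer in the spirit of the integer-arithmetic-only inference literature; the function and parameter names are illustrative, and this is not the actual `concrete.quantization.QuantizedLinear` implementation:

```python
import numpy

def quantized_linear_sketch(q_x, z_x, s_x, q_w, z_w, s_w, q_b, s_out, z_out, n_bits=7):
    """Illustrative affine-quantized linear layer, not the real QuantizedLinear."""
    # Integer-only part: centered integer matmul (the FHE-friendly piece).
    # q_b is assumed quantized with scale s_x * s_w and zero point 0.
    acc = (q_x - z_x) @ (q_w - z_w) + q_b
    # Float re-scaling part: univariate, so in FHE it can be folded into a
    # table lookup (PBS) instead of being computed with real floats.
    m = (s_x * s_w) / s_out
    q_y = numpy.round(m * acc) + z_out
    return numpy.clip(q_y, 0, 2**n_bits - 1).astype(numpy.int64)
```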

**Concrete Numpy** allows you to convert numpy operations to their FHE counterparts. This essentially opens the door to any Python computing framework, such as [PyTorch](https://pytorch.org/). **Concrete Numpy** implements a Torch-to-Numpy converter that makes it easy for the user to use a torch model.

First we define a model:

<!--pytest-codeblocks:cont-->
```python
from torch import nn
import torch

class LogisticRegression(nn.Module):
    """LogisticRegression with Torch"""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(in_features=14, out_features=1)
        self.sigmoid1 = nn.Sigmoid()

    def forward(self, x):
        """Forward pass."""
        out = self.fc1(x)
        out = self.sigmoid1(out)
        return out

torch_model = LogisticRegression()
```

We then convert this model to numpy-only operations:

<!--pytest-codeblocks:cont-->
```python
from concrete.torch import NumpyModule
numpy_model = NumpyModule(torch_model)
```

The `NumpyModule` allows us to run inference just as for an `nn.Module`. Here, the prediction of the numpy module should be exactly the same.

We can then quantize the numpy module with `PostTrainingAffineQuantization` as follows:

<!--pytest-codeblocks:cont-->
```python
from concrete.quantization import PostTrainingAffineQuantization
numpy_input = numpy.random.uniform(-1, 1, size=(10, 14))  # some input with 14 features to calibrate the quantization
n_bits = 2  # number of bits of precision for the weights, activations, inputs and outputs
post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_model)
quantized_numpy_module = post_training_quant.quantize_module(numpy_input)
```

Here, the quantized model takes a quantized array and runs inference in the quantized paradigm.

We can then easily verify that all models give similar predictions. Obviously, the `n_bits` chosen may adversely affect the prediction of the `quantized_numpy_module`. You can try increasing this parameter to see the effect on your model, but keep in mind that the compilation will require all the values of your network to fit in less than 7 bits of precision.

<!--pytest-codeblocks:cont-->
```python
torch_model(torch.from_numpy(numpy_input).float())
# tensor([[-0.0690],
#         [-0.1108],
#         [-0.0743],
#         [-0.0464],
#         [ 0.0261],
#         [-0.1380],
#         [-0.0941],
#         [-0.1589],
#         [ 0.0374],
#         [-0.1088]])
numpy_model(numpy_input)
# array([[-0.06901879],
#        [-0.11081327],
#        [-0.07429631],
#        [-0.04636377],
#        [ 0.02613242],
#        [-0.13795333],
#        [-0.09408965],
#        [-0.15885062],
#        [ 0.03735061],
#        [-0.10878125]])
quantized_numpy_module.forward_and_dequant(QuantizedArray(2, numpy_input))
# array([[-0.03792994],
#        [-0.15551274],
#        [-0.03792994],
#        [ 0.08154936],
#        [ 0.08154936],
#        [-0.15551274],
#        [-0.03792994],
#        [-0.15551274],
#        [ 0.08154936],
#        [-0.15551274]])
```

```{warning}
The current implementation of the framework parses the layers in the order of their definition in the nn.Module. Thus, the order of instantiation of the layers in the constructor (the init function) is crucial for the conversion to numpy to work properly.
```

```{warning}
Do not reuse a layer or an activation multiple times in the forward (i.e. self.sigmoid for each layer activation), and always place them at the correct position (the order of appearance in the forward function) in the init function.
```

It is now possible to compile the `quantized_numpy_module`. Details on how to compile the model are available in the [torch compilation documentation](compiling_torch_model.md).

## Building your own QuantizedModule

**Concrete Numpy** also offers the possibility to build your own models and use them in the FHE setting. The `QuantizedModule` is a very simple abstraction that allows creating any model using the available operators (a purely illustrative composition sketch follows the list):

- QuantizedSigmoid, the quantized version of `nn.Sigmoid`
- QuantizedLinear, the quantized version of `nn.Linear`
- QuantizedReLU6, the quantized version of `nn.ReLU6`
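
As a purely illustrative sketch of composing these operators (every constructor signature below is an assumption, loudly so; consult the Linear Regression example for the real API):

```python
# Purely illustrative composition; the constructor signatures below are
# assumptions, not the actual API -- see the Linear Regression example.
import numpy

from concrete.quantization import (  # QuantizedSigmoid import path assumed
    QuantizedArray,
    QuantizedLinear,
    QuantizedModule,
    QuantizedSigmoid,
)

n_bits = 2
q_weights = QuantizedArray(n_bits, numpy.random.uniform(-1, 1, size=(14, 1)))
q_bias = QuantizedArray(n_bits, numpy.zeros(1))

layers = {
    "fc1": QuantizedLinear(n_bits, q_weights, q_bias),  # assumed signature
    "sigmoid1": QuantizedSigmoid(n_bits),               # assumed signature
}
quantized_module = QuantizedModule(layers)              # assumed signature
```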

A well detailed example is available for a [Linear Regression](../advanced_examples/LinearRegression.ipynb).

## Future releases

Currently, quantization is only available via `PostTrainingAffineQuantization`, which is a [popular](https://arxiv.org/pdf/1712.05877.pdf) approach to quantization but has some constraints.

In future releases, we plan to offer the user the possibility to apply quantization beforehand and convert the model directly to our `QuantizedModule`. This will allow users to take advantage of Quantization Aware Training (QAT), which allows neural networks to reach better accuracies.
@@ -7,5 +7,4 @@ User guide
    Getting started <basics/index>
    Tutorial <tutorial/index>
    How To <howto/index>
    Advanced examples <advanced_examples/index>
    Explanations <explanation/index>
poetry.lock (generated)
@@ -58,6 +58,20 @@ lazy-object-proxy = ">=1.4.0"
|
||||
typing-extensions = {version = ">=3.10", markers = "python_version < \"3.10\""}
|
||||
wrapt = ">=1.11,<1.14"
|
||||
|
||||
[[package]]
|
||||
name = "asttokens"
|
||||
version = "2.0.5"
|
||||
description = "Annotate AST trees with source code positions"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[package.dependencies]
|
||||
six = "*"
|
||||
|
||||
[package.extras]
|
||||
test = ["astroid", "pytest"]
|
||||
|
||||
[[package]]
|
||||
name = "atomicwrites"
|
||||
version = "1.4.0"
|
||||
@@ -159,6 +173,7 @@ optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
lockfile = {version = ">=0.9", optional = true, markers = "extra == \"filecache\""}
|
||||
msgpack = ">=0.5.2"
|
||||
requests = "*"
|
||||
|
||||
@@ -355,6 +370,14 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
[package.extras]
|
||||
testing = ["pre-commit"]
|
||||
|
||||
[[package]]
|
||||
name = "executing"
|
||||
version = "0.8.2"
|
||||
description = "Get the currently executing AST node of a frame, and other information"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "flake8"
|
||||
version = "4.0.1"
|
||||
@@ -462,7 +485,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
|
||||
|
||||
[[package]]
|
||||
name = "importlib-metadata"
|
||||
version = "4.10.0"
|
||||
version = "4.10.1"
|
||||
description = "Read metadata from Python packages"
|
||||
category = "dev"
|
||||
optional = false
|
||||
@@ -509,7 +532,7 @@ python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "ipykernel"
|
||||
version = "6.6.1"
|
||||
version = "6.7.0"
|
||||
description = "IPython Kernel for Jupyter"
|
||||
category = "dev"
|
||||
optional = false
|
||||
@@ -530,15 +553,16 @@ test = ["pytest (!=5.3.4)", "pytest-cov", "flaky", "ipyparallel"]
|
||||
|
||||
[[package]]
|
||||
name = "ipython"
|
||||
version = "7.31.0"
|
||||
version = "8.0.0"
|
||||
description = "IPython: Productive Interactive Computing"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
python-versions = ">=3.8"
|
||||
|
||||
[package.dependencies]
|
||||
appnope = {version = "*", markers = "sys_platform == \"darwin\""}
|
||||
backcall = "*"
|
||||
black = "*"
|
||||
colorama = {version = "*", markers = "sys_platform == \"win32\""}
|
||||
decorator = "*"
|
||||
jedi = ">=0.16"
|
||||
@@ -547,10 +571,11 @@ pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
|
||||
pickleshare = "*"
|
||||
prompt-toolkit = ">=2.0.0,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.1.0"
|
||||
pygments = "*"
|
||||
traitlets = ">=4.2"
|
||||
stack-data = "*"
|
||||
traitlets = ">=5"
|
||||
|
||||
[package.extras]
|
||||
all = ["Sphinx (>=1.3)", "ipykernel", "ipyparallel", "ipywidgets", "nbconvert", "nbformat", "nose (>=0.10.1)", "notebook", "numpy (>=1.17)", "pygments", "qtconsole", "requests", "testpath"]
|
||||
all = ["Sphinx (>=1.3)", "curio", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.19)", "pandas", "pygments", "pytest", "pytest-asyncio", "qtconsole", "testpath", "trio"]
|
||||
doc = ["Sphinx (>=1.3)"]
|
||||
kernel = ["ipykernel"]
|
||||
nbconvert = ["nbconvert"]
|
||||
@@ -558,7 +583,8 @@ nbformat = ["nbformat"]
|
||||
notebook = ["notebook", "ipywidgets"]
|
||||
parallel = ["ipyparallel"]
|
||||
qtconsole = ["qtconsole"]
|
||||
test = ["nose (>=0.10.1)", "requests", "testpath", "pygments", "nbformat", "ipykernel", "numpy (>=1.17)"]
|
||||
test = ["pytest", "pytest-asyncio", "testpath", "pygments"]
|
||||
test_extra = ["pytest", "testpath", "curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.19)", "pandas", "pygments", "trio"]
|
||||
|
||||
[[package]]
|
||||
name = "ipython-genutils"
|
||||
@@ -643,17 +669,9 @@ MarkupSafe = ">=2.0"
|
||||
[package.extras]
|
||||
i18n = ["Babel (>=2.7)"]
|
||||
|
||||
[[package]]
|
||||
name = "joblib"
|
||||
version = "1.1.0"
|
||||
description = "Lightweight pipelining with Python functions"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[[package]]
|
||||
name = "jsonschema"
|
||||
version = "4.3.3"
|
||||
version = "4.4.0"
|
||||
description = "An implementation of JSON Schema validation for Python"
|
||||
category = "dev"
|
||||
optional = false
|
||||
@@ -686,7 +704,7 @@ qtconsole = "*"
|
||||
|
||||
[[package]]
|
||||
name = "jupyter-client"
|
||||
version = "7.1.0"
|
||||
version = "7.1.1"
|
||||
description = "Jupyter protocol implementation and client libraries"
|
||||
category = "dev"
|
||||
optional = false
|
||||
@@ -957,11 +975,11 @@ testing = ["beautifulsoup4", "coverage", "docutils (>=0.17.0,<0.18.0)", "pytest
|
||||
|
||||
[[package]]
|
||||
name = "nbclient"
|
||||
version = "0.5.9"
|
||||
version = "0.5.10"
|
||||
description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor."
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6.1"
|
||||
python-versions = ">=3.7.0"
|
||||
|
||||
[package.dependencies]
|
||||
jupyter-client = ">=6.1.5"
|
||||
@@ -970,9 +988,8 @@ nest-asyncio = "*"
|
||||
traitlets = ">=4.2"
|
||||
|
||||
[package.extras]
|
||||
dev = ["codecov", "coverage", "ipython", "ipykernel", "ipywidgets", "pytest (>=4.1)", "pytest-cov (>=2.6.1)", "check-manifest", "flake8", "mypy", "tox", "xmltodict", "pip (>=18.1)", "wheel (>=0.31.0)", "setuptools (>=38.6.0)", "twine (>=1.11.0)", "black"]
|
||||
sphinx = ["Sphinx (>=1.7)", "sphinx-book-theme", "mock", "moto", "myst-parser"]
|
||||
test = ["codecov", "coverage", "ipython", "ipykernel", "ipywidgets", "pytest (>=4.1)", "pytest-cov (>=2.6.1)", "check-manifest", "flake8", "mypy", "tox", "xmltodict", "pip (>=18.1)", "wheel (>=0.31.0)", "setuptools (>=38.6.0)", "twine (>=1.11.0)", "black"]
|
||||
test = ["ipython", "ipykernel", "ipywidgets (<8.0.0)", "pytest (>=4.1)", "pytest-cov (>=2.6.1)", "check-manifest", "flake8", "mypy", "xmltodict", "black", "pip (>=18.1)", "wheel (>=0.31.0)", "setuptools (>=38.6.0)", "twine (>=1.11.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "nbconvert"
|
||||
@@ -1079,7 +1096,7 @@ test = ["pytest (>=6.2)", "pytest-cov (>=2.12)", "codecov (>=2.1)"]
|
||||
|
||||
[[package]]
|
||||
name = "notebook"
|
||||
version = "6.4.6"
|
||||
version = "6.4.7"
|
||||
description = "A web-based notebook environment for interactive computing"
|
||||
category = "dev"
|
||||
optional = false
|
||||
@@ -1109,7 +1126,7 @@ test = ["pytest", "coverage", "requests", "nbval", "selenium", "pytest-cov", "re
|
||||
|
||||
[[package]]
|
||||
name = "numpy"
|
||||
version = "1.22.0"
|
||||
version = "1.22.1"
|
||||
description = "NumPy is the fundamental package for array computing with Python."
|
||||
category = "main"
|
||||
optional = false
|
||||
@@ -1137,26 +1154,6 @@ python-versions = ">=3.6"
|
||||
[package.dependencies]
|
||||
pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
|
||||
|
||||
[[package]]
|
||||
name = "pandas"
|
||||
version = "1.3.5"
|
||||
description = "Powerful data structures for data analysis, time series, and statistics"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7.1"
|
||||
|
||||
[package.dependencies]
|
||||
numpy = [
|
||||
{version = ">=1.17.3", markers = "platform_machine != \"aarch64\" and platform_machine != \"arm64\" and python_version < \"3.10\""},
|
||||
{version = ">=1.19.2", markers = "platform_machine == \"aarch64\" and python_version < \"3.10\""},
|
||||
{version = ">=1.20.0", markers = "platform_machine == \"arm64\" and python_version < \"3.10\""},
|
||||
]
|
||||
python-dateutil = ">=2.7.3"
|
||||
pytz = ">=2017.3"
|
||||
|
||||
[package.extras]
|
||||
test = ["hypothesis (>=3.58)", "pytest (>=6.0)", "pytest-xdist"]
|
||||
|
||||
[[package]]
|
||||
name = "pandocfilters"
|
||||
version = "1.5.0"
|
||||
@@ -1222,17 +1219,16 @@ python-versions = ">=3.6"
|
||||
|
||||
[[package]]
|
||||
name = "pip-audit"
|
||||
version = "1.1.1"
|
||||
version = "1.1.2"
|
||||
description = "A tool for scanning Python environments for known vulnerabilities"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[package.dependencies]
|
||||
CacheControl = ">=0.12.10"
|
||||
cyclonedx-python-lib = ">=0.11.1"
|
||||
CacheControl = {version = ">=0.12.10", extras = ["filecache"]}
|
||||
cyclonedx-python-lib = ">=0.11.1,<1.0.0"
|
||||
html5lib = ">=1.1"
|
||||
lockfile = ">=0.12.2"
|
||||
packaging = ">=21.0.0"
|
||||
pip-api = ">=0.0.26"
|
||||
progress = ">=1.6"
|
||||
@@ -1347,6 +1343,17 @@ category = "dev"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "pure-eval"
|
||||
version = "0.2.1"
|
||||
description = "Safely evaluate AST nodes without side effects"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[package.extras]
|
||||
tests = ["pytest"]
|
||||
|
||||
[[package]]
|
||||
name = "py"
|
||||
version = "1.11.0"
|
||||
@@ -1365,7 +1372,7 @@ python-versions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "py-progress-tracker"
|
||||
version = "0.3.3"
|
||||
version = "0.4.0"
|
||||
description = "A simple benchmarking library"
|
||||
category = "dev"
|
||||
optional = false
|
||||
@@ -1488,11 +1495,11 @@ diagrams = ["jinja2", "railroad-diagrams"]
|
||||
|
||||
[[package]]
|
||||
name = "pyrsistent"
|
||||
version = "0.18.0"
|
||||
version = "0.18.1"
|
||||
description = "Persistent/Functional/Immutable data structures"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[[package]]
|
||||
name = "pytest"
|
||||
@@ -1805,37 +1812,6 @@ python-versions = ">=3.7"
|
||||
[package.extras]
|
||||
idna2008 = ["idna"]
|
||||
|
||||
[[package]]
|
||||
name = "scikit-learn"
|
||||
version = "1.0.2"
|
||||
description = "A set of python modules for machine learning and data mining"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
|
||||
[package.dependencies]
|
||||
joblib = ">=0.11"
|
||||
numpy = ">=1.14.6"
|
||||
scipy = ">=1.1.0"
|
||||
threadpoolctl = ">=2.0.0"
|
||||
|
||||
[package.extras]
|
||||
benchmark = ["matplotlib (>=2.2.3)", "pandas (>=0.25.0)", "memory-profiler (>=0.57.0)"]
|
||||
docs = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)", "memory-profiler (>=0.57.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "numpydoc (>=1.0.0)", "Pillow (>=7.1.2)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
|
||||
examples = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)"]
|
||||
tests = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "pytest (>=5.0.1)", "pytest-cov (>=2.9.0)", "flake8 (>=3.8.2)", "black (>=21.6b0)", "mypy (>=0.770)", "pyamg (>=4.0.0)"]
|
||||
|
||||
[[package]]
|
||||
name = "scipy"
|
||||
version = "1.7.3"
|
||||
description = "SciPy: Scientific Library for Python"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.7,<3.11"
|
||||
|
||||
[package.dependencies]
|
||||
numpy = ">=1.16.5,<1.23.0"
|
||||
|
||||
[[package]]
|
||||
name = "secretstorage"
|
||||
version = "3.3.1"
|
||||
@@ -1871,7 +1847,7 @@ win32 = ["pywin32"]
|
||||
|
||||
[[package]]
|
||||
name = "setuptools-scm"
|
||||
version = "6.3.2"
|
||||
version = "6.4.1"
|
||||
description = "the blessed package to manage your versions by scm tags"
|
||||
category = "main"
|
||||
optional = false
|
||||
@@ -1882,7 +1858,8 @@ packaging = ">=20.0"
|
||||
tomli = ">=1.0.0"
|
||||
|
||||
[package.extras]
|
||||
toml = ["setuptools (>=42)", "tomli (>=1.0.0)"]
|
||||
test = ["pytest (>=6.2)", "virtualenv (>20)"]
|
||||
toml = ["setuptools (>=42)"]
|
||||
|
||||
[[package]]
|
||||
name = "six"
|
||||
@@ -2048,6 +2025,22 @@ python-versions = ">=3.5"
|
||||
lint = ["flake8", "mypy", "docutils-stubs"]
|
||||
test = ["pytest"]
|
||||
|
||||
[[package]]
|
||||
name = "stack-data"
|
||||
version = "0.1.4"
|
||||
description = "Extract data from python stack frames and tracebacks for informative displays"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
|
||||
[package.dependencies]
|
||||
asttokens = "*"
|
||||
executing = "*"
|
||||
pure-eval = "*"
|
||||
|
||||
[package.extras]
|
||||
tests = ["pytest", "typeguard", "pygments", "littleutils"]
|
||||
|
||||
[[package]]
|
||||
name = "tabulate"
|
||||
version = "0.8.9"
|
||||
@@ -2094,14 +2087,6 @@ python-versions = ">= 3.5"
|
||||
[package.extras]
|
||||
test = ["pytest", "pathlib2"]
|
||||
|
||||
[[package]]
|
||||
name = "threadpoolctl"
|
||||
version = "3.0.0"
|
||||
description = "threadpoolctl"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.10.2"
|
||||
@@ -2126,17 +2111,6 @@ category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
|
||||
|
||||
[[package]]
|
||||
name = "torch"
|
||||
version = "1.10.1"
|
||||
description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
|
||||
category = "main"
|
||||
optional = false
|
||||
python-versions = ">=3.6.2"
|
||||
|
||||
[package.dependencies]
|
||||
typing-extensions = "*"
|
||||
|
||||
[[package]]
|
||||
name = "tornado"
|
||||
version = "6.1"
|
||||
@@ -2211,7 +2185,7 @@ python-versions = "*"
|
||||
name = "typing-extensions"
|
||||
version = "4.0.1"
|
||||
description = "Backported and Experimental Type Hints for Python 3.6+"
|
||||
category = "main"
|
||||
category = "dev"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
|
||||
@@ -2293,7 +2267,7 @@ full = ["pygraphviz"]
|
||||
[metadata]
|
||||
lock-version = "1.1"
|
||||
python-versions = ">=3.8,<3.10"
|
||||
content-hash = "172f6d91b9fb861532cdadd18d926808b8aff097788f169ab76b3a74d7753544"
|
||||
content-hash = "70a6a696538026ddba976c0d8dcb191ce48d42a2c822b0e6a9cb1778b9b9c03f"
|
||||
|
||||
[metadata.files]
|
||||
alabaster = [
|
||||
@@ -2335,6 +2309,10 @@ astroid = [
|
||||
{file = "astroid-2.8.6-py3-none-any.whl", hash = "sha256:cd8326b424c971e7d87678609cf6275d22028afd37d6ac59c16d47f1245882f6"},
|
||||
{file = "astroid-2.8.6.tar.gz", hash = "sha256:5f6f75e45f15290e73b56f9dfde95b4bf96382284cde406ef4203e928335a495"},
|
||||
]
|
||||
asttokens = [
|
||||
{file = "asttokens-2.0.5-py2.py3-none-any.whl", hash = "sha256:0844691e88552595a6f4a4281a9f7f79b8dd45ca4ccea82e5e05b4bbdb76705c"},
|
||||
{file = "asttokens-2.0.5.tar.gz", hash = "sha256:9a54c114f02c7a9480d56550932546a3f1fe71d8a02f1bc7ccd0ee3ee35cf4d5"},
|
||||
]
|
||||
atomicwrites = [
|
||||
{file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"},
|
||||
{file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"},
|
||||
@@ -2572,6 +2550,10 @@ execnet = [
|
||||
{file = "execnet-1.9.0-py2.py3-none-any.whl", hash = "sha256:a295f7cc774947aac58dde7fdc85f4aa00c42adf5d8f5468fc630c1acf30a142"},
|
||||
{file = "execnet-1.9.0.tar.gz", hash = "sha256:8f694f3ba9cc92cab508b152dcfe322153975c29bda272e2fd7f3f00f36e47c5"},
|
||||
]
|
||||
executing = [
|
||||
{file = "executing-0.8.2-py2.py3-none-any.whl", hash = "sha256:32fc6077b103bd19e6494a72682d66d5763cf20a106d5aa7c5ccbea4e47b0df7"},
|
||||
{file = "executing-0.8.2.tar.gz", hash = "sha256:c23bf42e9a7b9b212f185b1b2c3c91feb895963378887bb10e64a2e612ec0023"},
|
||||
]
|
||||
flake8 = [
|
||||
{file = "flake8-4.0.1-py2.py3-none-any.whl", hash = "sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d"},
|
||||
{file = "flake8-4.0.1.tar.gz", hash = "sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d"},
|
||||
@@ -2605,8 +2587,8 @@ imagesize = [
|
||||
{file = "imagesize-1.3.0.tar.gz", hash = "sha256:cd1750d452385ca327479d45b64d9c7729ecf0b3969a58148298c77092261f9d"},
|
||||
]
|
||||
importlib-metadata = [
|
||||
{file = "importlib_metadata-4.10.0-py3-none-any.whl", hash = "sha256:b7cf7d3fef75f1e4c80a96ca660efbd51473d7e8f39b5ab9210febc7809012a4"},
|
||||
{file = "importlib_metadata-4.10.0.tar.gz", hash = "sha256:92a8b58ce734b2a4494878e0ecf7d79ccd7a128b5fc6014c401e0b61f006f0f6"},
|
||||
{file = "importlib_metadata-4.10.1-py3-none-any.whl", hash = "sha256:899e2a40a8c4a1aec681feef45733de8a6c58f3f6a0dbed2eb6574b4387a77b6"},
|
||||
{file = "importlib_metadata-4.10.1.tar.gz", hash = "sha256:951f0d8a5b7260e9db5e41d429285b5f451e928479f19d80818878527d36e95e"},
|
||||
]
|
||||
importlib-resources = [
|
||||
{file = "importlib_resources-5.4.0-py3-none-any.whl", hash = "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45"},
|
||||
@@ -2622,12 +2604,12 @@ invoke = [
|
||||
{file = "invoke-1.6.0.tar.gz", hash = "sha256:374d1e2ecf78981da94bfaf95366216aaec27c2d6a7b7d5818d92da55aa258d3"},
|
||||
]
|
||||
ipykernel = [
|
||||
{file = "ipykernel-6.6.1-py3-none-any.whl", hash = "sha256:de99f6c1caa72578305cc96122ee3a19669e9c1958694a2b564ed1be28240ab9"},
|
||||
{file = "ipykernel-6.6.1.tar.gz", hash = "sha256:91ff0058b45660aad4a68088041059c0d378cd53fc8aff60e5abc91bcc049353"},
|
||||
{file = "ipykernel-6.7.0-py3-none-any.whl", hash = "sha256:6203ccd5510ff148e9433fd4a2707c5ce8d688f026427f46e13d7ebf9b3e9787"},
|
||||
{file = "ipykernel-6.7.0.tar.gz", hash = "sha256:d82b904fdc2fd8c7b1fbe0fa481c68a11b4cd4c8ef07e6517da1f10cc3114d24"},
|
||||
]
|
||||
ipython = [
|
||||
{file = "ipython-7.31.0-py3-none-any.whl", hash = "sha256:4c4234cdcc6b8f87c5b5c7af9899aa696ac5cfcf0e9f6d0688018bbee5c73bce"},
|
||||
{file = "ipython-7.31.0.tar.gz", hash = "sha256:346c74db7312c41fa566d3be45d2e759a528dcc2994fe48aac1a03a70cd668a3"},
|
||||
{file = "ipython-8.0.0-py3-none-any.whl", hash = "sha256:5b58cf977635abad74d76be49dbb2e97fddd825fb8503083d55496aa1160b854"},
|
||||
{file = "ipython-8.0.0.tar.gz", hash = "sha256:004a0d05aeecd32adec4841b6e2586d5ca35785b1477db4d8333a39333e0ce98"},
|
||||
]
|
||||
ipython-genutils = [
|
||||
{file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"},
|
||||
@@ -2653,13 +2635,9 @@ jinja2 = [
|
||||
{file = "Jinja2-3.0.3-py3-none-any.whl", hash = "sha256:077ce6014f7b40d03b47d1f1ca4b0fc8328a692bd284016f806ed0eaca390ad8"},
|
||||
{file = "Jinja2-3.0.3.tar.gz", hash = "sha256:611bb273cd68f3b993fabdc4064fc858c5b47a973cb5aa7999ec1ba405c87cd7"},
|
||||
]
|
||||
joblib = [
|
||||
{file = "joblib-1.1.0-py2.py3-none-any.whl", hash = "sha256:f21f109b3c7ff9d95f8387f752d0d9c34a02aa2f7060c2135f465da0e5160ff6"},
|
||||
{file = "joblib-1.1.0.tar.gz", hash = "sha256:4158fcecd13733f8be669be0683b96ebdbbd38d23559f54dca7205aea1bf1e35"},
|
||||
]
|
||||
jsonschema = [
|
||||
{file = "jsonschema-4.3.3-py3-none-any.whl", hash = "sha256:eb7a69801beb7325653aa8fd373abbf9ff8f85b536ab2812e5e8287b522fb6a2"},
|
||||
{file = "jsonschema-4.3.3.tar.gz", hash = "sha256:f210d4ce095ed1e8af635d15c8ee79b586f656ab54399ba87b8ab87e5bff0ade"},
|
||||
{file = "jsonschema-4.4.0-py3-none-any.whl", hash = "sha256:77281a1f71684953ee8b3d488371b162419767973789272434bbc3f29d9c8823"},
|
||||
{file = "jsonschema-4.4.0.tar.gz", hash = "sha256:636694eb41b3535ed608fe04129f26542b59ed99808b4f688aa32dcf55317a83"},
|
||||
]
|
||||
jupyter = [
|
||||
{file = "jupyter-1.0.0-py2.py3-none-any.whl", hash = "sha256:5b290f93b98ffbc21c0c7e749f054b3267782166d72fa5e3ed1ed4eaf34a2b78"},
|
||||
@@ -2667,8 +2645,8 @@ jupyter = [
|
||||
{file = "jupyter-1.0.0.zip", hash = "sha256:3e1f86076bbb7c8c207829390305a2b1fe836d471ed54be66a3b8c41e7f46cc7"},
|
||||
]
|
||||
jupyter-client = [
|
||||
{file = "jupyter_client-7.1.0-py3-none-any.whl", hash = "sha256:64d93752d8cbfba0c1030c3335c3f0d9797cd1efac012652a14aac1653db11a3"},
|
||||
{file = "jupyter_client-7.1.0.tar.gz", hash = "sha256:a5f995a73cffb314ed262713ae6dfce53c6b8216cea9f332071b8ff44a6e1654"},
|
||||
{file = "jupyter_client-7.1.1-py3-none-any.whl", hash = "sha256:f0c576cce235c727e30b0a0da88c2755d0947d0070fa1bc45f195079ffd64e66"},
|
||||
{file = "jupyter_client-7.1.1.tar.gz", hash = "sha256:540ca35e57e83c5ece81abd9b781a57cba39a37c60a2a30c8c1b2f6663544343"},
|
||||
]
|
||||
jupyter-console = [
|
||||
{file = "jupyter_console-6.4.0-py3-none-any.whl", hash = "sha256:7799c4ea951e0e96ba8260575423cb323ea5a03fcf5503560fa3e15748869e27"},
|
||||
@@ -2978,8 +2956,8 @@ myst-parser = [
|
||||
{file = "myst_parser-0.15.2-py3-none-any.whl", hash = "sha256:40124b6f27a4c42ac7f06b385e23a9dcd03d84801e9c7130b59b3729a554b1f9"},
|
||||
]
|
||||
nbclient = [
|
||||
{file = "nbclient-0.5.9-py3-none-any.whl", hash = "sha256:8a307be4129cce5f70eb83a57c3edbe45656623c31de54e38bb6fdfbadc428b3"},
|
||||
{file = "nbclient-0.5.9.tar.gz", hash = "sha256:99e46ddafacd0b861293bf246fed8540a184adfa3aa7d641f89031ec070701e0"},
|
||||
{file = "nbclient-0.5.10-py3-none-any.whl", hash = "sha256:5b582e21c8b464e6676a9d60acc6871d7fbc3b080f74bef265a9f90411b31f6f"},
|
||||
{file = "nbclient-0.5.10.tar.gz", hash = "sha256:b5fdea88d6fa52ca38de6c2361401cfe7aaa7cd24c74effc5e489cec04d79088"},
|
||||
]
|
||||
nbconvert = [
|
||||
{file = "nbconvert-6.4.0-py3-none-any.whl", hash = "sha256:f5ec6a1fad9e3aa2bee7c6a1c4ad3e0fafaa7ff64f29ba56d9da7e1669f8521c"},
|
||||
@@ -3006,32 +2984,32 @@ networkx = [
|
||||
{file = "networkx-2.6.3.tar.gz", hash = "sha256:c0946ed31d71f1b732b5aaa6da5a0388a345019af232ce2f49c766e2d6795c51"},
|
||||
]
|
||||
notebook = [
|
||||
{file = "notebook-6.4.6-py3-none-any.whl", hash = "sha256:5cad068fa82cd4fb98d341c052100ed50cd69fbfb4118cb9b8ab5a346ef27551"},
|
||||
{file = "notebook-6.4.6.tar.gz", hash = "sha256:7bcdf79bd1cda534735bd9830d2cbedab4ee34d8fe1df6e7b946b3aab0902ba3"},
|
||||
{file = "notebook-6.4.7-py3-none-any.whl", hash = "sha256:968e9c09639fe4b9dbf4b9f028daf861b563c124d735a99d6d48c09317553f31"},
|
||||
{file = "notebook-6.4.7.tar.gz", hash = "sha256:b01da66f11a203b3839d6afa4013674bcfff41c36552f9ad0fbcb2d93c92764a"},
|
||||
]
|
||||
numpy = [
|
||||
{file = "numpy-1.22.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d22662b4b10112c545c91a0741f2436f8ca979ab3d69d03d19322aa970f9695"},
|
||||
{file = "numpy-1.22.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:11a1f3816ea82eed4178102c56281782690ab5993251fdfd75039aad4d20385f"},
|
||||
{file = "numpy-1.22.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5dc65644f75a4c2970f21394ad8bea1a844104f0fe01f278631be1c7eae27226"},
|
||||
{file = "numpy-1.22.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c16cec1c8cf2728f1d539bd55aaa9d6bb48a7de2f41eb944697293ef65a559"},
|
||||
{file = "numpy-1.22.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97e82c39d9856fe7d4f9b86d8a1e66eff99cf3a8b7ba48202f659703d27c46f"},
|
||||
{file = "numpy-1.22.0-cp310-cp310-win_amd64.whl", hash = "sha256:e41e8951749c4b5c9a2dc5fdbc1a4eec6ab2a140fdae9b460b0f557eed870f4d"},
|
||||
{file = "numpy-1.22.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:bece0a4a49e60e472a6d1f70ac6cdea00f9ab80ff01132f96bd970cdd8a9e5a9"},
|
||||
{file = "numpy-1.22.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:818b9be7900e8dc23e013a92779135623476f44a0de58b40c32a15368c01d471"},
|
||||
{file = "numpy-1.22.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47ee7a839f5885bc0c63a74aabb91f6f40d7d7b639253768c4199b37aede7982"},
|
||||
{file = "numpy-1.22.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a024181d7aef0004d76fb3bce2a4c9f2e67a609a9e2a6ff2571d30e9976aa383"},
|
||||
{file = "numpy-1.22.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f71d57cc8645f14816ae249407d309be250ad8de93ef61d9709b45a0ddf4050c"},
|
||||
{file = "numpy-1.22.0-cp38-cp38-win32.whl", hash = "sha256:283d9de87c0133ef98f93dfc09fad3fb382f2a15580de75c02b5bb36a5a159a5"},
|
||||
{file = "numpy-1.22.0-cp38-cp38-win_amd64.whl", hash = "sha256:2762331de395739c91f1abb88041f94a080cb1143aeec791b3b223976228af3f"},
|
||||
{file = "numpy-1.22.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:76ba7c40e80f9dc815c5e896330700fd6e20814e69da9c1267d65a4d051080f1"},
|
||||
{file = "numpy-1.22.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0cfe07133fd00b27edee5e6385e333e9eeb010607e8a46e1cd673f05f8596595"},
|
||||
{file = "numpy-1.22.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6ed0d073a9c54ac40c41a9c2d53fcc3d4d4ed607670b9e7b0de1ba13b4cbfe6f"},
|
||||
{file = "numpy-1.22.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41388e32e40b41dd56eb37fcaa7488b2b47b0adf77c66154d6b89622c110dfe9"},
|
||||
{file = "numpy-1.22.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b55b953a1bdb465f4dc181758570d321db4ac23005f90ffd2b434cc6609a63dd"},
|
||||
{file = "numpy-1.22.0-cp39-cp39-win32.whl", hash = "sha256:5a311ee4d983c487a0ab546708edbdd759393a3dc9cd30305170149fedd23c88"},
|
||||
{file = "numpy-1.22.0-cp39-cp39-win_amd64.whl", hash = "sha256:a97a954a8c2f046d3817c2bce16e3c7e9a9c2afffaf0400f5c16df5172a67c9c"},
|
||||
{file = "numpy-1.22.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb02929b0d6bfab4c48a79bd805bd7419114606947ec8284476167415171f55b"},
|
||||
{file = "numpy-1.22.0.zip", hash = "sha256:a955e4128ac36797aaffd49ab44ec74a71c11d6938df83b1285492d277db5397"},
|
||||
{file = "numpy-1.22.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d62d6b0870b53799204515145935608cdeb4cebb95a26800b6750e48884cc5b"},
|
||||
{file = "numpy-1.22.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:831f2df87bd3afdfc77829bc94bd997a7c212663889d56518359c827d7113b1f"},
|
||||
{file = "numpy-1.22.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8d1563060e77096367952fb44fca595f2b2f477156de389ce7c0ade3aef29e21"},
|
||||
{file = "numpy-1.22.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69958735d5e01f7b38226a6c6e7187d72b7e4d42b6b496aca5860b611ca0c193"},
|
||||
{file = "numpy-1.22.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45a7dfbf9ed8d68fd39763940591db7637cf8817c5bce1a44f7b56c97cbe211e"},
|
||||
{file = "numpy-1.22.1-cp310-cp310-win_amd64.whl", hash = "sha256:7e957ca8112c689b728037cea9c9567c27cf912741fabda9efc2c7d33d29dfa1"},
|
||||
{file = "numpy-1.22.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:800dfeaffb2219d49377da1371d710d7952c9533b57f3d51b15e61c4269a1b5b"},
|
||||
{file = "numpy-1.22.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:65f5e257987601fdfc63f1d02fca4d1c44a2b85b802f03bd6abc2b0b14648dd2"},
|
||||
{file = "numpy-1.22.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:632e062569b0fe05654b15ef0e91a53c0a95d08ffe698b66f6ba0f927ad267c2"},
|
||||
{file = "numpy-1.22.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d245a2bf79188d3f361137608c3cd12ed79076badd743dc660750a9f3074f7c"},
|
||||
{file = "numpy-1.22.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26b4018a19d2ad9606ce9089f3d52206a41b23de5dfe8dc947d2ec49ce45d015"},
|
||||
{file = "numpy-1.22.1-cp38-cp38-win32.whl", hash = "sha256:f8ad59e6e341f38266f1549c7c2ec70ea0e3d1effb62a44e5c3dba41c55f0187"},
|
||||
{file = "numpy-1.22.1-cp38-cp38-win_amd64.whl", hash = "sha256:60f19c61b589d44fbbab8ff126640ae712e163299c2dd422bfe4edc7ec51aa9b"},
|
||||
{file = "numpy-1.22.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2db01d9838a497ba2aa9a87515aeaf458f42351d72d4e7f3b8ddbd1eba9479f2"},
|
||||
{file = "numpy-1.22.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bcd19dab43b852b03868796f533b5f5561e6c0e3048415e675bec8d2e9d286c1"},
|
||||
{file = "numpy-1.22.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78bfbdf809fc236490e7e65715bbd98377b122f329457fffde206299e163e7f3"},
|
||||
{file = "numpy-1.22.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c51124df17f012c3b757380782ae46eee85213a3215e51477e559739f57d9bf6"},
|
||||
{file = "numpy-1.22.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88d54b7b516f0ca38a69590557814de2dd638d7d4ed04864826acaac5ebb8f01"},
|
||||
{file = "numpy-1.22.1-cp39-cp39-win32.whl", hash = "sha256:b5ec9a5eaf391761c61fd873363ef3560a3614e9b4ead17347e4deda4358bca4"},
|
||||
{file = "numpy-1.22.1-cp39-cp39-win_amd64.whl", hash = "sha256:4ac4d7c9f8ea2a79d721ebfcce81705fc3cd61a10b731354f1049eb8c99521e8"},
|
||||
{file = "numpy-1.22.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e60ef82c358ded965fdd3132b5738eade055f48067ac8a5a8ac75acc00cad31f"},
|
||||
{file = "numpy-1.22.1.zip", hash = "sha256:e348ccf5bc5235fc405ab19d53bec215bb373300e5523c7b476cc0da8a5e9973"},
|
||||
]
|
||||
packageurl-python = [
|
||||
{file = "packageurl-python-0.9.6.tar.gz", hash = "sha256:c01fbaf62ad2eb791e97158d1f30349e830bee2dd3e9503a87f6c3ffae8d1cf0"},
|
||||
@@ -3041,33 +3019,6 @@ packaging = [
|
||||
{file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
|
||||
{file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
|
||||
]
|
||||
pandas = [
|
||||
{file = "pandas-1.3.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:62d5b5ce965bae78f12c1c0df0d387899dd4211ec0bdc52822373f13a3a022b9"},
|
||||
{file = "pandas-1.3.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adfeb11be2d54f275142c8ba9bf67acee771b7186a5745249c7d5a06c670136b"},
|
||||
{file = "pandas-1.3.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a8c055d58873ad81cae290d974d13dd479b82cbb975c3e1fa2cf1920715296"},
|
||||
{file = "pandas-1.3.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd541ab09e1f80a2a1760032d665f6e032d8e44055d602d65eeea6e6e85498cb"},
|
||||
{file = "pandas-1.3.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2651d75b9a167cc8cc572cf787ab512d16e316ae00ba81874b560586fa1325e0"},
|
||||
{file = "pandas-1.3.5-cp310-cp310-win_amd64.whl", hash = "sha256:aaf183a615ad790801fa3cf2fa450e5b6d23a54684fe386f7e3208f8b9bfbef6"},
|
||||
{file = "pandas-1.3.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:344295811e67f8200de2390093aeb3c8309f5648951b684d8db7eee7d1c81fb7"},
|
||||
{file = "pandas-1.3.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:552020bf83b7f9033b57cbae65589c01e7ef1544416122da0c79140c93288f56"},
|
||||
{file = "pandas-1.3.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cce0c6bbeb266b0e39e35176ee615ce3585233092f685b6a82362523e59e5b4"},
|
||||
{file = "pandas-1.3.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d28a3c65463fd0d0ba8bbb7696b23073efee0510783340a44b08f5e96ffce0c"},
|
||||
{file = "pandas-1.3.5-cp37-cp37m-win32.whl", hash = "sha256:a62949c626dd0ef7de11de34b44c6475db76995c2064e2d99c6498c3dba7fe58"},
|
||||
{file = "pandas-1.3.5-cp37-cp37m-win_amd64.whl", hash = "sha256:8025750767e138320b15ca16d70d5cdc1886e8f9cc56652d89735c016cd8aea6"},
|
||||
{file = "pandas-1.3.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fe95bae4e2d579812865db2212bb733144e34d0c6785c0685329e5b60fcb85dd"},
|
||||
{file = "pandas-1.3.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f261553a1e9c65b7a310302b9dbac31cf0049a51695c14ebe04e4bfd4a96f02"},
|
||||
{file = "pandas-1.3.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b6dbec5f3e6d5dc80dcfee250e0a2a652b3f28663492f7dab9a24416a48ac39"},
|
||||
{file = "pandas-1.3.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3bc49af96cd6285030a64779de5b3688633a07eb75c124b0747134a63f4c05f"},
|
||||
{file = "pandas-1.3.5-cp38-cp38-win32.whl", hash = "sha256:b6b87b2fb39e6383ca28e2829cddef1d9fc9e27e55ad91ca9c435572cdba51bf"},
|
||||
{file = "pandas-1.3.5-cp38-cp38-win_amd64.whl", hash = "sha256:a395692046fd8ce1edb4c6295c35184ae0c2bbe787ecbe384251da609e27edcb"},
|
||||
{file = "pandas-1.3.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd971a3f08b745a75a86c00b97f3007c2ea175951286cdda6abe543e687e5f2f"},
|
||||
{file = "pandas-1.3.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37f06b59e5bc05711a518aa10beaec10942188dccb48918bb5ae602ccbc9f1a0"},
|
||||
{file = "pandas-1.3.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c21778a688d3712d35710501f8001cdbf96eb70a7c587a3d5613573299fdca6"},
|
||||
{file = "pandas-1.3.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3345343206546545bc26a05b4602b6a24385b5ec7c75cb6059599e3d56831da2"},
|
||||
{file = "pandas-1.3.5-cp39-cp39-win32.whl", hash = "sha256:c69406a2808ba6cf580c2255bcf260b3f214d2664a3a4197d0e640f573b46fd3"},
|
||||
{file = "pandas-1.3.5-cp39-cp39-win_amd64.whl", hash = "sha256:32e1a26d5ade11b547721a72f9bfc4bd113396947606e00d5b4a5b79b3dcb006"},
|
||||
{file = "pandas-1.3.5.tar.gz", hash = "sha256:1e4285f5de1012de20ca46b188ccf33521bff61ba5c5ebd78b4fb28e5416a9f1"},
|
||||
]
|
||||
pandocfilters = [
|
||||
{file = "pandocfilters-1.5.0-py2.py3-none-any.whl", hash = "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"},
|
||||
{file = "pandocfilters-1.5.0.tar.gz", hash = "sha256:0b679503337d233b4339a817bfc8c50064e2eff681314376a47cb582305a7a38"},
|
||||
@@ -3127,8 +3078,8 @@ pip-api = [
|
||||
{file = "pip_api-0.0.26-py3-none-any.whl", hash = "sha256:b24e94e5d5d3f161a2db49653798e6a4c1f0ed6b379e511b45a8fa57c185d711"},
|
||||
]
|
||||
pip-audit = [
|
||||
{file = "pip-audit-1.1.1.tar.gz", hash = "sha256:61d772968b6ef644f43ecceace89665d28d7ea521a9390e59188c4189d580856"},
|
||||
{file = "pip_audit-1.1.1-py3-none-any.whl", hash = "sha256:86aff3427a544757d1d30e8a0ee83eb040c85a94e7b8b6541ed4058493090b44"},
|
||||
{file = "pip-audit-1.1.2.tar.gz", hash = "sha256:374e8528a1376145cbe0f0ec4a7b6a5ebfd6152f665d274498ea49d8bffef24c"},
|
||||
{file = "pip_audit-1.1.2-py3-none-any.whl", hash = "sha256:48325027b803376bee22ca273f8a1b477324c10663c6218a5acebfdc4a107328"},
|
||||
]
|
||||
pip-licenses = [
|
||||
{file = "pip-licenses-3.5.3.tar.gz", hash = "sha256:f44860e00957b791c6c6005a3328f2d5eaeee96ddb8e7d87d4b0aa25b02252e4"},
|
||||
@@ -3198,6 +3149,10 @@ ptyprocess = [
|
||||
{file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
|
||||
{file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
|
||||
]
|
||||
pure-eval = [
|
||||
{file = "pure_eval-0.2.1-py3-none-any.whl", hash = "sha256:94eeb505a88721bec7bb21a4ac49758b8b1a01530da1a70d4ffc1d9937689d71"},
|
||||
{file = "pure_eval-0.2.1.tar.gz", hash = "sha256:0f04483b16c9429532d2c0ddc96e2b3bb6b2dc37a2bfb0e986248dbfd0b78873"},
|
||||
]
|
||||
py = [
|
||||
{file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"},
|
||||
{file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"},
|
||||
@@ -3206,8 +3161,8 @@ py-cpuinfo = [
|
||||
{file = "py-cpuinfo-8.0.0.tar.gz", hash = "sha256:5f269be0e08e33fd959de96b34cd4aeeeacac014dd8305f70eb28d06de2345c5"},
|
||||
]
|
||||
py-progress-tracker = [
|
||||
{file = "py-progress-tracker-0.3.3.tar.gz", hash = "sha256:344a312bc183f4ab4fca5deb5d7d8b94195d3e4c81a2aa929cefee63952ac4d2"},
|
||||
{file = "py_progress_tracker-0.3.3-py3-none-any.whl", hash = "sha256:f298f203c86c32539ba50ee955e8f7121e1095e0704436057f405e2527c7695c"},
|
||||
{file = "py-progress-tracker-0.4.0.tar.gz", hash = "sha256:579344440781f5895b5630ab6be1a640320e22baab78af8d726b40cad619f162"},
|
||||
{file = "py_progress_tracker-0.4.0-py3-none-any.whl", hash = "sha256:5c39a94527e005a220b85ad02f6dc95691b5174458d97055f5ab8c28b827eec4"},
|
||||
]
|
||||
pycodestyle = [
|
||||
{file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"},
|
||||
@@ -3282,27 +3237,27 @@ pyparsing = [
|
||||
{file = "pyparsing-3.0.6.tar.gz", hash = "sha256:d9bdec0013ef1eb5a84ab39a3b3868911598afa494f5faa038647101504e2b81"},
|
||||
]
|
||||
pyrsistent = [
|
||||
{file = "pyrsistent-0.18.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f4c8cabb46ff8e5d61f56a037974228e978f26bfefce4f61a4b1ac0ba7a2ab72"},
|
||||
{file = "pyrsistent-0.18.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:da6e5e818d18459fa46fac0a4a4e543507fe1110e808101277c5a2b5bab0cd2d"},
|
||||
{file = "pyrsistent-0.18.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:5e4395bbf841693eaebaa5bb5c8f5cdbb1d139e07c975c682ec4e4f8126e03d2"},
|
||||
{file = "pyrsistent-0.18.0-cp36-cp36m-win32.whl", hash = "sha256:527be2bfa8dc80f6f8ddd65242ba476a6c4fb4e3aedbf281dfbac1b1ed4165b1"},
|
||||
{file = "pyrsistent-0.18.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2aaf19dc8ce517a8653746d98e962ef480ff34b6bc563fc067be6401ffb457c7"},
|
||||
{file = "pyrsistent-0.18.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58a70d93fb79dc585b21f9d72487b929a6fe58da0754fa4cb9f279bb92369396"},
|
||||
{file = "pyrsistent-0.18.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:4916c10896721e472ee12c95cdc2891ce5890898d2f9907b1b4ae0f53588b710"},
|
||||
{file = "pyrsistent-0.18.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:73ff61b1411e3fb0ba144b8f08d6749749775fe89688093e1efef9839d2dcc35"},
|
||||
{file = "pyrsistent-0.18.0-cp37-cp37m-win32.whl", hash = "sha256:b29b869cf58412ca5738d23691e96d8aff535e17390128a1a52717c9a109da4f"},
|
||||
{file = "pyrsistent-0.18.0-cp37-cp37m-win_amd64.whl", hash = "sha256:097b96f129dd36a8c9e33594e7ebb151b1515eb52cceb08474c10a5479e799f2"},
|
||||
{file = "pyrsistent-0.18.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:772e94c2c6864f2cd2ffbe58bb3bdefbe2a32afa0acb1a77e472aac831f83427"},
|
||||
{file = "pyrsistent-0.18.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c1a9ff320fa699337e05edcaae79ef8c2880b52720bc031b219e5b5008ebbdef"},
|
||||
{file = "pyrsistent-0.18.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:cd3caef37a415fd0dae6148a1b6957a8c5f275a62cca02e18474608cb263640c"},
|
||||
{file = "pyrsistent-0.18.0-cp38-cp38-win32.whl", hash = "sha256:e79d94ca58fcafef6395f6352383fa1a76922268fa02caa2272fff501c2fdc78"},
|
||||
{file = "pyrsistent-0.18.0-cp38-cp38-win_amd64.whl", hash = "sha256:a0c772d791c38bbc77be659af29bb14c38ced151433592e326361610250c605b"},
|
||||
{file = "pyrsistent-0.18.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d5ec194c9c573aafaceebf05fc400656722793dac57f254cd4741f3c27ae57b4"},
|
||||
{file = "pyrsistent-0.18.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:6b5eed00e597b5b5773b4ca30bd48a5774ef1e96f2a45d105db5b4ebb4bca680"},
|
||||
{file = "pyrsistent-0.18.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:48578680353f41dca1ca3dc48629fb77dfc745128b56fc01096b2530c13fd426"},
|
||||
{file = "pyrsistent-0.18.0-cp39-cp39-win32.whl", hash = "sha256:f3ef98d7b76da5eb19c37fda834d50262ff9167c65658d1d8f974d2e4d90676b"},
|
||||
{file = "pyrsistent-0.18.0-cp39-cp39-win_amd64.whl", hash = "sha256:404e1f1d254d314d55adb8d87f4f465c8693d6f902f67eb6ef5b4526dc58e6ea"},
|
||||
{file = "pyrsistent-0.18.0.tar.gz", hash = "sha256:773c781216f8c2900b42a7b638d5b517bb134ae1acbebe4d1e8f1f41ea60eb4b"},
|
||||
{file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"},
|
||||
{file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"},
|
||||
{file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"},
|
||||
{file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"},
|
||||
{file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"},
|
||||
{file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"},
|
||||
{file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"},
|
||||
{file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"},
|
||||
{file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"},
|
||||
{file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"},
|
||||
{file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"},
|
||||
{file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"},
|
||||
{file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"},
|
||||
{file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"},
|
||||
{file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"},
|
||||
{file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"},
|
||||
{file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"},
|
||||
{file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"},
|
||||
{file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"},
|
||||
{file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"},
|
||||
{file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"},
|
||||
]
|
||||
pytest = [
|
||||
{file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"},
|
||||
@@ -3486,71 +3441,6 @@ rfc3986 = [
|
||||
{file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"},
|
||||
{file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"},
|
||||
]
|
||||
scikit-learn = [
|
||||
{file = "scikit-learn-1.0.2.tar.gz", hash = "sha256:b5870959a5484b614f26d31ca4c17524b1b0317522199dc985c3b4256e030767"},
|
||||
{file = "scikit_learn-1.0.2-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:da3c84694ff693b5b3194d8752ccf935a665b8b5edc33a283122f4273ca3e687"},
|
||||
{file = "scikit_learn-1.0.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:75307d9ea39236cad7eea87143155eea24d48f93f3a2f9389c817f7019f00705"},
|
||||
{file = "scikit_learn-1.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f14517e174bd7332f1cca2c959e704696a5e0ba246eb8763e6c24876d8710049"},
|
||||
{file = "scikit_learn-1.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9aac97e57c196206179f674f09bc6bffcd0284e2ba95b7fe0b402ac3f986023"},
|
||||
{file = "scikit_learn-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:d93d4c28370aea8a7cbf6015e8a669cd5d69f856cc2aa44e7a590fb805bb5583"},
|
||||
{file = "scikit_learn-1.0.2-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:85260fb430b795d806251dd3bb05e6f48cdc777ac31f2bcf2bc8bbed3270a8f5"},
|
||||
{file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a053a6a527c87c5c4fa7bf1ab2556fa16d8345cf99b6c5a19030a4a7cd8fd2c0"},
|
||||
{file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:245c9b5a67445f6f044411e16a93a554edc1efdcce94d3fc0bc6a4b9ac30b752"},
|
||||
{file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158faf30684c92a78e12da19c73feff9641a928a8024b4fa5ec11d583f3d8a87"},
|
||||
{file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08ef968f6b72033c16c479c966bf37ccd49b06ea91b765e1cc27afefe723920b"},
|
||||
{file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16455ace947d8d9e5391435c2977178d0ff03a261571e67f627c8fee0f9d431a"},
|
||||
{file = "scikit_learn-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:2f3b453e0b149898577e301d27e098dfe1a36943f7bb0ad704d1e548efc3b448"},
|
||||
{file = "scikit_learn-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:46f431ec59dead665e1370314dbebc99ead05e1c0a9df42f22d6a0e00044820f"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:ff3fa8ea0e09e38677762afc6e14cad77b5e125b0ea70c9bba1992f02c93b028"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9369b030e155f8188743eb4893ac17a27f81d28a884af460870c7c072f114243"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7d6b2475f1c23a698b48515217eb26b45a6598c7b1840ba23b3c5acece658dbb"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:285db0352e635b9e3392b0b426bc48c3b485512d3b4ac3c7a44ec2a2ba061e66"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb33fe1dc6f73dc19e67b264dbb5dde2a0539b986435fdd78ed978c14654830"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1391d1a6e2268485a63c3073111fe3ba6ec5145fc957481cfd0652be571226d"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc3744dabc56b50bec73624aeca02e0def06b03cb287de26836e730659c5d29c"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-win32.whl", hash = "sha256:a999c9f02ff9570c783069f1074f06fe7386ec65b84c983db5aeb8144356a355"},
|
||||
{file = "scikit_learn-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:7626a34eabbf370a638f32d1a3ad50526844ba58d63e3ab81ba91e2a7c6d037e"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:a90b60048f9ffdd962d2ad2fb16367a87ac34d76e02550968719eb7b5716fd10"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:7a93c1292799620df90348800d5ac06f3794c1316ca247525fa31169f6d25855"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:eabceab574f471de0b0eb3f2ecf2eee9f10b3106570481d007ed1c84ebf6d6a1"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:55f2f3a8414e14fbee03782f9fe16cca0f141d639d2b1c1a36779fa069e1db57"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80095a1e4b93bd33261ef03b9bc86d6db649f988ea4dbcf7110d0cded8d7213d"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa38a1b9b38ae1fad2863eff5e0d69608567453fdfc850c992e6e47eb764e846"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff746a69ff2ef25f62b36338c615dd15954ddc3ab8e73530237dd73235e76d62"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-win32.whl", hash = "sha256:e174242caecb11e4abf169342641778f68e1bfaba80cd18acd6bc84286b9a534"},
|
||||
{file = "scikit_learn-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b54a62c6e318ddbfa7d22c383466d38d2ee770ebdb5ddb668d56a099f6eaf75f"},
|
||||
]
|
||||
scipy = [
|
||||
{file = "scipy-1.7.3-1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c9e04d7e9b03a8a6ac2045f7c5ef741be86727d8f49c45db45f244bdd2bcff17"},
|
||||
{file = "scipy-1.7.3-1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b0e0aeb061a1d7dcd2ed59ea57ee56c9b23dd60100825f98238c06ee5cc4467e"},
|
||||
{file = "scipy-1.7.3-1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:b78a35c5c74d336f42f44106174b9851c783184a85a3fe3e68857259b37b9ffb"},
|
||||
{file = "scipy-1.7.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:173308efba2270dcd61cd45a30dfded6ec0085b4b6eb33b5eb11ab443005e088"},
|
||||
{file = "scipy-1.7.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:21b66200cf44b1c3e86495e3a436fc7a26608f92b8d43d344457c54f1c024cbc"},
|
||||
{file = "scipy-1.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceebc3c4f6a109777c0053dfa0282fddb8893eddfb0d598574acfb734a926168"},
|
||||
{file = "scipy-1.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7eaea089345a35130bc9a39b89ec1ff69c208efa97b3f8b25ea5d4c41d88094"},
|
||||
{file = "scipy-1.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:304dfaa7146cffdb75fbf6bb7c190fd7688795389ad060b970269c8576d038e9"},
|
||||
{file = "scipy-1.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:033ce76ed4e9f62923e1f8124f7e2b0800db533828c853b402c7eec6e9465d80"},
|
||||
{file = "scipy-1.7.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4d242d13206ca4302d83d8a6388c9dfce49fc48fdd3c20efad89ba12f785bf9e"},
|
||||
{file = "scipy-1.7.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8499d9dd1459dc0d0fe68db0832c3d5fc1361ae8e13d05e6849b358dc3f2c279"},
|
||||
{file = "scipy-1.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca36e7d9430f7481fc7d11e015ae16fbd5575615a8e9060538104778be84addf"},
|
||||
{file = "scipy-1.7.3-cp37-cp37m-win32.whl", hash = "sha256:e2c036492e673aad1b7b0d0ccdc0cb30a968353d2c4bf92ac8e73509e1bf212c"},
|
||||
{file = "scipy-1.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:866ada14a95b083dd727a845a764cf95dd13ba3dc69a16b99038001b05439709"},
|
||||
{file = "scipy-1.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:65bd52bf55f9a1071398557394203d881384d27b9c2cad7df9a027170aeaef93"},
|
||||
{file = "scipy-1.7.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:f99d206db1f1ae735a8192ab93bd6028f3a42f6fa08467d37a14eb96c9dd34a3"},
|
||||
{file = "scipy-1.7.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5f2cfc359379c56b3a41b17ebd024109b2049f878badc1e454f31418c3a18436"},
|
||||
{file = "scipy-1.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb7ae2c4dbdb3c9247e07acc532f91077ae6dbc40ad5bd5dca0bb5a176ee9bda"},
|
||||
{file = "scipy-1.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c2d250074cfa76715d58830579c64dff7354484b284c2b8b87e5a38321672c"},
|
||||
{file = "scipy-1.7.3-cp38-cp38-win32.whl", hash = "sha256:87069cf875f0262a6e3187ab0f419f5b4280d3dcf4811ef9613c605f6e4dca95"},
|
||||
{file = "scipy-1.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:7edd9a311299a61e9919ea4192dd477395b50c014cdc1a1ac572d7c27e2207fa"},
|
||||
{file = "scipy-1.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eef93a446114ac0193a7b714ce67659db80caf940f3232bad63f4c7a81bc18df"},
|
||||
{file = "scipy-1.7.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:eb326658f9b73c07081300daba90a8746543b5ea177184daed26528273157294"},
|
||||
{file = "scipy-1.7.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:93378f3d14fff07572392ce6a6a2ceb3a1f237733bd6dcb9eb6a2b29b0d19085"},
|
||||
{file = "scipy-1.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edad1cf5b2ce1912c4d8ddad20e11d333165552aba262c882e28c78bbc09dbf6"},
|
||||
{file = "scipy-1.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d1cc2c19afe3b5a546ede7e6a44ce1ff52e443d12b231823268019f608b9b12"},
|
||||
{file = "scipy-1.7.3-cp39-cp39-win32.whl", hash = "sha256:2c56b820d304dffcadbbb6cbfbc2e2c79ee46ea291db17e288e73cd3c64fefa9"},
|
||||
{file = "scipy-1.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:3f78181a153fa21c018d346f595edd648344751d7f03ab94b398be2ad083ed3e"},
|
||||
{file = "scipy-1.7.3.tar.gz", hash = "sha256:ab5875facfdef77e0a47d5fd39ea178b58e60e454a4c85aa1e52fcb80db7babf"},
|
||||
]
|
||||
secretstorage = [
|
||||
{file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"},
|
||||
{file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"},
|
||||
@@ -3564,8 +3454,8 @@ send2trash = [
|
||||
{file = "Send2Trash-1.8.0.tar.gz", hash = "sha256:d2c24762fd3759860a0aff155e45871447ea58d2be6bdd39b5c8f966a0c99c2d"},
|
||||
]
|
||||
setuptools-scm = [
|
||||
{file = "setuptools_scm-6.3.2-py3-none-any.whl", hash = "sha256:4c64444b1d49c4063ae60bfe1680f611c8b13833d556fd1d6050c0023162a119"},
|
||||
{file = "setuptools_scm-6.3.2.tar.gz", hash = "sha256:a49aa8081eeb3514eb9728fa5040f2eaa962d6c6f4ec9c32f6c1fba88f88a0f2"},
|
||||
{file = "setuptools_scm-6.4.1-py3-none-any.whl", hash = "sha256:93bbcc1d3e92f20eaef42df31e4b5e5a348f8cb0e48eaff3e060184a57e94b07"},
|
||||
{file = "setuptools_scm-6.4.1.tar.gz", hash = "sha256:9bd9ff7fd5fa1cf469fe28a632336b9cfd351476c6d09379ff676d3945f669b9"},
|
||||
]
|
||||
six = [
|
||||
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
|
||||
@@ -3618,6 +3508,10 @@ sphinxcontrib-serializinghtml = [
|
||||
{file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
|
||||
{file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
|
||||
]
|
||||
stack-data = [
|
||||
{file = "stack_data-0.1.4-py3-none-any.whl", hash = "sha256:02cc0683cbc445ae4ca8c4e3a0e58cb1df59f252efb0aa016b34804a707cf9bc"},
|
||||
{file = "stack_data-0.1.4.tar.gz", hash = "sha256:7769ed2482ce0030e00175dd1bf4ef1e873603b6ab61cd3da443b410e64e9477"},
|
||||
]
|
||||
tabulate = [
|
||||
{file = "tabulate-0.8.9-py3-none-any.whl", hash = "sha256:d7c013fe7abbc5e491394e10fa845f8f32fe54f8dc60c6622c6cf482d25d47e4"},
|
||||
{file = "tabulate-0.8.9.tar.gz", hash = "sha256:eb1d13f25760052e8931f2ef80aaf6045a6cceb47514db8beab24cded16f13a7"},
|
||||
@@ -3633,10 +3527,6 @@ testpath = [
|
||||
{file = "testpath-0.5.0-py3-none-any.whl", hash = "sha256:8044f9a0bab6567fc644a3593164e872543bb44225b0e24846e2c89237937589"},
|
||||
{file = "testpath-0.5.0.tar.gz", hash = "sha256:1acf7a0bcd3004ae8357409fc33751e16d37ccc650921da1094a86581ad1e417"},
|
||||
]
|
||||
threadpoolctl = [
|
||||
{file = "threadpoolctl-3.0.0-py3-none-any.whl", hash = "sha256:4fade5b3b48ae4b1c30f200b28f39180371104fccc642e039e0f2435ec8cc211"},
|
||||
{file = "threadpoolctl-3.0.0.tar.gz", hash = "sha256:d03115321233d0be715f0d3a5ad1d6c065fe425ddc2d671ca8e45e9fd5d7a52a"},
|
||||
]
|
||||
toml = [
|
||||
{file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
|
||||
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
|
||||
@@ -3649,26 +3539,6 @@ tomlkit = [
|
||||
{file = "tomlkit-0.7.0-py2.py3-none-any.whl", hash = "sha256:6babbd33b17d5c9691896b0e68159215a9387ebfa938aa3ac42f4a4beeb2b831"},
|
||||
{file = "tomlkit-0.7.0.tar.gz", hash = "sha256:ac57f29693fab3e309ea789252fcce3061e19110085aa31af5446ca749325618"},
|
||||
]
|
||||
torch = [
|
||||
{file = "torch-1.10.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:adbb5f292e260e39715d67478823e03e3001db1af5b02c18caa34549dccb421e"},
|
||||
{file = "torch-1.10.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:ac8cae04458cc47555fa07a760496c2fdf687223bcc13df5fed56ea3aead37f5"},
|
||||
{file = "torch-1.10.1-cp36-cp36m-win_amd64.whl", hash = "sha256:40508d67288c46ff1fad301fa6e996e0e936a733f2401475fc92c21dc3ef702d"},
|
||||
{file = "torch-1.10.1-cp36-none-macosx_10_9_x86_64.whl", hash = "sha256:8b47bd113c6cbd9a49669aaaa233ad5f25852d6ca3e640f9c71c808e65a1fdf4"},
|
||||
{file = "torch-1.10.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:50360868ad3f039cf99f0250300dbec51bf686a7b84dc6bbdb8dff4b1171c0f0"},
|
||||
{file = "torch-1.10.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:e3d2154722189ed74747a494dce9588978dd55e43ca24c5bd307fb52620b232b"},
|
||||
{file = "torch-1.10.1-cp37-cp37m-win_amd64.whl", hash = "sha256:d9c495bcd5f00becff5b051b5e4be86b7eaa0433cd0fe57f77c02bc1b93ab5b1"},
|
||||
{file = "torch-1.10.1-cp37-none-macosx_10_9_x86_64.whl", hash = "sha256:6b327d7b4eb2461b16d46763d46df71e597235ccc428650538a2735a0898270d"},
|
||||
{file = "torch-1.10.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:1c6c56178e5dacf7602ad00dc79c263d6c41c0f76261e9641e6bd2679678ceb3"},
|
||||
{file = "torch-1.10.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:2ffa2db4ccb6466c59e3f95b7a582d47ae721e476468f4ffbcaa2832e0b92b9b"},
|
||||
{file = "torch-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:af577602e884c5e40fbd29ec978f052202355da93cd31e0a23251bd7aaff5a99"},
|
||||
{file = "torch-1.10.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:725d86e9809073eef868a3ddf4189878ee7af46fac71403834dd0925b3db9b82"},
|
||||
{file = "torch-1.10.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:fa197cfe047d0515bef238f42472721406609ebaceff2fd4e17f2ad4692ee51c"},
|
||||
{file = "torch-1.10.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:cca660b27a90dbbc0af06c859260f6b875aef37c0897bd353e5deed085d2c877"},
|
||||
{file = "torch-1.10.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:01f4ffdafbfbd7d106fb4e487feee2cf29cced9903df8cb0444b0e308f9c5e92"},
|
||||
{file = "torch-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:607eccb7d539a11877cd02d95f4b164b7941fcf538ac7ff087bfed19e3644283"},
|
||||
{file = "torch-1.10.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:26b6dfbe21e247e67c615bfab0017ec391ed1517f88bbeea6228a49edd24cd88"},
|
||||
{file = "torch-1.10.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:5644280d88c5b6de27eacc0d911f968aad41a4bab297af4df5e571bc0927d3e4"},
|
||||
]
|
||||
tornado = [
|
||||
{file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"},
|
||||
{file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"},
|
||||
|
||||
pylintrc
@@ -438,7 +438,7 @@ contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=torch
generated-members=

# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
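With torch dropped from the dependencies elsewhere in this commit, the `generated-members` whitelist no longer needs the entry. As a hedged illustration of why it existed (the access below is hypothetical, not part of this commit): pylint cannot statically infer attributes created at runtime by a C extension, so code like this could be flagged as E1101 (no-member) unless the name is whitelisted.

# Illustration only: without `generated-members=torch`, pylint's static
# inference may not see torch's extension-defined attributes and could
# report E1101 (no-member) on this call.
import torch

torch.manual_seed(0)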
@@ -44,7 +44,6 @@ numpy = "^1.22.0"
pygraphviz = { version = "^1.7", optional = true }
Pillow = "^9.0.0"
loguru = "^0.5.3"
torch = "^1.10.1"
setuptools = "*"
concrete-compiler = "^0.1.1"

@@ -79,11 +78,9 @@ pygments-style-tomorrow = "^1.0.0"
beautifulsoup4 = "^4.10.0"
pip-licenses = "^3.5.3"
sphinx-zama-theme = "2.0.8"
scikit-learn = "^1.0.2"
pandas = "^1.3.5"
pip-audit = "^1.1.1"
pytest-codeblocks = "^0.12.2"
py-progress-tracker = "^0.3.3"
py-progress-tracker = "^0.4.0"
twine = "^3.7.1"

[build-system]
@@ -27,7 +27,7 @@ mkdir -p /tmp/keycache
mkdir -p logs

initial_concrete_log=logs/$(date -u --iso-8601=seconds).concrete.log
make -s concrete_benchmark 2>&1 | tee -a "$initial_concrete_log"
make -s benchmark 2>&1 | tee -a "$initial_concrete_log"

final_concrete_log=logs/$(date -u --iso-8601=seconds).concrete.log
cat -s "$initial_concrete_log" | sed '1d; $d' > "$final_concrete_log"
@@ -44,22 +44,3 @@ curl \
    -H 'Content-Type: application/json' \
    -d @progress.json \
    -X POST "$CONCRETE_PROGRESS_TRACKER_URL"/measurement

initial_ml_log=logs/$(date -u --iso-8601=seconds).ml.log
make -s ml_benchmark 2>&1 | tee -a "$initial_ml_log"

final_ml_log=logs/$(date -u --iso-8601=seconds).ml.log
cat -s "$initial_ml_log" | sed '1d; $d' > "$final_ml_log"

# sed above removes the first and the last lines of the log
# which are empty to provide a nice console output
# but empty lines are useless for logs so we get rid of them

rm "$initial_ml_log"
cp "$final_ml_log" logs/latest.ml.log

curl \
    -H 'Authorization: Bearer '"$ML_PROGRESS_TRACKER_TOKEN"'' \
    -H 'Content-Type: application/json' \
    -d @progress.json \
    -X POST "$ML_PROGRESS_TRACKER_URL"/measurement
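The block removed above duplicated, for the ml benchmarks, the same trim-and-upload steps the concrete benchmarks keep. For readers less familiar with the shell idioms, here is a rough Python rendition of what `sed '1d; $d'` plus the `curl` upload do; the helper names are illustrative and not part of the repository.

# Sketch only, not repository code. Assumes, like the script, that the
# benchmark log's first and last lines are empty.
import urllib.request

def trim_first_and_last_line(src: str, dst: str) -> None:
    # Equivalent of `sed '1d; $d'`: drop the first and last lines of the log.
    with open(src, encoding="utf-8") as f:
        lines = f.read().splitlines()
    with open(dst, "w", encoding="utf-8") as f:
        f.write("\n".join(lines[1:-1]) + "\n")

def post_measurements(tracker_url: str, token: str) -> None:
    # Equivalent of the curl call: POST progress.json with a bearer token.
    with open("progress.json", "rb") as f:
        payload = f.read()
    request = urllib.request.Request(
        f"{tracker_url}/measurement",
        data=payload,
        headers={"Authorization": f"Bearer {token}", "Content-Type": "application/json"},
        method="POST",
    )
    urllib.request.urlopen(request)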
@@ -1,7 +1,6 @@
"""PyTest configuration file"""
import json
import operator
import random
import re
import shutil
from pathlib import Path
@@ -11,7 +10,6 @@ import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy
import pytest
import torch

from concrete.common.compilation import CompilationConfiguration
from concrete.common.fhe_circuit import FHECircuit
@@ -367,21 +365,6 @@ def remove_color_codes():
    return lambda x: REMOVE_COLOR_CODES_RE.sub("", x)


def function_to_seed_torch():
    """Function to seed torch"""

    # Seed torch with something which is seeded by pytest-randomly
    torch.manual_seed(random.randint(0, 2 ** 64 - 1))
    torch.use_deterministic_algorithms(True)


@pytest.fixture
def seed_torch():
    """Fixture to seed torch"""

    return function_to_seed_torch


def check_is_good_execution_impl(
    fhe_circuit: FHECircuit,
    function: Callable,
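The deleted fixture was the only place the test suite seeded torch. The pattern is worth spelling out: derive torch's seed from the run's master seed so a whole test run can be reproduced from a single number. A self-contained sketch (the master seed 1234 below stands in for the per-run seed that pytest-randomly provides):

# Sketch of the removed pattern, outside pytest.
import random
import torch

random.seed(1234)  # stands in for pytest-randomly's per-run seeding
torch.manual_seed(random.randint(0, 2 ** 64 - 1))
torch.use_deterministic_algorithms(True)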
@@ -261,3 +261,20 @@ def test_np_fhe_compiler_full_compilation(default_compilation_configuration, che

    for i in range(64):
        assert fhe_circuit.run(i) == function_to_compile(i)


def test_np_fhe_compiler_compile_on_inputset(default_compilation_configuration):
    """Test the case where we generate an FHE circuit with a single call."""

    def function_to_compile(x):
        return x + 42

    compiler = NPFHECompiler(
        function_to_compile,
        {"x": "encrypted"},
        default_compilation_configuration,
    )
    circuit = compiler.compile_on_inputset(numpy.arange(64))

    for i in range(64):
        assert circuit.run(i) == function_to_compile(i)
@@ -1,87 +0,0 @@
"""Test Neural Networks compilations"""
import numpy
import pytest
from torch import nn

from concrete.quantization import PostTrainingAffineQuantization, QuantizedArray
from concrete.torch import NumpyModule

# INPUT_OUTPUT_FEATURE is the number of input and output of each of the network layers.
# (as well as the input of the network itself)
# Currently, with 7 bits maximum, we can use 15 weights max in the theoretical case.
INPUT_OUTPUT_FEATURE = [1, 2, 3]


class FC(nn.Module):
    """Torch model for the tests"""

    def __init__(self, input_output):
        super().__init__()
        self.fc1 = nn.Linear(in_features=input_output, out_features=input_output)
        self.sigmoid1 = nn.Sigmoid()
        self.fc2 = nn.Linear(in_features=input_output, out_features=input_output)

    def forward(self, x):
        """Forward pass."""
        out = self.fc1(x)
        out = self.sigmoid1(out)
        out = self.fc2(out)

        return out


@pytest.mark.parametrize(
    "model",
    [pytest.param(FC)],
)
@pytest.mark.parametrize(
    "input_output_feature",
    [pytest.param(input_output_feature) for input_output_feature in INPUT_OUTPUT_FEATURE],
)
def test_quantized_module_compilation(
    input_output_feature,
    model,
    seed_torch,
    default_compilation_configuration,
    check_is_good_execution,
):
    """Test a neural network compilation for FHE inference."""
    # Seed torch
    seed_torch()

    n_bits = 2

    # Define an input shape (n_examples, n_features)
    input_shape = (50, input_output_feature)

    # Build a random Quantized Fully Connected Neural Network

    # Define the torch model
    torch_fc_model = model(input_output_feature)
    # Create random input
    numpy_input = numpy.random.uniform(-100, 100, size=input_shape)

    # Create corresponding numpy model
    numpy_fc_model = NumpyModule(torch_fc_model)
    # Quantize with post-training static method
    post_training_quant = PostTrainingAffineQuantization(n_bits, numpy_fc_model)
    quantized_model = post_training_quant.quantize_module(numpy_input)
    # Quantize input
    q_input = QuantizedArray(n_bits, numpy_input)
    quantized_model(q_input)

    # Compile
    quantized_model.compile(q_input, default_compilation_configuration)

    for x_q in q_input.qvalues:
        x_q = numpy.expand_dims(x_q, 0)
        check_is_good_execution(
            fhe_circuit=quantized_model.forward_fhe,
            function=quantized_model.forward,
            args=[x_q.astype(numpy.uint8)],
            postprocess_output_func=lambda x: quantized_model.dequantize_output(
                x.astype(numpy.float32)
            ),
            check_function=numpy.isclose,
            verbose=False,
        )
@@ -1,55 +0,0 @@
"""Tests for the quantized activation functions."""
import numpy
import pytest

from concrete.quantization import QuantizedArray, QuantizedReLU6, QuantizedSigmoid

N_BITS_ATOL_TUPLE_LIST = [
    (32, 10 ** -2),
    (28, 10 ** -2),
    (20, 10 ** -2),
    (16, 10 ** -1),
    (8, 10 ** -0),
    (5, 10 ** -0),
]


@pytest.mark.parametrize(
    "n_bits, atol",
    [pytest.param(n_bits, atol) for n_bits, atol in N_BITS_ATOL_TUPLE_LIST],
)
@pytest.mark.parametrize(
    "input_range",
    [pytest.param((-1, 1)), pytest.param((-2, 2)), pytest.param((-10, 10)), pytest.param((0, 20))],
)
@pytest.mark.parametrize(
    "input_shape",
    [pytest.param((10, 40, 20)), pytest.param((100, 400))],
)
@pytest.mark.parametrize(
    "quant_activation",
    [
        pytest.param(QuantizedSigmoid),
        pytest.param(QuantizedReLU6),
    ],
)
@pytest.mark.parametrize("is_signed", [pytest.param(True), pytest.param(False)])
def test_activations(quant_activation, input_shape, input_range, n_bits, atol, is_signed):
    """Test activation functions."""
    values = numpy.random.uniform(input_range[0], input_range[1], size=input_shape)
    q_inputs = QuantizedArray(n_bits, values, is_signed)
    quant_sigmoid = quant_activation(n_bits)
    quant_sigmoid.calibrate(values)
    expected_output = quant_sigmoid.q_out.values
    q_output = quant_sigmoid(q_inputs)
    qvalues = q_output.qvalues

    # Quantized values must be contained between 0 and 2**n_bits - 1.
    assert numpy.max(qvalues) <= 2 ** n_bits - 1
    assert numpy.min(qvalues) >= 0

    # Dequantized values must be close to original values
    dequant_values = q_output.dequant()

    # Check that all values are close
    assert numpy.isclose(dequant_values.ravel(), expected_output.ravel(), atol=atol).all()
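For intuition about what this deleted test exercised, here is a minimal standalone sketch of the general recipe behind an affine-quantized activation (an assumption-level illustration, not the concrete.quantization implementation): calibrate an output scale and zero point on float data, then evaluate the float function on dequantized inputs and requantize the result.

# Sketch only. Assumes unsigned affine quantization: q = round(x / scale) + zero_point.
import numpy as np

def calibrate(x, n_bits):
    # Affine quantizer parameters so that x ≈ scale * (q - zero_point)
    scale = (x.max() - x.min()) / (2 ** n_bits - 1)
    zero_point = np.round(-x.min() / scale)
    return scale, zero_point

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

n_bits = 8
values = np.random.uniform(-2.0, 2.0, size=1000)

in_scale, in_zero = calibrate(values, n_bits)            # input quantizer
out_scale, out_zero = calibrate(sigmoid(values), n_bits)  # output quantizer

# Quantize the inputs, then dequantize: this is what reaches the activation.
q_in = np.round(values / in_scale) + in_zero
dequant_in = (q_in - in_zero) * in_scale

# Evaluate the float function and requantize, clipping to the unsigned
# range as real implementations do.
q_out = np.round(sigmoid(dequant_in) / out_scale) + out_zero
q_out = np.clip(q_out, 0, 2 ** n_bits - 1)

# The property the deleted test asserts: outputs stay in [0, 2**n_bits - 1].
assert q_out.min() >= 0 and q_out.max() <= 2 ** n_bits - 1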
@@ -1,59 +0,0 @@
"""Tests for the quantized array/tensors."""
import numpy
import pytest

from concrete.quantization import QuantizedArray


@pytest.mark.parametrize(
    "n_bits",
    [32, 28, 20, 16, 8, 4],
)
@pytest.mark.parametrize("is_signed", [pytest.param(True), pytest.param(False)])
@pytest.mark.parametrize("values", [pytest.param(numpy.random.randn(2000))])
def test_quant_dequant_update(values, n_bits, is_signed, check_array_equality):
    """Test the quant and dequant function."""

    quant_array = QuantizedArray(n_bits, values, is_signed)
    qvalues = quant_array.quant()

    # Quantized values must stay in the representable range
    # (shifted by the offset for signed arrays)
    assert numpy.max(qvalues) <= 2 ** n_bits - 1 - quant_array.offset
    assert numpy.min(qvalues) >= -quant_array.offset

    # Dequantized values must be close to original values
    dequant_values = quant_array.dequant()

    # Check that all values are close
    tolerance = quant_array.scale / 2
    assert numpy.isclose(dequant_values, values, atol=tolerance).all()

    # Explain the choice of tolerance:
    # this test checks the values are quantized and dequantized correctly,
    # and each quantization has a maximum error per quantized value of `scale / 2`

    # To give an intuition, let's say you have a scale of 0.5:
    # the range `[a + 0.00, a + 0.25]` will be quantized into 0, dequantized into `a + 0.00`
    # the range `[a + 0.25, a + 0.75]` will be quantized into 1, dequantized into `a + 0.50`
    # the range `[a + 0.75, a + 1.25]` will be quantized into 2, dequantized into `a + 1.00`
    # ...

    # So for each quantization-then-dequantization operation,
    # the maximum error is `0.25`, which is `scale / 2`

    # Test update functions
    new_values = numpy.array([0.3, 0.5, -1.2, -3.4])
    new_qvalues_ = quant_array.update_values(new_values)

    # Make sure the shape changed for the qvalues
    assert new_qvalues_.shape != qvalues.shape

    new_qvalues = numpy.array([1, 4, 7, 29])
    new_values_updated = quant_array.update_qvalues(new_qvalues)

    # Make sure that we can see at least one change.
    assert not numpy.array_equal(new_qvalues, new_qvalues_)
    assert not numpy.array_equal(new_values, new_values_updated)

    # Check that the __call__ returns also the qvalues.
    check_array_equality(quant_array(), new_qvalues)
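The tolerance argument in the comments above can be checked directly. A minimal standalone sketch (a plain affine quantizer, assumed here for illustration rather than the QuantizedArray API):

# Round-tripping through an affine quantizer loses at most scale / 2 per value.
import numpy as np

values = np.random.randn(2000)
n_bits = 8

scale = (values.max() - values.min()) / (2 ** n_bits - 1)
zero_point = np.round(-values.min() / scale)

qvalues = np.round(values / scale) + zero_point
dequant = (qvalues - zero_point) * scale

# round() moves each value by at most half a quantization step.
assert np.abs(dequant - values).max() <= scale / 2 + 1e-12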
@@ -1,62 +0,0 @@
"""Tests for the quantized layers."""
import numpy
import pytest

from concrete.quantization import QuantizedArray, QuantizedLinear

# QuantizedLinear is unstable with n_bits > 23
# and hard to test with numpy.isclose with n_bits < 8
N_BITS_LIST = [20, 16, 8]


@pytest.mark.parametrize(
    "n_bits",
    [pytest.param(n_bits) for n_bits in N_BITS_LIST],
)
@pytest.mark.parametrize(
    "n_examples, n_features, n_neurons",
    [
        pytest.param(50, 3, 4),
        pytest.param(20, 500, 30),
        pytest.param(200, 300, 50),
        pytest.param(10000, 100, 1),
        pytest.param(10, 20, 1),
    ],
)
@pytest.mark.parametrize("is_signed", [pytest.param(True), pytest.param(False)])
def test_quantized_linear(n_examples, n_features, n_neurons, n_bits, is_signed):
    """Test the quantized linear layer on numpy.array inputs.

    With n_bits >> 0 we expect the results of the quantized linear layer
    to be the same as the standard linear layer.
    """
    inputs = numpy.random.uniform(size=(n_examples, n_features))
    q_inputs = QuantizedArray(n_bits, inputs)

    # Shape of weights: (n_features, n_neurons)
    weights = numpy.random.uniform(size=(n_features, n_neurons))
    q_weights = QuantizedArray(n_bits, weights, is_signed)

    bias = numpy.random.uniform(size=(1, n_neurons))
    q_bias = QuantizedArray(n_bits, bias, is_signed)

    # Define our QuantizedLinear layer
    q_linear = QuantizedLinear(n_bits, q_weights, q_bias)

    # Calibrate the quantized layer
    q_linear.calibrate(inputs)

    expected_outputs = q_linear.q_out.values
    actual_output = q_linear(q_inputs).dequant()

    assert numpy.isclose(expected_outputs, actual_output, atol=10 ** -0).all()

    # Same test without bias
    q_linear = QuantizedLinear(n_bits, q_weights)

    # Calibrate the quantized layer
    q_linear.calibrate(inputs)
    expected_outputs = q_linear.q_out.values
    actual_output = q_linear(q_inputs).dequant()

    assert numpy.isclose(expected_outputs, actual_output, atol=10 ** -0).all()
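The "same result for large n_bits" claim in the docstring follows from how an affine-quantized linear layer works. A minimal sketch of the idea being tested (independent of the concrete.quantization API; the quantizer below is an assumption for illustration):

# Sketch only: y ≈ s_x * (q_x - z_x) @ (s_w * (q_w - z_w)) + b
import numpy as np

def affine_quantize(x, n_bits):
    # q = round(x / scale) + zero_point, so that x ≈ scale * (q - zero_point)
    scale = (x.max() - x.min()) / (2 ** n_bits - 1)
    zero_point = np.round(-x.min() / scale)
    return np.round(x / scale) + zero_point, scale, zero_point

x = np.random.uniform(size=(5, 3))
w = np.random.uniform(size=(3, 4))
b = np.random.uniform(size=(1, 4))

q_x, s_x, z_x = affine_quantize(x, 16)
q_w, s_w, z_w = affine_quantize(w, 16)

# Rebuild the float operands from integers and scales, then apply the layer.
y_quantized = (s_x * (q_x - z_x)) @ (s_w * (q_w - z_w)) + b

# With 16 bits the quantization error is tiny, as the deleted test expects
# for large n_bits.
assert np.isclose(y_quantized, x @ w + b, atol=10 ** -3).all()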
@@ -1,113 +0,0 @@
"""Tests for the quantized module."""
import numpy
import pytest
import torch
from torch import nn

from concrete.quantization import PostTrainingAffineQuantization, QuantizedArray
from concrete.torch import NumpyModule


class CNN(nn.Module):
    """Torch CNN model for the tests."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.AvgPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass."""
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = torch.flatten(x, 1)
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x


class FC(nn.Module):
    """Torch model for the tests"""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(in_features=32 * 32 * 3, out_features=128)
        self.sigmoid1 = nn.Sigmoid()
        self.fc2 = nn.Linear(in_features=128, out_features=64)
        self.sigmoid2 = nn.Sigmoid()
        self.fc3 = nn.Linear(in_features=64, out_features=64)
        self.sigmoid3 = nn.Sigmoid()
        self.fc4 = nn.Linear(in_features=64, out_features=64)
        self.sigmoid4 = nn.Sigmoid()
        self.fc5 = nn.Linear(in_features=64, out_features=10)

    def forward(self, x):
        """Forward pass."""
        out = self.fc1(x)
        out = self.sigmoid1(out)
        out = self.fc2(out)
        out = self.sigmoid2(out)
        out = self.fc3(out)
        out = self.sigmoid3(out)
        out = self.fc4(out)
        out = self.sigmoid4(out)
        out = self.fc5(out)

        return out


N_BITS_ATOL_TUPLE_LIST = [
    (28, 10 ** -2),
    (20, 10 ** -2),
    (16, 10 ** -1),
    (8, 10 ** -0),
    (4, 10 ** -0),
]


@pytest.mark.parametrize(
    "n_bits, atol",
    [pytest.param(n_bits, atol) for n_bits, atol in N_BITS_ATOL_TUPLE_LIST],
)
@pytest.mark.parametrize(
    "model, input_shape",
    [
        pytest.param(FC, (100, 32 * 32 * 3)),
    ],
)
@pytest.mark.parametrize(
    "is_signed",
    [pytest.param([False, True])],
)
def test_quantized_linear(model, input_shape, n_bits, atol, is_signed, seed_torch):
    """Test the quantized module with a post-training static quantization.

    With n_bits >> 0 we expect the results of the quantized module
    to be the same as the standard module.
    """
    # Seed torch
    seed_torch()
    # Define the torch model
    torch_fc_model = model()
    # Create random input
    numpy_input = numpy.random.uniform(size=input_shape)
    # Create corresponding numpy model
    numpy_fc_model = NumpyModule(torch_fc_model)
    # Predict with real model
    numpy_prediction = numpy_fc_model(numpy_input)
    # Quantize with post-training static method
    post_training_quant = PostTrainingAffineQuantization(
        n_bits, numpy_fc_model, is_signed=is_signed
    )
    quantized_model = post_training_quant.quantize_module(numpy_input)
    # Quantize input
    q_input = QuantizedArray(n_bits, numpy_input)
    # Forward and dequantize to get back to real values
    dequant_prediction = quantized_model.forward_and_dequant(q_input)

    assert numpy.isclose(numpy_prediction, dequant_prediction, atol=atol).all()
@@ -1,95 +0,0 @@
"""Tests for the torch to numpy module."""
import numpy
import pytest
from torch import nn

from concrete.quantization import QuantizedArray
from concrete.torch.compile import compile_torch_model

# INPUT_OUTPUT_FEATURE is the number of input and output of each of the network layers.
# (as well as the input of the network itself)
INPUT_OUTPUT_FEATURE = [1, 2]


class FC(nn.Module):
    """Torch model for the tests"""

    def __init__(self, input_output, activation_function):
        super().__init__()
        self.fc1 = nn.Linear(in_features=input_output, out_features=input_output)
        self.act_f = activation_function()
        self.fc2 = nn.Linear(in_features=input_output, out_features=input_output)

    def forward(self, x):
        """Forward pass."""
        out = self.fc1(x)
        out = self.act_f(out)
        out = self.fc2(out)

        return out


@pytest.mark.parametrize(
    "activation_function",
    [
        pytest.param(nn.Sigmoid, id="sigmoid"),
        pytest.param(nn.ReLU6, id="relu"),
    ],
)
@pytest.mark.parametrize(
    "model",
    [pytest.param(FC)],
)
@pytest.mark.parametrize(
    "input_output_feature",
    [pytest.param(input_output_feature) for input_output_feature in INPUT_OUTPUT_FEATURE],
)
def test_compile_torch(
    input_output_feature,
    model,
    activation_function,
    seed_torch,
    default_compilation_configuration,
    check_is_good_execution,
):
    """Test the different model architectures from torch to numpy."""

    # Seed torch
    seed_torch()

    n_bits = 2

    # Define an input shape (n_examples, n_features)
    n_examples = 50

    # Define the torch model
    torch_fc_model = model(input_output_feature, activation_function)
    # Create random input
    inputset = [
        numpy.random.uniform(-100, 100, size=input_output_feature) for _ in range(n_examples)
    ]

    # Compile
    quantized_numpy_module = compile_torch_model(
        torch_fc_model,
        inputset,
        default_compilation_configuration,
        n_bits=n_bits,
    )

    # Quantize inputs all at once to have meaningful scale and zero point
    q_input = QuantizedArray(n_bits, numpy.array(inputset))

    # Compare predictions between FHE and the QuantizedModule
    for x_q in q_input.qvalues:
        x_q = numpy.expand_dims(x_q, 0)
        check_is_good_execution(
            fhe_circuit=quantized_numpy_module.forward_fhe,
            function=quantized_numpy_module.forward,
            args=[x_q.astype(numpy.uint8)],
            postprocess_output_func=lambda x: quantized_numpy_module.dequantize_output(
                x.astype(numpy.float32)
            ),
            check_function=numpy.isclose,
            verbose=False,
        )
@@ -1,127 +0,0 @@
"""Tests for the torch to numpy module."""
import numpy
import pytest
import torch
from torch import nn

from concrete.torch import NumpyModule


class CNN(nn.Module):
    """Torch CNN model for the tests."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.AvgPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass."""
        x = self.pool(torch.relu(self.conv1(x)))
        x = self.pool(torch.relu(self.conv2(x)))
        x = torch.flatten(x, 1)
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x


class FC(nn.Module):
    """Torch model for the tests"""

    def __init__(self, activation_function):
        super().__init__()
        self.fc1 = nn.Linear(in_features=32 * 32 * 3, out_features=128)
        self.act_1 = activation_function()
        self.fc2 = nn.Linear(in_features=128, out_features=64)
        self.act_2 = activation_function()
        self.fc3 = nn.Linear(in_features=64, out_features=64)
        self.act_3 = activation_function()
        self.fc4 = nn.Linear(in_features=64, out_features=64)
        self.act_4 = activation_function()
        self.fc5 = nn.Linear(in_features=64, out_features=10)

    def forward(self, x):
        """Forward pass."""
        out = self.fc1(x)
        out = self.act_1(out)
        out = self.fc2(out)
        out = self.act_2(out)
        out = self.fc3(out)
        out = self.act_3(out)
        out = self.fc4(out)
        out = self.act_4(out)
        out = self.fc5(out)

        return out


@pytest.mark.parametrize(
    "model, input_shape",
    [
        pytest.param(FC, (100, 32 * 32 * 3)),
    ],
)
@pytest.mark.parametrize(
    "activation_function",
    [
        pytest.param(nn.Sigmoid, id="sigmoid"),
        pytest.param(nn.ReLU6, id="relu"),
    ],
)
def test_torch_to_numpy(model, input_shape, activation_function, seed_torch):
    """Test the different model architectures from torch to numpy."""

    # Seed torch
    seed_torch()
    # Define the torch model
    torch_fc_model = model(activation_function)
    # Create random input
    torch_input_1 = torch.randn(input_shape)
    # Predict with torch model
    torch_predictions = torch_fc_model(torch_input_1).detach().numpy()
    # Create corresponding numpy model
    numpy_fc_model = NumpyModule(torch_fc_model)
    # Torch input to numpy
    numpy_input_1 = torch_input_1.detach().numpy()
    # Predict with numpy model
    numpy_predictions = numpy_fc_model(numpy_input_1)

    # Test: the output of the numpy model has the same shape as the torch model's.
    assert numpy_predictions.shape == torch_predictions.shape
    # Test: predictions from the numpy model are the same as the torch model's.
    assert numpy.isclose(torch_predictions, numpy_predictions, rtol=10 ** -3).all()

    # Test: dynamics between layers are working (quantized input and activations)
    torch_input_2 = torch.randn(input_shape)
    # Make sure both inputs are different
    assert (torch_input_1 != torch_input_2).any()
    # Predict with torch
    torch_predictions = torch_fc_model(torch_input_2).detach().numpy()
    # Torch input to numpy
    numpy_input_2 = torch_input_2.detach().numpy()
    # Numpy predictions using the previous model
    numpy_predictions = numpy_fc_model(numpy_input_2)
    assert numpy.isclose(torch_predictions, numpy_predictions, rtol=10 ** -3).all()


@pytest.mark.parametrize(
    "model, incompatible_layer",
    [pytest.param(CNN, "Conv2d")],
)
def test_raises(model, incompatible_layer, seed_torch):
    """Function to test incompatible layers."""

    seed_torch()
    torch_incompatible_model = model()
    expected_errmsg = (
        f"The following module is currently not implemented: {incompatible_layer}. "
        f"Please stick to the available torch modules: "
        f"{', '.join(sorted(module.__name__ for module in NumpyModule.IMPLEMENTED_MODULES))}."
    )
    with pytest.raises(ValueError, match=expected_errmsg):
        NumpyModule(torch_incompatible_model)