mirror of
https://github.com/zama-ai/concrete.git
synced 2026-02-08 19:44:57 -05:00
feat: introduce maxpool extension for virtual circuits
This commit is contained in:
@@ -127,6 +127,11 @@ class GraphConverter:
|
||||
if inputs[0].is_encrypted and inputs[1].is_encrypted:
|
||||
return "only matrix multiplication between encrypted and clear is supported"
|
||||
|
||||
elif name == "maxpool":
|
||||
assert_that(len(inputs) == 1)
|
||||
if not inputs[0].is_encrypted:
|
||||
return "only encrypted maxpool is supported"
|
||||
|
||||
elif name == "multiply":
|
||||
assert_that(len(inputs) == 2)
|
||||
if not virtual and inputs[0].is_encrypted and inputs[1].is_encrypted:
|
||||
|
||||
@@ -187,6 +187,9 @@ class NodeConverter:
|
||||
elif name == "matmul":
|
||||
result = self._convert_matmul()
|
||||
|
||||
elif name == "maxpool":
|
||||
result = self._convert_maxpool()
|
||||
|
||||
elif name == "multiply":
|
||||
result = self._convert_mul()
|
||||
|
||||
@@ -516,6 +519,17 @@ class NodeConverter:
|
||||
|
||||
return result
|
||||
|
||||
def _convert_maxpool(self) -> OpResult:
    """
    Convert "maxpool" node to its corresponding MLIR representation.

    Lowering of maxpool to MLIR is not implemented, so this converter always
    fails; virtual circuits evaluate the operation in Python instead.

    Returns:
        OpResult:
            in-memory MLIR representation corresponding to `self.node`

    Raises:
        NotImplementedError:
            always, since maxpool cannot be lowered to MLIR yet
    """

    message = "MaxPool operation cannot be compiled yet"
    raise NotImplementedError(message)
|
||||
|
||||
def _convert_mul(self) -> OpResult:
|
||||
"""
|
||||
Convert "multiply" node to its corresponding MLIR representation.
|
||||
|
||||
@@ -329,6 +329,7 @@ class Node:
|
||||
"expand_dims",
|
||||
"index.static",
|
||||
"matmul",
|
||||
"maxpool",
|
||||
"multiply",
|
||||
"negative",
|
||||
"ones",
|
||||
|
||||
@@ -3,3 +3,4 @@ Implement machine learning operations as specified by ONNX.
|
||||
"""
|
||||
|
||||
from .convolution import conv
|
||||
from .maxpool import maxpool
|
||||
|
||||
333
concrete/onnx/maxpool.py
Normal file
333
concrete/onnx/maxpool.py
Normal file
@@ -0,0 +1,333 @@
|
||||
"""
|
||||
Tracing and evaluation of maxpool function.
|
||||
"""
|
||||
|
||||
from typing import List, Optional, Tuple, Union
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
|
||||
from ..numpy.internal.utils import assert_that
|
||||
from ..numpy.representation import Node
|
||||
from ..numpy.tracing import Tracer
|
||||
from ..numpy.values import Value
|
||||
|
||||
# pylint: disable=too-many-branches,too-many-statements


# every value the ONNX MaxPool operator accepts for `auto_pad`
AVAILABLE_AUTO_PAD = {
    "NOTSET",
    "SAME_UPPER",
    "SAME_LOWER",
    "VALID",
}

# every value the ONNX MaxPool operator accepts for `ceil_mode`
AVAILABLE_CEIL_MODE = {
    0,
    1,
}

# every value the ONNX MaxPool operator accepts for `storage_order`
AVAILABLE_STORAGE_ORDER = {
    0,
    1,
}


# subsets of the values above that this implementation currently supports;
# anything available but unsupported raises NotImplementedError in `maxpool`
SUPPORTED_AUTO_PAD = {
    "NOTSET",
}

SUPPORTED_CEIL_MODE = {
    0,
}

SUPPORTED_STORAGE_ORDER = {
    0,
}


# pylint: disable=no-member

# torch pooling function to use, keyed by the number of spatial dimensions
_EVALUATORS = {
    1: torch.max_pool1d,
    2: torch.max_pool2d,
    3: torch.max_pool3d,
}

# pylint: enable=no-member
|
||||
|
||||
|
||||
def maxpool(
    x: Union[np.ndarray, Tracer],
    kernel_shape: Union[Tuple[int, ...], List[int]],
    strides: Optional[Union[Tuple[int, ...], List[int]]] = None,
    auto_pad: str = "NOTSET",
    pads: Optional[Union[Tuple[int, ...], List[int]]] = None,
    dilations: Optional[Union[Tuple[int, ...], List[int]]] = None,
    ceil_mode: int = 0,
    storage_order: int = 0,
) -> Union[np.ndarray, Tracer]:
    """
    Evaluate or trace MaxPool operation.

    Refer to https://github.com/onnx/onnx/blob/main/docs/Operators.md#maxpool for more info.

    Args:
        x (Union[np.ndarray, Tracer]):
            input of shape (N, C, D1, ..., DN)

        kernel_shape (Union[Tuple[int, ...], List[int]]):
            shape of the kernel

        strides (Optional[Union[Tuple[int, ...], List[int]]]):
            stride along each spatial axis
            set to 1 along each spatial axis if not set

        auto_pad (str, default = "NOTSET"):
            padding strategy

        pads (Optional[Union[Tuple[int, ...], List[int]]]):
            padding for the beginning and ending along each spatial axis
            (D1_begin, D2_begin, ..., D1_end, D2_end, ...)
            set to 0 along each spatial axis if not set

        dilations (Optional[Union[Tuple[int, ...], List[int]]]):
            dilation along each spatial axis
            set to 1 along each spatial axis if not set

        ceil_mode (int, default = 0):
            ceiling mode

        storage_order (int, default = 0):
            storage order, 0 for row major, 1 for column major

    Raises:
        TypeError:
            if arguments are inappropriately typed

        ValueError:
            if arguments are inappropriate

        NotImplementedError:
            if desired operation is not supported yet

    Returns:
        Union[np.ndarray, Tracer]:
            maxpool over the input or traced computation
    """

    def check_value_is_a_tuple_or_list_of_ints_of_size(value_name, value, size) -> Tuple[int, ...]:
        # normalize lists to tuples so the rest of the code always sees tuples
        if isinstance(value, list):
            value = tuple(value)

        if not isinstance(value, tuple):
            raise TypeError(
                f"Expected {value_name} to be a tuple or a list but it's {type(value).__name__}"
            )

        for element in value:
            if not isinstance(element, int):
                raise TypeError(
                    f"Expected {value_name} to consist of integers "
                    f"but it has an element of type {type(element).__name__}"
                )

        if len(value) != size:
            raise ValueError(
                f"Expected {value_name} to have {size} elements but it has {len(value)}"
            )

        return value

    # check x

    if isinstance(x, list):  # pragma: no cover
        # best-effort coercion; a failure falls through to the type checks below
        try:
            x = np.array(x)
        except Exception:  # pylint: disable=broad-except
            pass

    if isinstance(x, np.ndarray):
        if not (
            np.issubdtype(x.dtype, np.integer)
            or np.issubdtype(x.dtype, np.floating)
            or np.issubdtype(x.dtype, np.bool_)
        ):
            raise TypeError(
                f"Expected input elements to be of type np.integer, np.floating, or np.bool_ "
                f"but it's {type(x.dtype).__name__}"
            )
    elif not isinstance(x, Tracer):
        # bug fix: the message previously reported type(auto_pad) instead of type(x)
        raise TypeError(
            f"Expected input to be of type np.ndarray or Tracer "
            f"but it's {type(x).__name__}"
        )

    if x.ndim < 3:
        raise ValueError(
            f"Expected input to have at least 3 dimensions (N, C, D1, ...) "
            f"but it only has {x.ndim}"
        )

    # torch only provides 1D, 2D and 3D pooling, so spatial rank is capped at 3
    if x.ndim > 5:
        raise NotImplementedError(f"{x.ndim - 2}D maximum pooling is not supported yet")

    # check kernel_shape

    kernel_shape = check_value_is_a_tuple_or_list_of_ints_of_size(
        "kernel_shape", kernel_shape, x.ndim - 2
    )

    # check strides

    if strides is None:
        strides = (1,) * (x.ndim - 2)

    strides = check_value_is_a_tuple_or_list_of_ints_of_size("strides", strides, x.ndim - 2)

    # check auto_pad

    if not isinstance(auto_pad, str):
        raise TypeError(
            f"Expected auto_pad to be of type str but it's {type(auto_pad).__name__}",
        )

    if auto_pad not in AVAILABLE_AUTO_PAD:
        raise ValueError(
            f"Expected auto_pad to be one of "
            f"{', '.join(sorted(AVAILABLE_AUTO_PAD))} "
            f"but it's {auto_pad}",
        )

    if auto_pad not in SUPPORTED_AUTO_PAD:
        raise NotImplementedError(
            f"Desired auto_pad of {auto_pad} is not supported yet",
        )

    # check pads

    if pads is None:
        pads = (0,) * (2 * (x.ndim - 2))

    pads = check_value_is_a_tuple_or_list_of_ints_of_size("pads", pads, 2 * (x.ndim - 2))

    # ONNX pads are laid out as (D1_begin, ..., DN_begin, D1_end, ..., DN_end);
    # only symmetric padding can be forwarded to the torch evaluator
    for i in range(len(pads) // 2):
        pad_begin = pads[i]
        pad_end = pads[i + len(pads) // 2]
        if pad_begin != pad_end:
            raise NotImplementedError(
                f"Desired pads of {pads} is not supported yet because of uneven padding"
            )

    # check dilations

    if dilations is None:
        dilations = (1,) * (x.ndim - 2)

    dilations = check_value_is_a_tuple_or_list_of_ints_of_size("dilations", dilations, x.ndim - 2)

    # check ceil_mode

    if not isinstance(ceil_mode, int):
        raise TypeError(
            f"Expected ceil_mode to be of type int but it's {type(ceil_mode).__name__}",
        )

    if ceil_mode not in AVAILABLE_CEIL_MODE:
        raise ValueError(
            f"Expected ceil_mode to be one of "
            f"{', '.join(sorted(str(mode) for mode in AVAILABLE_CEIL_MODE))} "
            f"but it's {ceil_mode}",
        )

    if ceil_mode not in SUPPORTED_CEIL_MODE:
        raise NotImplementedError(
            f"Desired ceil_mode of {ceil_mode} is not supported yet",
        )

    # check storage_order

    if not isinstance(storage_order, int):
        raise TypeError(
            f"Expected storage_order to be of type int but it's {type(storage_order).__name__}",
        )

    if storage_order not in AVAILABLE_STORAGE_ORDER:
        raise ValueError(
            f"Expected storage_order to be one of "
            f"{', '.join(sorted(str(order) for order in AVAILABLE_STORAGE_ORDER))} "
            f"but it's {storage_order}",
        )

    if storage_order not in SUPPORTED_STORAGE_ORDER:
        raise NotImplementedError(
            f"Desired storage_order of {storage_order} is not supported yet",
        )

    # trace or evaluate
    return _trace_or_evaluate(x, kernel_shape, strides, pads, dilations, ceil_mode == 1)
|
||||
|
||||
|
||||
def _trace_or_evaluate(
    x: Union[np.ndarray, Tracer],
    kernel_shape: Tuple[int, ...],
    strides: Tuple[int, ...],
    pads: Tuple[int, ...],
    dilations: Tuple[int, ...],
    ceil_mode: bool,
) -> Union[np.ndarray, Tracer]:
    """
    Evaluate maxpool eagerly on an array, or record it as a graph node when tracing.

    `ceil_mode` is already a bool here (the caller converts the ONNX int flag),
    so it is forwarded as-is; the previous redundant `== 1` re-comparisons on the
    evaluation paths were dropped (they were also inconsistent with the raw value
    forwarded through `kwargs`).

    Returns:
        Union[np.ndarray, Tracer]:
            maxpool result for concrete inputs, or a new tracer for traced inputs
    """

    if not isinstance(x, Tracer):
        # eager path: a concrete array is pooled immediately
        return _evaluate(x, kernel_shape, strides, pads, dilations, ceil_mode)

    # tracing path: run on a zero array of the same shape to learn the output shape
    result = _evaluate(np.zeros(x.shape), kernel_shape, strides, pads, dilations, ceil_mode)
    resulting_value = Value.of(result)

    # the output keeps the encryption status and dtype of the traced input
    resulting_value.is_encrypted = x.output.is_encrypted
    resulting_value.dtype = x.output.dtype

    computation = Node.generic(
        "maxpool",
        [x.output],
        resulting_value,
        _evaluate,
        kwargs={
            "kernel_shape": kernel_shape,
            "strides": strides,
            "pads": pads,
            "dilations": dilations,
            "ceil_mode": ceil_mode,
        },
    )
    return Tracer(computation, [x])
|
||||
|
||||
|
||||
def _evaluate(
    x: np.ndarray,
    kernel_shape: Tuple[int, ...],
    strides: Tuple[int, ...],
    pads: Tuple[int, ...],
    dilations: Tuple[int, ...],
    ceil_mode: bool,
) -> np.ndarray:
    """
    Compute maxpool of `x` by delegating to the matching torch pooling function.
    """

    # pylint: disable=no-member

    spatial_dims = x.ndim - 2
    assert_that(spatial_dims in {1, 2, 3})

    pool = _EVALUATORS[spatial_dims]

    # torch only supports float maxpools, so compute in float64 and cast back;
    # torch takes symmetric padding, hence only the "begin" half of `pads`
    pooled = pool(
        torch.from_numpy(x.astype(np.float64)),
        kernel_shape,
        strides,
        pads[: len(pads) // 2],
        dilations,
        ceil_mode,
    )

    # pylint: enable=no-member

    return pooled.numpy().astype(x.dtype)
|
||||
393
tests/execution/test_maxpool.py
Normal file
393
tests/execution/test_maxpool.py
Normal file
@@ -0,0 +1,393 @@
|
||||
"""
|
||||
Tests of execution of maxpool operation.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import concrete.numpy as cnp
|
||||
import concrete.onnx as connx
|
||||
|
||||
|
||||
# each case: maxpool kwargs, 1D/2D spatial input, expected pooled output
@pytest.mark.parametrize(
    "operation,sample_input,expected_output",
    [
        pytest.param(
            {"kernel_shape": (3,)},
            [1, 2, 2, 3, 2, 2, 2, 4, 1, 5, 2, 6],
            [2, 3, 3, 3, 2, 4, 4, 5, 5, 6],
        ),
        pytest.param(
            {"kernel_shape": (3,), "strides": (2,)},
            [1, 2, 2, 3, 2, 2, 2, 4, 1, 5, 2, 6, 7],
            [2, 3, 2, 4, 5, 7],
        ),
        pytest.param(
            {
                "kernel_shape": (2, 2),
            },
            [
                [3, 1, 2],
                [1, 1, 1],
                [2, 3, 4],
                [4, 1, 2],
            ],
            [
                [3, 2],
                [3, 4],
                [4, 4],
            ],
        ),
        pytest.param(
            {
                "kernel_shape": (2, 2),
                "strides": (2, 1),
            },
            [
                [3, 1, 2],
                [1, 1, 1],
                [2, 3, 4],
                [4, 1, 2],
            ],
            [
                [3, 2],
                [4, 4],
            ],
        ),
    ],
)
def test_maxpool(
    operation,
    sample_input,
    expected_output,
    helpers,
):
    """
    Test maxpool.
    """

    # inputs are written as bare spatial data; add batch and channel axes (N=1, C=1)
    sample_input = np.expand_dims(np.array(sample_input), axis=(0, 1))
    expected_output = np.expand_dims(np.array(expected_output), axis=(0, 1))

    # eager (non-traced) evaluation
    assert np.array_equal(connx.maxpool(sample_input, **operation), expected_output)

    @cnp.compiler({"x": "encrypted"})
    def function(x):
        return connx.maxpool(x, **operation)

    # maxpool cannot be lowered to MLIR yet, so compile as a virtual circuit
    circuit = function.compile([sample_input], helpers.configuration(), virtual=True)
    helpers.check_execution(circuit, function, sample_input)
|
||||
|
||||
|
||||
# each case: input shape, maxpool kwargs, expected exception type, expected message
@pytest.mark.parametrize(
    "input_shape,operation,expected_error,expected_message",
    [
        pytest.param(
            (10, 10),
            {
                "kernel_shape": (),
            },
            ValueError,
            "Expected input to have at least 3 dimensions (N, C, D1, ...) but it only has 2",
        ),
        pytest.param(
            (1, 1, 5, 4, 3, 2),
            {
                "kernel_shape": (),
            },
            NotImplementedError,
            "4D maximum pooling is not supported yet",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": "",
            },
            TypeError,
            "Expected kernel_shape to be a tuple or a list but it's str",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": ["0"],
            },
            TypeError,
            "Expected kernel_shape to consist of integers but it has an element of type str",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (3,),
            },
            ValueError,
            "Expected kernel_shape to have 2 elements but it has 1",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "strides": "",
            },
            TypeError,
            "Expected strides to be a tuple or a list but it's str",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "strides": ["0"],
            },
            TypeError,
            "Expected strides to consist of integers but it has an element of type str",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "strides": (3,),
            },
            ValueError,
            "Expected strides to have 2 elements but it has 1",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "auto_pad": True,
            },
            TypeError,
            "Expected auto_pad to be of type str but it's bool",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "auto_pad": "YES_PLEASE",
            },
            ValueError,
            "Expected auto_pad to be one of NOTSET, SAME_LOWER, SAME_UPPER, VALID "
            "but it's YES_PLEASE",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "auto_pad": "VALID",
            },
            NotImplementedError,
            "Desired auto_pad of VALID is not supported yet",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "pads": "",
            },
            TypeError,
            "Expected pads to be a tuple or a list but it's str",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "pads": ["0"],
            },
            TypeError,
            "Expected pads to consist of integers but it has an element of type str",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "pads": (3,),
            },
            ValueError,
            "Expected pads to have 4 elements but it has 1",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "pads": (1, 1, 2, 2),
            },
            NotImplementedError,
            "Desired pads of (1, 1, 2, 2) is not supported yet because of uneven padding",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "dilations": "",
            },
            TypeError,
            "Expected dilations to be a tuple or a list but it's str",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "dilations": ["0"],
            },
            TypeError,
            "Expected dilations to consist of integers but it has an element of type str",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "dilations": (3,),
            },
            ValueError,
            "Expected dilations to have 2 elements but it has 1",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "ceil_mode": None,
            },
            TypeError,
            "Expected ceil_mode to be of type int but it's NoneType",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "ceil_mode": 10,
            },
            ValueError,
            "Expected ceil_mode to be one of 0, 1 but it's 10",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "ceil_mode": 1,
            },
            NotImplementedError,
            "Desired ceil_mode of 1 is not supported yet",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "storage_order": None,
            },
            TypeError,
            "Expected storage_order to be of type int but it's NoneType",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "storage_order": 10,
            },
            ValueError,
            "Expected storage_order to be one of 0, 1 but it's 10",
        ),
        pytest.param(
            (1, 1, 5, 4),
            {
                "kernel_shape": (2, 3),
                "storage_order": 1,
            },
            NotImplementedError,
            "Desired storage_order of 1 is not supported yet",
        ),
    ],
)
def test_bad_maxpool(
    input_shape,
    operation,
    expected_error,
    expected_message,
    helpers,
):
    """
    Test maxpool with bad parameters.
    """

    # random integer input; only the validation path is exercised, not the values
    with pytest.raises(expected_error) as excinfo:
        connx.maxpool(np.random.randint(0, 10, size=input_shape), **operation)

    helpers.check_str(expected_message, str(excinfo.value))
|
||||
|
||||
|
||||
def test_bad_maxpool_special(helpers):
    """
    Test maxpool with bad parameters for special cases.
    """

    # without virtual
    # ---------------

    # non-virtual compilation must fail: maxpool has no MLIR lowering yet
    @cnp.compiler({"x": "encrypted"})
    def without_virtual(x):
        return connx.maxpool(x, kernel_shape=(4, 3))

    inputset = [np.random.randint(0, 10, size=(1, 1, 10, 10)) for i in range(100)]
    with pytest.raises(NotImplementedError) as excinfo:
        without_virtual.compile(inputset, helpers.configuration())

    helpers.check_str("MaxPool operation cannot be compiled yet", str(excinfo.value))

    # clear input
    # -----------

    # maxpool on a clear tensor is rejected during graph conversion
    @cnp.compiler({"x": "clear"})
    def clear_input(x):
        return connx.maxpool(x, kernel_shape=(4, 3, 2))

    inputset = [np.random.randint(0, 10, size=(1, 1, 10, 10, 10)) for i in range(100)]
    with pytest.raises(RuntimeError) as excinfo:
        clear_input.compile(inputset, helpers.configuration())

    helpers.check_str(
        # pylint: disable=line-too-long
        """

Function you are trying to compile cannot be converted to MLIR

%0 = x # ClearTensor<uint4, shape=(1, 1, 10, 10, 10)>
%1 = maxpool(%0, kernel_shape=(4, 3, 2), strides=(1, 1, 1), pads=(0, 0, 0, 0, 0, 0), dilations=(1, 1, 1), ceil_mode=False) # ClearTensor<uint4, shape=(1, 1, 7, 8, 9)>
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ only encrypted maxpool is supported
return %1

""".strip(),  # noqa: E501
        # pylint: enable=line-too-long
        str(excinfo.value),
    )

    # badly typed ndarray input
    # -------------------------

    # object-dtype arrays are rejected by the dtype check
    with pytest.raises(TypeError) as excinfo:
        connx.maxpool(np.array([{}, None]), ())

    helpers.check_str(
        # pylint: disable=line-too-long
        """

Expected input elements to be of type np.integer, np.floating, or np.bool_ but it's dtype[object_]

""".strip(),  # noqa: E501
        # pylint: enable=line-too-long
        str(excinfo.value),
    )

    # badly typed input
    # -----------------

    # anything that is neither an ndarray nor a Tracer is rejected
    with pytest.raises(TypeError) as excinfo:
        connx.maxpool("", ())

    helpers.check_str(
        # pylint: disable=line-too-long
        """

Expected input to be of type np.ndarray or Tracer but it's str

""".strip(),  # noqa: E501
        # pylint: enable=line-too-long
        str(excinfo.value),
    )
|
||||
Reference in New Issue
Block a user