Refactor testing framework. (#117)

This commit is contained in:
Ean Garvey
2022-06-12 22:15:30 -05:00
committed by GitHub
parent eb3781ddb2
commit cbd131d588
20 changed files with 1013 additions and 257 deletions

View File

@@ -74,7 +74,7 @@ jobs:
# Install the built wheel
pip install ./wheelhouse/nodai*
# Validate the Models
pytest -k 'not benchmark' --ignore=shark/tests/test_hf_benchmark.py --ignore=shark/tests/test_shark_importer.py
pytest -k 'not benchmark' --ignore=benchmarks/tests/test_hf_benchmark.py --ignore=shark/tests/test_shark_importer.py
- name: Upload Release Assets
id: upload-release-assets

View File

@@ -52,7 +52,7 @@ jobs:
cd $GITHUB_WORKSPACE
IMPORTER=1 ./setup_venv.sh
source shark.venv/bin/activate
pytest -k 'not benchmark' --ignore=shark/tests/test_hf_benchmark.py --ignore=shark/tests/test_shark_importer.py
pytest -k 'not benchmark' --ignore=benchmarks/tests/test_hf_benchmark.py --ignore=shark/tests/test_shark_importer.py
perf-macOS:
runs-on: MacStudio
@@ -76,7 +76,7 @@ jobs:
cd $GITHUB_WORKSPACE
PYTHON=python3.10 IMPORTER=1 ./setup_venv.sh
source shark.venv/bin/activate
pytest -k 'not benchmark' --ignore=shark/tests/test_hf_benchmark.py --ignore=shark/tests/test_shark_importer.py
pytest -k 'not benchmark' --ignore=benchmarks/tests/test_hf_benchmark.py --ignore=shark/tests/test_shark_importer.py
perf-linux:
runs-on: a100
@@ -106,4 +106,4 @@ jobs:
cd $GITHUB_WORKSPACE
IMPORTER=1 ./setup_venv.sh
source shark.venv/bin/activate
pytest -k 'not benchmark' --ignore=shark/tests/test_hf_benchmark.py --ignore=shark/tests/test_shark_importer.py --ignore shark/tests/test_benchmark.py
pytest -k 'not benchmark' --ignore=benchmarks/tests/test_hf_benchmark.py --ignore=shark/tests/test_shark_importer.py --ignore benchmarks/tests/test_benchmark.py

View File

@@ -83,12 +83,17 @@ python -m shark.examples.shark_inference.resnet50_script --device="cpu" # Use g
```
### Run all tests on CPU/GPU/VULKAN/Metal
### Run all model tests on CPU/GPU/VULKAN/Metal
```shell
pytest
pytest shark/tests/models
# If on Linux for quicker results:
pytest --workers auto
pytest shark/tests/models -n auto
```
### Run all model benchmark tests on CPU/GPU/VULKAN/Metal
```shell
pytest shark/tests/benchmarks
```
</details>

View File

@@ -8,6 +8,7 @@ import torchvision.models as models
from transformers import AutoModelForSequenceClassification, BertTokenizer, TFBertModel
import importlib
import pytest
import unittest
torch.manual_seed(0)
@@ -113,13 +114,34 @@ def get_vision_model(torch_model):
############################# Benchmark Tests ####################################
# Test running benchmark module without failing.
# Shared parametrization for all benchmark tests: each test runs over every
# (dynamic, device) combination, skipping devices whose drivers are absent on
# the host (nvidia-smi for GPU, vulkaninfo for Vulkan).
pytest_benchmark_param = pytest.mark.parametrize(
    ('dynamic', 'device'),
    [
        pytest.param(False, 'cpu'),
        # TODO: Language models are failing for dynamic case..
        pytest.param(True, 'cpu', marks=pytest.mark.skip),
        pytest.param(False,
                     'gpu',
                     marks=pytest.mark.skipif(check_device_drivers("gpu"),
                                              reason="nvidia-smi not found")),
        pytest.param(True,
                     'gpu',
                     marks=pytest.mark.skipif(check_device_drivers("gpu"),
                                              reason="nvidia-smi not found")),
        pytest.param(
            False,
            'vulkan',
            marks=pytest.mark.skipif(
                check_device_drivers("vulkan"),
                reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
            )),
        pytest.param(
            True,
            'vulkan',
            marks=pytest.mark.skipif(
                check_device_drivers("vulkan"),
                reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
            )),
    ])

View File

@@ -1,3 +1,3 @@
[pytest]
addopts = --verbose -p no:warnings
norecursedirs = inference tank
norecursedirs = inference tank/tflite

View File

@@ -94,3 +94,15 @@ class SharkInference:
@benchmark_mode
def benchmark_all(self, inputs):
    # Run every benchmark variant on *inputs* by delegating to the runner.
    self.shark_runner.benchmark_all(inputs)
@benchmark_mode
def benchmark_frontend(self, inputs):
    # Benchmark through the frontend path; delegates to the runner.
    self.shark_runner.benchmark_frontend(inputs)
@benchmark_mode
def benchmark_python(self, inputs):
    # Benchmark the Python execution path; delegates to the runner.
    self.shark_runner.benchmark_python(inputs)
@benchmark_mode
def benchmark_c(self):
    # Benchmark the C execution path; takes no inputs, delegates to the runner.
    self.shark_runner.benchmark_c()

View File

@@ -1,248 +0,0 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
import torch
import numpy as np
import torchvision.models as models
from transformers import AutoModelForSequenceClassification
import pytest
torch.manual_seed(0)
##################### Hugging Face LM Models ###################################
class HuggingFaceLanguage(torch.nn.Module):
def __init__(self, hf_model_name):
super().__init__()
self.model = AutoModelForSequenceClassification.from_pretrained(
hf_model_name, # The pretrained model.
num_labels=
2, # The number of output labels--2 for binary classification.
output_attentions=
False, # Whether the model returns attentions weights.
output_hidden_states=
False, # Whether the model returns all hidden-states.
torchscript=True,
)
def forward(self, tokens):
return self.model.forward(tokens)[0]
def get_hf_model(name):
model = HuggingFaceLanguage(name)
# TODO: Currently the test input is set to (1,128)
test_input = torch.randint(2, (1, 128))
actual_out = model(test_input)
return model, test_input, actual_out
################################################################################
##################### Torch Vision Models ###################################
class VisionModule(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
self.train(False)
def forward(self, input):
return self.model.forward(input)
def get_vision_model(torch_model):
model = VisionModule(torch_model)
# TODO: Currently the test input is set to (1,128)
test_input = torch.randn(1, 3, 224, 224)
actual_out = model(test_input)
return model, test_input, actual_out
################################################################################
# Utility function for comparing two tensors.
def compare_tensors(torch_tensor, numpy_tensor):
# setting the absolute and relative tolerance
rtol = 1e-02
atol = 1e-03
torch_to_numpy = torch_tensor.detach().numpy()
return np.allclose(torch_to_numpy, numpy_tensor, rtol, atol)
################################################################################
############################# Model Tests ####################################
# A specific case can be run by commenting different cases. Runs all the test
# across cpu, gpu and vulkan according to available drivers.
pytest_param = pytest.mark.parametrize(
('dynamic', 'device'),
[
pytest.param(False, 'cpu'),
# TODO: Language models are failing for dynamic case..
pytest.param(True, 'cpu', marks=pytest.mark.skip),
pytest.param(False,
'gpu',
marks=pytest.mark.skipif(check_device_drivers("gpu"),
reason="nvidia-smi not found")),
pytest.param(True,
'gpu',
marks=pytest.mark.skipif(check_device_drivers("gpu"),
reason="nvidia-smi not found")),
pytest.param(
False,
'vulkan',
marks=pytest.mark.skipif(
check_device_drivers("vulkan"),
reason=
"vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
)),
pytest.param(
True,
'vulkan',
marks=pytest.mark.skipif(
check_device_drivers("vulkan"),
reason=
"vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
)),
])
@pytest_param
def test_bert(dynamic, device):
model, input, act_out = get_hf_model("bert-base-uncased")
shark_module = SharkInference(model, (input,),
device=device,
dynamic=dynamic,
jit_trace=True)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_albert(dynamic, device):
model, input, act_out = get_hf_model("albert-base-v2")
shark_module = SharkInference(model, (input,),
device=device,
dynamic=dynamic,
jit_trace=True)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_resnet18(dynamic, device):
model, input, act_out = get_vision_model(models.resnet18(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=device,
dynamic=dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_resnet50(dynamic, device):
model, input, act_out = get_vision_model(models.resnet50(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=device,
dynamic=dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_wide_resnet50(dynamic, device):
model, input, act_out = get_vision_model(
models.wide_resnet50_2(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=device,
dynamic=dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_minilm(dynamic, device):
model, input, act_out = get_hf_model("microsoft/MiniLM-L12-H384-uncased")
shark_module = SharkInference(model, (input,),
device=device,
dynamic=dynamic,
jit_trace=True)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_squeezenet(dynamic, device):
model, input, act_out = get_vision_model(
models.squeezenet1_0(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=device,
dynamic=dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_alexnet(dynamic, device):
model, input, act_out = get_vision_model(models.alexnet(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=device,
dynamic=dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_resnet101(dynamic, device):
model, input, act_out = get_vision_model(models.resnet101(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=device,
dynamic=dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
@pytest_param
def test_distilbert(dynamic, device):
model, input, act_out = get_hf_model("distilbert-base-uncased")
shark_module = SharkInference(model, (input,),
device=device,
dynamic=dynamic,
jit_trace=True)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)

View File

@@ -0,0 +1,82 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_hf_model, compare_tensors
import torch
import unittest
import numpy as np
import pytest
#torch.manual_seed(0)
class AlbertModuleTester:
    """Compiles albert-base-v2 through SharkInference and checks its output.

    The `dynamic` and `device` attributes are reassigned by the enclosing
    unittest TestCase before each run.
    """

    def __init__(
        self,
        dynamic=False,  # compile with dynamic shapes when True
        device="cpu",  # target backend: "cpu", "gpu" or "vulkan"
    ):
        self.dynamic = dynamic
        self.device = device

    def create_and_check_module(self):
        # Fetch the HF model plus a sample input and its reference output.
        model, input, act_out = get_hf_model("albert-base-v2")
        shark_module = SharkInference(model, (input,),
                                      device=self.device,
                                      dynamic=self.dynamic,
                                      jit_trace=True)
        shark_module.compile()
        results = shark_module.forward((input,))
        # Compare the SHARK result against the eager torch reference.
        assert True == compare_tensors(act_out, results)
class AlbertModuleTest(unittest.TestCase):
    """Runs AlbertModuleTester across (dynamic, device) combinations."""

    def setUp(self):
        # Fix: AlbertModuleTester's constructor is (dynamic, device); the
        # previous code passed the TestCase itself, binding it to `dynamic`.
        # Each test sets the attributes explicitly, so construct with defaults.
        self.module_tester = AlbertModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="Albert model on GPU currently fails to produce torch numbers")
    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,82 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_vision_model, compare_tensors
import torch
import unittest
import numpy as np
import torchvision.models as models
import pytest
torch.manual_seed(0)
class AlexnetModuleTester:
def __init__(
self,
dynamic=False,
device="cpu",
):
self.dynamic = dynamic
self.device = device
def create_and_check_module(self):
model, input, act_out = get_vision_model(models.alexnet(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=self.device,
dynamic=self.dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
class AlexnetModuleTest(unittest.TestCase):
    """Runs AlexnetModuleTester across (dynamic, device) combinations."""

    def setUp(self):
        # Fix: AlexnetModuleTester's constructor is (dynamic, device); the
        # previous code passed the TestCase itself, binding it to `dynamic`.
        # Each test sets the attributes explicitly, so construct with defaults.
        self.module_tester = AlexnetModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,83 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_hf_model, compare_tensors
import torch
import unittest
import numpy as np
import pytest
#torch.manual_seed(0)
class BertModuleTester:
def __init__(
self,
dynamic=False,
device="cpu",
):
self.dynamic = dynamic
self.device = device
def create_and_check_module(self):
model, input, act_out = get_hf_model("bert-base-uncased")
shark_module = SharkInference(model, (input,),
device=self.device,
dynamic=self.dynamic,
jit_trace=True)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
class BertModuleTest(unittest.TestCase):
    """Runs BertModuleTester across (dynamic, device) combinations."""

    def setUp(self):
        # Fix: BertModuleTester's constructor is (dynamic, device); the
        # previous code passed the TestCase itself, binding it to `dynamic`.
        # Each test sets the attributes explicitly, so construct with defaults.
        self.module_tester = BertModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="BERT model on GPU currently fails to produce torch numbers")
    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,83 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_hf_model, compare_tensors
import torch
import unittest
import numpy as np
import pytest
torch.manual_seed(0)
class MiniLMModuleTester:
def __init__(
self,
dynamic=False,
device="cpu",
):
self.dynamic = dynamic
self.device = device
def create_and_check_module(self):
model, input, act_out = get_hf_model("microsoft/MiniLM-L12-H384-uncased")
shark_module = SharkInference(model, (input,),
device=self.device,
dynamic=self.dynamic,
jit_trace=True)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
class MiniLMModuleTest(unittest.TestCase):
    """Runs MiniLMModuleTester across (dynamic, device) combinations."""

    def setUp(self):
        # Fix: MiniLMModuleTester's constructor is (dynamic, device); the
        # previous code passed the TestCase itself, binding it to `dynamic`.
        # Each test sets the attributes explicitly, so construct with defaults.
        self.module_tester = MiniLMModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="language models failing for dynamic case")
    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="minilm inference on gpu currently returns invalid results")
    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="language models failing for dynamic case")
    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.xfail(reason="language models failing for dynamic case")
    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,81 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_vision_model, compare_tensors
import torch
import unittest
import numpy as np
import torchvision.models as models
import pytest
torch.manual_seed(0)
class Resnet101ModuleTester:
def __init__(
self,
dynamic=False,
device="cpu",
):
self.dynamic = dynamic
self.device = device
def create_and_check_module(self):
model, input, act_out = get_vision_model(models.resnet101(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=self.device,
dynamic=self.dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
class Resnet101ModuleTest(unittest.TestCase):
    """Runs Resnet101ModuleTester across (dynamic, device) combinations."""

    def setUp(self):
        # Fix: Resnet101ModuleTester's constructor is (dynamic, device); the
        # previous code passed the TestCase itself, binding it to `dynamic`.
        # Each test sets the attributes explicitly, so construct with defaults.
        self.module_tester = Resnet101ModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,82 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_vision_model, compare_tensors
import torch
import unittest
import numpy as np
import torchvision.models as models
import pytest
torch.manual_seed(0)
class Resnet18ModuleTester:
def __init__(
self,
dynamic=False,
device="cpu",
):
self.dynamic = dynamic
self.device = device
def create_and_check_module(self):
model, input, act_out = get_vision_model(models.resnet18(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=self.device,
dynamic=self.dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
class Resnet18ModuleTest(unittest.TestCase):
    """Runs Resnet18ModuleTester across (dynamic, device) combinations."""

    def setUp(self):
        # Fix: Resnet18ModuleTester's constructor is (dynamic, device); the
        # previous code passed the TestCase itself, binding it to `dynamic`.
        # Each test sets the attributes explicitly, so construct with defaults.
        self.module_tester = Resnet18ModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,82 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_vision_model, compare_tensors
import torch
import unittest
import numpy as np
import torchvision.models as models
import pytest
torch.manual_seed(0)
class Resnet50ModuleTester:
def __init__(
self,
dynamic=False,
device="cpu",
):
self.dynamic = dynamic
self.device = device
def create_and_check_module(self):
model, input, act_out = get_vision_model(models.resnet50(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=self.device,
dynamic=self.dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
class Resnet50ModuleTest(unittest.TestCase):
    """Runs Resnet50ModuleTester across (dynamic, device) combinations."""

    def setUp(self):
        # Fix: Resnet50ModuleTester's constructor is (dynamic, device); the
        # previous code passed the TestCase itself, binding it to `dynamic`.
        # Each test sets the attributes explicitly, so construct with defaults.
        self.module_tester = Resnet50ModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,82 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_vision_model, compare_tensors
import torch
import unittest
import numpy as np
import torchvision.models as models
import pytest
torch.manual_seed(0)
class SqueezenetModuleTester:
def __init__(
self,
dynamic=False,
device="cpu",
):
self.dynamic = dynamic
self.device = device
def create_and_check_module(self):
model, input, act_out = get_vision_model(models.squeezenet1_0(pretrained=True))
shark_module = SharkInference(
model,
(input,),
device=self.device,
dynamic=self.dynamic,
)
shark_module.compile()
results = shark_module.forward((input,))
assert True == compare_tensors(act_out, results)
class SqueezenetModuleTest(unittest.TestCase):
    """Runs SqueezenetModuleTester across (dynamic, device) combinations."""

    def setUp(self):
        # Fix: SqueezenetModuleTester's constructor is (dynamic, device); the
        # previous code passed the TestCase itself, binding it to `dynamic`.
        # Each test sets the attributes explicitly, so construct with defaults.
        self.module_tester = SqueezenetModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
if __name__ == '__main__':
unittest.main()

View File

@@ -0,0 +1,74 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
import torch
import numpy as np
import torchvision.models as models
from transformers import AutoModelForSequenceClassification, BertTokenizer, TFBertModel
import importlib
torch.manual_seed(0)
##################### Hugging Face LM Models ###################################
class HuggingFaceLanguage(torch.nn.Module):
    """Sequence-classification wrapper around a Hugging Face transformer.

    Loads the pretrained model in torchscript mode and exposes only the
    logits tensor from ``forward``.
    """

    def __init__(self, hf_model_name):
        super().__init__()
        self.model = AutoModelForSequenceClassification.from_pretrained(
            hf_model_name,  # The pretrained model.
            num_labels=2,  # The number of output labels--2 for binary classification.
            output_attentions=False,  # Whether the model returns attentions weights.
            output_hidden_states=False,  # Whether the model returns all hidden-states.
            torchscript=True,
        )

    def forward(self, tokens):
        # [0] selects the logits from the torchscript tuple output.
        return self.model.forward(tokens)[0]
def get_hf_model(name):
    """Build the named HF classifier plus a sample input and its reference output.

    Returns a (model, test_input, actual_out) triple used by the model tests.
    """
    hf_module = HuggingFaceLanguage(name)
    # TODO: test input is currently hard-coded to token shape (1, 128).
    tokens = torch.randint(2, (1, 128))
    reference = hf_module(tokens)
    return hf_module, tokens, reference
################################################################################
##################### Torch Vision Models ###################################
class VisionModule(torch.nn.Module):
    """Thin inference-mode wrapper around a torchvision model."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        # Put the wrapper (and the wrapped model) into eval mode up front;
        # the tests only ever run inference through this module.
        self.train(False)

    def forward(self, input):
        result = self.model.forward(input)
        return result
def get_vision_model(torch_model):
    """Wrap *torch_model* for inference and return (model, test_input, actual_out).

    The reference output is computed eagerly so callers can compare it
    against compiled results.
    """
    wrapped = VisionModule(torch_model)
    # TODO: test input is currently hard-coded to image shape (1, 3, 224, 224).
    sample = torch.randn(1, 3, 224, 224)
    expected = wrapped(sample)
    return wrapped, sample, expected
################################################################################
# Utility function for comparing two tensors (torch).
def compare_tensors(torch_tensor, numpy_tensor):
    """Return True when the torch result matches the numpy result within tolerance."""
    # Tolerances are deliberately loose: different backends produce slightly
    # different floating point results.
    return np.allclose(
        torch_tensor.detach().numpy(),
        numpy_tensor,
        1e-02,  # relative tolerance
        1e-03,  # absolute tolerance
    )

View File

@@ -0,0 +1,82 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.pytorch.tests.test_utils import get_vision_model, compare_tensors
import torch
import unittest
import numpy as np
import torchvision.models as models
import pytest
torch.manual_seed(0)
class WideResnet50ModuleTester:
    """Compiles torchvision's wide_resnet50_2 through SharkInference and checks its output."""

    def __init__(self, dynamic=False, device="cpu"):
        # dynamic: compile with dynamic shapes; device: backend to target.
        self.dynamic = dynamic
        self.device = device

    def create_and_check_module(self):
        """Build the pretrained model, compile it, and compare against the torch reference."""
        model, input, act_out = get_vision_model(
            models.wide_resnet50_2(pretrained=True))
        shark_module = SharkInference(
            model,
            (input,),
            device=self.device,
            dynamic=self.dynamic,
        )
        shark_module.compile()
        results = shark_module.forward((input,))
        assert compare_tensors(act_out, results) == True
class WideResnet50ModuleTest(unittest.TestCase):
    """Runs WideResnet50ModuleTester across static/dynamic x cpu/gpu/vulkan backends."""

    def setUp(self):
        # Fix: previously the TestCase instance itself was passed as the
        # `dynamic` argument (WideResnet50ModuleTester(self)). Construct with
        # defaults and let each test set dynamic/device explicitly, matching
        # the MiniLMTFModuleTester convention.
        self.module_tester = WideResnet50ModuleTester()

    def test_module_static_cpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    def test_module_dynamic_cpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "cpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "gpu"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.dynamic = False
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()

    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.dynamic = True
        self.module_tester.device = "vulkan"
        self.module_tester.create_and_check_module()
# Allow running this test file directly (outside the pytest runner).
if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,89 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
from tank.tf.tests.test_utils_tf import get_TFhf_model, compare_tensors_tf
import tensorflow as tf
import unittest
import numpy as np
import pytest
# Tokenizer/model sequence configuration for the MiniLM test.
MAX_SEQUENCE_LENGTH = 512
BATCH_SIZE = 1
# Create a set of 2-dimensional inputs: the (input_ids, attention_mask,
# token_type_ids) signature, each of shape [BATCH_SIZE, MAX_SEQUENCE_LENGTH].
tf_bert_input = [
    tf.TensorSpec(shape=[BATCH_SIZE, MAX_SEQUENCE_LENGTH], dtype=tf.int32),
    tf.TensorSpec(shape=[BATCH_SIZE, MAX_SEQUENCE_LENGTH], dtype=tf.int32),
    tf.TensorSpec(shape=[BATCH_SIZE, MAX_SEQUENCE_LENGTH], dtype=tf.int32)
]
class MiniLMTFModuleTester:
    """Compiles the TF MiniLM model through SharkInference and verifies its output."""

    def create_and_check_module(self, dynamic, device):
        """Build the model, compile it for `device`, and compare against the TF reference."""
        model, input, act_out = get_TFhf_model("microsoft/MiniLM-L12-H384-uncased")
        shark_module = SharkInference(
            model,
            (input,),
            device=device,
            dynamic=dynamic,
            jit_trace=True,
        )
        shark_module.set_frontend("tensorflow")
        shark_module.compile()
        # `input` is already the (ids, mask, type_ids) tuple expected by forward.
        results = shark_module.forward(input)
        assert compare_tensors_tf(act_out, results) == True
class MiniLMTFModuleTest(unittest.TestCase):
    """Static/dynamic MiniLM tests per backend; all currently skipped pending TF support."""

    def setUp(self):
        self.module_tester = MiniLMTFModuleTester()

    @pytest.mark.skip(reason="TF testing temporarily unavailable.")
    def test_module_static_cpu(self):
        self.module_tester.create_and_check_module(dynamic=False, device="cpu")

    @pytest.mark.skip(reason="TF testing temporarily unavailable.")
    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    def test_module_dynamic_cpu(self):
        self.module_tester.create_and_check_module(dynamic=True, device="cpu")

    @pytest.mark.skip(reason="TF testing temporarily unavailable.")
    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_static_gpu(self):
        self.module_tester.create_and_check_module(dynamic=False, device="gpu")

    @pytest.mark.skip(reason="TF testing temporarily unavailable.")
    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    @pytest.mark.skipif(check_device_drivers("gpu"), reason="nvidia-smi not found")
    def test_module_dynamic_gpu(self):
        self.module_tester.create_and_check_module(dynamic=True, device="gpu")

    @pytest.mark.skip(reason="TF testing temporarily unavailable.")
    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_static_vulkan(self):
        self.module_tester.create_and_check_module(dynamic=False, device="vulkan")

    @pytest.mark.skip(reason="TF testing temporarily unavailable.")
    @pytest.mark.xfail(reason="Language models currently failing for dynamic case")
    @pytest.mark.skipif(
        check_device_drivers("vulkan"),
        reason="vulkaninfo not found, install from https://github.com/KhronosGroup/MoltenVK/releases"
    )
    def test_module_dynamic_vulkan(self):
        self.module_tester.create_and_check_module(dynamic=True, device="vulkan")
# Allow running this test file directly (outside the pytest runner).
if __name__ == '__main__':
    unittest.main()

View File

@@ -0,0 +1,63 @@
from shark.shark_inference import SharkInference
from shark.iree_utils import check_device_drivers
import tensorflow as tf
import numpy as np
from transformers import AutoModelForSequenceClassification, BertTokenizer, TFBertModel
import importlib
##################### Tensorflow Hugging Face LM Models ###################################
# Tokenizer/model sequence configuration shared by the TF HF helpers below.
MAX_SEQUENCE_LENGTH = 512
BATCH_SIZE = 1
# Create a set of 2-dimensional inputs: the (input_ids, attention_mask,
# token_type_ids) signature, each of shape [BATCH_SIZE, MAX_SEQUENCE_LENGTH].
tf_bert_input = [
    tf.TensorSpec(shape=[BATCH_SIZE, MAX_SEQUENCE_LENGTH], dtype=tf.int32),
    tf.TensorSpec(shape=[BATCH_SIZE, MAX_SEQUENCE_LENGTH], dtype=tf.int32),
    tf.TensorSpec(shape=[BATCH_SIZE, MAX_SEQUENCE_LENGTH], dtype=tf.int32)
]
class TFHuggingFaceLanguage(tf.Module):
    """tf.Module wrapper around a HuggingFace TF BERT-style model.

    Exposes `forward` as a tf.function with the fixed int32
    [BATCH_SIZE, MAX_SEQUENCE_LENGTH] input signature from `tf_bert_input`,
    so the module can be traced/compiled with concrete shapes.
    """

    def __init__(self, hf_model_name):
        super(TFHuggingFaceLanguage, self).__init__()
        # Create a BERT trainer with the created network.
        # from_pt=True converts the checkpoint's PyTorch weights to TF.
        self.m = TFBertModel.from_pretrained(
            hf_model_name, from_pt=True)
        # Invoke the trainer model on the inputs. This causes the layer to be built.
        # NOTE(review): this monkey-patches `predict` with a direct call in
        # inference mode (training=False), bypassing Keras' predict loop.
        self.m.predict = lambda x, y, z: self.m.call(
            input_ids=x, attention_mask=y, token_type_ids=z, training=False)

    @tf.function(input_signature=tf_bert_input)
    def forward(self, input_ids, attention_mask, token_type_ids):
        # Delegates to the patched predict above; returns the model's output object.
        return self.m.predict(input_ids, attention_mask, token_type_ids)
def get_TFhf_model(name):
    """Instantiate a TF HuggingFace model for `name` plus a tokenized sample input.

    Returns (model, test_input, actual_out) where test_input is the
    (input_ids, attention_mask, token_type_ids) tuple and actual_out is the
    model's reference output on it.
    """
    model = TFHuggingFaceLanguage(name)
    # Fix: the tokenizer was hard-coded to "microsoft/MiniLM-L12-H384-uncased"
    # and ignored `name`; load the tokenizer that matches the requested
    # checkpoint so other models get the correct vocabulary. (Backward
    # compatible: the only current caller passes that same MiniLM name.)
    tokenizer = BertTokenizer.from_pretrained(name)
    text = "Replace me by any text you'd like."
    encoded_input = tokenizer(text,
                              padding='max_length',
                              truncation=True,
                              max_length=MAX_SEQUENCE_LENGTH)
    # Add a leading batch dimension to each tokenizer output.
    for key in encoded_input:
        encoded_input[key] = tf.expand_dims(
            tf.convert_to_tensor(encoded_input[key]), 0)
    test_input = (encoded_input["input_ids"], encoded_input["attention_mask"],
                  encoded_input["token_type_ids"])
    actual_out = model.forward(*test_input)
    return model, test_input, actual_out
# Utility function for comparing two tensors (tensorflow).
def compare_tensors_tf(tf_tensor, numpy_tensor):
    """Return True when the TF output's pooler_output matches the numpy array within tolerance."""
    # relative tolerance 1e-02, absolute tolerance 1e-03
    reference = tf_tensor.pooler_output.numpy()
    return np.allclose(reference, numpy_tensor, rtol=1e-02, atol=1e-03)