Force TensorFlow to use CPU when generating model and golden values. (#199)

Split up gpu pytest runs temporarily
This commit is contained in:
Ean Garvey
2022-07-22 22:21:16 -05:00
committed by GitHub
parent 49fc6d2f4b
commit 921ccdc40b
22 changed files with 92 additions and 336 deletions

View File

@@ -85,7 +85,8 @@ jobs:
cd $GITHUB_WORKSPACE
PYTHON=python${{ matrix.python-version }} IMPORTER=1 ./setup_venv.sh
source shark.venv/bin/activate
pytest -k 'gpu' --ignore=shark/tests/test_shark_importer.py --ignore=benchmarks/tests/test_hf_benchmark.py --ignore=benchmarks/tests/test_benchmark.py
chmod u+rx tank/gpu_suite.sh
./tank/gpu_suite.sh
- name: Validate Vulkan Models
if: matrix.suite == 'vulkan'

3
tank/gpu_suite.sh Normal file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
# Run the GPU pytest suite in two batches so TensorFlow tests are isolated
# in their own interpreter process (lets TF's GPU/driver state be released
# between runs).
#
# NOTE: running the two suites back-to-back means a plain script would exit
# with the status of the LAST pytest only, silently swallowing failures from
# the first batch in CI. Track both exit codes and fail if either suite fails.
status=0
pytest tank/ -k "gpu" --ignore=tank/tf/ || status=1
pytest tank/tf/ -k "gpu" || status=1
exit $status

View File

@@ -1,6 +1,16 @@
from transformers import TFAutoModelForMaskedLM, AutoTokenizer
import tensorflow as tf
visible_default = tf.config.list_physical_devices("GPU")
try:
tf.config.set_visible_devices([], "GPU")
visible_devices = tf.config.get_visible_devices()
for device in visible_devices:
assert device.device_type != "GPU"
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
# The max_sequence_length is set small for testing purpose.
BATCH_SIZE = 1
MAX_SEQUENCE_LENGTH = 16
@@ -43,9 +53,9 @@ class MaskedLM(tf.Module):
def get_causal_lm_model(hf_name, text="Hello, this is the default text."):
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
# gpus = tf.config.experimental.list_physical_devices("GPU")
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
model = MaskedLM(hf_name)
encoded_input = preprocess_input(hf_name, text)
test_input = (encoded_input["input_ids"], encoded_input["attention_mask"])

View File

@@ -8,6 +8,16 @@ from transformers import (
TFBertModel,
)
visible_default = tf.config.list_physical_devices("GPU")
try:
tf.config.set_visible_devices([], "GPU")
visible_devices = tf.config.get_visible_devices()
for device in visible_devices:
assert device.device_type != "GPU"
except:
# Invalid device or cannot modify virtual devices once initialized.
pass
##################### Tensorflow Hugging Face LM Models ###################################
MAX_SEQUENCE_LENGTH = 512
BATCH_SIZE = 1
@@ -37,9 +47,9 @@ class TFHuggingFaceLanguage(tf.Module):
def get_TFhf_model(name):
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
# gpus = tf.config.experimental.list_physical_devices("GPU")
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
model = TFHuggingFaceLanguage(name)
tokenizer = BertTokenizer.from_pretrained(
"microsoft/MiniLM-L12-H384-uncased"

View File

@@ -1,35 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class MiniLMModuleTester:
def __init__(
self,
save_temps=False,
save_mlir=False,
save_vmfb=False,
# benchmark=False,
benchmark=False,
):
self.save_temps = save_temps
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.benchmark = benchmark
# self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"microsoft/MiniLM-L12-H384-uncased"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -43,17 +32,14 @@ class MiniLMModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = MiniLMModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False
device = "cpu"
self.module_tester.create_and_check_module(dynamic, device)
@pytest.mark.skip(reason="MiniLM numerics issues on gpu")
@pytest.mark.skipif(
check_device_drivers("gpu"), reason=device_driver_info("gpu")
)

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class AlbertBaseModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"albert-base-v2"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,9 +32,7 @@ class AlbertBaseModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = AlbertBaseModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,35 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class BertBaseUncasedModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
self.benchmark = benchmark
# self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"bert-base-uncased"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -43,11 +32,7 @@ class BertBaseUncasedModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = BertBaseUncasedModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class CamemBertModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"camembert-base"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class CamemBertModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = CamemBertModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class ConvBertModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"dbmdz/convbert-base-turkish-cased"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class ConvBertModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = ConvBertModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -14,22 +14,14 @@ import os
class DebertaBaseModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
self.benchmark = benchmark
# self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"microsoft/deberta-base"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +36,7 @@ class DebertaBaseModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = DebertaBaseModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class DistilBertModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"distilbert-base-uncased"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class DistilBertModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = DistilBertModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class ElectraModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"google/electra-small-discriminator"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class ElectraModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = ElectraModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class FunnelModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"funnel-transformer/small"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,17 +32,16 @@ class FunnelModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = FunnelModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False
device = "cpu"
self.module_tester.create_and_check_module(dynamic, device)
@pytest.mark.xfail(
reason="failing in the iree-compiler passes, see https://github.com/nod-ai/SHARK/issues/201"
)
@pytest.mark.skipif(
check_device_drivers("gpu"), reason=device_driver_info("gpu")
)
@@ -63,7 +50,9 @@ class FunnelModuleTest(unittest.TestCase):
device = "gpu"
self.module_tester.create_and_check_module(dynamic, device)
@pytest.mark.xfail(reason="failing in the iree-compiler passes.")
@pytest.mark.xfail(
reason="failing in the iree-compiler passes, see https://github.com/nod-ai/SHARK/issues/201"
)
@pytest.mark.skipif(
check_device_drivers("vulkan"), reason=device_driver_info("vulkan")
)

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class LayoutLMModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"microsoft/layoutlm-base-uncased"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class LayoutLMModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = LayoutLMModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class LongformerModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"allenai/longformer-base-4096"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -45,11 +33,7 @@ class LongformerModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = LongformerModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class MobileBertModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"google/mobilebert-uncased"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class MobileBertModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = MobileBertModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class MpNetModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"microsoft/mpnet-base"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class MpNetModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = MpNetModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,35 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class RemBertModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
self.benchmark = benchmark
# self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"google/rembert"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +33,7 @@ class RemBertModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = RemBertModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -14,23 +14,14 @@ import os
class RobertaBaseModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"roberta-base"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +35,8 @@ class RobertaBaseModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = RobertaBaseModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False
device = "cpu"

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class TapasBaseModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"google/tapas-base"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -47,11 +35,8 @@ class TapasBaseModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = TapasBaseModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False
device = "cpu"

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class FlauBertModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"hf-internal-testing/tiny-random-flaubert"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class FlauBertModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = FlauBertModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False

View File

@@ -1,36 +1,24 @@
from shark.iree_utils._common import check_device_drivers, device_driver_info
from shark.shark_inference import SharkInference
from shark.shark_downloader import download_tf_model
from shark.parser import shark_args
import iree.compiler as ireec
import unittest
import pytest
import numpy as np
import tempfile
import os
class XLMRobertaModuleTester:
def __init__(
self,
save_mlir=False,
save_vmfb=False,
save_temps=False,
# benchmark=False,
benchmark=False,
):
self.save_mlir = save_mlir
self.save_vmfb = save_vmfb
self.save_temps = save_temps
# self.benchmark = benchmark
self.benchmark = benchmark
def create_and_check_module(self, dynamic, device):
model, func_name, inputs, golden_out = download_tf_model(
"xlm-roberta-base"
)
shark_args.save_mlir = self.save_mlir
shark_args.save_vmfb = self.save_vmfb
shark_module = SharkInference(
model, func_name, device=device, mlir_dialect="mhlo"
@@ -44,11 +32,7 @@ class XLMRobertaModuleTest(unittest.TestCase):
@pytest.fixture(autouse=True)
def configure(self, pytestconfig):
self.module_tester = XLMRobertaModuleTester(self)
self.module_tester.save_temps = pytestconfig.getoption("save_temps")
self.module_tester.save_mlir = pytestconfig.getoption("save_mlir")
self.module_tester.save_vmfb = pytestconfig.getoption("save_vmfb")
# self.module_tester.benchmark = pytestconfig.getoption("benchmark")
self.module_tester.benchmark = pytestconfig.getoption("benchmark")
def test_module_static_cpu(self):
dynamic = False