enable METAL tests now that the runner is M1 and fast-math is disabled (#3523)

This commit is contained in:
chenyu
2024-02-28 14:14:23 -05:00
committed by GitHub
parent 1136e2a82a
commit d89e3c4e08
3 changed files with 3 additions and 13 deletions

View File

@@ -351,12 +351,10 @@ class TestOps(unittest.TestCase):
helper_test_op([()], lambda x: x/2)
helper_test_op([()], lambda x: 2/x)
@unittest.skipIf(Device.DEFAULT in ["METAL", "WEBGPU"], "METAL has issues with -inf")
def test_mul_naninf(self):
  # Multiplication must propagate IEEE-754 specials (inf, -inf, nan) the
  # same way torch does; exercise each constant against a (45,65) tensor.
  for special in (math.inf, -math.inf, math.nan):
    helper_test_op([(45,65)], lambda x, c=special: x*c)
@unittest.skipIf(Device.DEFAULT in ["METAL", "WEBGPU"], "METAL has issues with -inf")
def test_div_naninf(self):
helper_test_op([(45,65)], lambda x: x/math.inf)
helper_test_op([(45,65)], lambda x: x/-math.inf)
@@ -474,8 +472,7 @@ class TestOps(unittest.TestCase):
def test_gelu(self):
  # Compare Tensor.gelu against torch's tanh-approximation gelu on a
  # (45,65) tensor over the default range, then at extreme positive and
  # negative inputs to exercise saturation behavior.
  # NOTE(review): the flattened diff left both the old CI/METAL-guarded
  # call and its unguarded replacement; keep only the unguarded call,
  # matching this commit's intent (METAL runner no longer needs the skip).
  helper_test_op([(45,65)], lambda x: torch.nn.functional.gelu(x, approximate="tanh"), Tensor.gelu)
  helper_test_op([(45,65)], lambda x: torch.nn.functional.gelu(x, approximate="tanh"), Tensor.gelu, low=300, high=303)
  helper_test_op([(45,65)], lambda x: torch.nn.functional.gelu(x, approximate="tanh"), Tensor.gelu, low=-300, high=-297)
def test_quick_gelu(self):
helper_test_op([(45,65)], lambda x: x * torch.sigmoid(1.702 * x), Tensor.quick_gelu)