diff --git a/test/test_ops.py b/test/test_ops.py
index bbc33147ee..6e413d3dff 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -2,7 +2,7 @@ import time, math, unittest, functools, platform, warnings
 import numpy as np
 from typing import List, Callable
 import torch
-from tinygrad.helpers import getenv, IMAGE, DEBUG, CI, Context, TRANSCENDENTAL, CPU_LLVM, AMD_LLVM
+from tinygrad.helpers import getenv, IMAGE, DEBUG, CI, Context, CPU_LLVM, AMD_LLVM
 from tinygrad import Tensor, Device, dtypes
 from tinygrad.tensor import _to_np_dtype
 from tinygrad.device import is_dtype_supported
@@ -901,7 +901,6 @@ class TestOps(unittest.TestCase):
   def test_abs_exact(self):
     helper_test_op(None, torch.abs, Tensor.abs, vals=[[-1.,0,1]])
 
-  @unittest.skipIf(TRANSCENDENTAL and Device.DEFAULT=="AMD", "TODO: remu crashes")
   def test_log(self):
     helper_test_op([(45,65)], torch.log, Tensor.log)
     helper_test_op(None, torch.log, Tensor.log, vals=[[math.inf, -math.inf, math.nan]])
@@ -911,7 +910,6 @@ class TestOps(unittest.TestCase):
     helper_test_op(None, torch.log2, Tensor.log2, vals=[[math.inf, -math.inf, math.nan]])
     helper_test_op([()], torch.log2, Tensor.log2)
 
-  @unittest.skipIf(TRANSCENDENTAL and Device.DEFAULT=="AMD", "TODO: remu crashes")
   def test_exp(self):
     helper_test_op([(45,65)], torch.exp, Tensor.exp)
     helper_test_op(None, torch.exp, Tensor.exp, vals=[[math.inf, -math.inf, math.nan]])
@@ -1549,7 +1547,6 @@ class TestOps(unittest.TestCase):
     helper_test_op([(3,4,5,6)], lambda x: torch.stack(torch.std_mean(x, axis=(1,2))),
                                 lambda x: Tensor.stack(*x.std_mean(axis=(1,2))))
 
-  @unittest.skip("TODO: this fails because of loaded nan in mul folding")
   def test_std_mean_loaded_nan(self):
     helper_test_op([(1,0,3,0,5)], lambda x: torch.stack(torch.std_mean(x, axis=(1,3))),
                                   lambda x: Tensor.stack(*x.std_mean(axis=(1,3))))