only skip AMD_LLVM (#11934)

Co-authored-by: b1tg <b1tg@users.noreply.github.com>
This commit is contained in:
b1tg
2025-08-31 23:15:47 +08:00
committed by GitHub
parent 75d380a77c
commit c1eeb3b99c

View File

@@ -3,7 +3,7 @@ import numpy as np
 import torch
 from typing import Any, List
 from tinygrad.device import is_dtype_supported
-from tinygrad.helpers import getenv, DEBUG, CI
+from tinygrad.helpers import getenv, DEBUG, CI, AMD_LLVM
 from tinygrad.dtype import DType, DTYPES_DICT, least_upper_dtype, fp8_to_float, float_to_fp8, _to_np_dtype, _to_torch_dtype
 from tinygrad import Device, Tensor, dtypes
 from hypothesis import assume, given, settings, strategies as strat
@@ -428,8 +428,7 @@ class TestOpsBFloat16(unittest.TestCase):
 data = [60000.0, 70000.0, 80000.0]
 np.testing.assert_allclose(Tensor(data).cast("bfloat16").numpy(), torch.tensor(data).type(torch.bfloat16).float().numpy())
-# TODO: AMD_LLVM failed on this
-@unittest.skipUnless(Device.DEFAULT == "PYTHON", "only test on PYTHON now")
+@unittest.skipIf(Device.DEFAULT == "AMD" and AMD_LLVM, "AMD_LLVM failed on this")
 def test_no_approximation(self):
 data = [326.0, 339.0, 10603200512.0]
 expected = torch.tensor(data, dtype=torch.bfloat16).sqrt().float().numpy()