failed test cases for bf16 Tensor.full (#3729)
Fixable with a float const followed by a cast to bf16; cast folding with bitcast is incorrectly skipped.
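A minimal sketch of the workaround the message suggests, assuming tinygrad's public Tensor/dtypes API as used in the tests below: build the constant in float32 first, then cast the tensor to bfloat16, so the const never goes through the broken bf16 const + bitcast fold.

# Hedged sketch of the suggested fix path, not the committed fix itself:
# create the const as float32, then cast to bfloat16.
from tinygrad import Tensor, dtypes

t = Tensor.ones(3, 5, dtype=dtypes.float32).cast(dtypes.bfloat16)
assert t.dtype == dtypes.bfloat16
# The fill happens in float32, so the values survive the cast.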
@@ -137,6 +137,20 @@ class TestBFloat16(unittest.TestCase):
     assert tnp.dtype == np.float32
     np.testing.assert_allclose(tnp, np.array(data))
 
+  @unittest.expectedFailure
+  def test_bf16_ones(self):
+    # TODO: fix this with correct bfloat16 cast
+    t = Tensor.ones(3, 5, dtype=dtypes.bfloat16)
+    assert t.dtype == dtypes.bfloat16
+    np.testing.assert_allclose(t.numpy(), np.ones((3, 5)))
+
+  @unittest.expectedFailure
+  def test_bf16_eye(self):
+    # TODO: fix this with correct bfloat16 cast
+    t = Tensor.eye(3, dtype=dtypes.bfloat16)
+    assert t.dtype == dtypes.bfloat16
+    np.testing.assert_allclose(t.numpy(), np.eye(3))
+
 @unittest.skipUnless(Device.DEFAULT in ["LLVM", "HIP"], "bfloat16 not supported")
 class TestBFloat16DType(unittest.TestCase):
   def test_bf16_to_float(self):
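Both new tests appear to exercise the same underlying path: Tensor.ones and Tensor.eye build on Tensor.full, which is where the bad bf16 const shows up (hence the commit title). A hypothetical direct repro, under the same API assumptions as the sketch above:

# Hypothetical repro: filling directly with Tensor.full at bfloat16 hits the
# skipped cast fold, which is why the tests above are marked expectedFailure.
from tinygrad import Tensor, dtypes
import numpy as np

t = Tensor.full((3, 5), 1.0, dtype=dtypes.bfloat16)
np.testing.assert_allclose(t.numpy(), np.ones((3, 5)))  # fails on affected backends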