Mirror of https://github.com/tinygrad/tinygrad.git, last synced 2026-04-07 03:00:26 -04:00.
Torch/LLVM/arm F64 support (#1551)
This commit is contained in:
@@ -91,6 +91,12 @@ class TestHalfDtype(unittest.TestCase):
|
||||
def test_half_upcast_ops(self):
    """Mixing float16 with float32 operands upcasts the result to float32."""
    _test_ops(
        a_dtype=dtypes.float16,
        b_dtype=dtypes.float32,
        target_dtype=dtypes.float32,
    )
def test_upcast_to_half_ops(self):
    """Mixing int8 with float16 operands upcasts the result to float16."""
    _test_ops(
        a_dtype=dtypes.int8,
        b_dtype=dtypes.float16,
        target_dtype=dtypes.float16,
    )
@unittest.skipIf(Device.DEFAULT in ["WEBGPU", "METAL"], "float64 is not supported by some backends")
class TestDoubleDtype(unittest.TestCase):
    """float64 (double) dtype coverage; skipped entirely on backends without f64 support."""

    def test_float64_to_np(self):
        """A float64 tensor round-trips to a np.double numpy array with its values intact."""
        _test_to_np(Tensor([1, 2, 3, 4], dtype=dtypes.double), np.double, [1, 2, 3, 4])

    def test_casts_to_float64(self):
        """float32, int32, and uint8 sources all cast to float64."""
        _test_casts_to(
            [1, 2, 3, 4],
            source_dtypes=[dtypes.float32, dtypes.int32, dtypes.uint8],
            target_dtype=dtypes.float64,
        )

    def test_upcast_to_float64_ops(self):
        """Mixing int8 with float64 operands upcasts the result to float64."""
        _test_ops(
            a_dtype=dtypes.int8,
            b_dtype=dtypes.float64,
            target_dtype=dtypes.float64,
        )
@unittest.skipIf(Device.DEFAULT == "WEBGPU", "webgpu does not support int8")
|
||||
class TestInt8Dtype(unittest.TestCase):
|
||||
def test_int8_to_np(self):
    """An int8 tensor round-trips to a np.int8 numpy array with its values intact."""
    _test_to_np(Tensor([1, 2, 3, 4], dtype=dtypes.int8), np.int8, [1, 2, 3, 4])
|
||||
Reference in New Issue
Block a user