Mirror of https://github.com/tinygrad/tinygrad.git
Fix cuda tc emu test (#5663)
* fix acc folding for NV tensor cores
* fix correctness of reduce_before_expand
* fix test emulated CUDA tensor cores
* test_gemm_fp16 on some devices
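The test-side change adds test_gemm_fp16: a 64x64 half-precision matmul pushed through helper_test_op, skipped on the backends where CI cannot run it (see the decorator in the diff below). A minimal standalone sketch of the same check, using plain numpy as the reference; the tolerances here are illustrative, not the ones helper_test_op applies:

import numpy as np
from tinygrad import Tensor

# 64x64 GEMM in half precision, compared against a float32 numpy reference
a = np.random.uniform(-1, 1, (64, 64)).astype(np.float32)
b = np.random.uniform(-1, 1, (64, 64)).astype(np.float32)

out = Tensor(a).half().matmul(Tensor(b).half()).numpy()
ref = a @ b

# fp16 inputs and accumulation lose precision, so the comparison must be loose
np.testing.assert_allclose(out, ref, atol=1e-2, rtol=1e-2)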
@@ -795,10 +795,13 @@ class TestOps(unittest.TestCase):
                                                                          np.arange(64,128,dtype=np.float32).reshape(8,8)])
   def test_small_gemm_eye(self):
     helper_test_op(None, lambda x,y: x.matmul(y), lambda x,y: x@y, vals=[np.eye(8).astype(np.float32), np.eye(8).astype(np.float32)])
+  @unittest.skipIf(Device.DEFAULT in ["NV", "LLVM", "GPU", "CUDA"], "not supported on these in CI")
+  def test_gemm_fp16(self):
+    helper_test_op([(64,64), (64,64)], lambda x,y: x.half().matmul(y.half()))
   def test_gemm(self):
-    helper_test_op([(64,64), (64,64)], lambda x,y: x.matmul(y), Tensor.dot)
+    helper_test_op([(64,64), (64,64)], lambda x,y: x.matmul(y))
   def test_big_gemm(self):
-    helper_test_op([(256,256), (256,256)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4)
+    helper_test_op([(256,256), (256,256)], lambda x,y: x.matmul(y), atol=1e-4)
   @unittest.skipIf(IMAGE>0, "no 0 in shape matmul on images")
   def test_gemm_with_zeros_shape(self):
     helper_test_op([(8,8), (8,0)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-7)
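For context on the Tensor.dot arguments dropped above: helper_test_op falls back to applying the torch lambda to tinygrad tensors when no tinygrad-specific function is passed, and Tensor.dot is an alias for matmul, so the shorter calls exercise the same thing. A rough sketch of that comparison loop, with simplified names and defaults (the real helper in test/test_ops.py also checks gradients, dtypes, and timing):

import numpy as np
import torch
from tinygrad import Tensor

def helper_test_op_sketch(shapes, torch_fxn, tinygrad_fxn=None, atol=1e-6, rtol=1e-3):
  # fall back to the torch lambda when no tinygrad-specific fxn is given
  tinygrad_fxn = tinygrad_fxn or torch_fxn
  np_inputs = [np.random.uniform(-1, 1, s).astype(np.float32) for s in shapes]
  torch_out = torch_fxn(*[torch.tensor(x) for x in np_inputs]).numpy()
  tiny_out = tinygrad_fxn(*[Tensor(x) for x in np_inputs]).numpy()
  np.testing.assert_allclose(tiny_out, torch_out, atol=atol, rtol=rtol)

helper_test_op_sketch([(64,64), (64,64)], lambda x,y: x.matmul(y))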