diff --git a/test/backend/test_multitensor.py b/test/backend/test_multitensor.py
index 39bdec3d13..bdda8656d7 100644
--- a/test/backend/test_multitensor.py
+++ b/test/backend/test_multitensor.py
@@ -305,7 +305,8 @@ class TestMultiTensor(unittest.TestCase):
     Xs = X.shard(device, shard_x)
     Ws = W.shard(device, shard_w)
     O = (Xs@Ws)
-    np.testing.assert_allclose(X.numpy() @ W.numpy(), O.to(Device.DEFAULT).numpy(), atol=1e-5)
+    with np.errstate(all='ignore'):
+      np.testing.assert_allclose(X.numpy() @ W.numpy(), O.to(Device.DEFAULT).numpy(), atol=1e-5)
 
   def _test_double_matmul_shard_axis(self, shard_x, shard_w, device):
     X = Tensor.kaiming_uniform(N, N).realize()
@@ -315,7 +316,8 @@ class TestMultiTensor(unittest.TestCase):
     W1s = W1.shard(device, shard_w)
     W2s = W2.shard(device, shard_w)
     O = (Xs@W1s)@W2s
-    np.testing.assert_allclose((X.numpy() @ W1.numpy()) @ W2.numpy(), O.to(Device.DEFAULT).numpy(), atol=1e-5)
+    with np.errstate(all='ignore'):
+      np.testing.assert_allclose((X.numpy() @ W1.numpy()) @ W2.numpy(), O.to(Device.DEFAULT).numpy(), atol=1e-5)
 
   def test_matmul_shard_none(self): return self._test_matmul_shard_axis(None, None, devices_2)
   def test_matmul_shard_X_0(self): return self._test_matmul_shard_axis(0, None, devices_2)
diff --git a/test/backend/test_opt_gemm.py b/test/backend/test_opt_gemm.py
index 12d1bda436..83d8933563 100644
--- a/test/backend/test_opt_gemm.py
+++ b/test/backend/test_opt_gemm.py
@@ -12,7 +12,8 @@ class TestOptGemm(unittest.TestCase):
     N = 64
     cls.a = Tensor.randn(N, N).contiguous().realize()
     cls.b = Tensor.randn(N, N).contiguous().realize()
-    cls.res = cls.a.T.numpy() @ cls.b.T.numpy()
+    with np.errstate(all='ignore'):
+      cls.res = cls.a.T.numpy() @ cls.b.T.numpy()
 
   def _test_gemm_unrolled_permute_l(self, opts=[]):
     t = self.a.T @ self.b.T
diff --git a/test/backend/test_schedule.py b/test/backend/test_schedule.py
index 9723273e31..494f0079f0 100644
--- a/test/backend/test_schedule.py
+++ b/test/backend/test_schedule.py
@@ -312,8 +312,9 @@ class TestSchedule(unittest.TestCase):
     np.testing.assert_allclose(out0.numpy(), np_out0:=np.exp2(a.numpy().sum()), atol=1e-4, rtol=1e-4)
     np.testing.assert_allclose(out1.numpy(), np_out1:=a.numpy().sum()+np_out0, atol=1e-4, rtol=1e-4)
     np_b = (a.numpy() + np_out0 + np_out1)
-    np.testing.assert_allclose(out2.numpy(), np_out2:=np.exp2(np_b.sum()), atol=1e-4, rtol=1e-4)
-    np.testing.assert_allclose(out3.numpy(), np_b.sum()+np_out2, atol=1e-4, rtol=1e-4)
+    with np.errstate(over='ignore'):
+      np.testing.assert_allclose(out2.numpy(), np_out2:=np.exp2(np_b.sum()), atol=1e-4, rtol=1e-4)
+      np.testing.assert_allclose(out3.numpy(), np_b.sum()+np_out2, atol=1e-4, rtol=1e-4)
 
   def test_reduce_ext_reduce_child(self):
     Tensor.manual_seed(0)
diff --git a/test/unit/test_dtype_spec.py b/test/unit/test_dtype_spec.py
index e0f8f514f7..477faf22bb 100644
--- a/test/unit/test_dtype_spec.py
+++ b/test/unit/test_dtype_spec.py
@@ -64,7 +64,8 @@ class TestTypeSpec(unittest.TestCase):
     tested = 0
     for dtype_str, dtype in [
      ("bool", dtypes.bool), ("int8", dtypes.int8), ("int", dtypes.int), ("uint32", dtypes.uint32), ("float32", dtypes.float32)]:
-      np.testing.assert_equal(Tensor(n, dtype=dtype_str).numpy(), Tensor(n, dtype=dtype).numpy())
+      with np.errstate(invalid='ignore'):
+        np.testing.assert_equal(Tensor(n, dtype=dtype_str).numpy(), Tensor(n, dtype=dtype).numpy())
       np.testing.assert_equal(Tensor(n).cast(dtype_str).numpy(), Tensor(n).cast(dtype).numpy())
       if dtype.itemsize == 4:
         np.testing.assert_equal(Tensor(n).bitcast(dtype_str).numpy(), Tensor(n).bitcast(dtype).numpy())
diff --git a/test/unit/test_gguf.py b/test/unit/test_gguf.py
index dfd587f29c..3b611a2488 100644
--- a/test/unit/test_gguf.py
+++ b/test/unit/test_gguf.py
@@ -162,7 +162,8 @@ class TestGGUFGEMV(unittest.TestCase):
 
     _, tensors = gguf_load(Tensor(np.frombuffer(buf, dtype=np.uint8)).to(None))
     x = rng.standard_normal(cols).astype(np.float32)
-    np.testing.assert_allclose((tensors["weight"] @ Tensor(x)).numpy(), ref @ x, atol=1e-2, rtol=1e-2)
+    with np.errstate(all='ignore'):
+      np.testing.assert_allclose((tensors["weight"] @ Tensor(x)).numpy(), ref @ x, atol=1e-2, rtol=1e-2)
     if qtype == GGMLQuantizationType.BF16 or is_dtype_supported(dtypes.half):
       np.testing.assert_equal(tensors["weight"].numpy(), ref)
       assert np.isfinite(ref).all() and np.isfinite(tensors["weight"].numpy()).all(), f"{qtype.name} has NaN/Inf"
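For context on the pattern these hunks repeat: `np.errstate` is a standard NumPy context manager that scopes floating-point error handling to the reference computation inside the `with` block, so overflow, invalid-value, or divide-by-zero conditions there do not emit warnings (or raise, if the test runner promotes warnings to errors). A minimal standalone sketch, not taken from the patched files and with purely illustrative values:

```python
import numpy as np

# np.errstate temporarily overrides NumPy's floating-point error handling for
# the body of the `with` block, then restores the previous settings on exit.
with np.errstate(over='ignore'):
  y = np.exp2(np.float32(1000.0))        # overflows to inf without a RuntimeWarning

with np.errstate(invalid='ignore'):
  z = np.sqrt(np.float32(-1.0))          # nan, no "invalid value encountered" warning

with np.errstate(all='ignore'):          # 'all' covers divide, over, under, and invalid
  w = np.float32(1.0) / np.float32(0.0)  # inf, no divide-by-zero warning

print(y, z, w)  # inf nan inf
```

Scoping the override to a `with` block, rather than calling `np.seterr` globally, leaves error reporting for the rest of each test unchanged.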