suppress test warnings from numpy (#15688)

This commit is contained in:
chenyu
2026-04-11 22:33:20 -04:00
committed by GitHub
parent 938cba4fdf
commit e706f408cb
5 changed files with 13 additions and 7 deletions

View File

@@ -305,7 +305,8 @@ class TestMultiTensor(unittest.TestCase):
Xs = X.shard(device, shard_x)
Ws = W.shard(device, shard_w)
O = (Xs@Ws)
-    np.testing.assert_allclose(X.numpy() @ W.numpy(), O.to(Device.DEFAULT).numpy(), atol=1e-5)
+    with np.errstate(all='ignore'):
+      np.testing.assert_allclose(X.numpy() @ W.numpy(), O.to(Device.DEFAULT).numpy(), atol=1e-5)
def _test_double_matmul_shard_axis(self, shard_x, shard_w, device):
X = Tensor.kaiming_uniform(N, N).realize()
@@ -315,7 +316,8 @@ class TestMultiTensor(unittest.TestCase):
W1s = W1.shard(device, shard_w)
W2s = W2.shard(device, shard_w)
O = (Xs@W1s)@W2s
-    np.testing.assert_allclose((X.numpy() @ W1.numpy()) @ W2.numpy(), O.to(Device.DEFAULT).numpy(), atol=1e-5)
+    with np.errstate(all='ignore'):
+      np.testing.assert_allclose((X.numpy() @ W1.numpy()) @ W2.numpy(), O.to(Device.DEFAULT).numpy(), atol=1e-5)
def test_matmul_shard_none(self): return self._test_matmul_shard_axis(None, None, devices_2)
def test_matmul_shard_X_0(self): return self._test_matmul_shard_axis(0, None, devices_2)

View File

@@ -12,7 +12,8 @@ class TestOptGemm(unittest.TestCase):
N = 64
cls.a = Tensor.randn(N, N).contiguous().realize()
cls.b = Tensor.randn(N, N).contiguous().realize()
-    cls.res = cls.a.T.numpy() @ cls.b.T.numpy()
+    with np.errstate(all='ignore'):
+      cls.res = cls.a.T.numpy() @ cls.b.T.numpy()
def _test_gemm_unrolled_permute_l(self, opts=[]):
t = self.a.T @ self.b.T

View File

@@ -312,8 +312,9 @@ class TestSchedule(unittest.TestCase):
np.testing.assert_allclose(out0.numpy(), np_out0:=np.exp2(a.numpy().sum()), atol=1e-4, rtol=1e-4)
np.testing.assert_allclose(out1.numpy(), np_out1:=a.numpy().sum()+np_out0, atol=1e-4, rtol=1e-4)
np_b = (a.numpy() + np_out0 + np_out1)
-    np.testing.assert_allclose(out2.numpy(), np_out2:=np.exp2(np_b.sum()), atol=1e-4, rtol=1e-4)
-    np.testing.assert_allclose(out3.numpy(), np_b.sum()+np_out2, atol=1e-4, rtol=1e-4)
+    with np.errstate(over='ignore'):
+      np.testing.assert_allclose(out2.numpy(), np_out2:=np.exp2(np_b.sum()), atol=1e-4, rtol=1e-4)
+      np.testing.assert_allclose(out3.numpy(), np_b.sum()+np_out2, atol=1e-4, rtol=1e-4)
def test_reduce_ext_reduce_child(self):
Tensor.manual_seed(0)

View File

@@ -64,7 +64,8 @@ class TestTypeSpec(unittest.TestCase):
tested = 0
for dtype_str, dtype in [
("bool", dtypes.bool), ("int8", dtypes.int8), ("int", dtypes.int), ("uint32", dtypes.uint32), ("float32", dtypes.float32)]:
-      np.testing.assert_equal(Tensor(n, dtype=dtype_str).numpy(), Tensor(n, dtype=dtype).numpy())
+      with np.errstate(invalid='ignore'):
+        np.testing.assert_equal(Tensor(n, dtype=dtype_str).numpy(), Tensor(n, dtype=dtype).numpy())
np.testing.assert_equal(Tensor(n).cast(dtype_str).numpy(), Tensor(n).cast(dtype).numpy())
if dtype.itemsize == 4:
np.testing.assert_equal(Tensor(n).bitcast(dtype_str).numpy(), Tensor(n).bitcast(dtype).numpy())

View File

@@ -162,7 +162,8 @@ class TestGGUFGEMV(unittest.TestCase):
_, tensors = gguf_load(Tensor(np.frombuffer(buf, dtype=np.uint8)).to(None))
x = rng.standard_normal(cols).astype(np.float32)
-      np.testing.assert_allclose((tensors["weight"] @ Tensor(x)).numpy(), ref @ x, atol=1e-2, rtol=1e-2)
+      with np.errstate(all='ignore'):
+        np.testing.assert_allclose((tensors["weight"] @ Tensor(x)).numpy(), ref @ x, atol=1e-2, rtol=1e-2)
if qtype == GGMLQuantizationType.BF16 or is_dtype_supported(dtypes.half): np.testing.assert_equal(tensors["weight"].numpy(), ref)
assert np.isfinite(ref).all() and np.isfinite(tensors["weight"].numpy()).all(), f"{qtype.name} has NaN/Inf"