From 888aaab15197deb0324ed7c0b38b2105a8fcca8d Mon Sep 17 00:00:00 2001
From: chenyu
Date: Fri, 14 Nov 2025 08:11:32 -0800
Subject: [PATCH] test_tiny cleanup (#13276)

---
 test/test_tiny.py | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/test/test_tiny.py b/test/test_tiny.py
index 72f274a6b8..74aae5676a 100644
--- a/test/test_tiny.py
+++ b/test/test_tiny.py
@@ -32,8 +32,8 @@ class TestTiny(unittest.TestCase):
     self.assertListEqual(out.tolist(), [2]*16)
 
   def test_cat(self):
-    out = Tensor.cat(Tensor.ones(8).contiguous(), Tensor.ones(8).contiguous())
-    self.assertListEqual(out.tolist(), [1]*16)
+    out = Tensor.cat(Tensor.ones(8).contiguous(), Tensor.zeros(8).contiguous())
+    self.assertListEqual(out.tolist(), [1]*8+[0]*8)
 
   def test_sum(self):
     out = Tensor.ones(256).contiguous().sum()
@@ -62,7 +62,7 @@ class TestTiny(unittest.TestCase):
     out = Tensor.rand(10)
     for x in out.tolist():
       self.assertGreaterEqual(x, 0.0)
-      self.assertLessEqual(x, 1.0)
+      self.assertLess(x, 1.0)
 
   # *** JIT (for Python speed) ***
 
@@ -138,9 +138,7 @@ class TestTiny(unittest.TestCase):
               nn.Conv2d(8, 8, 5), Tensor.relu]
 
     # replace random weights with ones
-    # TODO: there's a bug here where it's tying two of the biases together. we need UNIQUE const
-    #Tensor.realize(*[p.replace(Tensor.ones_like(p).contiguous()) for p in nn.state.get_parameters(layers)])
-    for p in nn.state.get_parameters(layers): p.replace(Tensor.empty(p.shape))
+    Tensor.realize(*[p.replace(Tensor.ones_like(p).contiguous()) for p in nn.state.get_parameters(layers)])
 
     # realize gradients
     for x in nn.state.get_parameters(layers): x.requires_grad_()