mirror of https://github.com/tinygrad/tinygrad.git, synced 2026-01-09 15:08:02 -05:00
test_tiny cleanup (#13276)
@@ -32,8 +32,8 @@ class TestTiny(unittest.TestCase):
     self.assertListEqual(out.tolist(), [2]*16)
 
   def test_cat(self):
-    out = Tensor.cat(Tensor.ones(8).contiguous(), Tensor.ones(8).contiguous())
-    self.assertListEqual(out.tolist(), [1]*16)
+    out = Tensor.cat(Tensor.ones(8).contiguous(), Tensor.zeros(8).contiguous())
+    self.assertListEqual(out.tolist(), [1]*8+[0]*8)
 
   def test_sum(self):
     out = Tensor.ones(256).contiguous().sum()
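Side note on the test_cat hunk above: switching the second operand from ones to zeros makes the assertion order-sensitive, which a ones/ones concatenation can never be. A minimal standalone sketch of the same check (my illustration, not code from the commit):

from tinygrad import Tensor

# ones first, zeros second: a cat that swapped or ignored argument order would fail here
out = Tensor.cat(Tensor.ones(8).contiguous(), Tensor.zeros(8).contiguous())
assert out.tolist() == [1]*8 + [0]*8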
@@ -62,7 +62,7 @@ class TestTiny(unittest.TestCase):
     out = Tensor.rand(10)
     for x in out.tolist():
       self.assertGreaterEqual(x, 0.0)
-      self.assertLessEqual(x, 1.0)
+      self.assertLess(x, 1.0)
 
   # *** JIT (for Python speed) ***
 
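The swap from assertLessEqual to assertLess in this hunk tightens the bound to match Tensor.rand, which samples uniformly from the half-open interval [0, 1). A quick sketch of the invariant outside unittest (my illustration, not part of the diff):

from tinygrad import Tensor

# rand draws from [0, 1), so the upper bound is strict
for x in Tensor.rand(10).tolist():
  assert 0.0 <= x < 1.0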
@@ -138,9 +138,7 @@ class TestTiny(unittest.TestCase):
               nn.Conv2d(8, 8, 5), Tensor.relu]
 
     # replace random weights with ones
-    # TODO: there's a bug here where it's tying two of the biases together. we need UNIQUE const
-    #Tensor.realize(*[p.replace(Tensor.ones_like(p).contiguous()) for p in nn.state.get_parameters(layers)])
-    for p in nn.state.get_parameters(layers): p.replace(Tensor.empty(p.shape))
+    Tensor.realize(*[p.replace(Tensor.ones_like(p).contiguous()) for p in nn.state.get_parameters(layers)])
 
     # realize gradients
     for x in nn.state.get_parameters(layers): x.requires_grad_()
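Context for the last hunk: the removed lines worked around the const-uniqueness bug named in the TODO by filling parameters with Tensor.empty instead; the cleanup drops that workaround and restores the direct ones_like replacement, realized in one batch. A self-contained sketch of that pattern (the short layers list is illustrative, not the test's full stack, and the import paths are my assumption):

from tinygrad import Tensor
from tinygrad.nn import Conv2d
from tinygrad.nn.state import get_parameters

layers = [Conv2d(8, 8, 5), Tensor.relu]
# replace() swaps each parameter's buffer in place and returns the tensor,
# so a single realize() call materializes all the ones-filled weights and biases
Tensor.realize(*[p.replace(Tensor.ones_like(p).contiguous()) for p in get_parameters(layers)])
assert all(v == 1 for v in get_parameters(layers)[0].flatten().tolist())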