mirror of
https://github.com/tinygrad/tinygrad.git
synced 2026-04-29 03:00:14 -04:00
few setitem test cases diff from numpy (#14684)
had Claude fuzz the frontend, and it found some real bugs
This commit is contained in:
@@ -1,6 +1,6 @@
|
||||
import unittest, random
|
||||
import unittest
|
||||
from tinygrad import Tensor, TinyJit, Variable, dtypes, Device
|
||||
from tinygrad.helpers import Context, getenv
|
||||
from tinygrad.helpers import Context
|
||||
import numpy as np
|
||||
|
||||
class TestSetitem(unittest.TestCase):
|
||||
@@ -211,19 +211,63 @@ class TestSetitem(unittest.TestCase):
|
||||
n[index.numpy()] = v.numpy()
|
||||
np.testing.assert_equal(t.numpy(), n)
|
||||
|
||||
@unittest.skip("slow")
def test_setitem_tensor_indexing_fuzz(self):
  """Fuzz setitem with tensor (advanced) indexing against the numpy reference.

  SEED/ITERS env vars control the RNG seed and iteration count.
  """
  random.seed(getenv("SEED", 42))
  for _ in range(getenv("ITERS", 100)):
    # random 1-D target length and a random 3-D index shape
    # NOTE: randint call order must stay size-then-dims to keep the fuzz sequence stable
    size = random.randint(5, 10)
    dims = tuple(random.randint(1, 5) for _ in range(3))
    target = Tensor.zeros(size).contiguous()
    expected = np.zeros((size,))
    idx = Tensor.randint(dims, low=0, high=size)
    vals = Tensor.arange(dims[0]*dims[1]*dims[2]).reshape(*dims)
    # perform the same scatter on both sides, then compare
    target[idx] = vals
    expected[idx.numpy()] = vals.numpy()
    np.testing.assert_allclose(target.numpy(), expected,
                               err_msg=f"failed with index={idx.numpy().tolist()} and v={vals.numpy().tolist()}")
|
||||
def test_setitem_swap_rows(self):
  """Attempted row swap through a stashed row view.

  The stashed row does not preserve the pre-swap values: the final buffer is
  [[2, 3], [2, 3], [2, 3]] in both the lazy and the eager variant, as the
  assertions below pin down.
  """
  expected = [[2, 3], [2, 3], [2, 3]]

  # lazy version
  mat = Tensor.arange(6, dtype=dtypes.float).reshape(3, 2).contiguous().realize()
  saved_row = mat[0]
  mat[0] = mat[1]
  mat[2] = saved_row
  # NOTE: not [[2, 3], [2, 3], [0, 1]], same with eager
  np.testing.assert_allclose(mat.numpy(), expected)

  # eager version
  mat = Tensor.arange(6, dtype=dtypes.float).reshape(3, 2).contiguous().realize()
  saved_row = mat[0].realize()
  mat[0] = mat[1].realize()
  mat[2] = saved_row.realize()
  np.testing.assert_allclose(mat.numpy(), expected)
|
||||
|
||||
def test_lazy_sum_between_writes(self):
  """Sums interleaved with in-place slice assigns.

  Ideally a lazy sum snapshots the buffer as of its creation; today the
  un-realized sums all observe the final buffer contents (see TODO below).
  The eager variant, which realizes each sum immediately, gets the
  snapshot semantics.
  """
  # lazy sums should capture buffer state at the time they were created
  buf = Tensor.zeros(6).contiguous().realize()
  before = buf.sum()
  buf[:3].assign(1.0)
  mid = buf.sum()
  buf[3:].assign(2.0)
  after = buf.sum()
  # TODO: before and mid see final buffer state, should be [0.0, 3.0, 9.0]
  np.testing.assert_allclose([before.item(), mid.item(), after.item()], [9.0, 9.0, 9.0])

  # eager version: realizing each sum pins its value immediately
  buf = Tensor.zeros(6).contiguous().realize()
  before = buf.sum().realize()
  buf[:3].assign(1.0).realize()
  mid = buf.sum().realize()
  buf[3:].assign(2.0).realize()
  after = buf.sum().realize()
  np.testing.assert_allclose([before.item(), mid.item(), after.item()], [0.0, 3.0, 9.0])
|
||||
|
||||
def test_cross_assign_independence(self):
  """Cross-assigns built from two tensors.

  When assigning to two tensors using computations from both, each assign
  should see the OLD values of both tensors. The lazy path currently lets
  the second assign observe the first one's mutation (see TODO); the eager
  variant, which realizes both right-hand sides first, gives the intended
  result.
  """
  x = Tensor.arange(4, dtype=dtypes.float).contiguous().realize()
  y = Tensor.arange(4, 8, dtype=dtypes.float).contiguous().realize()
  sum_xy = x + y   # [4, 6, 8, 10]
  twice_x = x * 2  # [0, 2, 4, 6] -- should use OLD x
  x.assign(sum_xy)
  y.assign(twice_x)
  np.testing.assert_allclose(x.numpy(), [4, 6, 8, 10])
  # TODO: the second assign sees mutated x, should be [0, 2, 4, 6]
  np.testing.assert_allclose(y.numpy(), [8, 12, 16, 20])

  # eager version: pre-realizing both right-hand sides
  x = Tensor.arange(4, dtype=dtypes.float).contiguous().realize()
  y = Tensor.arange(4, 8, dtype=dtypes.float).contiguous().realize()
  sum_xy = (x + y).realize()
  twice_x = (x * 2).realize()
  x.assign(sum_xy).realize()
  y.assign(twice_x).realize()
  np.testing.assert_allclose(x.numpy(), [4, 6, 8, 10])
  np.testing.assert_allclose(y.numpy(), [0, 2, 4, 6])
|
||||
|
||||
|
||||
class TestWithGrad(unittest.TestCase):
|
||||
|
||||
Reference in New Issue
Block a user