mirror of https://github.com/tinygrad/tinygrad.git, synced 2026-01-08 22:48:25 -05:00
delete test_tensor_uop (#13951)
old test for the shape tracker. also update tests that refer to shapetracker names
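For context, the surviving tests check masked-pad behavior through the public Tensor API instead of inspecting ShapeTracker views. A minimal sketch of what they verify (assuming a local tinygrad install; values follow the diff below):

    from tinygrad import Tensor

    # multiplying by a zero-padded tensor masks out the padded region
    a = Tensor([1,1,1,1,1])
    b = Tensor([1,1]).pad(((0,3),))  # pad 3 zeros on the right -> shape (5,)
    print((a*b).tolist())            # [1.0, 1.0, 0.0, 0.0, 0.0]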
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-import numpy as np
-import unittest
-from tinygrad import Tensor, Device, dtypes
-from tinygrad.engine.realize import run_schedule
-from tinygrad.uop.ops import UOp
-from tinygrad.helpers import SPLIT_REDUCEOP
-
-class TestTensorUOp(unittest.TestCase):
-  def test_fromcpu_shape_tracker(self):
-    def helper(a: np.ndarray):
-      print(a.shape, a.strides, a.flags.c_contiguous)
-      b = Tensor(a).uop
-      assert b.shape == a.shape
-      np.testing.assert_equal(a, Tensor(b).numpy())
-
-    for ndims in range(1, 4):
-      a = np.random.randn(*(4,)*ndims).astype(np.float32)
-      for stride in [-2, 1, 2]:
-        for start in [0, 1]:
-          helper(a[(slice(start, None, stride),)*ndims])
-
-  def test_shuffle_pad_ops_cmpeq(self):
-    y = Tensor([1]).cat(Tensor([1]) == 0).numpy()
-    z = Tensor([1, 0]).numpy()
-    np.testing.assert_allclose(y, z)
-
-  def test_shuffle_pad_ops_div(self):
-    y = Tensor([1]).cat(Tensor([1]).div(Tensor([2.0]))).numpy()
-    z = Tensor([1, 0.5]).numpy()
-    np.testing.assert_allclose(y, z)
-
-  def test_shuffle_pad_ops_log(self):
-    y = Tensor([1]).cat(Tensor([1]).log()).numpy()
-    z = Tensor([1, 0]).numpy()
-    np.testing.assert_allclose(y, z)
-
-  def test_shuffle_pad_ops_exp(self):
-    y = Tensor([1]).cat(Tensor([1]).exp()).numpy()
-    z = Tensor([1, np.e]).numpy()
-    np.testing.assert_allclose(y, z)
-
-  def test_device_0_is_the_same_device(self):
-    a = Tensor([1, 2, 3], f"{Device.DEFAULT}")
-    b = Tensor([1, 2, 3], f"{Device.DEFAULT}:0")
-    assert a.device == b.device
-
-  def test_shrink_const_into_zero(self):
-    # regression test to make sure the shapetracker is preserved
-    a = Tensor.zeros(4,4,4).shrink((None, (0,0), None))
-    b = Tensor.zeros(4,1,4)
-    c = a.cat(b, dim=1)
-    np.testing.assert_allclose(c.numpy(), np.concatenate((a.numpy(), b.numpy()), axis=1))
-
-  def test_shrink_const_then_cast(self):
-    # regression test to make sure the shapetracker is preserved
-    a = Tensor.zeros(4,4,4).shrink((None, (0,0), None)).cast(dtypes.int32)
-    b = Tensor.zeros(4,1,4)
-    c = a.cat(b, dim=1)
-    np.testing.assert_allclose(c.numpy(), np.concatenate((a.numpy(), b.numpy()), axis=1))
-
-  def test_const_dtype(self):
-    lb: UOp = Tensor([1], dtype=dtypes.int).uop
-    assert lb.const_like(1).base.arg == 1
-    assert type(lb.const_like(1).base.arg) is int
-
-    lb: UOp = Tensor([1], dtype=dtypes.float).uop
-    assert lb.const_like(1).base.arg == 1.0
-    assert type(lb.const_like(1).base.arg) is float
-
-  def test_contiguous_alu(self):
-    a = Tensor.randn(2, 2).realize()
-    b = Tensor.randn(2, 2).realize()
-    add = (a+b).contiguous()
-    out = add+2
-    sched = out.schedule()
-    self.assertEqual(len(sched), 2)
-    run_schedule(sched)
-    np.testing.assert_allclose(out.numpy(), a.numpy()+b.numpy()+2)
-
-  # NOTE: contiguous on a buffer collapses
-  @unittest.skip("contiguous on a buffer no longer collapses")
-  def test_contiguous_empty(self):
-    empty = Tensor.empty(1).contiguous()
-    sched = empty.schedule()
-    self.assertEqual(len(sched), 0)
-
-  def test_contiguous_folded_alu(self):
-    a = Tensor.empty(8, 8)
-    # NOTE: the buffer for mul_0 late folds to just a CONST
-    mul_0 = a*0
-    out = mul_0.shrink(((4, 8), (0, 8))).contiguous()
-    out.realize()
-    self.assertEqual(out.tolist(), Tensor.zeros(4, 8).tolist())
-
-@unittest.skipUnless(SPLIT_REDUCEOP, "only for SPLIT_REDUCEOP")
-class TestReduceOp(unittest.TestCase):
-  def test_no_split_reduce_kernel(self):
-    a = Tensor.rand(4, 4).realize()
-    a = a.sum()
-    sched = a.schedule()
-    assert len(sched) == 1
-
-  def test_split_reduce_kernel_dim0(self):
-    a = Tensor.rand(256, 255).realize()
-    a = a.sum()
-    sched = a.schedule()
-    assert len(sched) == 2
-
-  def test_split_reduce_kernel_dim1(self):
-    a = Tensor.rand(255, 256).realize()
-    a = a.sum()
-    sched = a.schedule()
-    assert len(sched) == 2
-
-if __name__ == "__main__":
-  unittest.main()
@@ -1,13 +1,12 @@
 import unittest
 from tinygrad.tensor import Tensor
 
-class TestMaskedShapeTracker(unittest.TestCase):
+class TestMaskedTensor(unittest.TestCase):
   def test_mul_masked(self):
     a = Tensor([1,1,1,1,1])
     b = Tensor([1,1]).pad(((0,3),))
     c = a*b
     assert c.shape == a.shape
-    #assert c.uop.st.views[0].mask is not None
     ret = c.data()
     assert ret.tolist() == [1.0, 1.0, 0.0, 0.0, 0.0]
 
@@ -16,7 +15,6 @@ class TestMaskedShapeTracker(unittest.TestCase):
     b = Tensor([1,1]).pad(((0,3),))
     c = a*b
     assert c.shape == a.shape
-    #assert c.uop.st.views[0].mask is not None
     ret = c.data()
     assert ret.tolist() == [1.0, 1.0, 0.0, 0.0, 0.0]
 
@@ -24,7 +22,6 @@ class TestMaskedShapeTracker(unittest.TestCase):
     a = Tensor([1,1]).pad(((0,2),))
     b = Tensor([1,1]).pad(((0,2),))
     c = a+b
-    #assert c.uop.st.views[0].mask is not None
     ret = c.data()
     assert ret.tolist() == [2.0, 2.0, 0.0, 0.0]