From 71a60762ed5fad0037c427992ba041067baa27ba Mon Sep 17 00:00:00 2001
From: chenyu
Date: Sun, 17 Dec 2023 02:03:52 -0500
Subject: [PATCH] Revert "Make Tensor creation allow multi-dim list of int and bool (#2793)" (#2810)

This reverts commit 798bf813b104f2eb486eb7e1e6b1415403b2f4fa.
---
 test/test_tensor.py       | 44 ++++-----------------------------------
 test/unit/test_helpers.py | 11 +---------
 tinygrad/helpers.py       |  1 -
 tinygrad/tensor.py        |  8 ++-----
 4 files changed, 7 insertions(+), 57 deletions(-)

diff --git a/test/test_tensor.py b/test/test_tensor.py
index 5984e28b1c..f97e8441c0 100644
--- a/test/test_tensor.py
+++ b/test/test_tensor.py
@@ -239,46 +239,10 @@ class TestTinygrad(unittest.TestCase):
     assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64 # check that it works for something else
 
   def test_tensor_list_dtype(self):
-    for arr in ([1], [[[1]]], [[1,1],[1,1]], [[[1,1],[1,1]],[[1,1],[1,1]]]):
-      assert Tensor(arr).dtype == dtypes.int32
-      assert Tensor(arr, dtype=dtypes.float32).dtype == dtypes.float32
-      assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64
-
-    for arr in ([True], [[[False]]], [[True,False],[True,False]], [[[False,True],[False,False]],[[True,True],[False,True]]]):
-      assert Tensor(arr).dtype == dtypes.bool
-      assert Tensor(arr, dtype=dtypes.float32).dtype == dtypes.float32
-      assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64
-
-    # empty tensor defaults
-    for arr in ([], [[[]]], [[],[]]):
-      t = Tensor(arr)
-      assert t.dtype == Tensor.default_type
-      np.testing.assert_allclose(t.numpy(), np.array(arr))
-
-    # mixture of bool and int
-    for arr in ([True, 3], [[True],[3]], [[[True]], [[3]]], [[True, 3], [3, True]]):
-      t = Tensor(arr)
-      assert t.dtype == dtypes.int32
-      np.testing.assert_allclose(t.numpy(), np.array(arr))
-
-    # mixture of bool, int and float
-    for arr in ([[True,True],[3.,True]], [[0,1],[3.,4]], [[[0],[1]],[[3.],[4]]], [[[True],[1]],[[3.],[4]]]):
-      t = Tensor(arr)
-      assert t.dtype == Tensor.default_type
-      np.testing.assert_allclose(t.numpy(), np.array(arr))
-
-  def test_tensor_list_shapes(self):
-    self.assertEqual(Tensor([[[]]]).shape, (1,1,0))
-    self.assertEqual(Tensor([[],[]]).shape, (2,0))
-    self.assertEqual(Tensor([[[[]],[[]]], [[[]],[[]]], [[[]],[[]]]]).shape, (3,2,1,0))
-
-  def test_tensor_list_errors(self):
-    # inhomogeneous shape
-    with self.assertRaises(ValueError): Tensor([[],[[]]])
-    with self.assertRaises(ValueError): Tensor([[1],[]])
-    with self.assertRaises(ValueError): Tensor([[1],[1],1])
-    with self.assertRaises(ValueError): Tensor([[[1,1,1],[1,1]]])
-    with self.assertRaises(ValueError): Tensor([[1,1,1],[[1,1,1]]])
+    arr = [1]
+    assert Tensor(arr).dtype == dtypes.int32
+    assert Tensor(arr, dtype=dtypes.float32).dtype == dtypes.float32
+    assert Tensor(arr, dtype=dtypes.float64).dtype == dtypes.float64
 
   def test_tensor_copy(self):
     x = copy.deepcopy(Tensor.ones((3,3,3)))
diff --git a/test/unit/test_helpers.py b/test/unit/test_helpers.py
index 3980c6faf4..3ddd59565c 100644
--- a/test/unit/test_helpers.py
+++ b/test/unit/test_helpers.py
@@ -1,7 +1,7 @@
 import unittest
 import numpy as np
 from PIL import Image
-from tinygrad.helpers import Context, ContextVar, DType, dtypes, merge_dicts, strip_parens, prod, round_up, fetch, fully_flatten
+from tinygrad.helpers import Context, ContextVar, DType, dtypes, merge_dicts, strip_parens, prod, round_up, fetch
 from tinygrad.shape.symbolic import Variable, NumNode
 
 VARIABLE = ContextVar("VARIABLE", 0)
@@ -160,14 +160,5 @@ class TestFetch(unittest.TestCase):
     with Image.open(img) as pimg:
       assert pimg.size == (705, 1024)
 
-class TestFullyFlatten(unittest.TestCase):
-  def test_fully_flatten(self):
-    self.assertEqual(fully_flatten([[1, 3], [1, 2]]), [1, 3, 1, 2])
-    self.assertEqual(fully_flatten(((1, 3), (1, 2))), [1, 3, 1, 2])
-    self.assertEqual(fully_flatten([[[1], [3]], [[1], [2]]]), [1, 3, 1, 2])
-    self.assertEqual(fully_flatten([[[[1], 2], 3], 4]), [1, 2, 3, 4])
-    self.assertEqual(fully_flatten([[1, 2, [3, 4]], [5, 6], 7]), [1, 2, 3, 4, 5, 6, 7])
-    self.assertEqual(fully_flatten([[1, "ab"], [True, None], [3.14, [5, "b"]]]), [1, "ab", True, None, 3.14, 5, "b"])
-
 if __name__ == '__main__':
   unittest.main()
\ No newline at end of file
diff --git a/tinygrad/helpers.py b/tinygrad/helpers.py
index 260c95c0bb..9f6c65fc64 100644
--- a/tinygrad/helpers.py
+++ b/tinygrad/helpers.py
@@ -26,7 +26,6 @@ def ansistrip(s:str): return re.sub('\x1b\\[(K|.*?m)', '', s)
 def ansilen(s:str): return len(ansistrip(s))
 def make_pair(x:Union[int, Tuple[int, ...]], cnt=2) -> Tuple[int, ...]: return (x,)*cnt if isinstance(x, int) else x
 def flatten(l:Iterable[Iterable[T]]): return [item for sublist in l for item in sublist]
-def fully_flatten(l): return [item for sublist in l for item in (fully_flatten(sublist) if isinstance(sublist, (tuple, list)) else [sublist])]
 def fromimport(mod, frm): return getattr(__import__(mod, fromlist=[frm]), frm)
 def strip_parens(fst:str): return fst[1:-1] if fst[0] == '(' and fst[-1] == ')' and fst[1:-1].find('(') <= fst[1:-1].find(')') else fst
 def round_up(num, amt:int): return (num+amt-1)//amt * amt
diff --git a/tinygrad/tensor.py b/tinygrad/tensor.py
index 39bd9bd1b1..c204afc7c9 100644
--- a/tinygrad/tensor.py
+++ b/tinygrad/tensor.py
@@ -7,8 +7,7 @@ from functools import partialmethod, reduce
 from itertools import accumulate
 import numpy as np
 
-from tinygrad.helpers import DType, dtypes, ImageDType
-from tinygrad.helpers import argfix, make_pair, getenv, IMAGE, DEBUG, flatten, prod, all_int, round_up, merge_dicts, fully_flatten
+from tinygrad.helpers import ImageDType, argfix, make_pair, getenv, IMAGE, DEBUG, flatten, DType, dtypes, prod, all_int, round_up, merge_dicts
 from tinygrad.lazy import LazyBuffer
 from tinygrad.ops import LoadOps
 from tinygrad.device import Device, Buffer
@@ -66,11 +65,8 @@ class Tensor:
     elif isinstance(data, bytes): data = LazyBuffer.fromCPU(np.frombuffer(data, np.uint8))
     elif data is None: data = LazyBuffer.fromCPU(np.array([], dtype=(dtype or Tensor.default_type).np))
     elif isinstance(data, list):
-      if (d := fully_flatten(data)) and all(isinstance(s, bool) for s in d): dtype = dtype or dtypes.bool
-      if d and all_int(d): dtype = dtype or dtypes.int32
-      else: dtype = dtype or Tensor.default_type
       # NOTE: cast at the end for the types that do not have a numpy dtype
-      data = LazyBuffer.fromCPU(np.array(data, dtype.np)).cast(dtype)
+      data = LazyBuffer.fromCPU(np.array(data, (dtype:=(dtype or (dtypes.int32 if data and all_int(data) else Tensor.default_type))).np)).cast(dtype)
     elif isinstance(data, np.ndarray):
      if data.shape == (): data = LazyBuffer.loadop(LoadOps.CONST, tuple(), dtype or dtypes.from_np(data.dtype), device, data.item())
      else: data = LazyBuffer.fromCPU(data.astype(dtype.np) if dtype is not None and dtype.np is not None else data)
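
Reviewer note (not part of the patch): the functional change is the list branch of Tensor.__init__, where dtype inference goes back to consulting all_int(data) on the top-level list instead of fully flattening nested lists and special-casing bools. Below is a minimal standalone sketch of that restored dtype selection. It assumes all_int simply checks that every top-level element is a Python int (its definition is not shown in this patch) and uses numpy float32 as a stand-in for Tensor.default_type; both are illustrative assumptions, not tinygrad's actual helpers.

import numpy as np

# Stand-in for tinygrad.helpers.all_int (assumed behavior: every top-level element is a Python int).
def all_int(t): return all(isinstance(s, int) for s in t)

DEFAULT_TYPE = np.float32  # stand-in for Tensor.default_type

def infer_list_dtype(data, dtype=None):
  # Mirrors the restored expression:
  #   dtype := dtype or (dtypes.int32 if data and all_int(data) else Tensor.default_type)
  return dtype or (np.int32 if data and all_int(data) else DEFAULT_TYPE)

print(infer_list_dtype([1, 2, 3]))          # flat ints -> int32
print(infer_list_dtype([[1, 2], [3, 4]]))   # nested lists are not ints -> default float type
print(infer_list_dtype([], dtype=np.int8))  # an explicit dtype always wins

Under this reverted path, nested lists no longer get int/bool inference, which is why test_tensor_list_dtype shrinks back to the single arr = [1] check.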