diff --git a/test/test_schedule.py b/test/test_schedule.py
index 5037b47748..f333e89a7c 100644
--- a/test/test_schedule.py
+++ b/test/test_schedule.py
@@ -445,7 +445,7 @@ class TestSchedule(unittest.TestCase):
 
   def test_double_from(self):
     x = Tensor([1,2,3,4])
-    out = x.to('python')
+    out = x.to('npy')
     check_schedule(out, 0, filter_loadops=False)
 
   def test_pow_const_tensor_simplified(self):
diff --git a/test/test_tensor.py b/test/test_tensor.py
index df9c17b09d..c791aa2c79 100644
--- a/test/test_tensor.py
+++ b/test/test_tensor.py
@@ -320,6 +320,14 @@ class TestTinygrad(unittest.TestCase):
     # data = data + [-x for x in data]
     # np.testing.assert_allclose(Tensor(data, dtype=dtypes.int32).numpy(), np.array(data, dtype=np.int32))
 
+  def test_tensor_list_ndarray(self):
+    data = [np.array([1, 2, 3]), np.array([1, 2, 3]), np.array([1, 2, 3])]
+    np.testing.assert_equal(Tensor(data).numpy(), np.array(data))
+    data = [np.array([1.0, 2.0, 3.0]), np.array([1, 2, 3]), np.array([1, 2, 3])]
+    np.testing.assert_equal(Tensor(data).numpy(), np.array(data))
+    data = [np.array(1.0), np.array(2.0), np.array(3.0)]
+    np.testing.assert_equal(Tensor(data).numpy(), np.array(data))
+
   def test_tensor_bytes(self):
     data = b"abc123"
     t = Tensor(data)
diff --git a/tinygrad/tensor.py b/tinygrad/tensor.py
index 7896e4c00f..8b6a2d4cb6 100644
--- a/tinygrad/tensor.py
+++ b/tinygrad/tensor.py
@@ -46,6 +46,13 @@ def _loadop(op, shape:Tuple[sint,...], dtype:DType, device:Union[str, Tuple[str,
 def _from_np_dtype(npdtype:type) -> DType: return dtypes.fields()[np.dtype(npdtype).name]
 def _to_np_dtype(dtype:DType) -> Optional[type]: return np.dtype(dtype.fmt).type if dtype.fmt is not None else None
 
+def _fromnp(x: np.ndarray) -> LazyBuffer:
+  ret = LazyBuffer.loadop(LoadOps.EMPTY, x.shape, _from_np_dtype(x.dtype), "NPY")
+  # fake realize
+  ret.buffer.allocate(x)
+  del ret.srcs
+  return ret
+
 def _frompy(x:Union[List, Tuple, bytes], dtype:DType) -> LazyBuffer:
   if isinstance(x, bytes): ret, data = LazyBuffer.loadop(LoadOps.EMPTY, (len(x),), dtype, "PYTHON"), x
   else:
@@ -117,20 +124,15 @@ class Tensor:
     elif isinstance(data, Variable): data = _loadop(LoadOps.CONST, tuple(), dtype or dtypes.from_py(data.unbind()[1]), device, data)
     elif isinstance(data, bytes): data = _frompy(data, dtypes.uint8)
     elif isinstance(data, (list, tuple)):
-      if dtype is None: dtype = dtypes.from_py(data)
-      if dtype == dtypes.bfloat16: data = Tensor(_frompy(data, dtypes.float32), device=device).cast(dtypes.bfloat16).lazydata
-      else: data = _frompy(data, dtype)
+      if dtype is None:
+        if (d := fully_flatten(data)) and all(isinstance(s, bool) for s in d): dtype = dtypes.bool
+        else: dtype = dtypes.default_int if d and all_int(d) else dtypes.default_float
+      if dtype == dtypes.bfloat16: data = Tensor(_fromnp(np.array(data, np.float32)), device=device).cast(dtypes.bfloat16).lazydata
+      else: data = _fromnp(np.array(data, _to_np_dtype(dtype)))
     elif data is None: data = _loadop(LoadOps.EMPTY, (0,), dtype or dtypes.default_float, device)
     elif isinstance(data, np.ndarray):
       if data.shape == (): data = _loadop(LoadOps.CONST, tuple(), dtype or _from_np_dtype(data.dtype), device, data.item())
-      else:
-        def _fromnp(x: np.ndarray) -> LazyBuffer:
-          ret = LazyBuffer.loadop(LoadOps.EMPTY, x.shape, _from_np_dtype(x.dtype), "NPY")
-          # fake realize
-          ret.buffer.allocate(x)
-          del ret.srcs
-          return ret
-        data = _fromnp(data.astype(npdtype) if dtype is not None and (npdtype:=_to_np_dtype(dtype)) is not None else data)
+      else: data = _fromnp(data.astype(npdtype) if dtype is not None and (npdtype:=_to_np_dtype(dtype)) is not None else data)
 
     # by this point, it has to be a LazyBuffer
     if not isinstance(data, (LazyBuffer, MultiLazyBuffer)):
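
Below is a minimal usage sketch, not part of the patch, of the constructor behavior this diff exercises: building a Tensor from a list of numpy arrays (now routed through np.array and the module-level _fromnp helper) and the new dtype inference for plain Python lists. The import path and the concrete default dtypes are assumptions about the tinygrad tree this diff targets.

```python
# Sketch only: assumes a tinygrad checkout with this patch applied.
import numpy as np
from tinygrad import Tensor, dtypes

# A list of np.ndarrays is converted with np.array(...) and loaded as an NPY buffer,
# matching the new test_tensor_list_ndarray test above.
t = Tensor([np.array([1, 2, 3]), np.array([4, 5, 6])])
print(t.numpy().shape)  # (2, 3)

# dtype inference for plain Python lists: all-bool -> dtypes.bool,
# all-int -> dtypes.default_int, otherwise dtypes.default_float.
print(Tensor([True, False]).dtype)  # dtypes.bool
print(Tensor([1, 2, 3]).dtype)      # dtypes.default_int (int32 unless overridden)
print(Tensor([1.0, 2, 3]).dtype)    # dtypes.default_float (float32 unless overridden)
```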