bugfix Tensor.item when it's unbased (#2913)
it's possible for a numel-1 tensor's lazydata to be unbased, so item() should read lazydata.base.realized
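A minimal sketch of the failure mode (the tensor value and trigger here are illustrative, not from the commit): a reshaped one-element tensor's lazydata can be a view, with the realized Buffer living on its base.

from tinygrad.tensor import Tensor

# numel-1 tensor whose lazydata is a reshaped view of a one-element base buffer
t = Tensor([2.5]).reshape((1, 1, 1, 1, 1))
# item() does self.contiguous().realize() internally; for an already-contiguous
# view the realized Buffer can sit on lazydata.base while the view's own
# .realized stays None, which is why the fix reads lazydata.base.realized
assert t.item() == 2.5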
@@ -303,6 +303,18 @@ class TestTinygrad(unittest.TestCase):
     # force device copy - to() is opt'd away - Tensor(dev)/1 is ignored
     np.testing.assert_allclose(ua_arr, (Tensor(ua_arr)/Tensor(1)).numpy())
 
+  def test_item_to_tensor_to_item(self):
+    for a in [0, 1, 2, 3, -1, -100, 100, -101.1, 2.345, 100.1, True, False]:
+      item = Tensor(a).item()
+      assert type(item) == type(a), a
+      np.testing.assert_allclose(item, a), a
+      buffered_item = Tensor([a]).item()
+      assert type(buffered_item) == type(a), a
+      np.testing.assert_allclose(buffered_item, a), a
+      reshaped_item = Tensor([a]).reshape((1, 1, 1, 1, 1)).item()
+      assert type(reshaped_item) == type(a), a
+      np.testing.assert_allclose(reshaped_item, a), a
+
 class TestZeroShapeTensor(unittest.TestCase):
   def test_shape_stride(self):
     t = Tensor.rand(3, 2, 0)
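Standalone, the round-trip the new test pins down (a sketch; values picked to be exactly representable in float32):

from tinygrad.tensor import Tensor

for a in [3, -101.5, True]:
  # item() unwraps a numel-1 tensor to a plain Python scalar of matching type
  item = Tensor(a).item()
  assert type(item) == type(a) and item == a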
@@ -118,9 +118,9 @@ class Tensor:
   def detach(self) -> Tensor: return Tensor(self.lazydata, device=self.device, requires_grad=False)
 
   # TODO: these are good places to start removing numpy
-  def item(self) -> Union[float, int]:
+  def item(self) -> Union[float, int, bool]:
     assert self.numel() == 1, "must have one element for item"
-    return cast(Buffer, self.contiguous().realize().lazydata.realized).toCPU().item()
+    return cast(Buffer, self.contiguous().realize().lazydata.base.realized).toCPU().item()
   def data(self) -> memoryview: return self.numpy().data
 
   # TODO: this should import numpy and use .data() to construct the array
@@ -128,7 +128,7 @@ class Tensor:
     assert all_int(self.shape), f"no numpy if shape is symbolic, {self.shape=}"
     assert self.dtype.np is not None, f"no numpy dtype for {self.dtype}"
     if 0 in self.shape: return np.zeros(self.shape, dtype=self.dtype.np)
-    return self.detach().cast(dtypes.from_np(self.dtype.np)).contiguous().to('CPU').realize().lazydata.base.realized.toCPU().astype(self.dtype.np, copy=True).reshape(self.shape)  # noqa: E501
+    return self.cast(self.dtype.scalar()).contiguous().realize().lazydata.base.realized.toCPU().astype(self.dtype.np, copy=True).reshape(self.shape)
 
   def to(self, device:Optional[str]) -> Tensor:
     if device is None or device == self.device: return self
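And a quick check of the numpy() path touched by the second hunk (a sketch, assuming a default float32 tensor): realize on the current device, copy to host, with astype(..., copy=True) ensuring the returned ndarray owns its memory.

import numpy as np
from tinygrad.tensor import Tensor

t = Tensor([[1.0, 2.0], [3.0, 4.0]])
n = t.numpy()
# the copy=True astype means mutating n cannot touch device memory
assert n.dtype == np.float32 and n.shape == (2, 2)
np.testing.assert_allclose(n, [[1.0, 2.0], [3.0, 4.0]])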