Failing test: different behavior on different devices (#7193)

* add minimal failing test

* more tiny makes linter happy

* tinyfy

* no walrus in assert

* a tiny bit simpler

* minimal

* better place, better name, expected failure

* skip devices with correct behavior
Author: Maximilian Wolf
Date: 2024-10-27 02:53:58 +01:00 (committed by GitHub)
Parent: e920f1d663
Commit: 3c992250d5
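The behavior under test reduces to three lines, so it is easy to check by hand on any backend. A minimal sketch using only what the new test itself does (select the backend the usual way, e.g. via the corresponding environment variable, before running):

from tinygrad import Device, Tensor

print(Device.DEFAULT)                            # which backend is active
t = Tensor([[3.0], [2.0], [1.0]]).contiguous()
t[1:] = t[:-1]                                   # overlapping in-place slice assignment
print(t.tolist())                                # expected [[3.0], [3.0], [2.0]]

The diff below adds exactly this as an expected-failure test and skips it on backends that already produce the expected result.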


@@ -1,5 +1,5 @@
 import unittest
-from tinygrad import Tensor, TinyJit, Variable, dtypes
+from tinygrad import Device, Tensor, TinyJit, Variable, dtypes
 import numpy as np
 
 class TestSetitem(unittest.TestCase):
@@ -115,6 +115,13 @@ class TestSetitem(unittest.TestCase):
     np.testing.assert_allclose(t.numpy(), n)
     np.testing.assert_allclose(t.numpy(), [[1,1,1,1,1,1],[2,2,2,2,2,2],[3,3,3,3,3,3],[4,4,4,4,4,4],[5,5,5,5,5,5],[6,6,6,6,6,6]])
 
+  @unittest.skipUnless(Device.DEFAULT in {"LLVM", "AMD"}, "only fails on LLVM and AMD")
+  @unittest.expectedFailure
+  def test_setitem_overlapping_inplace(self):
+    t = Tensor([[3.0], [2.0], [1.0]]).contiguous()
+    t[1:] = t[:-1]
+    assert t.tolist() == [[3.0], [3.0], [2.0]]
+
 class TestWithGrad(unittest.TestCase):
   def test_no_requires_grad_works(self):
     z = Tensor.rand(8, 8)
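For context on why [[3.0], [3.0], [2.0]] is the expected result: the assignment should behave as if the right-hand side were fully materialized before any element of t is overwritten (recent NumPy does the same for overlapping slice assignment by copying when it detects overlap). A backend that instead streams the copy element by element through the aliased buffer lets the first row cascade downwards. A small illustration of the two semantics in plain Python/NumPy, not tinygrad internals:

import numpy as np

# Copy-before-assign semantics: rows shift down by one.
a = np.array([[3.0], [2.0], [1.0]])
a[1:] = a[:-1]                 # NumPy copies the source when it overlaps the destination
print(a.tolist())              # [[3.0], [3.0], [2.0]]  <- what the test asserts

# Aliased forward copy: each read sees a row that was just overwritten.
rows = [[3.0], [2.0], [1.0]]
for i in range(1, len(rows)):
  rows[i] = rows[i - 1][:]     # row 0 cascades all the way down
print(rows)                    # [[3.0], [3.0], [3.0]]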