mirror of
https://github.com/tinygrad/tinygrad.git
synced 2026-04-29 03:00:14 -04:00
remove MultiLazyBuffer.from_sharded [pr] (#8620)
it's equivalent to taking the lazydata from Tensor.split, then copying it to devices
This commit is contained in:
@@ -75,7 +75,7 @@ class TestMultiTensor(unittest.TestCase):
|
||||
ei.run()
|
||||
assert names[-2] == names[-1], "function was relinearized"
|
||||
|
||||
@unittest.skip("this doesn't fold because from_sharded calls contiguous on all lbs")
|
||||
@unittest.skip("this doesn't fold because shard_ calls contiguous on all lbs")
|
||||
def test_sharded_memory(self):
|
||||
# Buffer may be stuck in track_cross_buffer
|
||||
for x in (d0, d1, d2, d3, d4): Device[x].synchronize()
|
||||
|
||||
Reference in New Issue
Block a user