remove MultiLazyBuffer.from_sharded [pr] (#8620)

it's equivalent to taking the lazydata from Tensor.split, then copying to devices
This commit is contained in:
chenyu
2025-01-14 18:00:49 -05:00
committed by GitHub
parent c85737c200
commit 0790d8059f
3 changed files with 13 additions and 17 deletions

View File

@@ -75,7 +75,7 @@ class TestMultiTensor(unittest.TestCase):
ei.run()
assert names[-2] == names[-1], "function was relinearized"
@unittest.skip("this doesn't fold because from_sharded calls contiguous on all lbs")
@unittest.skip("this doesn't fold because shard_ calls contiguous on all lbs")
def test_sharded_memory(self):
# Buffer may be stuck in track_cross_buffer
for x in (d0, d1, d2, d3, d4): Device[x].synchronize()