remove cpu and torch backends (#3399)

* remove cpu and torch backends

* don't copy to cpu

* use clang instead of cpu

* multitensor gathers on the first device

* clang is cpu + use default

* fixup

* bugfix
This commit is contained in:
George Hotz
2024-02-15 16:55:39 +01:00
committed by GitHub
parent 75f7e21a80
commit b1c0d8c99d
12 changed files with 23 additions and 151 deletions

View File

@@ -76,12 +76,10 @@ from tinygrad.lazy import LazyBuffer, LoadOps
from tinygrad.realize import run_schedule, create_schedule
# allocate some values + load in values
# TODO: remove numpy here
import numpy as np
-a = LazyBuffer.loadop(LoadOps.EMPTY, (1,), dtypes.int32, "CPU")
-b = LazyBuffer.loadop(LoadOps.EMPTY, (1,), dtypes.int32, "CPU")
-a.realized = Buffer("CPU", 1, dtypes.int32, np.array([2], np.int32).flatten())
-b.realized = Buffer("CPU", 1, dtypes.int32, np.array([3], np.int32).flatten())
+a = LazyBuffer.loadop(LoadOps.EMPTY, (1,), dtypes.int32, "CLANG")
+b = LazyBuffer.loadop(LoadOps.EMPTY, (1,), dtypes.int32, "CLANG")
+a.realized = Buffer("CLANG", 1, dtypes.int32).copyin(memoryview(bytearray(struct.pack("I", 2))))
+b.realized = Buffer("CLANG", 1, dtypes.int32).copyin(memoryview(bytearray(struct.pack("I", 3))))
# describe the computation
out = a.e(BinaryOps.ADD, b)