Mirror of https://github.com/tinygrad/tinygrad.git, synced 2026-01-10 07:28:15 -05:00.
delete files that import ShapeTracker (#13805)
This commit is contained in:
61
test/external/external_test_hcq_fuzz_failures.py
vendored
61
test/external/external_test_hcq_fuzz_failures.py
vendored
File diff suppressed because one or more lines are too long
60
test/external/external_test_nv.py
vendored
60
test/external/external_test_nv.py
vendored
@@ -1,60 +0,0 @@
|
||||
import unittest, struct, array, ctypes
|
||||
from tinygrad import Device, dtypes, Tensor
|
||||
from tinygrad.helpers import to_mv
|
||||
from tinygrad.runtime.ops_nv import NVDevice, HWQueue
|
||||
from tinygrad.codegen.opt.search import Opt, OptOps
|
||||
from tinygrad.engine.realize import get_runner, CompiledRunner, get_program
|
||||
from test.external.fuzz_linearizer import get_fuzz_rawbufs
|
||||
|
||||
from tinygrad.codegen.opt.kernel import Kernel
|
||||
from tinygrad.uop.ops import LazyOp, Ops, ReduceOps, BufferOps, MemBuffer
|
||||
from tinygrad.shape.shapetracker import ShapeTracker
|
||||
from tinygrad.shape.view import View
|
||||
|
||||
@unittest.skipUnless(Device.DEFAULT == "NV", "NV specific tests/fixes")
class TestNV(unittest.TestCase):
  """Regression tests for the NV (NVIDIA) backend runtime (requires Device.DEFAULT == "NV")."""

  @classmethod
  def setUpClass(self):
    # NOTE(review): the classmethod parameter is named `self` (not `cls`) in the original; kept as-is.
    # Shared fixtures: device d0, tensors a and b = a + 1, a runner for that add, and the
    # packed GPU virtual addresses of (b, a) used by the raw kernargs test below.
    TestNV.d0: NVDevice = Device["NV"]
    TestNV.a = Tensor([0.,1.], device="NV").realize()
    TestNV.b = self.a + 1
    si = self.b.schedule()[-1]
    TestNV.d0_runner = get_runner(TestNV.d0.device, si.ast)
    TestNV.b.uop.buffer.allocate()
    # two little-endian uint64 GPU addresses: output buffer b, then input buffer a
    TestNV.addr = struct.pack("QQ", TestNV.b.uop.buffer._buf.va_addr, TestNV.a.uop.buffer._buf.va_addr)

  def test_error_on_huge_dims(self):
    """Applying these opts to this reduce kernel and launching it should raise RuntimeError."""
    ast = LazyOp(op=BufferOps.STORE, src=(LazyOp(op=ReduceOps.SUM, src=(LazyOp(op=Ops.CAST, src=(LazyOp(op=Ops.MUL, src=(LazyOp(op=BufferOps.LOAD, src=(), arg=MemBuffer(idx=1, dtype=dtypes.half, st=ShapeTracker(views=(View(shape=(1, 1, 1024, 683), strides=(0, 0, 0, 1), offset=0, mask=None, contiguous=False),)))), LazyOp(op=BufferOps.LOAD, src=(), arg=MemBuffer(idx=2, dtype=dtypes.half, st=ShapeTracker(views=(View(shape=(1, 1, 1024, 683), strides=(0, 0, 683, 1), offset=0, mask=None, contiguous=True),))))), arg=None),), arg=dtypes.float),), arg=(3,)),), arg=MemBuffer(idx=0, dtype=dtypes.float, st=ShapeTracker(views=(View(shape=(1, 1, 1024, 1), strides=(0, 0, 1, 0), offset=0, mask=None, contiguous=True),)))) # noqa: E501
    opts = [Opt(op=OptOps.GROUP, axis=0, arg=0), Opt(op=OptOps.PADTO, axis=1, arg=32), Opt(op=OptOps.UNROLL, axis=0, arg=4), Opt(op=OptOps.LOCAL, axis=0, arg=2), Opt(op=OptOps.LOCAL, axis=0, arg=2)] # noqa: E501
    with self.assertRaises(RuntimeError) as cm:
      lin = Kernel(ast)
      lin.apply_opts(opts)
      rawbufs = get_fuzz_rawbufs(lin)
      prg = CompiledRunner(get_program(lin.get_optimized_ast(), lin.opts))
      prg(rawbufs, {}, wait=True)
    # NOTE(review): this expected text reads like a placeholder — confirm it matches the
    # runtime's actual error message before relying on this assertion.
    self.assertEqual(str(cm.exception), "This is a runtime error message")

  def test_buf4_usage(self):
    """Run a cast+sin kernel reading a ulong buffer and check the first output float."""
    TestNV.along = Tensor([105615], device="NV").realize()
    ast = LazyOp(op=BufferOps.STORE, src=(LazyOp(op=Ops.SIN, src=(LazyOp(op=Ops.CAST, src=(LazyOp(op=BufferOps.LOAD, src=(), arg=MemBuffer(idx=1, dtype=dtypes.ulong, st=ShapeTracker(views=(View(shape=(3,), strides=(1,), offset=0, mask=None, contiguous=True),)))),), arg=dtypes.float),), arg=None),), arg=MemBuffer(idx=0, dtype=dtypes.float, st=ShapeTracker(views=(View(shape=(3,), strides=(1,), offset=0, mask=None, contiguous=True),)))) # noqa: E501
    temp_runner = get_runner(TestNV.d0.device, (ast,))
    temp_runner([TestNV.b.uop.buffer, TestNV.along.uop.buffer], var_vals={})
    val = TestNV.b.uop.buffer.as_buffer().cast("f")[0]
    # 0.80647 ~= sin(105615) — the expected first element; tolerance for float error
    assert abs(val - 0.80647) < 0.001, f"got val {val}"

  def test_kernargs_no_oob_access(self):
    """Place kernargs at the very end of a 2MiB mapping so any out-of-bounds access faults."""
    kernargs_start = TestNV.d0._gpu_alloc((2 << 20), map_to_cpu=True).va_addr
    # position the argument block flush against the end of the allocation
    kernargs = kernargs_start + ((2 << 20) - TestNV.d0_runner._prg.kernargs_alloc_size)
    to_mv(kernargs, 0x160).cast('I')[:] = array.array('I', TestNV.d0_runner._prg.constbuffer_0)
    ctypes.memmove(kernargs + TestNV.d0_runner._prg.kernargs_offset, TestNV.addr, len(TestNV.addr))

    # submit the precompiled add kernel by hand through the hardware queue
    q = HWQueue()
    q.exec(TestNV.d0_runner._prg, kernargs, TestNV.d0_runner.global_size, TestNV.d0_runner.local_size)
    q.signal(TestNV.d0.timeline_signal, TestNV.d0.timeline_value).submit(TestNV.d0)
    TestNV.d0._wait_signal(TestNV.d0.timeline_signal, TestNV.d0.timeline_value)
    TestNV.d0.timeline_value += 1
    val = TestNV.b.uop.buffer.as_buffer().cast("f")[0]
    # b = a + 1 with a[0] == 0.0, so the first output element must be exactly 1.0
    assert val == 1.0, f"got val {val}"
|
||||
|
||||
# Run the NV regression tests when executed directly.
if __name__ == "__main__":
  unittest.main()
|
||||
61
test/external/fuzz_shapetracker.py
vendored
61
test/external/fuzz_shapetracker.py
vendored
@@ -1,61 +0,0 @@
|
||||
import random
|
||||
from tinygrad.helpers import DEBUG, getenv
|
||||
from test.unit.test_shapetracker import CheckingShapeTracker
|
||||
|
||||
def do_permute(st):
|
||||
perm = list(range(0, len(st.shape)))
|
||||
random.shuffle(perm)
|
||||
perm = tuple(perm)
|
||||
if DEBUG >= 1: print("st.permute(", perm, ")")
|
||||
st.permute(perm)
|
||||
|
||||
def do_pad(st):
|
||||
c = random.randint(0, len(st.shape)-1)
|
||||
pad = tuple((random.randint(0,2), random.randint(0,2)) if i==c else (0,0) for i in range(len(st.shape)))
|
||||
if DEBUG >= 1: print("st.pad(", pad, ")")
|
||||
st.pad(pad)
|
||||
|
||||
def do_reshape_split_one(st):
|
||||
c = random.randint(0, len(st.shape)-1)
|
||||
poss = [n for n in [1,2,3,4,5] if st.shape[c]%n == 0]
|
||||
spl = random.choice(poss)
|
||||
shp = st.shape[0:c] + (st.shape[c]//spl, spl) + st.shape[c+1:]
|
||||
if DEBUG >= 1: print("st.reshape(", shp, ")")
|
||||
st.reshape(shp)
|
||||
|
||||
def do_reshape_combine_two(st):
|
||||
if len(st.shape) < 2: return
|
||||
c = random.randint(0, len(st.shape)-2)
|
||||
shp = st.shape[:c] + (st.shape[c] * st.shape[c+1], ) + st.shape[c+2:]
|
||||
if DEBUG >= 1: print("st.reshape(", shp, ")")
|
||||
st.reshape(shp)
|
||||
|
||||
def do_shrink(st):
|
||||
c = random.randint(0, len(st.shape)-1)
|
||||
while 1:
|
||||
shrink = tuple((random.randint(0,s), random.randint(0,s)) if i == c else (0,s) for i,s in enumerate(st.shape))
|
||||
if all(x<y for (x,y) in shrink): break
|
||||
if DEBUG >= 1: print("st.shrink(", shrink, ")")
|
||||
st.shrink(shrink)
|
||||
|
||||
def do_flip(st):
|
||||
flip = tuple(random.random() < 0.5 for _ in st.shape)
|
||||
if DEBUG >= 1: print("st.flip(", flip, ")")
|
||||
st.flip(flip)
|
||||
|
||||
def do_expand(st):
|
||||
c = [i for i,s in enumerate(st.shape) if s==1]
|
||||
if len(c) == 0: return
|
||||
c = random.choice(c)
|
||||
expand = tuple(random.choice([2,3,4]) if i==c else s for i,s in enumerate(st.shape))
|
||||
if DEBUG >= 1: print("st.expand(", expand, ")")
|
||||
st.expand(expand)
|
||||
|
||||
# All mutation ops the fuzzer can apply to a (Checking)ShapeTracker.
shapetracker_ops = [do_permute, do_pad, do_shrink, do_reshape_split_one, do_reshape_combine_two, do_flip, do_expand]
|
||||
|
||||
if __name__ == "__main__":
  # Deterministic fuzz run: fixed seed, CNT rounds of 8 random shapetracker ops each.
  random.seed(42)
  for _ in range(getenv("CNT", 200)):
    dims = (random.randint(2, 10), random.randint(2, 10), random.randint(2, 10))
    tracker = CheckingShapeTracker(dims)
    for _ in range(8):
      random.choice(shapetracker_ops)(tracker)
    tracker.assert_same()
|
||||
34
test/external/fuzz_shapetracker_math.py
vendored
34
test/external/fuzz_shapetracker_math.py
vendored
@@ -1,34 +0,0 @@
|
||||
import random
|
||||
from tinygrad.helpers import getenv, DEBUG, colored, trange
|
||||
from tinygrad.shape.shapetracker import ShapeTracker
|
||||
from test.external.fuzz_shapetracker import shapetracker_ops
|
||||
from test.unit.test_shapetracker_math import st_equal, MultiShapeTracker
|
||||
|
||||
def fuzz_plus() -> tuple[ShapeTracker, ShapeTracker]:
|
||||
m = MultiShapeTracker([ShapeTracker.from_shape((random.randint(1, 10), random.randint(1, 10), random.randint(1, 10)))])
|
||||
for _ in range(4): random.choice(shapetracker_ops)(m)
|
||||
backup = m.sts[0]
|
||||
m.sts.append(ShapeTracker.from_shape(m.sts[0].shape))
|
||||
for _ in range(4): random.choice(shapetracker_ops)(m)
|
||||
st_sum = backup + m.sts[1]
|
||||
return m.sts[0], st_sum
|
||||
|
||||
if __name__ == "__main__":
  # SEED env var makes the run deterministic; CNT sets iterations per fuzzer.
  if seed:=getenv("SEED"): random.seed(seed)
  total = getenv("CNT", 1000)
  # FUZZ selects which fuzz_* generators to run, comma-separated (default: fuzz_plus)
  for fuzz in [globals()[f'fuzz_{x}'] for x in getenv("FUZZ", "plus").split(",")]:
    same_but_neq = 0
    for _ in trange(total, desc=f"{fuzz}"):
      st1, st2 = fuzz()
      eq = st_equal(st1, st2)
      # CHECK_NEQ: count pairs that are semantically equal yet do not simplify to the same tracker
      if getenv("CHECK_NEQ") and eq and st1.simplify() != st2.simplify():
        print(colored("same but unequal", "yellow"))
        print(st1.simplify())
        print(st2.simplify())
        same_but_neq += 1
      if DEBUG >= 1:
        print(f"EXP: {st1}")
        print(f"GOT: {st2}")
        print(colored("****", "green" if eq else "red"))
      # NOTE(review): stops at the first mismatch but exits with status 0 — confirm a
      # nonzero exit code isn't expected by callers (e.g. CI) before relying on this.
      if not eq: exit(0)
    if getenv("CHECK_NEQ"): print(f"same but unequal {same_but_neq}/{total} = {(same_but_neq/total)*100:.2f}%")
|
||||
Reference in New Issue
Block a user