import sys, unittest
from typing import Optional, Set, Tuple
import numpy as np
from tinygrad import Tensor, Device, dtypes
from tinygrad.codegen.uops import UOp
from tinygrad.tensor import _to_np_dtype
from tinygrad.engine.realize import Runner
from tinygrad.dtype import DType
from tinygrad.nn.state import get_parameters
from tinygrad.helpers import Context, CI, OSX, getenv

def derandomize_model(model):
  with Context(GRAPH=0):
    for p in get_parameters(model):
      p.lazydata = Tensor.empty(p.shape, device=p.device, dtype=p.dtype).lazydata
      p.realize()
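
# Example (illustrative sketch, not part of the original file; the _example_* name is ours):
# derandomize_model swaps every parameter's lazydata for an uninitialized Tensor.empty buffer,
# so a model can be exercised in tests without paying for random weight initialization.
def _example_derandomize_model():
  from tinygrad import nn  # local import; the helpers above don't need nn
  model = nn.Linear(4, 2)                     # anything get_parameters can walk works here
  derandomize_model(model)                    # weights become uninitialized (contents arbitrary)
  return model(Tensor.ones(1, 4)).realize()   # forward pass still runs end to end
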
def assert_jit_cache_len(fxn, expected_len):
  if not fxn.jit_cache:
    assert expected_len == 0, expected_len
    return
  # until we have a better way of typing the prg in ExecItem
  if issubclass(type(fxn.jit_cache[0].prg), Runner) and not type(fxn.jit_cache[0].prg).__name__.endswith('Graph'):
    assert len(fxn.jit_cache) == expected_len, len(fxn.jit_cache)
  else:
    assert len(fxn.jit_cache) == 1, len(fxn.jit_cache)
    # until we have a better way of typing the prg in ExecItem
    assert type(fxn.jit_cache[0].prg).__name__.endswith('Graph')
    assert len(fxn.jit_cache[0].prg.jit_cache) == expected_len
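
# Example (illustrative sketch; the TinyJit usage is ours, not part of this file): run a jitted
# function a few times so the JIT captures its kernels, then check how many ExecItems were
# recorded. The expected count of 1 assumes x*2 lowers to a single kernel on the default device;
# if the backend wraps the cache in a Graph runner, assert_jit_cache_len unwraps it.
def _example_assert_jit_cache_len():
  from tinygrad import TinyJit  # local import; the helpers above don't need TinyJit
  @TinyJit
  def fxn(x: Tensor) -> Tensor: return (x * 2).realize()
  for _ in range(3): fxn(Tensor.randn(4, 4).realize())  # first call warms up, later calls capture/replay
  assert_jit_cache_len(fxn, 1)
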
def is_dtype_supported(dtype: DType, device: str = Device.DEFAULT):
  if dtype == dtypes.pyint and device != "PYTHON": return False
  if dtype == dtypes.bfloat16:
    # NOTE: this requires bf16 buffer support
    return device in {"AMD"} or (device in {"CUDA", "NV"} and not CI and not getenv("PTX"))
  if device in ["WEBGPU", "WEBGL"]: return dtype in [dtypes.float, dtypes.int32, dtypes.uint32]
  # for CI GPU and OSX, cl_khr_fp16 isn't supported
  # for CI LLVM, it segfaults because it can't link to the casting function
  # CI CUDA architecture is sm_35 but we need at least sm_70 to run fp16 ALUs
  # PYTHON supports half memoryview in 3.12+ https://github.com/python/cpython/issues/90751
  if dtype == dtypes.half:
    if device == "GPU": return not CI and not OSX
    if device in ["LLVM", "CUDA", "NV"]: return not CI
    if device == "PYTHON": return sys.version_info >= (3, 12)
  if dtype == dtypes.float64: return device != "METAL" and not (OSX and device == "GPU")
  return True
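
# Example (illustrative sketch; the skip pattern is ours): dtype-specific tests gate on
# is_dtype_supported so they skip cleanly on backends without, e.g., half support.
def _example_is_dtype_supported():
  if not is_dtype_supported(dtypes.half):
    raise unittest.SkipTest(f"half is not supported on {Device.DEFAULT}")
  return Tensor([1.0, 2.0, 3.0], dtype=dtypes.half).sum().item()
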
def rand_for_dtype(dt:DType, size:int):
  if dtypes.is_unsigned(dt):
    return np.random.randint(0, 100, size=size, dtype=_to_np_dtype(dt))
  elif dtypes.is_int(dt):
    return np.random.randint(-100, 100, size=size, dtype=_to_np_dtype(dt))
  elif dt == dtypes.bool:
    return np.random.choice([True, False], size=size)
  return np.random.uniform(-10, 10, size=size).astype(_to_np_dtype(dt))
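
# Example (illustrative sketch; the round-trip check is ours): rand_for_dtype returns numpy data
# in a range that fits the requested dtype, which is handy for building test Tensors.
def _example_rand_for_dtype():
  data = rand_for_dtype(dtypes.int32, 16)   # ints in [-100, 100)
  t = Tensor(data)                          # dtype is inferred from the numpy array
  np.testing.assert_equal(t.numpy(), data)  # data round-trips through the device buffer
  return t
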
class TestUOps(unittest.TestCase):
  def assert_equiv_uops(self, uop1:UOp, uop2:UOp, cache:Optional[Set[Tuple[UOp, UOp]]]=None):
    if cache is None: cache = set()
    if (uop1, uop2) in cache: return
    cache.add((uop1, uop2))
    # NOTE: direct UOp __eq__ compares object identity; use this function to compare two UOps structurally
    try:
      self.assertIs(uop1.op, uop2.op)
      self.assertEqual(uop1.dtype, uop2.dtype)
      self.assertEqual(uop1.arg, uop2.arg)
      self.assertEqual(len(uop1.src), len(uop2.src))
      # pass the cache down so shared subgraphs aren't recompared
      for s1, s2 in zip(uop1.src, uop2.src): self.assert_equiv_uops(s1, s2, cache)
    except AssertionError as e:
      print(f"{uop1=}")
      print(f"{uop2=}")
      raise e