cleanup tests Device[Device.DEFAULT] is always Compiled (#3645)

Author: chenyu
Date:   2024-03-07 11:15:42 -05:00
Committed by: GitHub
parent bdd62c7fd8
commit 906cc3a69b
7 changed files with 5 additions and 20 deletions
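Every hunk below applies the same cleanup: `Device[Device.DEFAULT]` now always returns a `Compiled` instance, so the `isinstance` guards against non-compiled backends are dead code and can be deleted along with the now-unused `Compiled` imports. A minimal sketch of the removed pattern (the test class here is hypothetical, not one of the files in this diff):

    # Before this commit: test modules guarded themselves against non-Compiled backends.
    import unittest
    from tinygrad import Device
    from tinygrad.device import Compiled

    @unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "only test for compiled backends")
    class TestExample(unittest.TestCase):
      def test_noop(self): pass

    # After: the decorator and the Compiled import are simply deleted;
    # the test class body is unchanged.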

View File

@@ -4,7 +4,7 @@ from examples.llama import Transformer, MODEL_PARAMS
from tinygrad.tensor import Tensor
from tinygrad import Device
from tinygrad.nn.state import get_state_dict
-from tinygrad.device import Compiled, Allocator
+from tinygrad.device import Allocator
from tinygrad.helpers import Profiling
class FakeProgram:
@@ -16,7 +16,6 @@ class FakeAllocator(Allocator):
def copyin(self, dest, src:memoryview): pass
class TestLLaMASpeed(unittest.TestCase):
-@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "only test for compiled backends")
def test_llama_compile(self):
backup_program = Device[Device.DEFAULT].runtime
backup_allocator = Device[Device.DEFAULT].allocator

View File

@@ -1,9 +1,7 @@
import unittest
from tinygrad import Device
from tinygrad.helpers import Timing, Profiling
-from tinygrad.device import Compiled
-@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "only for compiled backend")
class TestDeviceSpeed(unittest.TestCase):
@classmethod
def setUpClass(cls):

View File

@@ -3,7 +3,7 @@ import unittest
from tinygrad.codegen.kernel import Opt, OptOps, tensor_cores
from tinygrad.codegen.linearizer import Linearizer, UOp, UOps, expand_node, expand_idxs
-from tinygrad.device import Compiled, Device, Buffer
+from tinygrad.device import Device, Buffer
from tinygrad.ops import BinaryOps, BufferOps, MemBuffer, ConstBuffer, LazyOp, LoadOps, TernaryOps
from tinygrad.shape.shapetracker import ShapeTracker
from tinygrad.shape.view import View
@@ -15,7 +15,6 @@ from tinygrad.helpers import prod, Context
from tinygrad.dtype import DType, dtypes
from tinygrad.codegen.uops import UOpGraph
-@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "linearizer is only for compiled backends")
class TestLinearizer(unittest.TestCase):
def test_arg_dedup(self):
a, b = Tensor.randn(4), Tensor.randn(4)
@@ -227,8 +226,7 @@ def helper_realized_ast(r:Tensor):
output_buffer = Buffer(s[-1].out.device, prod((s if isinstance(s, int) else s.max for s in s[-1].out.shape)), s[-1].out.dtype)
return s[-1].ast, [output_buffer] + [l.realized for l in s[-1].inputs]
-@unittest.skipUnless(isinstance(Device[Device.DEFAULT], Compiled) and Device[Device.DEFAULT].compiler.linearizer_opts.supports_float4,
-"need Compiled backends that support float4")
+@unittest.skipUnless(Device[Device.DEFAULT].compiler.linearizer_opts.supports_float4, "need backends that support float4")
class TestFloat4(unittest.TestCase):
@staticmethod
def count_float4(k):
@@ -370,7 +368,6 @@ class TestFloat4(unittest.TestCase):
assert TestFloat4.count_float4(k) == (1, 1)
-@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "linearizer is only for compiled backends")
class TestHandCodedOpts(unittest.TestCase):
def test_masked_upcast(self):
layer_1 = Tensor.cat(*[Tensor.rand(5) for _ in range(4)])
@@ -487,7 +484,6 @@ def helper_linearizer_opt(r:Tensor, opts=[], apply_tc=False, atol=1e-4, rtol=1e-
for i, x in enumerate(opts): # Check custom transformations if any.
check_opt(x, lambda: Linearizer(realized_ast), Device[Device.DEFAULT].to_program, color_sizes[i] if i < len(color_sizes) else None)
-@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "linearizer is only for compiled backends")
class TestLinearizerOpts(unittest.TestCase):
def test_local_and_grouped_reduce(self):
if not Device[Device.DEFAULT].compiler.linearizer_opts.has_local or not Device[Device.DEFAULT].compiler.linearizer_opts.has_shared:

View File

@@ -1,10 +1,8 @@
import unittest
from tinygrad import Tensor, Device, Variable
-from tinygrad.device import Compiled
from examples.gpt2 import Transformer
from tinygrad.nn.state import get_state_dict
-@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "only test for compiled backends")
class TestMethodCache(unittest.TestCase):
def setUp(self):
self.backup_compiler = Device[Device.DEFAULT].compiler

View File

@@ -6,7 +6,6 @@ import unittest
from typing import List, Optional
from tinygrad.tensor import Tensor
from tinygrad.ops import LoadOps
-from tinygrad.device import Device, Compiled
from tinygrad.helpers import DEBUG, GRAPH
from tinygrad.codegen.linearizer import Linearizer
from tinygrad.features.graph import print_tree, realized_lazybuffer
@@ -79,7 +78,6 @@ class TestSchedule(unittest.TestCase):
d = (a+b).permute(1,0)+c
check_schedule(d, 1)
-@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled) or Device.DEFAULT == "LLVM", "only test for compiled backends")
def test_constants_are_embedded(self):
a = Tensor.empty(3,3) * 2
check_schedule(a, 2, filter_loadops=False)

View File

@@ -3,14 +3,11 @@ import unittest
from tinygrad.codegen.linearizer import Linearizer
from tinygrad.realize import create_schedule
from tinygrad.features.search import time_linearizer, bufs_from_lin
-from tinygrad.device import Compiled, Device, Buffer
+from tinygrad.device import Device, Buffer
from tinygrad.ops import LoadOps
from tinygrad.tensor import Tensor
class TestTimeLinearizer(unittest.TestCase):
-def setUp(self) -> None:
-if not isinstance(Device[Device.DEFAULT], Compiled): raise unittest.SkipTest("only test for compiled backends")
def test_reasonable_time(self):
si = [i for i in create_schedule([Tensor([1,2,3,4]).add(1).lazydata]) if i.ast.op not in LoadOps][0]
rawbufs = [Buffer(Device.DEFAULT, si.out.st.real_size(), si.out.dtype)] + [Buffer(Device.DEFAULT, x.st.real_size(), x.dtype) for x in si.inputs]

View File

@@ -4,7 +4,7 @@ import numpy as np
from tinygrad.dtype import dtypes, DType, PtrDType
from tinygrad.device import Buffer, Device
from tinygrad.ops import UnaryOps, BinaryOps, TernaryOps
-from tinygrad.device import CompiledASTRunner, Compiled
+from tinygrad.device import CompiledASTRunner
from tinygrad.codegen.linearizer import UOps, UOp
from tinygrad.codegen.uops import exec_alu, UOpGraph
from test.test_dtype import is_dtype_supported
@@ -70,7 +70,6 @@ class TestUOps(unittest.TestCase):
for c in [-4.0, 4.0]:
self._equal(f([a,b,c], op, dts), fxn(a,b,c))
-@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "only test for compiled backends")
class TestFloatUOps(TestUOps):
def test_neg(self): self._test_uop_fxn(UnaryOps.NEG, lambda a: -a)
def test_exp2(self): self._test_uop_fxn(UnaryOps.EXP2, lambda a: np.exp2(a))
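
The float4 hunk above is the one place where a skip decorator survives: only the redundant `isinstance(Device[Device.DEFAULT], Compiled)` half was dropped, while the genuine capability check stays. A sketch of the retained guard (the class name is hypothetical; the decorator line is taken verbatim from this diff):

    import unittest
    from tinygrad import Device

    # Only backends whose compiler advertises float4 support run these tests.
    @unittest.skipUnless(Device[Device.DEFAULT].compiler.linearizer_opts.supports_float4, "need backends that support float4")
    class TestExampleFloat4(unittest.TestCase):
      def test_noop(self): pass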