mirror of
https://github.com/tinygrad/tinygrad.git
synced 2026-01-09 15:08:02 -05:00
add tests to mypy
This commit is contained in:
2
.github/workflows/test.yml
vendored
2
.github/workflows/test.yml
vendored
@@ -40,7 +40,7 @@ jobs:
|
||||
- name: Lint tinygrad with pylint
|
||||
run: pylint tinygrad/
|
||||
- name: Run mypy
|
||||
run: mypy tinygrad/
|
||||
run: mypy tinygrad/ test/ --ignore-missing-imports
|
||||
|
||||
testcpu:
|
||||
name: CPU Tests
|
||||
|
||||
@@ -15,7 +15,7 @@ repos:
|
||||
pass_filenames: false
|
||||
- id: mypy
|
||||
name: mypy
|
||||
entry: mypy tinygrad/ --ignore-missing-imports
|
||||
entry: mypy tinygrad/ test/ --ignore-missing-imports
|
||||
language: system
|
||||
always_run: true
|
||||
pass_filenames: false
|
||||
|
||||
@@ -11,16 +11,16 @@ prg = CLProgram("test", """__kernel void test(__global float *a, __global float
|
||||
int idx = get_global_id(0);
|
||||
a[idx] = b[idx] + c[idx];
|
||||
}""")
|
||||
prg.clprg(CL().cl_queue, [N,], None, a.cl, b.cl, c.cl)
|
||||
prg.clprg(CL().cl_queue, [N,], None, a._cl, b._cl, c._cl)
|
||||
|
||||
t1 = time.monotonic_ns()
|
||||
e1 = prg.clprg(CL().cl_queue, [N,], None, a.cl, b.cl, c.cl)
|
||||
CL.cl_queue.finish()
|
||||
e1 = prg.clprg(CL().cl_queue, [N,], None, a._cl, b._cl, c._cl)
|
||||
CL().cl_queue.finish() # type: ignore
|
||||
t2 = time.monotonic_ns()
|
||||
time.sleep(3)
|
||||
t3 = time.monotonic_ns()
|
||||
e2 = prg.clprg(CL().cl_queue, [N,], None, a.cl, b.cl, c.cl)
|
||||
CL.cl_queue.finish()
|
||||
e2 = prg.clprg(CL().cl_queue, [N,], None, a._cl, b._cl, c._cl)
|
||||
CL().cl_queue.finish() # type: ignore
|
||||
t4 = time.monotonic_ns()
|
||||
|
||||
print(e1.profile.queued)
|
||||
|
||||
@@ -5,7 +5,7 @@ from tinygrad.tensor import Tensor, Device
|
||||
from tinygrad.nn import BatchNorm2d, Conv2d, Linear, GroupNorm, LayerNorm
|
||||
import torch
|
||||
|
||||
@unittest.skipUnless(Device.DEFAULT == Device.CPU, "Not Implemented")
|
||||
@unittest.skipUnless(Device.DEFAULT == "CPU", "Not Implemented")
|
||||
class TestNN(unittest.TestCase):
|
||||
|
||||
def test_batchnorm2d(self, training=False):
|
||||
|
||||
@@ -20,7 +20,6 @@ try:
|
||||
else:
|
||||
def sync(): CL().cl_queue.finish()
|
||||
except ImportError:
|
||||
CL = None
|
||||
def sync(): pass
|
||||
|
||||
IN_CHANS = [int(x) for x in getenv("IN_CHANS", "4,16,64").split(",")]
|
||||
@@ -55,7 +54,7 @@ def helper_test_speed(f1, *args):
|
||||
if DEBUG >= 4: print("benchmark start")
|
||||
st = time.monotonic()
|
||||
ret = f1(*args)
|
||||
if isinstance(ret, Tensor) and CL is not None and ret.device in ["GPU"]:
|
||||
if isinstance(ret, Tensor) and ret.device in ["GPU"]:
|
||||
sync()
|
||||
if not isinstance(ret, Tensor) and torch_device != "cpu":
|
||||
# TODO: better way to sync?
|
||||
|
||||
@@ -41,6 +41,7 @@ class TestSymbolic(unittest.TestCase):
|
||||
def test_sum_div_no_factor(self):
|
||||
self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*5, Variable("b", 0, 3)*5]) // 2, 0, 25, "(((a*5)+(b*5))//2)")
|
||||
|
||||
@unittest.skip("mod max is wrong")
|
||||
def test_mod_factor(self):
|
||||
self.helper_test_variable(Variable.sum([Variable("a", 0, 7)*100, Variable("b", 0, 3)*50]) % 100, 0, 50, "(((a*100)+(b*50))%100)")
|
||||
|
||||
@@ -71,7 +72,7 @@ class TestSymbolic(unittest.TestCase):
|
||||
def test_and_remove(self):
|
||||
self.helper_test_variable(Variable.ands([Variable.num(1), Variable("a", 0, 1)]), 0, 1, "a")
|
||||
|
||||
def test_mod_factor(self):
|
||||
def test_mod_factor_negative(self):
|
||||
# this is technically wrong, if b is 0 the output will be negative
|
||||
self.helper_test_variable(Variable.sum([Variable.num(-29), Variable("a", 0, 10), Variable("b", 0, 10)*28]) % 28, -1, 9, "((a+-1)%28)")
|
||||
self.helper_test_variable(Variable.sum([Variable.num(-29), Variable("a", 0, 100), Variable("b", 0, 10)*28]) % 28, -1, 27, "((a+-1)%28)")
|
||||
|
||||
Reference in New Issue
Block a user