* flake8: Ignore frequent violations, correct infrequent ones

* Ignore some rules in test

* Reorder test ignores

* Lint test + main

* EOF indent

* Include all E71,E72 errors

* Test the failing case in CI

* Revert "Test the failing case in CI"

This reverts commit 110add0a70.

* Push to test!

This reverts commit f317532779.

* ok back to passing

This reverts commit ba5052685f.

* Prove that CI fails when formatting is incorrect.

* Fix formatting

* Remove duplicate E117 rule

* Use flake8 config for precommit

---------

Co-authored-by: waifairer <waifairer@gmail.com>
Author: waifairer
Date: 2023-07-24 09:19:58 -06:00
Committed by: GitHub
Parent: 51173f0a48
Commit: d89fb729e5
8 changed files with 23 additions and 18 deletions

.flake8 (new file)

@@ -0,0 +1,8 @@
[flake8] # https://flake8.pycqa.org/en/6.0.0/user/options.html#cmdoption-flake8-select
filename =
  */tinygrad/*.py,
  */test/*.py
select=F,W6,E71,E72,E112,E113,E124,E203,E272,E303,E304,E502,E702,E703,E731,W191
indent-size=2
per-file-ignores =
  test/*: F401, F403, F405, F541, E722, E731, F811, F821, F841
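
As a point of reference, a hypothetical snippet (not from the repository) showing the kind of code the selected and test-ignored codes above correspond to:

import os               # F401: module imported but unused (ignored under test/* above)
x = None
if x == None:           # E711, covered by the selected E71 group: prefer `x is None`
  pass
try:
  pass
except:                 # E722, covered by the selected E72 group (also ignored under test/*)
  pass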

@@ -30,7 +30,7 @@ jobs:
- name: Lint with pylint
  run: python -m pylint --disable=all -e W0311 -e C0303 --jobs=0 --indent-string=' ' **/*.py
- name: Lint with flake8
-  run: flake8 tinygrad/ --indent-size=2 --select=F,E112,E113,E203,E304,E502,E702,E703,E71,E72,E731,W191,W6 --statistics -j4
+  run: flake8 --statistics -j4
- name: Lint tinygrad with pylint
  run: pylint tinygrad/
- name: Run mypy

@@ -9,7 +9,7 @@ repos:
  pass_filenames: false
- id: flake8
  name: flake8
-  entry: flake8 tinygrad/ --indent-size=2 --select=F,E112,E113,E203,E304,E502,E702,E703,E71,E72,E731,W191,W6 --statistics -j4
+  entry: flake8 --statistics -j4
  language: system
  always_run: true
  pass_filenames: false
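
Worth noting: both the CI step above and this pre-commit hook now run a bare flake8 --statistics -j4; flake8 automatically picks up the [flake8] section from the .flake8 file at the repository root, so the select list, indent size, and per-file ignores live in one place instead of being duplicated on each command line.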

@@ -79,7 +79,7 @@ class TestInferenceMinKernels(unittest.TestCase):
img = Tensor.randn(1, 3, 224, 224)
with CLCache(223): # NOTE: this is way too high
out = model.forward(img)
-assert len(GlobalCounters.cache) == 0, f"ViT prerealized?"
+assert len(GlobalCounters.cache) == 0, "ViT prerealized?"
out.realize()
def test_llama(self):
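
For context, the assertion message above carried an f prefix on a string with no placeholders, which is exactly what F541 (f-string is missing placeholders) flags; a minimal hypothetical illustration:

msg = f"nothing interpolated here"   # F541: f-string is missing placeholders
msg = "nothing interpolated here"    # plain string literal, no violation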

@@ -34,7 +34,7 @@ def atan2_cpu(ret:LazyBuffer, a:LazyBuffer, b:LazyBuffer):
# NOTE: The derivative of atan2 doesn't need a custom op! https://www.liquisearch.com/atan2/derivative
# In general, it is also optional to write a backward function, just your backward pass won't work without it
-from tinygrad.ops import ASTRunner, LazyOp, LoadOps, BinaryOps, UnaryOps
+from tinygrad.ops import LazyOp, LoadOps, BinaryOps, UnaryOps
from tinygrad.lazy import LazyBuffer
from tinygrad.tensor import Function
@@ -51,7 +51,7 @@ class ATan2(Function):
# *** third, we use our lovely new mlop in some tests ***
-from tinygrad.tensor import Tensor, Device
+from tinygrad.tensor import Tensor
@unittest.skipUnless(Device.DEFAULT in ["CPU", "GPU"], "atan2 is only implemented for CPU and GPU")
class TestCustomFunction(unittest.TestCase):

@@ -233,7 +233,7 @@ class TestOps(unittest.TestCase):
def test_mul_number(self):
helper_test_op([(), ()], lambda x,y: x*y, Tensor.mul)
def test_mul_const(self):
helper_test_op([(45,65)], lambda x: x*2, lambda x: x*2)
helper_test_op([(45,65)], lambda x: x*-1, lambda x: x*-1)
helper_test_op([(45,65)], lambda x: 255*x, lambda x: 255*x)
def test_div(self):
@@ -249,17 +249,17 @@ class TestOps(unittest.TestCase):
helper_test_op([()], lambda x: 2/x, lambda x: 2/x)
@unittest.skipIf(Device.DEFAULT in ["METAL", "WEBGPU"], "WEBGPU does not have support for inf/nan, METAL has issues with -inf")
def test_mul_const_naninf(self):
helper_test_op([(45,65)], lambda x: x*float("inf"), lambda x: x*float("inf"))
helper_test_op([(45,65)], lambda x: x*-float("inf"), lambda x: x*-float("inf"))
helper_test_op([(45,65)], lambda x: x*float("nan"), lambda x: x*float("nan"))
@unittest.skipIf(Device.DEFAULT in ["METAL", "WEBGPU"], "WEBGPU does not have support for inf/nan, METAL has issues with -inf")
def test_div_const_naninf(self):
helper_test_op([(45,65)], lambda x: x/float("inf"), lambda x: x/float("inf"))
helper_test_op([(45,65)], lambda x: x/-float("inf"), lambda x: x/-float("inf"))
helper_test_op([(45,65)], lambda x: x/float("nan"), lambda x: x/float("nan"))
helper_test_op([(45,65)], lambda x: float("inf")/x, lambda x: float("inf")/x)
helper_test_op([(45,65)], lambda x: (-float("inf"))/x, lambda x: (-float("inf"))/x)
helper_test_op([(45,65)], lambda x: float("nan")/x, lambda x: float("nan")/x)
def test_pow(self):
# TODO: why is a=0 for these tests?
helper_test_op([(45,65)], lambda x: x**2, lambda x: Tensor.pow(x,2), a=0)
@@ -608,7 +608,6 @@ class TestOps(unittest.TestCase):
helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.permute(order=(3,2,1,0)))
helper_test_op([()], lambda x: x.permute(()), lambda x: x.permute(()))
def test_reshape(self):
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6)))
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda x: x.reshape(shape=(-1,1,6,6)))
@@ -978,8 +977,7 @@ class TestOps(unittest.TestCase):
def test_padding_add(self):
helper_test_op([(64,64), (60,60)],
lambda x,w: x+torch.nn.functional.pad(w, (2,2,2,2)),
-lambda x,w: x+w.pad2d((2,2,2,2)),
-)
+lambda x,w: x+w.pad2d((2,2,2,2)))
def test_dilated_conv2d(self):
bs = 4

@@ -100,7 +100,7 @@ def get_grouped_maybe_float4(*values:List[Token], grouping_allowed=True):
# TODO: generic visitor pattern?
def expand_node(idx:Node) -> List[Node]:
if isinstance(idx, Variable): return [idx] if idx.expr is not None else [Variable.num(j) for j in range(idx.min, idx.max+1)]
if isinstance(idx, NumNode): return [idx]
if isinstance(idx, MulNode): return [x*idx.b for x in expand_node(idx.a)]
if isinstance(idx, SumNode): return [Variable.sum(list(it)) for it in itertools.product(*[expand_node(x) for x in idx.nodes])]

@@ -170,7 +170,6 @@ class Tensor:
def eye(dim:int, **kwargs):
return Tensor([1], **kwargs).pad(((0,dim),)).reshape(1, dim+1).expand(dim, dim+1).reshape(dim*(dim+1)).shrink(((0,dim*dim),)).reshape(dim, dim)
# ***** rng hlops *****
@staticmethod
@@ -282,7 +281,7 @@ class Tensor:
valid_slices = list(filterfalse(lambda x: x is None, orig_slices))
valid_slices = [v if isinstance(v, slice) else slice(y := normalize_int(v, i, dim_sz), y+1) for i, (v, dim_sz) in enumerate(zip(valid_slices, self.shape))]
start, stop, strides = zip(*y) if (y := [s.indices(dim_sz) for s, dim_sz in zip(valid_slices, self.shape)]) else ((), (), ())
new_slice = tuple((s, e) if st > 0 else (e+1, s+1) for s, e, st in zip(start, stop, strides))
new_shape = tuple(e - s for s, e in new_slice)
# Shrink
sliced_tensor = self.shrink(new_slice)