* flake8: Ignore frequent violations, correct infrequent ones

* Ignore some rules in test

* Reorder test ignores

* Lint test + main

* EOF indent

* Include all E71,E72 errors

* Test the failing case in CI

* Revert "Test the failing case in CI"

This reverts commit 110add0a70.

* Push to test!
This reverts commit f317532779.

* ok back to passing
This reverts commit ba5052685f.

* Prove that CI fails when formatting is incorrect.

* Fix formatting

* Remove duplicate E117 rule

* Use flake8 config for precommit

---------

Co-authored-by: waifairer <waifairer@gmail.com>
This commit is contained in:
waifairer
2023-07-24 09:19:58 -06:00
committed by GitHub
parent 51173f0a48
commit d89fb729e5
8 changed files with 23 additions and 18 deletions

8
.flake8 Normal file
View File

@@ -0,0 +1,8 @@
[flake8] # https://flake8.pycqa.org/en/6.0.0/user/options.html#cmdoption-flake8-select
filename =
*/tinygrad/*.py,
*/test/*.py
select=F,W6,E71,E72,E112,E113,E124,E203,E272,E303,E304,E502,E702,E703,E731,W191
indent-size=2
per-file-ignores =
test/*: F401, F403, F405, F541, E722, E731, F811, F821, F841

View File

@@ -30,7 +30,7 @@ jobs:
- name: Lint with pylint
run: python -m pylint --disable=all -e W0311 -e C0303 --jobs=0 --indent-string=' ' **/*.py
- name: Lint with flake8
run: flake8 tinygrad/ --indent-size=2 --select=F,E112,E113,E203,E304,E502,E702,E703,E71,E72,E731,W191,W6 --statistics -j4
run: flake8 --statistics -j4
- name: Lint tinygrad with pylint
run: pylint tinygrad/
- name: Run mypy

View File

@@ -9,7 +9,7 @@ repos:
pass_filenames: false
- id: flake8
name: flake8
entry: flake8 tinygrad/ --indent-size=2 --select=F,E112,E113,E203,E304,E502,E702,E703,E71,E72,E731,W191,W6 --statistics -j4
entry: flake8 --statistics -j4
language: system
always_run: true
pass_filenames: false

View File

@@ -79,7 +79,7 @@ class TestInferenceMinKernels(unittest.TestCase):
img = Tensor.randn(1, 3, 224, 224)
with CLCache(223): # NOTE: this is way too high
out = model.forward(img)
assert len(GlobalCounters.cache) == 0, f"ViT prerealized?"
assert len(GlobalCounters.cache) == 0, "ViT prerealized?"
out.realize()
def test_llama(self):

View File

@@ -34,7 +34,7 @@ def atan2_cpu(ret:LazyBuffer, a:LazyBuffer, b:LazyBuffer):
# NOTE: The derivative of atan2 doesn't need a custom op! https://www.liquisearch.com/atan2/derivative
# In general, it is also optional to write a backward function, just your backward pass won't work without it
from tinygrad.ops import ASTRunner, LazyOp, LoadOps, BinaryOps, UnaryOps
from tinygrad.ops import LazyOp, LoadOps, BinaryOps, UnaryOps
from tinygrad.lazy import LazyBuffer
from tinygrad.tensor import Function
@@ -51,7 +51,7 @@ class ATan2(Function):
# *** third, we use our lovely new mlop in some tests ***
from tinygrad.tensor import Tensor, Device
from tinygrad.tensor import Tensor
@unittest.skipUnless(Device.DEFAULT in ["CPU", "GPU"], "atan2 is only implemented for CPU and GPU")
class TestCustomFunction(unittest.TestCase):

View File

@@ -608,7 +608,6 @@ class TestOps(unittest.TestCase):
helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.permute(order=(3,2,1,0)))
helper_test_op([()], lambda x: x.permute(()), lambda x: x.permute(()))
def test_reshape(self):
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6)))
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda x: x.reshape(shape=(-1,1,6,6)))
@@ -978,8 +977,7 @@ class TestOps(unittest.TestCase):
def test_padding_add(self):
helper_test_op([(64,64), (60,60)],
lambda x,w: x+torch.nn.functional.pad(w, (2,2,2,2)),
lambda x,w: x+w.pad2d((2,2,2,2)),
)
lambda x,w: x+w.pad2d((2,2,2,2)))
def test_dilated_conv2d(self):
bs = 4

View File

@@ -170,7 +170,6 @@ class Tensor:
def eye(dim:int, **kwargs):
return Tensor([1], **kwargs).pad(((0,dim),)).reshape(1, dim+1).expand(dim, dim+1).reshape(dim*(dim+1)).shrink(((0,dim*dim),)).reshape(dim, dim)
# ***** rng hlops *****
@staticmethod