mirror of https://github.com/tinygrad/tinygrad.git
synced 2026-01-08 22:48:25 -05:00
ruff cleanup (#4594)
* check editor config
* no editorconfig, it doesn't work
* ruff cleanups
@@ -1,4 +0,0 @@
|
||||
# 2 space indentation
|
||||
[*.py]
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
.github/workflows/test.yml
@@ -84,7 +84,7 @@ jobs:
     - name: Lint with ruff
       run: |
         pip3 install --upgrade --force-reinstall ruff
-        python3 -m ruff check . --preview
+        python3 -m ruff check .
     - name: Lint tinygrad with pylint
       run: python -m pylint tinygrad/
     - name: Run mypy
@@ -9,7 +9,7 @@ repos:
       pass_filenames: false
     - id: ruff
       name: ruff
-      entry: ruff check . --preview
+      entry: ruff check .
       language: system
       always_run: true
       pass_filenames: false
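With the --preview flag dropped from both the workflow step and the pre-commit hook, the two lint entry points are now plain "ruff check ."; preview behaviour moves into the ruff config shown in the next hunk. A minimal local-run sketch, not part of this commit and assuming ruff and pylint are already installed, that mirrors those two lint steps:

import subprocess, sys

def run(cmd):
  # echo the command, then propagate a non-zero exit code if the linter complains
  print("+", " ".join(cmd))
  subprocess.run(cmd, check=True)

try:
  run([sys.executable, "-m", "ruff", "check", "."])   # preview now comes from the config file, not the CLI
  run([sys.executable, "-m", "pylint", "tinygrad/"])  # same target as the workflow's pylint step
except subprocess.CalledProcessError as e:
  sys.exit(e.returncode)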
@@ -1,7 +1,9 @@
 indent-width = 2
+preview = true
 target-version = "py38"
+
 lint.select = [
-  "F",
+  "F", # Pyflakes
   "W6",
   "E71",
   "E72",
@@ -10,8 +12,8 @@ lint.select = [
   # "E124",
   "E203", # whitespace-before-punctuation
   "E272", # multiple-spaces-before-keyword
-  # "E303",
-  # "E304",
+  "E303",
+  "E304",
   "E501", # line-too-long
   # "E502",
   "E702", # multiple-statements-on-one-line-semicolon
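For context: E303 (too many blank lines) and E304 (blank lines after a function decorator) are newly enabled here, and ruff keeps most of these pycodestyle whitespace rules behind preview mode, which is presumably why preview = true lands in the config now that the --preview CLI flag is gone. A hypothetical snippet, not from the repo, showing the kind of code the selected rules flag:

import functools

@functools.lru_cache

def cached(x):     # E304: blank line found after the function decorator above
  return x * 2



def other(x):      # E303: too many blank lines (3) before this definition
  a = 1; b = 2     # E702 (already selected): multiple statements on one line (semicolon)
  return a + b + x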
@@ -378,7 +378,6 @@ class TestIndexing(unittest.TestCase):
     numpy_testing_assert_equal_helper(strided[rows, columns],
                                       np.array([[1, 3], [11, 13]]))
 
-
     # setting values
 
     # strided is [[10, 11],
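An aside on what the assertion above exercises: rows and columns are integer index arrays, so strided[rows, columns] gathers one element per (row, column) pair. A small NumPy illustration with assumed values (the test's actual strided, rows, and columns are not visible in this hunk):

import numpy as np

strided = np.array([[0, 1, 2, 3],
                    [10, 11, 12, 13]])
rows    = np.array([[0, 0], [1, 1]])
columns = np.array([[1, 3], [1, 3]])
# element (rows[i, j], columns[i, j]) is picked for every position, giving a 2x2 result
print(strided[rows, columns])  # [[ 1  3]
                               #  [11 13]]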
@@ -25,7 +25,6 @@ def st_shape(draw) -> tuple[int, ...]:
   assume(prod([d for d in s if d]) <= 1024 ** 4)
   return s
 
-
 def tensors_for_shape(s:tuple[int, ...]) -> tuple[torch.tensor, Tensor]:
   x = np.arange(prod(s)).reshape(s)
   return torch.from_numpy(x), Tensor(x)
@@ -51,7 +50,6 @@ class TestShapeOps(unittest.TestCase):
     assert len(tor) == len(ten)
     assert all([np.array_equal(tor.numpy(), ten.numpy()) for (tor, ten) in zip(tor, ten)])
 
-
   @settings.get_profile(__file__)
   @given(st_shape(), st_int32, st_int32)
   def test_chunk(self, s:tuple[int, ...], dim:int, num:int):
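The property test above feeds hypothesis-generated shapes through tensors_for_shape and checks that torch and tinygrad agree piece by piece. A hypothetical concrete instance with a fixed shape, split count, and dim (chosen for illustration, not drawn from the test):

import numpy as np, torch
from tinygrad import Tensor

x = np.arange(12).reshape(3, 4)
tor = torch.from_numpy(x).chunk(2, dim=1)
ten = Tensor(x).chunk(2, dim=1)
assert len(tor) == len(ten)
assert all(np.array_equal(a.numpy(), b.numpy()) for a, b in zip(tor, ten))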
@@ -182,7 +182,6 @@ class TestNN(unittest.TestCase):
     np.testing.assert_allclose(gb.numpy(), torch_layer.bias.grad.numpy(), atol=5e-4, rtol=1e-5)
     np.testing.assert_allclose(gx.numpy(), torch_x.grad.numpy(), atol=5e-4, rtol=1e-5)
 
-
   @unittest.skipIf(CI and Device.DEFAULT == "WEBGPU", "runs out of memory in CI")
   def test_conv_transpose1d(self):
     BS, C1, W = 4, 16, 224//4
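The assertions above follow the grad-comparison pattern used throughout TestNN: run the same computation in torch and tinygrad, backprop both, and compare gradients within a tolerance. A minimal sketch of that pattern with an assumed toy computation (not the conv_transpose1d test itself):

import numpy as np, torch
from tinygrad import Tensor

x_np = np.random.default_rng(0).standard_normal((4, 8)).astype(np.float32)

tx = torch.tensor(x_np, requires_grad=True)   # torch side
(tx * tx).sum().backward()

gx = Tensor(x_np, requires_grad=True)         # tinygrad side
(gx * gx).sum().backward()

np.testing.assert_allclose(gx.grad.numpy(), tx.grad.numpy(), atol=5e-4, rtol=1e-5)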
@@ -388,7 +388,6 @@ class Kernel:
       return True
     return False
 
-
   def apply_tensor_cores(self, use_tensor_cores=1, extra_opts:Optional[List[Opt]]=None, axis:int=0, tc_opt:int=getenv("TC_OPT")) -> bool:
     """ Attempts to apply a tensor core optimization to the kernel. If one exists and applies properly, return true, otherwise return false.
     Tensor cores are optimized instructions that matrix multiply-accumulate across a wave of threads: D(M, N) = A(M, K) * B(K, N) + C(M, N).
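As a reference point for the docstring above: the multiply-accumulate D(M, N) = A(M, K) * B(K, N) + C(M, N) is a matmul with an accumulator, shown below with made-up sizes in NumPy:

import numpy as np

M, N, K = 4, 6, 8
A = np.random.rand(M, K).astype(np.float32)
B = np.random.rand(K, N).astype(np.float32)
C = np.zeros((M, N), dtype=np.float32)   # accumulator
D = A @ B + C                            # D has shape (M, N)
assert D.shape == (M, N)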