remove pylint from pre-commit and CI (#12658)

* remove pylint from pre-commit and CI

* multidevice test is fast

* faster pre-commit

* 8 is faster than 4

* better name

* how did that typecheck?
This commit is contained in:
George Hotz
2025-10-14 15:39:59 +08:00
committed by GitHub
parent 8ecaf839e2
commit 84d4589ed4
5 changed files with 10 additions and 18 deletions

View File

@@ -58,8 +58,8 @@ class TestExample(unittest.TestCase):
       print(f"WARNING: {device} test isn't running")
       return
-    x = Tensor.eye(64, device=device, requires_grad=True)
-    y = Tensor.eye(64, device=device, requires_grad=True)
+    x = Tensor.eye(8, device=device, requires_grad=True)
+    y = Tensor.eye(8, device=device, requires_grad=True)
     z = y.matmul(x).sum()
     z.backward()

View File

@@ -134,8 +134,8 @@ class TestTiny(unittest.TestCase):
   def test_mnist_backward(self):
     # NOTE: we don't have the whole model here for speed
     layers = [
-      nn.Conv2d(1, 32, 5), Tensor.relu,
-      nn.Conv2d(32, 32, 5), Tensor.relu]
+      nn.Conv2d(1, 8, 5), Tensor.relu,
+      nn.Conv2d(8, 8, 5), Tensor.relu]
     # replace random weights with ones
     # TODO: there's a bug here where it's tying two of the biases together. we need UNIQUE const
@@ -144,7 +144,7 @@ class TestTiny(unittest.TestCase):
     # realize gradients
     for x in nn.state.get_parameters(layers): x.requires_grad_()
-    Tensor.empty(4, 1, 28, 28).sequential(layers).sum().backward()
+    Tensor.empty(4, 1, 14, 14).sequential(layers).sum().backward()
     Tensor.realize(*[x.grad for x in nn.state.get_parameters(layers) if x.grad is not None])
# *** image ***