remove pylint from pre-commit and CI (#12658)
* remove pylint from pre-commit and CI
* multidevice test is fast
* faster pre-commit
* 8 is faster than 4
* better name
* how did that typecheck?
Changed file: test/external/external_test_example.py
@@ -58,8 +58,8 @@ class TestExample(unittest.TestCase):
       print(f"WARNING: {device} test isn't running")
       return
 
-    x = Tensor.eye(64, device=device, requires_grad=True)
-    y = Tensor.eye(64, device=device, requires_grad=True)
+    x = Tensor.eye(8, device=device, requires_grad=True)
+    y = Tensor.eye(8, device=device, requires_grad=True)
     z = y.matmul(x).sum()
     z.backward()
 
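For context, the shrunk check can be exercised on its own. A minimal sketch, assuming tinygrad's public Tensor API on the default device; the helper name check_matmul_backward is hypothetical and not part of the test file:

from tinygrad import Tensor

def check_matmul_backward(n=8):
  # identity inputs keep the math simple; n=8 mirrors the shrunk test size
  x = Tensor.eye(n, requires_grad=True)
  y = Tensor.eye(n, requires_grad=True)
  z = y.matmul(x).sum()
  z.backward()
  # the gradient of sum(y @ x) w.r.t. x exists and has the full matrix shape
  assert x.grad is not None and x.grad.shape == (n, n)

check_matmul_backward()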
@@ -134,8 +134,8 @@ class TestTiny(unittest.TestCase):
   def test_mnist_backward(self):
     # NOTE: we don't have the whole model here for speed
     layers = [
-      nn.Conv2d(1, 32, 5), Tensor.relu,
-      nn.Conv2d(32, 32, 5), Tensor.relu]
+      nn.Conv2d(1, 8, 5), Tensor.relu,
+      nn.Conv2d(8, 8, 5), Tensor.relu]
 
     # replace random weights with ones
     # TODO: there's a bug here where it's tying two of the biases together. we need UNIQUE const
@@ -144,7 +144,7 @@ class TestTiny(unittest.TestCase):
 
     # realize gradients
     for x in nn.state.get_parameters(layers): x.requires_grad_()
-    Tensor.empty(4, 1, 28, 28).sequential(layers).sum().backward()
+    Tensor.empty(4, 1, 14, 14).sequential(layers).sum().backward()
     Tensor.realize(*[x.grad for x in nn.state.get_parameters(layers) if x.grad is not None])
 
   # *** image ***
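Taken together, the two TestTiny hunks leave a much smaller backward pass. A minimal standalone sketch of the reduced test, assuming tinygrad's nn.Conv2d, nn.state.get_parameters, Tensor.sequential, and Tensor.realize; the function name is hypothetical, and the weight-replacement step elided between the hunks is omitted here:

from tinygrad import Tensor, nn

def mnist_backward_sketch():
  # two small conv layers stand in for the full MNIST model; 8 channels instead of 32
  layers = [
    nn.Conv2d(1, 8, 5), Tensor.relu,
    nn.Conv2d(8, 8, 5), Tensor.relu]

  # mark every parameter as trainable, then backprop a scalar loss from a 14x14 input
  for p in nn.state.get_parameters(layers): p.requires_grad_()
  Tensor.empty(4, 1, 14, 14).sequential(layers).sum().backward()

  # realize all gradients in one batched call
  Tensor.realize(*[p.grad for p in nn.state.get_parameters(layers) if p.grad is not None])

mnist_backward_sketch()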