Mirror of https://github.com/tinygrad/tinygrad.git
Apply ruff linting rules to tests (#2473)
* everything except F821
* enable F821 with noqa
* dumb fix
* fix remaining imports and (former) lambdas
* replace _ with noqa to avoid gc
Commit 7f01dd04f0, parent 136dbd8b36, committed by GitHub.
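For context: the lambda-to-def rewrites in the diff below match ruff's E731 rule ("do not assign a lambda expression, use a def"), while F821 flags undefined names. A minimal sketch of both findings, using hypothetical names that do not appear in this commit:

# A minimal sketch with hypothetical names; none of this is from the diff.

# E731: do not assign a lambda expression, use a def.
double = lambda x: x * 2  # noqa: E731
# ...ruff's preferred form, the rewrite applied throughout this commit:
def double(x): return x * 2

# F821: undefined name. Where a test hits an undefined name on purpose,
# the line is kept and the check is suppressed inline:
try:
  undefined_name  # noqa: F821
except NameError:
  pass

A def also gives tracebacks and reprs a real function name instead of <lambda>, which is part of the rationale behind E731.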
@@ -112,12 +112,12 @@ class TestTinygrad(unittest.TestCase):
     torch_x = torch.tensor(x, requires_grad=True)
     torch_W = torch.tensor(W, requires_grad=True)
-    torch_func = lambda x: torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1)
+    def torch_func(x): return torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1)
     PJ = torch.autograd.functional.jacobian(torch_func, torch_x).squeeze().numpy()

     tiny_x = Tensor(x, requires_grad=True)
     tiny_W = Tensor(W, requires_grad=True)
-    tiny_func = lambda x: x.dot(tiny_W).relu().log_softmax()
+    def tiny_func(x): return x.dot(tiny_W).relu().log_softmax()
     J = jacobian(tiny_func, tiny_x)
     NJ = numerical_jacobian(tiny_func, tiny_x)
@@ -130,7 +130,7 @@ class TestTinygrad(unittest.TestCase):
     tiny_x = Tensor(x, requires_grad=True)
     tiny_W = Tensor(W, requires_grad=True)
-    tiny_func = lambda x: x.dot(tiny_W).relu().log_softmax()
+    def tiny_func(x): return x.dot(tiny_W).relu().log_softmax()

     self.assertTrue(gradcheck(tiny_func, tiny_x, eps = 1e-3))
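For reference, findings like these can be reproduced locally with an invocation along the lines of `ruff check --select E731,F821 test/` (the `check` subcommand and `--select` flag are real ruff CLI features; the path and rule selection are assumptions, since the repository's actual ruff configuration is not shown in this commit).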