From 2681c79bc56a42ab4ae67f4c8d04323b6943f266 Mon Sep 17 00:00:00 2001
From: George Hotz
Date: Sun, 18 Oct 2020 14:55:20 -0700
Subject: [PATCH] simple tests, repr not str

---
 test/mnist.py      | 14 ++++----------
 test/test.py       | 28 +++++++++++-----------------
 tinygrad/tensor.py |  2 +-
 3 files changed, 16 insertions(+), 28 deletions(-)

diff --git a/test/mnist.py b/test/mnist.py
index 7aa4c47e77..55e349bbb7 100644
--- a/test/mnist.py
+++ b/test/mnist.py
@@ -3,17 +3,13 @@ import numpy as np
 from tinygrad.tensor import Tensor
 from tinygrad.utils import layer_init_uniform, fetch_mnist
 import tinygrad.optim as optim
-
 from tqdm import trange
-
-# load the mnist dataset
-
-X_train, Y_train, X_test, Y_test = fetch_mnist()
-
-# train a model
-
 np.random.seed(1337)
 
+# load the mnist dataset
+X_train, Y_train, X_test, Y_test = fetch_mnist()
+
+# create a model
 class TinyBobNet:
   def __init__(self):
     self.l1 = Tensor(layer_init_uniform(784, 128))
@@ -22,8 +18,6 @@ class TinyBobNet:
   def forward(self, x):
     return x.dot(self.l1).relu().dot(self.l2).logsoftmax()
 
-# optimizer
-
 model = TinyBobNet()
 optim = optim.SGD([model.l1, model.l2], lr=0.001)
 #optim = optim.Adam([model.l1, model.l2], lr=0.001)
diff --git a/test/test.py b/test/test.py
index 31255ca174..5f320fafbf 100644
--- a/test/test.py
+++ b/test/test.py
@@ -10,31 +10,25 @@ def test_tinygrad():
   x = Tensor(x_init)
   W = Tensor(W_init)
   m = Tensor(m_init)
-  out = x.dot(W)
-  outr = out.relu()
-  outl = outr.logsoftmax()
-  outm = outl.mul(m)
-  outa = outm.add(m)
-  outx = outa.sum()
-  outx.backward()
-  return outx.data, x.grad, W.grad
+  out = x.dot(W).relu()
+  out = out.logsoftmax()
+  out = out.mul(m).add(m).sum()
+  out.backward()
+  return out.data, x.grad, W.grad
 
 def test_pytorch():
   x = torch.tensor(x_init, requires_grad=True)
   W = torch.tensor(W_init, requires_grad=True)
   m = torch.tensor(m_init)
-  out = x.matmul(W)
-  outr = out.relu()
-  outl = torch.nn.functional.log_softmax(outr, dim=1)
-  outm = outl.mul(m)
-  outa = outm.add(m)
-  outx = outa.sum()
-  outx.backward()
-  return outx.detach().numpy(), x.grad, W.grad
+  out = x.matmul(W).relu()
+  out = torch.nn.functional.log_softmax(out, dim=1)
+  out = out.mul(m).add(m).sum()
+  out.backward()
+  return out.detach().numpy(), x.grad, W.grad
 
 for x,y in zip(test_tinygrad(), test_pytorch()):
   print(x,y)
-  np.testing.assert_allclose(x, y, atol=1e-6)
+  np.testing.assert_allclose(x, y, atol=1e-5)
diff --git a/tinygrad/tensor.py b/tinygrad/tensor.py
index 13ef238b0c..128f2eb6b4 100644
--- a/tinygrad/tensor.py
+++ b/tinygrad/tensor.py
@@ -16,7 +16,7 @@ class Tensor:
     # internal variables used for autograd graph construction
     self._ctx = None
 
-  def __str__(self):
+  def __repr__(self):
     return "Tensor %r with grad %r" % (self.data, self.grad)
 
   def backward(self, allow_fill=True):
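
Note on the __str__ -> __repr__ rename in tinygrad/tensor.py: print() falls
back to __repr__ when a class defines no __str__, so tensors keep printing the
same string while also displaying usefully in a REPL and inside containers. A
minimal standalone sketch of that behavior (the class below is illustrative,
not the real Tensor):

    import numpy as np

    class T:
      def __init__(self, data):
        self.data, self.grad = data, None
      def __repr__(self):
        # used by repr(), and by str()/print() via the default fallback
        return "Tensor %r with grad %r" % (self.data, self.grad)

    t = T(np.zeros(2))
    print(t)    # Tensor array([0., 0.]) with grad None
    print([t])  # containers format elements with repr(), so this stays readable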
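
Note on the optimizer in test/mnist.py: the script trains with optim.SGD over
model.l1 and model.l2 at lr=0.001. Assuming the conventional SGD update rule
(the names below are illustrative, not tinygrad's actual implementation), each
step amounts to:

    # hypothetical sketch: one SGD step over a list of tensors,
    # assuming each tensor carries .data and .grad numpy arrays
    def sgd_step(params, lr=0.001):
      for p in params:
        p.data -= lr * p.grad  # move against the gradient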
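
Note on the loosened test tolerance in test/test.py: assert_allclose checks
|actual - desired| <= atol + rtol * |desired| elementwise, with rtol defaulting
to 1e-7, so moving atol from 1e-6 to 1e-5 gives the float32 forward/backward
pass more headroom against PyTorch's reference values. For instance:

    import numpy as np
    np.testing.assert_allclose(1.000004, 1.0, atol=1e-5)   # within tolerance
    # np.testing.assert_allclose(1.00002, 1.0, atol=1e-5)  # would raise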