Big MNIST model with PIL augmentation and load/save (#160)

* 2serious

* load/save

* fixing GPU

* added DEBUG

* needs BatchNorm or doesn't learn anything

* old file not needed

* added conv biases

* added extra/training.py and checkpoint

* assert in test only

* save

* padding

* num_classes

* checkpoint

* checkpoints for padding

* training was broken

* merge

* rotation augmentation

* more aug

* needs testing

* streamline augment; augmentation is fast relative to the training step, so bicubic resampling is affordable (see the sketch after this list)

* tidying up
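
Note on the rotation/bicubic bullets above: they refer to PIL-based input augmentation. As a rough sketch only — this is not the code from the commit; the helper name augment_batch and the ±10 degree range are assumptions — the idea looks like this:

import numpy as np
from PIL import Image

def augment_batch(X, max_deg=10):
  # X: uint8 batch of shape (BS, 28, 28); returns randomly rotated copies
  out = np.empty_like(X)
  for i in range(X.shape[0]):
    deg = np.random.uniform(-max_deg, max_deg)
    # bicubic resampling is higher quality than nearest-neighbor; since
    # augmentation is cheap relative to the training step, it is affordable
    out[i] = np.asarray(Image.fromarray(X[i]).rotate(deg, resample=Image.BICUBIC))
  return out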
Marcel Bischoff authored on 2020-12-13 23:45:55 -05:00, committed by GitHub
parent f50dcc12ac · commit da72a0eed4
4 changed files with 216 additions and 84 deletions

test/test_mnist.py

@@ -4,8 +4,8 @@ import unittest
 import numpy as np
 from tinygrad.tensor import Tensor, GPU
 import tinygrad.optim as optim
+from extra.training import train, evaluate
 from extra.utils import fetch, get_parameters
-from tqdm import trange
 
 # mnist loader
 def fetch_mnist():
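
The helpers deleted in the next hunk now live in extra/training.py, with the dataset passed explicitly and evaluate returning the accuracy instead of asserting. Reconstructed from the new call sites further down — train(model, X_train, Y_train, optimizer, steps=..., gpu=...) and assert evaluate(model, X_test, Y_test, gpu=...) > 0.95 — a minimal sketch of the relocated module could look like the following; the real file may differ in details such as input reshaping and the checkpoint logic mentioned in the commit message:

import os
import numpy as np
from tqdm import trange
from tinygrad.tensor import Tensor
from extra.utils import get_parameters

def train(model, X_train, Y_train, optim, steps, BS=128, gpu=False):
  if gpu: [x.cuda_() for x in get_parameters([model, optim])]
  for i in (t := trange(steps, disable=os.getenv('CI') is not None)):
    samp = np.random.randint(0, X_train.shape[0], size=BS)
    x = Tensor(X_train[samp].reshape((-1, 28*28)).astype(np.float32), gpu=gpu)
    y = np.zeros((BS, 10), np.float32)
    y[range(BS), Y_train[samp]] = -10.0  # NLL target trick, see the note after the next hunk
    out = model.forward(x)
    loss = out.mul(Tensor(y, gpu=gpu)).mean()
    optim.zero_grad()
    loss.backward()
    optim.step()
    t.set_description("loss %.2f" % loss.cpu().data)

def evaluate(model, X_test, Y_test, gpu=False):
  # returns the accuracy so each caller picks its own threshold
  out = model.forward(Tensor(X_test.reshape((-1, 28*28)).astype(np.float32), gpu=gpu)).cpu()
  return (np.argmax(out.data, axis=1) == Y_test).mean()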
@@ -54,47 +54,6 @@ class TinyConvNet:
     x = x.reshape(shape=[x.shape[0], -1])
     return x.dot(self.l1).logsoftmax()
 
-def train(model, optim, steps, BS=128, gpu=False):
-  if gpu is True: [x.cuda_() for x in get_parameters([model, optim])]
-  losses, accuracies = [], []
-  for i in (t := trange(steps, disable=os.getenv('CI') is not None)):
-    samp = np.random.randint(0, X_train.shape[0], size=(BS))
-    x = Tensor(X_train[samp].reshape((-1, 28*28)).astype(np.float32), gpu=gpu)
-    Y = Y_train[samp]
-    y = np.zeros((len(samp),10), np.float32)
-    # correct loss for NLL, torch NLL loss returns one per row
-    y[range(y.shape[0]),Y] = -10.0
-    y = Tensor(y, gpu=gpu)
-
-    # network
-    out = model.forward(x)
-
-    # NLL loss function
-    loss = out.mul(y).mean()
-    optim.zero_grad()
-    loss.backward()
-    optim.step()
-
-    cat = np.argmax(out.cpu().data, axis=1)
-    accuracy = (cat == Y).mean()
-
-    # printing
-    loss = loss.cpu().data
-    losses.append(loss)
-    accuracies.append(accuracy)
-    t.set_description("loss %.2f accuracy %.2f" % (loss, accuracy))
-
-def evaluate(model, gpu=False):
-  def numpy_eval():
-    Y_test_preds_out = model.forward(Tensor(X_test.reshape((-1, 28*28)).astype(np.float32), gpu=gpu)).cpu()
-    Y_test_preds = np.argmax(Y_test_preds_out.data, axis=1)
-    return (Y_test == Y_test_preds).mean()
-
-  accuracy = numpy_eval()
-  print("test set accuracy is %f" % accuracy)
-  assert accuracy > 0.95
-
 class TestMNIST(unittest.TestCase):
   gpu=False
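
A note on the -10.0 in the deleted target construction above: out is a logsoftmax over 10 classes and the loss is mean(out * y) over all BS×10 entries, so the mean divides by an extra factor of 10; writing -C = -10.0 at the target index cancels that factor, making the loss exactly the standard mean NLL, -(1/BS) Σ log p(target). A quick numpy check of that identity:

import numpy as np

BS, C = 4, 10
logp = np.log(np.random.dirichlet(np.ones(C), size=BS))  # stand-in for logsoftmax output
tgt = np.random.randint(0, C, size=BS)

y = np.zeros((BS, C), np.float32)
y[range(BS), tgt] = -C                    # the -10.0 from the code above

nll = -logp[range(BS), tgt].mean()        # standard mean NLL
assert np.isclose((logp * y).mean(), nll)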
@@ -102,22 +61,22 @@ class TestMNIST(unittest.TestCase):
     np.random.seed(1337)
     model = TinyConvNet()
     optimizer = optim.Adam(model.parameters(), lr=0.001)
-    train(model, optimizer, steps=200, gpu=self.gpu)
-    evaluate(model, gpu=self.gpu)
+    train(model, X_train, Y_train, optimizer, steps=200, gpu=self.gpu)
+    assert evaluate(model, X_test, Y_test, gpu=self.gpu) > 0.95
 
   def test_sgd(self):
     np.random.seed(1337)
     model = TinyBobNet()
     optimizer = optim.SGD(model.parameters(), lr=0.001)
-    train(model, optimizer, steps=1000, gpu=self.gpu)
-    evaluate(model, gpu=self.gpu)
+    train(model, X_train, Y_train, optimizer, steps=1000, gpu=self.gpu)
+    assert evaluate(model, X_test, Y_test, gpu=self.gpu) > 0.95
 
   def test_rmsprop(self):
     np.random.seed(1337)
     model = TinyBobNet()
     optimizer = optim.RMSprop(model.parameters(), lr=0.0002)
-    train(model, optimizer, steps=1000, gpu=self.gpu)
-    evaluate(model, gpu=self.gpu)
+    train(model, X_train, Y_train, optimizer, steps=1000, gpu=self.gpu)
+    assert evaluate(model, X_test, Y_test, gpu=self.gpu) > 0.95
 
 @unittest.skipUnless(GPU, "Requires GPU")
 class TestMNISTGPU(TestMNIST):
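
The diff excerpt ends here; the remaining changed files add the bigger model, the PIL augmentation, and the weight serialization referenced by the load/save and checkpoint bullets. The exact serialization helpers are not shown in this excerpt; a minimal sketch in the same spirit — save_parameters/load_parameters are hypothetical names, and writing through Tensor.data assumes CPU numpy buffers — might be:

import numpy as np
from extra.utils import get_parameters

def save_parameters(model, path):
  # dump every parameter's numpy buffer under a stable key order
  params = [p.cpu().data for p in get_parameters(model)]
  np.savez(path, **{"p%d" % i: d for i, d in enumerate(params)})

def load_parameters(model, path):
  ckpt = np.load(path)
  for i, p in enumerate(get_parameters(model)):
    p.data[:] = ckpt["p%d" % i]  # in-place copy keeps optimizer references valid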