#!/usr/bin/env python3
import numpy as np
from PIL import Image

from tinygrad.nn.optim import Adam
from tinygrad.helpers import getenv
from extra.utils import get_parameters
from extra.training import train, evaluate
from models.resnet import ResNet
from datasets import fetch_mnist
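# Note (context, not from the file itself): extra, models, and datasets are
# modules that ship inside the tinygrad repository rather than pip packages,
# so this script is meant to be run from a repo checkout.
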
class ComposeTransforms:
  def __init__(self, trans):
    self.trans = trans

  def __call__(self, x):
    for t in self.trans:
      x = t(x)
    return x
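# A minimal usage sketch of ComposeTransforms (illustration only; the names
# here are hypothetical and unused below). The callables are applied in order:
#   double_then_shift = ComposeTransforms([lambda x: x * 2, lambda x: x + 1])
#   double_then_shift(3)  # (3 * 2) + 1 == 7
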
if __name__ == "__main__":
  X_train, Y_train, X_test, Y_test = fetch_mnist()
  X_train = X_train.reshape(-1, 28, 28).astype(np.uint8)
  X_test = X_test.reshape(-1, 28, 28).astype(np.uint8)
  classes = 10
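  # NUM picks the ResNet depth (default 18); TRANSFER, when set, starts from
  # pretrained weights instead of a random initialization.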
  TRANSFER = getenv('TRANSFER')
  model = ResNet(getenv('NUM', 18), num_classes=classes)
  if TRANSFER:
    model.load_from_pretrained()
  lr = 5e-5
  transform = ComposeTransforms([
    lambda x: [Image.fromarray(xx, mode='L').resize((64, 64)) for xx in x],
    lambda x: np.stack([np.asarray(xx) for xx in x], 0),
    lambda x: x / 255.0,
    lambda x: np.tile(np.expand_dims(x, 1), (1, 3, 1, 1)).astype(np.float32),
  ])
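  # Shape flow per batch: (BS, 28, 28) uint8 -> BS resized 64x64 'L' PIL
  # images -> (BS, 64, 64) stacked array -> scaled to [0, 1]
  # -> (BS, 3, 64, 64) float32, tiling grayscale to the 3 input channels
  # ResNet expects.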
  # 10 rounds of 50 training steps each; Adam is rebuilt each round with a
  # learning rate decayed by a factor of 1.2.
  for _ in range(10):
    optim = Adam(get_parameters(model), lr=lr)
    train(model, X_train, Y_train, optim, 50, BS=32, transform=transform)
    acc, Y_test_preds = evaluate(model, X_test, Y_test, num_classes=10, return_predict=True, transform=transform)
    lr /= 1.2
    print(f'reducing lr to {lr:.7f}')
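# A possible invocation (assuming this file sits at examples/train_resnet.py
# inside a tinygrad checkout; NUM and TRANSFER are the env vars read above):
#   NUM=18 TRANSFER=1 python3 examples/train_resnet.py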