diff --git a/examples/beautiful_cifar.py b/examples/beautiful_cifar.py
index 66f693d9c4..cea8262f17 100644
--- a/examples/beautiful_cifar.py
+++ b/examples/beautiful_cifar.py
@@ -2,7 +2,6 @@ import time
 start_tm = time.perf_counter()
 import math
 from typing import Tuple, cast
-import numpy as np
 from tinygrad import Tensor, nn, GlobalCounters, TinyJit, dtypes, Device
 from tinygrad.helpers import partition, trange, getenv, Context
 from extra.lr_scheduler import OneCycleLR
@@ -150,13 +149,12 @@ if __name__ == "__main__":
       acc.append((out.argmax(-1) == Y).sum() / eval_batchsize)
     return Tensor.stack(*loss).mean() / (batchsize*loss_batchsize_scaler), Tensor.stack(*acc).mean()
 
-  np.random.seed(1337)
+  Tensor.manual_seed(1337)
+  num_train_samples = X_train.shape[0]
+
   for epoch in range(math.ceil(hyp['misc']['train_epochs'])):
-    # TODO: move to tinygrad
     gst = time.perf_counter()
-    idxs = np.arange(X_train.shape[0])
-    np.random.shuffle(idxs)
-    tidxs = Tensor(idxs, dtype='int')[:num_steps_per_epoch*batchsize].reshape(num_steps_per_epoch, batchsize) # NOTE: long doesn't fold
+    tidxs = Tensor.randperm(num_train_samples, dtype='int')[:num_steps_per_epoch*batchsize].reshape(num_steps_per_epoch, batchsize)
     train_loss:float = 0
     for epoch_step in (t:=trange(num_steps_per_epoch)):
       st = time.perf_counter()
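
The diff drops the numpy import and the "TODO: move to tinygrad" shuffle by generating the per-epoch batch indices directly with Tensor.randperm, keeping the permutation inside tinygrad. A minimal standalone sketch of that pattern, assuming only the Tensor.randperm and Tensor.manual_seed calls already used in the diff (the dataset and batch sizes below are illustrative, not taken from the file):

    # sketch of the per-epoch shuffle adopted by the diff (sizes are made up)
    from tinygrad import Tensor

    Tensor.manual_seed(1337)                       # deterministic shuffling, as in the diff
    num_train_samples = 50000                      # e.g. CIFAR-10 training set size
    batchsize = 1024
    num_steps_per_epoch = num_train_samples // batchsize

    # one random permutation of all sample indices per epoch, truncated to a whole
    # number of batches and reshaped so each row holds the indices of one batch
    tidxs = Tensor.randperm(num_train_samples, dtype='int')[:num_steps_per_epoch*batchsize] \
              .reshape(num_steps_per_epoch, batchsize)

    print(tidxs.shape)  # (48, 1024)

Each training step then gathers its batch as X_train[tidxs[epoch_step]], which is why the reshape to (num_steps_per_epoch, batchsize) is done up front rather than slicing the flat permutation inside the loop.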