Mirror of https://github.com/tinygrad/tinygrad.git, synced 2026-01-09 15:08:02 -05:00
switch beautiful_mnist to use new optimizer [pr] (#8231)
* switch beautiful_mnist to use new optimizer [pr]
* fix abstractions3 + docs
* fix OptimizerGroup with schedule_step api
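For the last bullet, a minimal sketch of how an `OptimizerGroup` could drive the `schedule_step` API; the two-optimizer split, tensor shapes, and stand-in data are illustrative assumptions, not taken from this commit:

```python
from tinygrad import Tensor
from tinygrad.nn.optim import SGD, OptimizerGroup

# hypothetical parameters, one per wrapped optimizer (shapes are assumptions)
l1 = Tensor.randn(784, 128, requires_grad=True)
l2 = Tensor.randn(128, 10, requires_grad=True)

# OptimizerGroup exposes several optimizers behind the single Optimizer interface
optim = OptimizerGroup(SGD([l1]), SGD([l2]))

Tensor.training = True
optim.zero_grad()
X, Y = Tensor.randn(128, 784), Tensor.randint(128, high=10)  # stand-in batch
X.dot(l1).relu().dot(l2).sparse_categorical_crossentropy(Y).backward()
optim.schedule_step()  # steps every wrapped optimizer without running realize
```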
@@ -26,10 +26,11 @@ l1n, l2n = l1.numpy(), l2.numpy()
 from tinygrad.nn.optim import SGD
 optim = SGD([l1, l2])
 
 Tensor.training = True
 X, Y = X_train[(samples:=Tensor.randint(128, high=X_train.shape[0]))], Y_train[samples]
 optim.zero_grad()
 model(X).sparse_categorical_crossentropy(Y).backward()
-optim._step() # this will step the optimizer without running realize
+optim.schedule_step() # this will step the optimizer without running realize
 
 # *****
 # 3. Create a schedule.
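As the updated comment says, `schedule_step` performs the parameter update lazily. A hedged sketch of the distinction, assuming `optim.step()` remains the eager variant that also runs realize (as in beautiful_mnist); the parameter shape and loss are illustrative:

```python
from tinygrad import Tensor
from tinygrad.nn.optim import SGD

l1 = Tensor.randn(784, 128, requires_grad=True)  # illustrative parameter
optim = SGD([l1])
Tensor.training = True

optim.zero_grad()
Tensor.randn(32, 784).dot(l1).square().mean().backward()

optim.schedule_step()  # queue the update lazily; nothing is realized yet
# optim.step()         # assumption: the eager form, which also runs realize
```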
@@ -31,4 +31,8 @@
 ::: tinygrad.Tensor.shard_
 ::: tinygrad.Tensor.contiguous
 ::: tinygrad.Tensor.contiguous_backward
 
+## Gradient
+
+::: tinygrad.Tensor.gradient
+::: tinygrad.Tensor.backward
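The new `## Gradient` docs section covers two entry points. A small sketch of the difference between them, with the `Tensor.gradient` call pattern assumed from its name (a functional API returning gradients rather than mutating `.grad`):

```python
from tinygrad import Tensor

x = Tensor([2.0, 3.0], requires_grad=True)
y = (x * x).sum()

# Tensor.backward: populates x.grad in place
y.backward()
print(x.grad.numpy())  # [4. 6.]

# Tensor.gradient: assumed to return the gradients of y with respect to
# the given tensors, without touching .grad
dx, = y.gradient(x)
print(dx.numpy())  # [4. 6.]
```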