mirror of
https://github.com/tinygrad/tinygrad.git
synced 2026-04-29 03:00:14 -04:00
don't return loss that's scaled
This commit is contained in:
@@ -395,9 +395,9 @@ def train_retinanet():
       optim.zero_grad()

       losses = model(normalize(x, GPUS), **kwargs)
-      loss = (sum([l for l in losses.values()]) * loss_scaler)
+      loss = sum([l for l in losses.values()])

-      loss.backward()
+      (loss * loss_scaler).backward()
       for t in optim.params: t.grad = t.grad.contiguous() / loss_scaler

       optim.step()
Reference in New Issue
Block a user