remove RMSprop, nobody uses it anymore
@@ -11,13 +11,6 @@ class Optimizer:
     self.params: List[Tensor] = [x for x in params if x.requires_grad]
     self.buffers: List[Tensor] = [x for x in params if not x.requires_grad]  # buffers are still realized

-  # TODO: this probably shouldn't change the gradients, just the ones used by the optimizer
-  def clipnorm(self, amount=1):
-    for param in self.params:
-      assert param.grad is not None
-      # clipnorm is the L2 norm, not value: is this right?
-      param.grad.assign(param.grad.clip(-(amount**2), (amount**2)))
-
   def zero_grad(self):
     for param in self.params: param.grad = None

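Note that the removed clipnorm never clipped by norm: as its own comment asks, it clipped each gradient element into [-amount**2, amount**2], which is clipping by value. For reference, a minimal sketch of a true global-L2-norm clip kept outside the library; the helper name clip_grad_norm_ and the max_norm parameter are illustrative, not tinygrad API, and the sketch only relies on Tensor ops already used in this file (assign, sqrt, sum) plus .numpy() to materialize the scalar norm.

from typing import List
from tinygrad.tensor import Tensor

def clip_grad_norm_(params: List[Tensor], max_norm: float = 1.0) -> float:
  # hypothetical helper, not part of tinygrad: scale all grads so their combined L2 norm is <= max_norm
  grads = [p.grad for p in params if p.grad is not None]
  total_sq = None
  for g in grads:
    sq = (g * g).sum()                              # sum of squares of this gradient
    total_sq = sq if total_sq is None else total_sq + sq
  total_norm = float(total_sq.sqrt().numpy())       # materialize the global L2 norm as a python float
  scale = min(1.0, max_norm / (total_norm + 1e-6))  # only shrink gradients, never grow them
  for g in grads:
    g.assign(g * scale)
  return total_norm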
@@ -44,21 +37,6 @@ class SGD(Optimizer):
       t.assign(t.detach() - g * self.lr)
     self.realize(self.b)

-class RMSprop(Optimizer):
-  def __init__(self, params: List[Tensor], lr=0.001, alpha=0.99, eps=1e-8):
-    super().__init__(params)
-    self.lr, self.alpha, self.eps = lr, alpha, eps
-
-    self.v = [Tensor.zeros(*t.shape, device=t.device, requires_grad=False) for t in self.params]
-
-  def step(self) -> None:
-    for i, t in enumerate(self.params):
-      assert t.grad is not None
-      g = t.grad.realize()
-      self.v[i].assign(self.alpha * self.v[i] + (1.0 - self.alpha) * (g * g)).realize()
-      t.assign(t.detach() - (g * self.lr).div(self.v[i].sqrt() + self.eps))
-    self.realize(self.v)
-
 class AdamW(Optimizer):
   def __init__(self, params: List[Tensor], lr=0.001, b1=0.9, b2=0.999, eps=1e-8, wd=0.01):
     super().__init__(params)
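Anyone who still wants RMSprop after this removal can keep the deleted class in their own code, since the Optimizer base class it subclasses (params, zero_grad, realize) is untouched. A minimal sketch that reproduces the removed update, v = alpha*v + (1-alpha)*g**2 and t = t - lr*g/(sqrt(v) + eps); the Optimizer import path below is an assumption and depends on the tinygrad version.

from typing import List
from tinygrad.tensor import Tensor
from tinygrad.nn.optim import Optimizer  # import path is an assumption; older versions keep Optimizer elsewhere

class RMSprop(Optimizer):
  def __init__(self, params: List[Tensor], lr=0.001, alpha=0.99, eps=1e-8):
    super().__init__(params)
    self.lr, self.alpha, self.eps = lr, alpha, eps
    # one running average of squared gradients per parameter, same as the removed class
    self.v = [Tensor.zeros(*t.shape, device=t.device, requires_grad=False) for t in self.params]

  def step(self) -> None:
    for i, t in enumerate(self.params):
      assert t.grad is not None
      g = t.grad.realize()
      self.v[i].assign(self.alpha * self.v[i] + (1.0 - self.alpha) * (g * g)).realize()
      t.assign(t.detach() - (g * self.lr).div(self.v[i].sqrt() + self.eps))
    self.realize(self.v)

Usage matches the built-in optimizers: construct it with the model's trainable parameters, call zero_grad(), backward the loss, then step().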