fix batchnorm not realizing
@@ -10,6 +10,8 @@ class Optimizer:
         x.requires_grad = True
 
     self.params : List[Tensor] = [x for x in params if x.requires_grad]
+    self.buffers : List[Tensor] = [x for x in params if not x.requires_grad]   # buffers are still realized
+    self.realize()
 
   # TODO: this probably shouldn't change the gradients, just the ones used by the optimizer
   def clipnorm(self, amount=1):
@@ -24,7 +26,7 @@ class Optimizer:
 
   def realize(self, extra=None):
     # TODO: corealize
-    for p in extra + self.params if extra is not None else self.params:
+    for p in extra + self.params + self.buffers if extra is not None else self.params + self.buffers:
       p.realize()
 
 class SGD(Optimizer):
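For context, a minimal sketch of the pattern this patch introduces, using hypothetical stand-in classes (FakeTensor, SketchOptimizer) rather than tinygrad's actual Tensor or Optimizer: BatchNorm keeps running statistics as tensors with requires_grad=False, so an optimizer that only realizes self.params leaves those lazy buffers unevaluated; splitting them into self.buffers and realizing both groups is the fix sketched here.

from typing import List, Optional

class FakeTensor:
  """Hypothetical stand-in for a lazy tensor: nothing is computed until realize()."""
  def __init__(self, requires_grad: bool):
    self.requires_grad = requires_grad
    self.realized = False
  def realize(self):
    self.realized = True
    return self

class SketchOptimizer:
  """Mirrors the params/buffers split from the patched Optimizer."""
  def __init__(self, params: List[FakeTensor]):
    self.params: List[FakeTensor] = [x for x in params if x.requires_grad]
    self.buffers: List[FakeTensor] = [x for x in params if not x.requires_grad]  # buffers are still realized
    self.realize()
  def realize(self, extra: Optional[List[FakeTensor]] = None):
    for p in extra + self.params + self.buffers if extra is not None else self.params + self.buffers:
      p.realize()

# BatchNorm-style state: weight/bias are trainable, the running stats are not.
weight, bias = FakeTensor(True), FakeTensor(True)
running_mean, running_var = FakeTensor(False), FakeTensor(False)
opt = SketchOptimizer([weight, bias, running_mean, running_var])
# Without the buffers list, only weight and bias would have been realized here.
assert all(t.realized for t in (weight, bias, running_mean, running_var))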