apply flake8 E203 rule (#684)

Author: Cyril Roumégous
Date: 2023-03-11 20:35:16 +01:00
Committed by: GitHub
Parent: 784afc6c6f
Commit: 3f08613a2a
21 changed files with 121 additions and 121 deletions
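Context for the change: flake8's E203 check (inherited from pycodestyle) flags whitespace before the punctuation characters ':', ',' and ';'. This commit removes the offending space before ':' in parameter and variable annotations. A minimal illustration of the rule, not taken from this diff:

    x : int = 1   # flagged: E203 whitespace before ':'
    x: int = 1    # clean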


@@ -3,13 +3,13 @@ from typing import List
 from tinygrad.tensor import Tensor

 class Optimizer:
-  def __init__(self, params : List[Tensor]):
+  def __init__(self, params: List[Tensor]):
     # if it's None, but being put into an optimizer, set it to True
     for x in params:
       if x.requires_grad is None: x.requires_grad = True

-    self.params : List[Tensor] = [x for x in params if x.requires_grad]
-    self.buffers : List[Tensor] = [x for x in params if not x.requires_grad] # buffers are still realized
+    self.params: List[Tensor] = [x for x in params if x.requires_grad]
+    self.buffers: List[Tensor] = [x for x in params if not x.requires_grad] # buffers are still realized

   # TODO: this probably shouldn't change the gradients, just the ones used by the optimizer
   def clipnorm(self, amount=1):
@@ -27,7 +27,7 @@ class Optimizer:
       p.realize()

 class SGD(Optimizer):
-  def __init__(self, params : List[Tensor], lr=0.001, momentum=0, nesterov=False):
+  def __init__(self, params: List[Tensor], lr=0.001, momentum=0, nesterov=False):
     super().__init__(params)
     self.lr, self.momentum, self.nesterov = lr, momentum, nesterov
     self.b = [Tensor.zeros(*t.shape, device=params[0].device, requires_grad=False) for t in self.params] if self.momentum else []
@@ -44,7 +44,7 @@ class SGD(Optimizer):
     self.realize(self.b)

 class RMSprop(Optimizer):
-  def __init__(self, params : List[Tensor], lr=0.001, decay=0.9, eps=1e-8):
+  def __init__(self, params: List[Tensor], lr=0.001, decay=0.9, eps=1e-8):
     super().__init__(params)
     self.lr, self.decay, self.eps = lr, decay, eps
@@ -58,7 +58,7 @@ class RMSprop(Optimizer):
     self.realize(self.v)

 class Adam(Optimizer):
-  def __init__(self, params : List[Tensor], lr=0.001, b1=0.9, b2=0.999, eps=1e-8):
+  def __init__(self, params: List[Tensor], lr=0.001, b1=0.9, b2=0.999, eps=1e-8):
     super().__init__(params)
     # NOTE: self.t is a tensor so Adam can be jitted
     self.lr, self.b1, self.b2, self.eps, self.t = lr, b1, b2, eps, Tensor([0], requires_grad=False).realize()
@@ -77,7 +77,7 @@ class Adam(Optimizer):
     self.realize([self.t] + self.m + self.v)

 def get_parameters(obj) -> List[Tensor]:
-  parameters : List[Tensor] = []
+  parameters: List[Tensor] = []
   if isinstance(obj, Tensor):
     parameters.append(obj)
   elif isinstance(obj, (list, tuple)):
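For readers unfamiliar with this module, a minimal sketch of how the classes touched above are typically wired together. The import path, the TinyNet helper, and the zero_grad()/step() calls are assumptions based on the usual tinygrad optimizer API, not something shown in this diff:

    from tinygrad.tensor import Tensor
    from tinygrad.nn.optim import SGD, get_parameters  # assumed module path

    class TinyNet:
      def __init__(self):
        # created with requires_grad=None; Optimizer.__init__ above flips it to True
        self.w = Tensor.uniform(2, 1)
      def forward(self, x):
        return x.dot(self.w)

    net = TinyNet()
    opt = SGD(get_parameters(net), lr=0.01)  # get_parameters collects the Tensors reachable from net
    x, y = Tensor([[1.0, 2.0]]), Tensor([[1.0]])
    err = net.forward(x) - y
    loss = (err * err).mean()
    opt.zero_grad()   # assumed helper on Optimizer; clears grads on opt.params
    loss.backward()
    opt.step()        # updates the parameters and realizes the lazy buffers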