# tinygrad/test/graph_batchnorm.py
import unittest

from tinygrad.tensor import Tensor
from tinygrad.nn import Conv2d, BatchNorm2D
from tinygrad import optim
from extra.utils import get_parameters  # TODO: move to optim

def model_step(lm):
  # run one training step: forward pass, summed loss, backward pass, SGD update
  Tensor.training = True
  x = Tensor.ones(8, 12, 128, 256, requires_grad=False)
  loss = lm.forward(x).sum()
  optimizer = optim.SGD(get_parameters(lm), lr=0.001)
  optimizer.zero_grad()
  loss.backward()
  optimizer.step()
  #out = loss.detach().numpy()
  # force realization of the lazy graph for every updated parameter
  for p in optimizer.params:
    p.realize()
  #x.grad.realize()
  Tensor.training = False

class TestBatchnorm(unittest.TestCase):
  def test_conv(self):
    # a single conv layer into relu
    class LilModel:
      def __init__(self):
        self.c = Conv2d(12, 32, 3, padding=1, bias=False)
      def forward(self, x):
        return self.c(x).relu()
    lm = LilModel()
    model_step(lm)

  def test_two_conv(self):
    # two stacked conv layers into relu
    class LilModel:
      def __init__(self):
        self.c = Conv2d(12, 32, 3, padding=1, bias=False)
        self.c2 = Conv2d(32, 32, 3, padding=1, bias=False)
      def forward(self, x):
        return self.c2(self.c(x)).relu()
    lm = LilModel()
    model_step(lm)

  def test_conv_bn(self):
    # conv into batchnorm (without running stats) into relu
    class LilModel:
      def __init__(self):
        self.c = Conv2d(12, 32, 3, padding=1, bias=False)
        self.bn = BatchNorm2D(32, track_running_stats=False)
      def forward(self, x):
        return self.bn(self.c(x)).relu()
    lm = LilModel()
    model_step(lm)

if __name__ == '__main__':
  unittest.main()