Test split (#231)

* Split tests

  Split tests into "Test CPU" and "Test GPU". Add a test flag "TEST_DEVICES", a comma-separated list of devices: CPU,GPU,ANE

* Run tests based on the provided TEST_DEVICES flag

  By default all of "CPU,GPU,ANE" are run.

* fix bad quote

* Revert changes and use GPU=1

  This is done by setting the default Tensor device to Device.CPU unless GPU=1 is set. Run GPU tests: GPU=1 pytest -s -v
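The tensor-side switch that GPU=1 flips is not part of the diff below, since it lives in the tensor module rather than the tests. A minimal sketch of the mechanism the message describes, assuming an environment check at import time; the Device stub and the DEFAULT_DEVICE name are illustrative, not tinygrad's actual code:

import os

# Sketch: choose the default device from the GPU=1 environment flag.
# Device stub and DEFAULT_DEVICE are illustrative names, not tinygrad's code.
class Device:
  CPU, GPU, ANE = 0, 1, 2

DEFAULT_DEVICE = Device.GPU if os.environ.get("GPU", "0") == "1" else Device.CPU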
test/test_optim.py
@@ -1,7 +1,7 @@
 import numpy as np
 import torch
 import unittest
-from tinygrad.tensor import Tensor
+from tinygrad.tensor import Tensor, GPU, ANE, Device
 from tinygrad.optim import Adam, SGD, RMSprop
 from extra.utils import get_parameters
 
@@ -9,9 +9,11 @@ x_init = np.random.randn(1,3).astype(np.float32)
 W_init = np.random.randn(3,3).astype(np.float32)
 m_init = np.random.randn(1,3).astype(np.float32)
 
-def step_tinygrad(optim, kwargs={}):
+def step_tinygrad(optim, kwargs={}, device=Device.CPU):
   net = TinyNet()
   optim = optim([net.x, net.W], **kwargs)
+  if device==Device.GPU: [x.gpu_() for x in get_parameters([net, optim])]
+  elif device==Device.ANE: [x.ane_() for x in get_parameters([net, optim])]
   out = net.forward()
   out.backward()
   optim.step()
@@ -53,22 +55,33 @@ class TorchNet():
 
 
 class TestOptim(unittest.TestCase):
+  device = Device.CPU
+
   def test_adam(self):
-    for x,y in zip(step_tinygrad(Adam),
+    for x,y in zip(step_tinygrad(Adam, device=self.device),
                    step_pytorch(torch.optim.Adam)):
       np.testing.assert_allclose(x, y, atol=1e-4)
 
   def test_sgd(self):
-    for x,y in zip(step_tinygrad(SGD, kwargs={'lr': 0.001}),
+    for x,y in zip(step_tinygrad(SGD, kwargs={'lr': 0.001}, device=self.device),
                    step_pytorch(torch.optim.SGD, kwargs={'lr': 0.001})):
       np.testing.assert_allclose(x, y, atol=1e-5)
 
   def test_rmsprop(self):
-    for x,y in zip(step_tinygrad(RMSprop, kwargs={'lr': 0.001, 'decay': 0.99}),
+    for x,y in zip(step_tinygrad(RMSprop, kwargs={'lr': 0.001, 'decay': 0.99}, device=self.device),
                    step_pytorch(torch.optim.RMSprop,
                                 kwargs={'lr': 0.001, 'alpha': 0.99})):
       np.testing.assert_allclose(x, y, atol=1e-5)
 
+
+@unittest.skipUnless(GPU, "Requires GPU")
+class TestOptimGPU(TestOptim):
+  device = Device.GPU
+
+@unittest.skipUnless(ANE, "Requires ANE")
+class TestOptimANE(TestOptim):
+  device = Device.ANE
+
+
 if __name__ == '__main__':
   unittest.main()
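The pattern in the last hunk is worth noting: TestOptimGPU and TestOptimANE inherit every test method from TestOptim and override only the device class attribute, and unittest.skipUnless drops them when the backend is unavailable. A minimal self-contained sketch of the same technique, where HAS_GPU and compute are stand-ins rather than tinygrad APIs:

import unittest

HAS_GPU = False  # stand-in for the GPU availability flag imported from tinygrad.tensor

def compute(device):
  # stand-in for the per-device computation under test
  return [1.0, 2.0]

class TestBase(unittest.TestCase):
  device = "CPU"

  def test_values(self):
    # inherited by each subclass, so it runs once per device
    for x, y in zip(compute(self.device), [1.0, 2.0]):
      self.assertAlmostEqual(x, y)

@unittest.skipUnless(HAS_GPU, "Requires GPU")
class TestBaseGPU(TestBase):
  device = "GPU"

if __name__ == '__main__':
  unittest.main()

Per the commit message, the real GPU cases run with GPU=1 pytest -s -v.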