Test split (#231)

* Split tests

Split tests into "Test CPU" and "Test GPU".

Add a test flag "TEST_DEVICES", a comma-separated list of devices:
CPU,GPU,ANE

* Run tests based on provided TEST_DEVICES flag

By default, tests will run on all devices: "CPU,GPU,ANE". A sketch of how such a flag could be parsed follows below.
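A hypothetical sketch of parsing this flag (note this approach was reverted later in this PR in favor of GPU=1; the names below are illustrative, not the shipped code):

import os

# Hypothetical: read the comma-separated TEST_DEVICES flag,
# defaulting to all devices when the flag is unset.
TEST_DEVICES = os.getenv("TEST_DEVICES", "CPU,GPU,ANE").split(",")

def should_run(device_name):
  # Run a test class only if its device was requested.
  return device_name in TEST_DEVICES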

* Fix bad quote

* Revert changes and use GPU=1

This is done by setting the default Tensor device to Device.GPU when
GPU=1 is set (Device.CPU otherwise).

Run GPU tests: GPU=1 pytest -s -v
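
A minimal sketch of this env-var-driven default, assuming Device is an int enum with CPU == 0 (consistent with the `not DEFAULT_DEVICE` checks in the diff below; the exact tinygrad code may differ):

import os

class Device:
  CPU, GPU, ANE = 0, 1, 2

# Setting GPU=1 in the environment makes the GPU the default Tensor device.
DEFAULT_DEVICE = Device.GPU if os.getenv("GPU", None) is not None else Device.CPU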
Author: Liam
Committed by: GitHub
Date: 2021-01-01 15:19:03 +01:00
Parent: 4a7cf2e420
Commit: ebd72ff437
10 changed files with 137 additions and 213 deletions


@@ -1,7 +1,7 @@
 import numpy as np
 import torch
 import unittest
-from tinygrad.tensor import Tensor, GPU, ANE, Device
+from tinygrad.tensor import Tensor, DEFAULT_DEVICE
 from extra.gradcheck import numerical_jacobian, jacobian, gradcheck
 
 x_init = np.random.randn(1,3).astype(np.float32)
@@ -11,13 +11,12 @@ W_init = np.random.randn(3,3).astype(np.float32)
 m_init = np.random.randn(1,3).astype(np.float32)
 
 class TestTinygrad(unittest.TestCase):
-  device = Device.CPU
 
   def test_backward_pass(self):
     def test_tinygrad():
-      x = Tensor(x_init, device=self.device)
-      W = Tensor(W_init, device=self.device)
-      m = Tensor(m_init, device=self.device)
+      x = Tensor(x_init)
+      W = Tensor(W_init)
+      m = Tensor(m_init)
       out = x.dot(W).relu()
       out = out.logsoftmax()
       out = out.mul(m).add(m).sum()
@@ -39,9 +38,9 @@ class TestTinygrad(unittest.TestCase):
 
   def test_backward_pass_diamond_model(self):
     def test_tinygrad():
-      u = Tensor(U_init, device=self.device)
-      v = Tensor(V_init, device=self.device)
-      w = Tensor(W_init, device=self.device)
+      u = Tensor(U_init)
+      v = Tensor(V_init)
+      w = Tensor(W_init)
       x = u.mul(v).relu()
       y = u.mul(w).relu()
       out = x.add(y).mul(y).relu()
@@ -65,6 +64,7 @@ class TestTinygrad(unittest.TestCase):
     for x,y in zip(test_tinygrad(), test_pytorch()):
       np.testing.assert_allclose(x, y, atol=1e-5)
 
+  @unittest.skipUnless(not DEFAULT_DEVICE, "float64 not supported on GPU")
   def test_jacobian(self):
     W = np.random.RandomState(1337).random((10, 5))
     x = np.random.RandomState(7331).random((1, 10)) - 0.5
@@ -74,8 +74,8 @@
     torch_func = lambda x: torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1)
     PJ = torch.autograd.functional.jacobian(torch_func, torch_x).squeeze().numpy()
 
-    tiny_x = Tensor(x, device=self.device)
-    tiny_W = Tensor(W, device=self.device)
+    tiny_x = Tensor(x)
+    tiny_W = Tensor(W)
     tiny_func = lambda x: x.dot(tiny_W).relu().logsoftmax()
     J = jacobian(tiny_func, tiny_x)
     NJ = numerical_jacobian(tiny_func, tiny_x)
@@ -83,12 +83,13 @@ class TestTinygrad(unittest.TestCase):
     np.testing.assert_allclose(PJ, J, atol = 1e-5)
     np.testing.assert_allclose(PJ, NJ, atol = 1e-5)
 
+  @unittest.skipUnless(not DEFAULT_DEVICE, "float64 not supported on GPU")
   def test_gradcheck(self):
     W = np.random.RandomState(1337).random((10, 5))
     x = np.random.RandomState(7331).random((1, 10)) - 0.5
 
-    tiny_x = Tensor(x, device=self.device)
-    tiny_W = Tensor(W, device=self.device)
+    tiny_x = Tensor(x)
+    tiny_W = Tensor(W)
     tiny_func = lambda x: x.dot(tiny_W).relu().logsoftmax()
     self.assertTrue(gradcheck(tiny_func, tiny_x))
 
@@ -96,20 +97,5 @@ class TestTinygrad(unittest.TestCase):
     # coarse approx. since a "big" eps and the non-linearities of the model
     self.assertFalse(gradcheck(tiny_func, tiny_x, eps = 0.1))
 
-@unittest.skipUnless(GPU, "Requires GPU")
-class TestTinygradGPU(TestTinygrad):
-  device = Device.GPU
-
-  @unittest.skip("float64 not supported on GPU")
-  def test_jacobian(self): pass
-
-  @unittest.skip("float64 not supported on GPU")
-  def test_gradcheck(self): pass
-
-
-@unittest.skipUnless(ANE, "Requires ANE")
-class TestOpsANE(TestTinygrad):
-  device=Device.ANE
-
 if __name__ == '__main__':
   unittest.main()
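
Since Device.CPU is 0 here, `not DEFAULT_DEVICE` is truthy exactly when the default device is the CPU, so the float64-only tests are skipped on GPU runs. A standalone sketch of that skip pattern (names are stand-ins, not tinygrad's code):

import unittest

DEFAULT_DEVICE = 0  # stand-in: 0 plays the role of Device.CPU

class TestSketch(unittest.TestCase):
  # Runs only when the default device is the CPU; skipped under GPU=1.
  @unittest.skipUnless(not DEFAULT_DEVICE, "float64 not supported on GPU")
  def test_needs_float64(self):
    self.assertEqual(1.0, 1.0)

if __name__ == '__main__':
  unittest.main()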