add transpose, start on transformer

This commit is contained in:
George Hotz
2020-12-27 16:59:12 -05:00
parent d379502c04
commit 2f1b2c0a3b
13 changed files with 91 additions and 19 deletions

View File

@@ -1,3 +0,0 @@
import os

# ANE holds the value of the ANE environment variable, or False when unset.
# (os.getenv is an alias for os.environ.get.)
ANE = os.getenv('ANE', False)

View File

@@ -1,8 +1,7 @@
#!/usr/bin/env python
import gc
import unittest
from tinygrad.tensor import Tensor, GPU, Device
from .config import ANE
from tinygrad.tensor import Tensor, GPU, ANE, Device
def tensors_allocated():
  """Count live Tensor instances currently tracked by the garbage collector."""
  # Generator expression avoids materializing a temporary list of booleans.
  return sum(isinstance(x, Tensor) for x in gc.get_objects())

View File

@@ -2,11 +2,10 @@
import os
import unittest
import numpy as np
from tinygrad.tensor import Tensor, GPU, Device
from tinygrad.tensor import Tensor, GPU, ANE, Device
import tinygrad.optim as optim
from extra.training import train, evaluate
from extra.utils import fetch, get_parameters
from .config import ANE
# mnist loader
def fetch_mnist():

View File

@@ -4,8 +4,7 @@ import cProfile
import pstats
import unittest
import torch
from tinygrad.tensor import Tensor, GPU, Device
from .config import ANE
from tinygrad.tensor import Tensor, GPU, ANE, Device
def start_profile():
import time

View File

@@ -1,11 +1,10 @@
#!/usr/bin/env python
import unittest
import numpy as np
from tinygrad.tensor import GPU, Device
from tinygrad.tensor import GPU, ANE, Device
from tinygrad.nn import *
from extra.utils import get_parameters
import torch
from .config import ANE
class TestNN(unittest.TestCase):
device = Device.CPU

View File

@@ -4,8 +4,7 @@ import numpy as np
import unittest
import timeit
import functools
from tinygrad.tensor import Tensor, GPU, Device
from .config import ANE
from tinygrad.tensor import Tensor, GPU, ANE, Device
def helper_test_op(shps, torch_fxn, tinygrad_fxn, atol=0, rtol=1e-6, grad_atol=0, grad_rtol=1e-6, device=Device.CPU, forward_only=False):
torch.manual_seed(0)
@@ -108,6 +107,12 @@ class TestOps(unittest.TestCase):
def test_pad2d(self):
  """pad2d(padding=(l, r, t, b)) should match torch.nn.functional.pad."""
  padding = (1, 2, 3, 4)
  helper_test_op([(3, 3, 3, 3)],
                 lambda x: torch.nn.functional.pad(x, padding),
                 lambda x: x.pad2d(padding=padding),
                 device=self.device)
def test_transpose(self):
  """transpose(order=(0, 2, 1)) should match torch's x.transpose(1, 2)."""
  if self.device == Device.GPU:
    # TODO: transpose for GPU — skip until the GPU backend supports it
    return
  torch_fxn = lambda x: x.transpose(1, 2)
  tinygrad_fxn = lambda x: x.transpose(order=(0, 2, 1))
  helper_test_op([(3, 3, 3)], torch_fxn, tinygrad_fxn, device=self.device)
def test_reshape(self):
  """reshape should agree with torch.reshape, including -1 dimension inference."""
  for target in ((-1, 3, 6, 6), (-1, 1, 6, 6)):
    # bind the current target as a default arg to avoid late-binding closures
    helper_test_op([(4, 3, 6, 6)],
                   lambda x, s=target: torch.reshape(x, s),
                   lambda x, s=target: x.reshape(shape=s),
                   device=self.device)

View File

@@ -1,10 +1,9 @@
import numpy as np
import torch
import unittest
from tinygrad.tensor import Tensor, GPU, Device
from tinygrad.tensor import Tensor, GPU, ANE, Device
from tinygrad.optim import Adam, SGD, RMSprop
from extra.utils import get_parameters
from .config import ANE
x_init = np.random.randn(1,3).astype(np.float32)
W_init = np.random.randn(3,3).astype(np.float32)

View File

@@ -1,10 +1,8 @@
import numpy as np
import torch
import unittest
from tinygrad.tensor import Tensor, GPU, Device
from tinygrad.tensor import Tensor, GPU, ANE, Device
from extra.gradcheck import numerical_jacobian, jacobian, gradcheck
from .config import ANE
x_init = np.random.randn(1,3).astype(np.float32)
U_init = np.random.randn(3,3).astype(np.float32)