commit 1bb258350092defd802cf6fbb94b1e8de96935cc
Author: George Hotz
Date:   Sat Oct 17 22:57:01 2020 -0700

    start tinygrad

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..a783405e0c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+__pycache__
+notebooks
+.*.swp
diff --git a/README b/README
new file mode 100644
index 0000000000..9da75a69c9
--- /dev/null
+++ b/README
@@ -0,0 +1,4 @@
+For something in between a grad and a karpathy/micrograd
+
+Requires numpy
+
diff --git a/tensor.py b/tensor.py
new file mode 100644
index 0000000000..4ca1312332
--- /dev/null
+++ b/tensor.py
@@ -0,0 +1,76 @@
+# inspired by https://github.com/karpathy/micrograd/blob/master/micrograd/engine.py
+from functools import partialmethod
+import numpy as np
+
+# **** start with three base classes ****
+
+class Context:
+  def __init__(self):
+    self.saved_tensors = []
+
+  def save_for_backward(self, *x):
+    self.saved_tensors.extend(x)
+
+class Tensor:
+  def __init__(self, data, _children=()):
+    self.data = data
+    self.grad = np.zeros_like(data)
+
+    # internal variables used for autograd graph construction
+    self._prev = set(_children)
+
+class Function:
+  def apply(self, arg, *x):  # self is the Tensor the op was called on, arg is the Function subclass
+    ctx = Context()
+    x = [self]+list(x)
+    ret = Tensor(arg.forward(ctx, *[t.data for t in x]))
+    return ret
+
+def register(name, fxn):
+  setattr(Tensor, name, partialmethod(fxn.apply, fxn))
+
+# **** implement a few functions ****
+
+"""
+class ReLU(Function):
+  @staticmethod
+  def forward(ctx, input):
+    ctx.save_for_backward(input)
+    return np.maximum(input, 0)
+
+  @staticmethod
+  def backward(ctx, grad_output):
+    input, = ctx.saved_tensors
+    grad_input = grad_output.copy()
+    grad_input[input < 0] = 0
+    return grad_input
+register('relu', ReLU)
+"""
+
+class Dot(Function):
+  @staticmethod
+  def forward(ctx, input, weight):
+    ctx.save_for_backward(input, weight)
+    return input.dot(weight)
+
+  @staticmethod
+  def backward(ctx, grad_output):
+    input, weight = ctx.saved_tensors
+    grad_input = grad_output.dot(weight.T)
+    grad_weight = input.T.dot(grad_output)
+    return grad_input, grad_weight
+register('dot', Dot)
+
+# gradient of sum broadcasts grad_output over the input's shape
+class Sum(Function):
+  @staticmethod
+  def forward(ctx, input):
+    ctx.save_for_backward(input)
+    return input.sum()
+
+  @staticmethod
+  def backward(ctx, grad_output):
+    input, = ctx.saved_tensors
+    return grad_output * np.ones_like(input)
+register('sum', Sum)
+
diff --git a/test.py b/test.py
new file mode 100644
index 0000000000..c324238566
--- /dev/null
+++ b/test.py
@@ -0,0 +1,10 @@
+# quick sanity check of the forward pass (no backward yet)
+import numpy as np
+from tensor import Tensor
+
+x = Tensor(np.random.randn(1,3))
+W = Tensor(np.random.randn(3,3))
+
+out = x.dot(W)
+print(out.data)
+
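
A minimal sketch of how the pieces in tensor.py fit together: this commit registers forward/backward pairs on Tensor but nothing walks the autograd graph yet, so one way to exercise the Context plumbing is to drive a single Function by hand, assuming the Dot gradients as written above (the array shapes are illustrative):

import numpy as np
from tensor import Context, Dot

x = np.random.randn(1,3)
W = np.random.randn(3,3)

# forward: the Context records the arrays that backward will need
ctx = Context()
out = Dot.forward(ctx, x, W)           # save_for_backward stores (x, W)

# backward: with dL/dout of all ones, recover dL/dx and dL/dW
grad_out = np.ones_like(out)
grad_x, grad_W = Dot.backward(ctx, grad_out)
print(grad_x.shape, grad_W.shape)      # (1, 3) (3, 3)

The same call sequence is what Function.apply performs when a registered op like x.dot(W) is invoked, except that apply also wraps the forward result in a new Tensor.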