Mirror of https://github.com/tinygrad/tinygrad.git (synced 2026-01-08 22:48:25 -05:00)
update readme examples
README.md (36 changed lines)
@@ -35,7 +35,7 @@ tinygrad can run [LLaMA](/docs/showcase.md#llama) and [Stable Diffusion](/docs/s
 Try a matmul. See how, despite the style, it is fused into one kernel with the power of laziness.
 
 ```sh
-DEBUG=3 python3 -c "from tinygrad.tensor import Tensor;
+DEBUG=3 python3 -c "from tinygrad import Tensor;
 N = 1024; a, b = Tensor.rand(N, N), Tensor.rand(N, N);
 c = (a.reshape(N, 1, N) * b.permute(1,0).reshape(1, N, N)).sum(axis=2);
 print((c.numpy() - (a.numpy() @ b.numpy())).mean())"
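As a sanity check on the hand-written reduction in this hunk, it can also be compared against `Tensor`'s built-in matmul. A minimal sketch, assuming the post-change top-level import and the `@` operator on `Tensor` (standard tinygrad usage, but not lines from this diff):

```py
# sketch: compare the fused broadcast-multiply-sum against Tensor's built-in matmul
# (assumes `from tinygrad import Tensor`, as in the updated README line)
from tinygrad import Tensor

N = 256
a, b = Tensor.rand(N, N), Tensor.rand(N, N)

manual = (a.reshape(N, 1, N) * b.permute(1, 0).reshape(1, N, N)).sum(axis=2)
builtin = a @ b  # Tensor.__matmul__

# difference should be ~0 up to floating point error
print((manual - builtin).abs().max().numpy())
```

Running the README command with `DEBUG=4` instead of `DEBUG=3` prints the generated code, as the next hunk's context line notes.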
@@ -48,30 +48,28 @@ And we can change `DEBUG` to `4` to see the generated code.
 As it turns out, 90% of what you need for neural networks are a decent autograd/tensor library.
 Throw in an optimizer, a data loader, and some compute, and you have all you need.
 
-#### Neural network example (from test/models/test_mnist.py)
+#### Neural network example (see examples/beautiful_mnist.py for the full thing)
 
 ```py
-from tinygrad.tensor import Tensor
-import tinygrad.nn.optim as optim
+from tinygrad import Tensor, nn
 
-class TinyBobNet:
+class LinearNet:
   def __init__(self):
-    self.l1 = Tensor.uniform(784, 128)
-    self.l2 = Tensor.uniform(128, 10)
-
-  def forward(self, x):
-    return x.dot(self.l1).relu().dot(self.l2).log_softmax()
+    self.l1 = Tensor.kaiming_uniform(784, 128)
+    self.l2 = Tensor.kaiming_uniform(128, 10)
+  def __call__(self, x:Tensor) -> Tensor:
+    return x.flatten(1).dot(self.l1).relu().dot(self.l2)
 
-model = TinyBobNet()
-optim = optim.SGD([model.l1, model.l2], lr=0.001)
+model = LinearNet()
+optim = nn.optim.Adam([model.l1, model.l2], lr=0.001)
 
-# ... complete data loader here
+x, y = Tensor.rand(4, 1, 28, 28), Tensor([2,4,3,7]) # replace with real mnist dataloader
 
-out = model.forward(x)
-loss = out.mul(y).mean()
-optim.zero_grad()
-loss.backward()
-optim.step()
+for i in range(10):
+  optim.zero_grad()
+  loss = model(x).sparse_categorical_crossentropy(y).backward()
+  optim.step()
+  print(i, loss.item())
 ```
 
 ## Accelerators
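The full examples/beautiful_mnist.py referenced in the new heading builds the model from `nn` layers rather than raw weight tensors. A minimal sketch in that spirit follows; the layer sizes, the fake batch, and the use of `nn.state.get_parameters` to collect the weights are illustrative assumptions, not lines from this diff:

```py
# sketch: the same training loop with nn.Linear layers instead of raw weight tensors
from tinygrad import Tensor, nn

class LinearNet:
  def __init__(self):
    self.l1 = nn.Linear(784, 128)
    self.l2 = nn.Linear(128, 10)
  def __call__(self, x: Tensor) -> Tensor:
    return self.l2(self.l1(x.flatten(1)).relu())

model = LinearNet()
# the Linear layers own their weights, so collect them via nn.state.get_parameters
opt = nn.optim.Adam(nn.state.get_parameters(model), lr=0.001)

x, y = Tensor.rand(4, 1, 28, 28), Tensor([2, 4, 3, 7])  # stand-in for a real MNIST batch

Tensor.training = True  # some tinygrad versions require training mode before optimizer.step()
for i in range(10):
  opt.zero_grad()
  loss = model(x).sparse_categorical_crossentropy(y).backward()
  opt.step()
  print(i, loss.item())
```

Compared with the README snippet, the only structural change is that the optimizer takes the collected layer parameters instead of the explicit `[model.l1, model.l2]` list.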
@@ -112,7 +110,7 @@ Documentation along with a quick start guide can be found in the [docs/](/docs)
 ### Quick example comparing to PyTorch
 
 ```py
-from tinygrad.tensor import Tensor
+from tinygrad import Tensor
 
 x = Tensor.eye(3, requires_grad=True)
 y = Tensor([[2.0,0,-2.0]], requires_grad=True)
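This hunk only shows the import change and the tensor setup; the rest of the comparison lies outside the diff context. For readers unfamiliar with it, the PyTorch half of such a comparison typically looks like the sketch below (the `matmul(...).sum()` step and the gradient prints are illustrative, not taken from this diff):

```py
# sketch of the PyTorch side of such a comparison: same tensors, a matmul, and backward()
import torch

x = torch.eye(3, requires_grad=True)
y = torch.tensor([[2.0, 0, -2.0]], requires_grad=True)
z = y.matmul(x).sum()
z.backward()
print(x.grad.numpy())  # dz/dx
print(y.grad.numpy())  # dz/dy
```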