diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 2a811fd29f..5ea0fe771d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -135,6 +135,8 @@ jobs:
         python docs/abstractions2.py
     - name: Test Quickstart
       run: awk '/```python/{flag=1;next}/```/{flag=0}flag' docs/quickstart.md > quickstart.py && PYTHONPATH=. python quickstart.py
+    - name: Test README
+      run: awk '/```python/{flag=1;next}/```/{flag=0}flag' README.md > README.py && PYTHONPATH=. python README.py
     - name: Fuzz Test symbolic
       run: python test/external/fuzz_symbolic.py
     - name: Fuzz Test shapetracker
diff --git a/README.md b/README.md
index 0283ee8fa4..ad5021c293 100644
--- a/README.md
+++ b/README.md
@@ -51,7 +51,7 @@ And we can change `DEBUG` to `4` to see the generated code.
 As it turns out, 90% of what you need for neural networks are a decent autograd/tensor library.
 Throw in an optimizer, a data loader, and some compute, and you have all you need.
 
-```py
+```python
 from tinygrad import Tensor, nn
 
 class LinearNet:
@@ -66,11 +66,12 @@ optim = nn.optim.Adam([model.l1, model.l2], lr=0.001)
 
 x, y = Tensor.rand(4, 1, 28, 28), Tensor([2,4,3,7])  # replace with real mnist dataloader
 
-for i in range(10):
-  optim.zero_grad()
-  loss = model(x).sparse_categorical_crossentropy(y).backward()
-  optim.step()
-  print(i, loss.item())
+with Tensor.train():
+  for i in range(10):
+    optim.zero_grad()
+    loss = model(x).sparse_categorical_crossentropy(y).backward()
+    optim.step()
+    print(i, loss.item())
 ```
 
 See [examples/beautiful_mnist.py](examples/beautiful_mnist.py) for the full version that gets 98% in ~5 seconds
@@ -113,7 +114,7 @@ Documentation along with a quick start guide can be found on the [docs website](
 
 ### Quick example comparing to PyTorch
 
-```py
+```python
 from tinygrad import Tensor
 
 x = Tensor.eye(3, requires_grad=True)
@@ -126,7 +127,7 @@ print(y.grad.numpy())  # dz/dy
 ```
 
 The same thing but in PyTorch:
-```py
+```python
 import torch
 
 x = torch.eye(3, requires_grad=True)