Files
tinygrad/test/test_conv_shapetracker.py
George Hotz bfcec234a2 Refactor ASTs (#622)
* ugh worst branch name

* compiler refactor continues

* scc -> cloc

* buf -> _buf

* finish _buf, and program -> runtime

* gpu is still working, clang isn't

* clang in new style

* ops_metal

* something broke it

* improve metal

* clean up tons of cl crap

* hack fix sync

* cleaner gpu

* gpu metal clang

* cleanups

* minor refactor

* GPUCodegen

* fix up LLVM

* blind CUDA refactor

* codegen / runtime

* keep ops naming

* linter passes

* woah, llvm was allocing 4x what it needed to

* bugfixes

* fix openpilot compiler

* fix compile_efficientnet

* method cache should fix tests

* deal with duped functions
2023-03-01 18:57:29 -08:00

25 lines
775 B
Python

#!/usr/bin/env python
import unittest
from tinygrad.tensor import Tensor, Device
from tinygrad.nn import Conv2d
from tinygrad.ops import GlobalCounters
@unittest.skipUnless(Device.DEFAULT == "GPU", "Only GPU supports cache")
class TestConvShapetracker(unittest.TestCase):
  def test_conv_3x3_one_view(self):
    """A 3x3 Conv2d forward should compile to exactly one kernel, and every
    buffer argument of that kernel should have a single-view ShapeTracker
    (i.e. the conv indexing was fused into one view, not a chain of them)."""
    inp = Tensor.randn(1, 16, 10, 10).realize()
    conv = Conv2d(16, 32, (3, 3))
    # Warm-up run so the method cache is populated before we start recording.
    conv(inp).realize()
    GlobalCounters.cache = []  # start capturing (prg, args) kernel launches
    conv(inp).realize()
    test = GlobalCounters.cache
    GlobalCounters.cache = None  # stop capturing
    # Use unittest assertions instead of bare `assert`: bare asserts are
    # stripped under `python -O`, which would silently disable this test.
    self.assertEqual(len(test), 1, f"conv should only have one kernel {[x[0].name for x in test]}")
    print(test[0][0].prg)
    for arg in test[0][1]:
      print(arg.st)
      self.assertEqual(len(arg.st.views), 1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()