Files
tinygrad/test/test_hip_rdna3.py
George Hotz 07df14aa0e HIP cleanups (#2843)
* move everything to code_for_op to reason about it

* loop the loopable parts

* it's not that unreadable

* these are loopable too

* nitpick

* tests p1 - replace these with the actual compiler running alu ops tests

* tests p2: compile test_dtype_alu in HIP!

+add to CI

* nobody liked test_renderer

* revert test_dtypes change

* isolated mockhip tests

* don't need the WHERE hack after #2782

+ruff

* bf16 is broken in HIP

job failed in: https://github.com/tinygrad/tinygrad/actions/runs/7232101987/job/19705951290?pr=2778#step:8:73

* picking this back up

* add compile tests for unary ops and binary ops

* MOD is only in ints

* CMPLT won't work after the dtypes PR is merged because it will always be bool

* test all combinations

* Update cstyle.py

* don't use vload

* no getenv

* set seed

---------

Co-authored-by: qazal <qazal.software@gmail.com>
Co-authored-by: qazal <77887910+Qazalin@users.noreply.github.com>
2023-12-18 21:09:32 -08:00

77 lines
2.4 KiB
Python

#!/usr/bin/env python
import unittest
import operator
from tinygrad import Tensor, Device, dtypes
from tinygrad.helpers import DEBUG, to_function_name
from tinygrad.codegen.linearizer import Linearizer
from tinygrad.renderer.cstyle import HIPRenderer
from examples.beautiful_mnist import Model as MNIST
from examples.hlb_cifar10 import SpeedyResNet
from hypothesis import given, strategies as st, settings
# hypothesis: disable the per-example deadline — HIP kernel compilation is slow
# enough to trip the default deadline and flake these tests
settings.register_profile("my_profile", deadline=None)
settings.load_profile("my_profile")
print(settings.default)  # NOTE(review): looks like a debug leftover — prints the active profile at import time; consider removing
@unittest.skipIf(Device.DEFAULT != "HIP", reason="testing HIP->rdna3 compilation needs HIP=1")
class TestHIPCompilationRDNA(unittest.TestCase):
  """End-to-end HIP compilation smoke tests: build a real model, run it, and
  force evaluation with .numpy() so the kernels actually compile and execute."""

  def test_compile_hip_mnist(self):
    model = MNIST()
    input = Tensor.rand(512,1,28,28)
    output = model(input)
    output.numpy()

  def test_compile_hip_speedyresnet(self):
    W = Tensor.rand(12,3,2,2)
    model = SpeedyResNet(W)
    input = Tensor.rand(512, 3, 32, 32)
    output = model(input)
    output.numpy()

  def test_compile_hip_speedyresnet_hf(self):
    # same model as above, but with float16 as the global default float dtype
    old_default_float = dtypes.default_float
    dtypes.default_float = dtypes.float16
    try:
      W = Tensor.rand(12,3,2,2)
      model = SpeedyResNet(W)
      input = Tensor.rand(512, 3, 32, 32)
      output = model(input)
      output.numpy()
    finally:
      # restore the global even if compilation/execution raises, otherwise a
      # failure here silently leaves every subsequent test running in half precision
      dtypes.default_float = old_default_float
def compile_ast_to_hip(out: Tensor):
  """Lower the lazy Tensor's last scheduled AST through the Linearizer, render it
  as HIP C-style source, and compile it (compile-only: the kernel is never run)."""
  from tinygrad.runtime.ops_hip import compile_hip
  kernel = Linearizer(out.lazydata.schedule()[-1].ast)
  kernel.hand_coded_optimizations()
  kernel.linearize()
  src = HIPRenderer(to_function_name(kernel.name), kernel.uops)[0]
  if DEBUG >= 4: print(src)
  compile_hip(src)
# ALU ops exercised by the compile tests below. Per the history above: MOD is
# excluded (int-only), and CMPLT is excluded because its result dtype becomes
# bool after the dtypes PR.
binary_operations = [operator.add, operator.sub, operator.mul]
unary_operations = [Tensor.exp, Tensor.log, operator.neg, Tensor.sin, Tensor.sqrt, Tensor.reciprocal]
# only float dtypes: bf16 is broken in HIP (see commit message above)
float_dtypes = [dtypes.float16, dtypes.float32]
@unittest.skipIf(Device.DEFAULT != "HIP", reason="testing HIP->rdna3 compilation needs HIP=1")
class TestHIPALUCompilation(unittest.TestCase):
  """Property tests: every sampled (op, float dtype) combination must compile to HIP."""

  @given(st.sampled_from(unary_operations), st.sampled_from(float_dtypes))
  def test_unary_ops(self, op, dtype):
    x = Tensor.randn(4,4, dtype=dtype)
    compile_ast_to_hip(op(x))

  @given(st.sampled_from(binary_operations), st.sampled_from(float_dtypes))
  def test_binary_ops(self, op, dtype):
    x = Tensor.randn(4,4, dtype=dtype)
    y = Tensor.randn(4,4, dtype=dtype)
    compile_ast_to_hip(op(x, y))
# standard script entry point: run all tests in this module
if __name__ == "__main__":
  unittest.main()