diff --git a/test/test_transcendental.py b/test/test_transcendental.py
index 785c8ae6a9..ef4b2a7d16 100644
--- a/test/test_transcendental.py
+++ b/test/test_transcendental.py
@@ -1,7 +1,45 @@
 import unittest
-from tinygrad.tensor import Tensor
-from tinygrad.helpers import Context
+from tinygrad import Tensor, Device, dtypes
+from tinygrad.tensor import _to_np_dtype
+from tinygrad.helpers import Context, getenv
 from test.test_schedule import check_schedule
+from test.test_dtype_alu import ht
+from test.helpers import is_dtype_supported
+import numpy as np
+from hypothesis import given, settings, strategies as strat
+
+settings.register_profile("my_profile", max_examples=200, deadline=None, derandomize=getenv("DERANDOMIZE_CI", False))
+settings.load_profile("my_profile")
+
+class TestTranscendentalMath(unittest.TestCase):
+  @unittest.skipUnless(is_dtype_supported(dtypes.float64, Device.DEFAULT), f"no float64 on {Device.DEFAULT}")
+  @unittest.skipIf(getenv("CUDACPU") or (getenv("MOCKGPU") and Device.DEFAULT == "NV"), "crashed")
+  @given(ht.float64, strat.sampled_from([(Tensor.exp, np.exp), (Tensor.log, np.log), (Tensor.sin, np.sin)]))
+  def test_float64(self, x, op):
+    if op[0] == Tensor.sin:
+      # TODO: reduction does not work
+      if abs(x) > 2 ** 32: return
+
+    with Context(TRANSCENDENTAL=2):
+      np.testing.assert_allclose(op[0](Tensor([x], dtype=dtypes.float64)).numpy(),
+                                  op[1](np.array([x], dtype=_to_np_dtype(dtypes.float64))),
+                                  atol=1e-5)
+
+  @unittest.skipIf(getenv("CUDACPU") or (getenv("MOCKGPU") and Device.DEFAULT == "NV"), "crashed")
+  @given(ht.float32, strat.sampled_from([(Tensor.exp, np.exp), (Tensor.log, np.log), (Tensor.sin, np.sin)]))
+  def test_float32(self, x, op):
+    with Context(TRANSCENDENTAL=2):
+      np.testing.assert_allclose(op[0](Tensor([x], dtype=dtypes.float32)).numpy(),
+                                  op[1](np.array([x], dtype=_to_np_dtype(dtypes.float32))),
+                                  atol=1e-5)
+
+  @unittest.skipUnless(is_dtype_supported(dtypes.float16, Device.DEFAULT), f"no float16 on {Device.DEFAULT}")
+  @given(ht.float16, strat.sampled_from([(Tensor.exp, np.exp), (Tensor.log, np.log), (Tensor.sin, np.sin)]))
+  def test_float16(self, x, op):
+    with Context(TRANSCENDENTAL=2):
+      np.testing.assert_allclose(op[0](Tensor([x], dtype=dtypes.float16)).numpy(),
+                                  op[1](np.array([x], dtype=_to_np_dtype(dtypes.float16))),
+                                  atol=1e-2, rtol=2e-3)
 
 class TestTranscendentalSchedule(unittest.TestCase):
   # w/ payne_hanek_reduction (fp32)
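
For reference, a minimal standalone sketch of the pattern these property tests follow, assuming a tinygrad checkout where TRANSCENDENTAL is the context variable exercised above (the literal input 0.5 here is an arbitrary illustrative value, not from the diff):

  import numpy as np
  from tinygrad import Tensor, dtypes
  from tinygrad.helpers import Context

  # force the transcendental lowering for a single exp() call and compare
  # against NumPy's reference result, mirroring test_float32 above
  with Context(TRANSCENDENTAL=2):
    out = Tensor([0.5], dtype=dtypes.float32).exp().numpy()
  np.testing.assert_allclose(out, np.exp(np.array([0.5], dtype=np.float32)), atol=1e-5)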