fuzz test transcend math (#5383)
* fuzz test transcend math

found something wrong with float64 sin reduction: at x = 39800.0, tinygrad's float64 sin disagrees with numpy (0.9279 vs 0.7429), while the float32 and float16 paths match.

```
from tinygrad import Tensor, dtypes
import numpy as np
print(Tensor([39800.0], dtype=dtypes.float64).sin().numpy())
print(Tensor([39800.0], dtype=dtypes.float32).sin().numpy())
print(Tensor([39800.0], dtype=dtypes.float16).sin().numpy())
print(np.sin(np.array([39800.0], dtype=np.float64)))
print(np.sin(np.array([39800.0], dtype=np.float32)))
print(np.sin(np.array([39800.0], dtype=np.float16)))
```

```
CLANG=1 python test.py
[0.92785633]
[0.7428573]
[-0.7705]
[0.74285722]
[0.7428572]
[-0.7705]
```

* fix test
* abs
* skip
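For context on the failure: sin(x) for large x is computed by first folding x into a small interval around zero (argument reduction), and any error in that folding is magnified in the result. The sketch below is illustrative plain Python, not tinygrad's reduction code (the diff's comments reference a payne_hanek_reduction path); it shows how a naive fold loses accuracy as |x| grows, which is why the test added below skips float64 sin inputs with |x| > 2**32 for now.

```python
import math

# Naive argument reduction: fold x into [-pi, pi] by subtracting the nearest
# multiple of 2*pi. Because 2*pi is itself rounded to a float64, the reduced
# argument absorbs an absolute error of roughly (x / 2pi) * ulp(2pi), which
# grows linearly with |x| and eventually dominates the result.
def sin_naive(x: float) -> float:
  two_pi = 2.0 * math.pi
  return math.sin(x - round(x / two_pi) * two_pi)

for x in (39800.0, 2.0 ** 40, 1e16):
  # math.sin defers to libm, which performs the reduction in extended precision
  print(f"x={x:.1e}  naive={sin_naive(x):+.8f}  libm={math.sin(x):+.8f}")
```

At 39800.0 the naive fold still agrees with libm to many digits; by 2**40 the error is visible, and by 1e16 the naive result is meaningless.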
```diff
@@ -1,7 +1,45 @@
 import unittest
-from tinygrad.tensor import Tensor
-from tinygrad.helpers import Context
+from tinygrad import Tensor, Device, dtypes
+from tinygrad.tensor import _to_np_dtype
+from tinygrad.helpers import Context, getenv
 from test.test_schedule import check_schedule
+from test.test_dtype_alu import ht
+from test.helpers import is_dtype_supported
+import numpy as np
+from hypothesis import given, settings, strategies as strat
+
+settings.register_profile("my_profile", max_examples=200, deadline=None, derandomize=getenv("DERANDOMIZE_CI", False))
+settings.load_profile("my_profile")
+
+class TestTranscendentalMath(unittest.TestCase):
+  @unittest.skipUnless(is_dtype_supported(dtypes.float64, Device.DEFAULT), f"no float64 on {Device.DEFAULT}")
+  @unittest.skipIf(getenv("CUDACPU") or (getenv("MOCKGPU") and Device.DEFAULT == "NV"), "crashed")
+  @given(ht.float64, strat.sampled_from([(Tensor.exp, np.exp), (Tensor.log, np.log), (Tensor.sin, np.sin)]))
+  def test_float64(self, x, op):
+    if op[0] == Tensor.sin:
+      # TODO: reduction does not work
+      if abs(x) > 2 ** 32: return
+
+    with Context(TRANSCENDENTAL=2):
+      np.testing.assert_allclose(op[0](Tensor([x], dtype=dtypes.float64)).numpy(),
+                                 op[1](np.array([x], dtype=_to_np_dtype(dtypes.float64))),
+                                 atol=1e-5)
+
+  @unittest.skipIf(getenv("CUDACPU") or (getenv("MOCKGPU") and Device.DEFAULT == "NV"), "crashed")
+  @given(ht.float32, strat.sampled_from([(Tensor.exp, np.exp), (Tensor.log, np.log), (Tensor.sin, np.sin)]))
+  def test_float32(self, x, op):
+    with Context(TRANSCENDENTAL=2):
+      np.testing.assert_allclose(op[0](Tensor([x], dtype=dtypes.float32)).numpy(),
+                                 op[1](np.array([x], dtype=_to_np_dtype(dtypes.float32))),
+                                 atol=1e-5)
+
+  @unittest.skipUnless(is_dtype_supported(dtypes.float16, Device.DEFAULT), f"no float16 on {Device.DEFAULT}")
+  @given(ht.float16, strat.sampled_from([(Tensor.exp, np.exp), (Tensor.log, np.log), (Tensor.sin, np.sin)]))
+  def test_float16(self, x, op):
+    with Context(TRANSCENDENTAL=2):
+      np.testing.assert_allclose(op[0](Tensor([x], dtype=dtypes.float16)).numpy(),
+                                 op[1](np.array([x], dtype=_to_np_dtype(dtypes.float16))),
+                                 atol=1e-2, rtol=2e-3)
+
 class TestTranscendentalSchedule(unittest.TestCase):
   # w/ payne_hanek_reduction (fp32)
```
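A note for anyone running this locally: Context(TRANSCENDENTAL=2) opts these tests into tinygrad's own transcendental approximations rather than any backend-provided sin/exp/log, which is what the fuzzing exercises. A plausible invocation, assuming pytest and hypothesis are installed and the file lands at test/test_transcendental.py (a hypothetical path inferred from the class names):

```
# DERANDOMIZE_CI=1 feeds the Hypothesis profile registered at the top of the
# file, making the sampled inputs deterministic across runs
DERANDOMIZE_CI=1 python -m pytest test/test_transcendental.py -v
```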