hypothesis.st -> hypothesis.strat (#3179)

leave `st` for shapetracker
chenyu
2024-01-19 11:55:26 -05:00
committed by GitHub
parent c4faedebf3
commit b2571d586c
3 changed files with 55 additions and 55 deletions
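
The change itself is a mechanical rename: every use of the hypothesis `strategies` alias `st` in these tests becomes `strat`, freeing the short name `st` for ShapeTracker. A minimal sketch of the convention after this commit (a hypothetical test, not taken from the diff):

```python
# Minimal sketch of the new alias convention (hypothetical test, not from this diff):
# hypothesis `strategies` is imported as `strat`, so `st` stays free for ShapeTracker.
from hypothesis import given, strategies as strat

@given(strat.integers(min_value=1, max_value=8))
def test_vec_width_is_positive(amt):
    assert 1 <= amt <= 8
```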

View File

@@ -7,7 +7,7 @@ from tinygrad.dtype import DType, DTYPES_DICT, ImageDType, PtrDType, least_upper
from tinygrad import Device
from tinygrad.tensor import Tensor, dtypes
from typing import Any, List
-from hypothesis import given, settings, strategies as st
+from hypothesis import given, settings, strategies as strat
core_dtypes = list(DTYPES_DICT.values())
floats = [dt for dt in core_dtypes if dtypes.is_float(dt)]
@@ -222,20 +222,20 @@ class TestHelpers(unittest.TestCase):
uints = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
floats = (dtypes.float16, dtypes.float32, dtypes.float64)
-@given(st.sampled_from(signed_ints+uints), st.integers(min_value=1, max_value=8))
+@given(strat.sampled_from(signed_ints+uints), strat.integers(min_value=1, max_value=8))
def test_is_int(self, dtype, amt):
assert dtypes.is_int(dtype.vec(amt) if amt > 1 else dtype)
assert not dtypes.is_float(dtype.vec(amt) if amt > 1 else dtype)
-@given(st.sampled_from(uints), st.integers(min_value=1, max_value=8))
+@given(strat.sampled_from(uints), strat.integers(min_value=1, max_value=8))
def test_is_unsigned_uints(self, dtype, amt):
assert dtypes.is_unsigned(dtype.vec(amt) if amt > 1 else dtype)
-@given(st.sampled_from(signed_ints), st.integers(min_value=1, max_value=8))
+@given(strat.sampled_from(signed_ints), strat.integers(min_value=1, max_value=8))
def test_is_unsigned_signed_ints(self, dtype, amt):
assert not dtypes.is_unsigned(dtype.vec(amt) if amt > 1 else dtype)
-@given(st.sampled_from(floats), st.integers(min_value=1, max_value=8))
+@given(strat.sampled_from(floats), strat.integers(min_value=1, max_value=8))
def test_is_float(self, dtype, amt):
assert dtypes.is_float(dtype.vec(amt) if amt > 1 else dtype)
assert not dtypes.is_int(dtype.vec(amt) if amt > 1 else dtype)
@@ -244,7 +244,7 @@ class TestHelpers(unittest.TestCase):
def test_bf16_is_float(self):
assert dtypes.is_float(dtypes.bfloat16)
-@given(st.sampled_from([d for d in DTYPES_DICT.values() if dtypes.is_float(d) or dtypes.is_int(d)]), st.integers(min_value=2, max_value=8))
+@given(strat.sampled_from([d for d in DTYPES_DICT.values() if dtypes.is_float(d) or dtypes.is_int(d)]), strat.integers(min_value=2, max_value=8))
def test_scalar(self, dtype, amt):
assert dtype.vec(amt).scalar() == dtype
@@ -266,7 +266,7 @@ class TestTypeSpec(unittest.TestCase):
dtypes.default_float = dtypes.float64
assert dtypes.default_float == dtypes.float64
-@given(st.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]), st.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
+@given(strat.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]), strat.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
def test_creation(self, default_int, default_float):
dtypes.default_int, dtypes.default_float = default_int, default_float
assert Tensor(True).dtype == dtypes.bool
@@ -284,7 +284,7 @@ class TestTypeSpec(unittest.TestCase):
assert Tensor.eye(3, dtype=dtypes.int64).dtype == dtypes.int64
-@given(st.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]), st.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
+@given(strat.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]), strat.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
def test_full(self, default_int, default_float):
dtypes.default_int, dtypes.default_float = default_int, default_float
@@ -311,7 +311,7 @@ class TestTypeSpec(unittest.TestCase):
assert Tensor.ones([2,3,0]).sum(2).dtype == dtypes.default_float
# assert Tensor.ones([2,3,0], dtype=dtypes.int).sum(2).dtype == dtypes.int # requires reduceop acc fix
-@given(st.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]), st.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
+@given(strat.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]), strat.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
def test_arange(self, default_int, default_float):
dtypes.default_int, dtypes.default_float = default_int, default_float
@@ -324,12 +324,12 @@ class TestTypeSpec(unittest.TestCase):
assert Tensor.arange(3, 8.5, 3).dtype == dtypes.default_float
@unittest.skipIf(Device.DEFAULT == "WEBGPU", "WEBGPU doesn't follow the bool ops spec")
-@given(st.sampled_from(core_dtypes), st.sampled_from([operator.gt, operator.ge, operator.le, operator.lt, operator.eq, operator.ne]))
+@given(strat.sampled_from(core_dtypes), strat.sampled_from([operator.gt, operator.ge, operator.le, operator.lt, operator.eq, operator.ne]))
def test_bool_ops(self, dtype, op):
assert op(Tensor.rand(4, 4, dtype=dtype), Tensor.rand(4, 4, dtype=dtype)).dtype == dtypes.bool
-@given(st.sampled_from(core_dtypes),
-st.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]), st.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
+@given(strat.sampled_from(core_dtypes),
+strat.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]), strat.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
def test_functions_return_index(self, dtype, default_int, default_float):
dtypes.default_int, dtypes.default_float = default_int, default_float
assert Tensor([0, 1], dtype=dtype).argmax().dtype == dtypes.default_int
@@ -337,13 +337,13 @@ class TestTypeSpec(unittest.TestCase):
assert Tensor([0, 1], dtype=dtype).multinomial().dtype == dtypes.default_int
class TestTypePromotion(unittest.TestCase):
-@given(st.sampled_from(core_dtypes))
+@given(strat.sampled_from(core_dtypes))
def test_self_promo_to_self(self, dtype):
assert least_upper_dtype(dtype) == dtype
assert least_upper_dtype(dtype, dtype) == dtype
assert least_upper_dtype(dtype, dtype, dtype) == dtype
-@given(st.sampled_from(core_dtypes), st.sampled_from(core_dtypes))
+@given(strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes))
def test_promo_resulted_higher_than_inputs(self, dtype1, dtype2):
result = least_upper_dtype(dtype1, dtype2)
assert result >= dtype1 and result >= dtype2
@@ -366,7 +366,7 @@ class TestTypePromotion(unittest.TestCase):
assert least_upper_dtype(dtypes.float16, dtypes.int64) == dtypes.float16
assert least_upper_dtype(dtypes.float16, dtypes.uint64) == dtypes.float16
-@given(st.sampled_from(floats))
+@given(strat.sampled_from(floats))
def test_float_to_float(self, dt):
assert least_upper_float(dt) == dt
@@ -376,7 +376,7 @@ class TestAutoCastType(unittest.TestCase):
def tearDown(self):
dtypes.default_int, dtypes.default_float = self.old_default_int, self.old_default_float
-@given(st.sampled_from([d for d in DTYPES_DICT.values() if dtypes.is_int(d) and is_dtype_supported(d)]))
+@given(strat.sampled_from([d for d in DTYPES_DICT.values() if dtypes.is_int(d) and is_dtype_supported(d)]))
@settings(deadline=None)
def test_int_to_float_unary_func(self, dtype):
for func in [
@@ -395,7 +395,7 @@ class TestAutoCastType(unittest.TestCase):
# float16 can have larger precision errors
np.testing.assert_allclose(func(Tensor(a, dtype=dtype)).numpy(), func(torch.tensor(a)), rtol=1e-3, atol=1e-3)
-@given(st.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
+@given(strat.sampled_from([dtypes.float16,dtypes.float32,dtypes.float64]))
def test_broadcast_float(self, default_float):
dtypes.default_float = default_float
assert (Tensor.rand(4, 4, dtype=dtypes.bool) + 2.3).dtype == dtypes.default_float
@@ -407,7 +407,7 @@ class TestAutoCastType(unittest.TestCase):
assert (Tensor.rand(4, 4, dtype=dtypes.float32) + 2.3).dtype == dtypes.float32
assert (Tensor.rand(4, 4, dtype=dtypes.float64) + 2.3).dtype == dtypes.float64
-@given(st.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]))
+@given(strat.sampled_from([dtypes.int8,dtypes.int16,dtypes.int32,dtypes.int64]))
def test_broadcast_int(self, default_int):
dtypes.default_int = default_int
assert (Tensor.rand(4, 4, dtype=dtypes.bool) + 2).dtype == dtypes.default_int
@@ -460,7 +460,7 @@ class TestAutoCastType(unittest.TestCase):
assert (Tensor([0, 1], dtype=dtypes.float32)).cumsum(0).dtype == dtypes.float32
assert (Tensor([0, 1], dtype=dtypes.float64)).cumsum(0).dtype == dtypes.float64
-@given(st.sampled_from(core_dtypes), st.sampled_from(core_dtypes))
+@given(strat.sampled_from(core_dtypes), strat.sampled_from(core_dtypes))
@settings(deadline=None)
def test_matmul(self, dt1, dt2):
assert (Tensor([0, 1], dtype=dt1) @ Tensor([0, 1], dtype=dt2)).dtype == least_upper_dtype(dt1, dt2)

View File

@@ -3,7 +3,7 @@ import unittest
from tinygrad import Tensor, dtypes, Device
import operator
import numpy as np
-from hypothesis import given, strategies as st, settings
+from hypothesis import given, strategies as strat, settings
from tinygrad.dtype import DType
from tinygrad.helpers import CI, getenv, OSX
from tinygrad.ops import UnaryOps, get_lazyop_info
@@ -36,18 +36,18 @@ unary_operations = [(Tensor.exp, np.exp), (Tensor.log, np.log), operator.neg, (T
if getenv("CUDACPU"): unary_operations.remove((Tensor.sin, np.sin))
class ht:
-float64 = st.floats(width=64, allow_subnormal=False)
-float32 = st.floats(width=32, allow_subnormal=False)
-float16 = st.floats(width=16, allow_subnormal=False)
-uint8 = st.integers(0, 255)
-uint16 = st.integers(0, 65535)
-uint32 = st.integers(0, 2**32-1)
-uint64 = st.integers(0, 2**64-1)
-int8 = st.integers(-128, 127)
-int16 = st.integers(-32768, 32767)
-int32 = st.integers(-2147483648, 2147483647)
-int64 = st.integers(-9223372036854775808, 9223372036854775807)
-bool = st.booleans()
+float64 = strat.floats(width=64, allow_subnormal=False)
+float32 = strat.floats(width=32, allow_subnormal=False)
+float16 = strat.floats(width=16, allow_subnormal=False)
+uint8 = strat.integers(0, 255)
+uint16 = strat.integers(0, 65535)
+uint32 = strat.integers(0, 2**32-1)
+uint64 = strat.integers(0, 2**64-1)
+int8 = strat.integers(-128, 127)
+int16 = strat.integers(-32768, 32767)
+int32 = strat.integers(-2147483648, 2147483647)
+int64 = strat.integers(-9223372036854775808, 9223372036854775807)
+bool = strat.booleans()
def universal_test(a, b, dtype, op):
if not isinstance(op, tuple): op = (op, op)
@@ -88,69 +88,69 @@ def universal_test_midcast(a, b, c, op1, op2, d1:DType, d2:DType):
class TestDTypeALU(unittest.TestCase):
@unittest.skipIf(OSX and Device.DEFAULT in {"GPU", "METAL"}, "no float64 on OSX GPU")
-@given(ht.float64, ht.float64, st.sampled_from(binary_operations))
+@given(ht.float64, ht.float64, strat.sampled_from(binary_operations))
def test_float64(self, a, b, op): universal_test(a, b, dtypes.float64, op)
-@given(ht.float32, ht.float32, st.sampled_from(binary_operations))
+@given(ht.float32, ht.float32, strat.sampled_from(binary_operations))
def test_float32(self, a, b, op): universal_test(a, b, dtypes.float32, op)
# GPU requires cl_khr_fp16
# for LLVM, it segfaults because it can't link to the casting function
# CUDACPU architecture is sm_35 but we need at least sm_70 to run fp16 ALUs
@unittest.skipIf((Device.DEFAULT in ["GPU", "LLVM"] and CI) or getenv("CUDACPU"), "")
-@given(ht.float16, ht.float16, st.sampled_from(binary_operations))
+@given(ht.float16, ht.float16, strat.sampled_from(binary_operations))
def test_float16(self, a, b, op): universal_test(a, b, dtypes.float16, op)
-@given(ht.float32, st.sampled_from(unary_operations))
+@given(ht.float32, strat.sampled_from(unary_operations))
def test_float32_unary(self, a, op): universal_test_unary(a, dtypes.float32, op)
@unittest.skipIf((Device.DEFAULT in ["GPU", "LLVM"] and CI) or getenv("CUDACPU"), "")
-@given(ht.float16, st.sampled_from(unary_operations))
+@given(ht.float16, strat.sampled_from(unary_operations))
def test_float16_unary(self, a, op): universal_test_unary(a, dtypes.float16, op)
-@given(ht.uint8, ht.uint8, st.sampled_from(integer_binary_operations))
+@given(ht.uint8, ht.uint8, strat.sampled_from(integer_binary_operations))
def test_uint8(self, a, b, op): universal_test(a, b, dtypes.uint8, op)
@unittest.skipIf(Device.DEFAULT == "TORCH", "no uint16 in torch")
-@given(ht.uint16, ht.uint16, st.sampled_from(integer_binary_operations))
+@given(ht.uint16, ht.uint16, strat.sampled_from(integer_binary_operations))
def test_uint16(self, a, b, op): universal_test(a, b, dtypes.uint16, op)
@unittest.skipIf(Device.DEFAULT == "TORCH", "no uint32 in torch")
-@given(ht.uint32, ht.uint32, st.sampled_from(integer_binary_operations))
+@given(ht.uint32, ht.uint32, strat.sampled_from(integer_binary_operations))
def test_uint32(self, a, b, op): universal_test(a, b, dtypes.uint32, op)
@unittest.skipIf(Device.DEFAULT == "TORCH", "no uint64 in torch")
-@given(ht.uint64, ht.uint64, st.sampled_from(integer_binary_operations))
+@given(ht.uint64, ht.uint64, strat.sampled_from(integer_binary_operations))
def test_uint64(self, a, b, op): universal_test(a, b, dtypes.uint64, op)
-@given(ht.int8, ht.int8, st.sampled_from(integer_binary_operations))
+@given(ht.int8, ht.int8, strat.sampled_from(integer_binary_operations))
def test_int8(self, a, b, op): universal_test(a, b, dtypes.int8, op)
-@given(ht.int16, ht.int16, st.sampled_from(integer_binary_operations))
+@given(ht.int16, ht.int16, strat.sampled_from(integer_binary_operations))
def test_int16(self, a, b, op): universal_test(a, b, dtypes.int16, op)
-@given(ht.int32, ht.int32, st.sampled_from(integer_binary_operations))
+@given(ht.int32, ht.int32, strat.sampled_from(integer_binary_operations))
def test_int32(self, a, b, op): universal_test(a, b, dtypes.int32, op)
-@given(ht.int64, ht.int64, st.sampled_from(integer_binary_operations))
+@given(ht.int64, ht.int64, strat.sampled_from(integer_binary_operations))
def test_int64(self, a, b, op): universal_test(a, b, dtypes.int64, op)
-@given(ht.bool, ht.bool, st.sampled_from(((operator.add, operator.add), (operator.mul, operator.mul))))
+@given(ht.bool, ht.bool, strat.sampled_from(((operator.add, operator.add), (operator.mul, operator.mul))))
def test_bool(self, a, b, op): universal_test(a, b, dtypes.bool, op)
-@given(ht.int32, ht.int32, ht.float32, st.sampled_from(integer_binary_operations), st.sampled_from(binary_operations))
+@given(ht.int32, ht.int32, ht.float32, strat.sampled_from(integer_binary_operations), strat.sampled_from(binary_operations))
def test_int32_midcast_float(self, a, b, c, op1, op2): universal_test_midcast(a, b, c, op1, op2, dtypes.int32, dtypes.float32)
# Metal and CUDACPU behave differently than numpy in CI for overflows
-@given(st.floats(width=32, min_value=0, max_value=10.0) if CI and (Device.DEFAULT == "METAL" or getenv("CUDACPU")) else ht.float32,
-st.floats(width=32, min_value=0, max_value=10.0) if CI and (Device.DEFAULT == "METAL" or getenv("CUDACPU")) else ht.float32,
-ht.int32, st.sampled_from(binary_operations), st.sampled_from(integer_binary_operations))
+@given(strat.floats(width=32, min_value=0, max_value=10.0) if CI and (Device.DEFAULT == "METAL" or getenv("CUDACPU")) else ht.float32,
+strat.floats(width=32, min_value=0, max_value=10.0) if CI and (Device.DEFAULT == "METAL" or getenv("CUDACPU")) else ht.float32,
+ht.int32, strat.sampled_from(binary_operations), strat.sampled_from(integer_binary_operations))
def test_float_midcast_int32(self, a, b, c, op1, op2): universal_test_midcast(a, b, c, op1, op2, dtypes.float32, dtypes.int32)
-@given(ht.float32, st.sampled_from(dtypes_float+dtypes_int+dtypes_bool))
+@given(ht.float32, strat.sampled_from(dtypes_float+dtypes_int+dtypes_bool))
def test_float_cast(self, a, dtype): universal_test_cast(a, dtypes.float32, dtype)
-@given(ht.int32, st.sampled_from(dtypes_float+dtypes_int+dtypes_bool))
+@given(ht.int32, strat.sampled_from(dtypes_float+dtypes_int+dtypes_bool))
def test_int32_cast(self, a, dtype): universal_test_cast(a, dtypes.int32, dtype)
if __name__ == '__main__':

View File

@@ -8,7 +8,7 @@ from tinygrad.renderer.cstyle import HIPRenderer
from examples.beautiful_mnist import Model as MNIST
from examples.hlb_cifar10 import SpeedyResNet
-from hypothesis import given, strategies as st, settings
+from hypothesis import given, strategies as strat, settings
settings.register_profile("my_profile", deadline=None)
settings.load_profile("my_profile")
print(settings.default)
@@ -59,13 +59,13 @@ float_dtypes = [dtypes.float16, dtypes.float32]
@unittest.skipIf(Device.DEFAULT != "HIP", reason="testing HIP->rdna3 compilation needs HIP=1")
class TestHIPALUCompilation(unittest.TestCase):
-@given(st.sampled_from(unary_operations), st.sampled_from(float_dtypes))
+@given(strat.sampled_from(unary_operations), strat.sampled_from(float_dtypes))
def test_unary_ops(self, op, dtype):
a = Tensor.randn(4,4, dtype=dtype)
out = op(a)
compile_ast_to_hip(out)
-@given(st.sampled_from(binary_operations), st.sampled_from(float_dtypes))
+@given(strat.sampled_from(binary_operations), strat.sampled_from(float_dtypes))
def test_binary_ops(self, op, dtype):
a = Tensor.randn(4,4, dtype=dtype)
b = Tensor.randn(4,4, dtype=dtype)