add Tensor.maximum in test_dtype_alu (#12025)

works except nan
Author: chenyu
Date: 2025-09-05 09:48:39 -04:00
Committed by: GitHub
Parent: 3e1c04bcdf
Commit: 9b6f1b86cb


@@ -21,16 +21,13 @@ dtypes_bool = (dtypes.bool,)
 binary_operations = [operator.add, operator.sub, operator.mul, operator.lt, operator.eq]
 integer_binary_operations = binary_operations + [(Tensor.bitwise_xor, np.bitwise_xor), (Tensor.bitwise_and, np.bitwise_and),
-                                                 (Tensor.bitwise_or, np.bitwise_or), operator.mod]
+                                                 (Tensor.bitwise_or, np.bitwise_or), (Tensor.maximum, np.maximum), operator.mod]
 unary_operations = [(Tensor.exp, np.exp), (Tensor.log, np.log), (Tensor.sin, np.sin),
                     (Tensor.sqrt, np.sqrt), (Tensor.reciprocal, np.reciprocal), (Tensor.cos, np.cos)]
 # TODO: enable this (this is a dtype issue)
 #binary_operations.append(operator.truediv)
-# TODO: (a+b)/2 in tensor.py's maximum can overflow. This requires a new implementation of maximum that can be backpropagated
-#binary_operations += [(Tensor.maximum, np.maximum)]
 # TODO: CI CUDA segfaults on sin, WEBGPU sin is not precise enough for large numbers
 if (getenv("MOCKGPU") and Device.DEFAULT in {"NV", "CUDA"}) or Device.DEFAULT == "WEBGPU":
   unary_operations.remove((Tensor.sin, np.sin))
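The TODO removed above refers to the midpoint formulation max(a, b) = (a+b)/2 + |a-b|/2: the intermediate sum a+b can overflow even when both inputs and the true maximum are representable. A minimal numpy sketch of that failure mode (illustrative only, not the exact tensor.py code):

import numpy as np

def midpoint_maximum(a, b):
  # the intermediate a + b overflows before the /2 can bring it back in range
  return (a + b) / 2 + np.abs(a - b) / 2

a = np.array([3e38], dtype=np.float32)  # near the float32 max (~3.4e38)
b = np.array([3e38], dtype=np.float32)
print(midpoint_maximum(a, b))  # [inf]: the sum overflowed
print(np.maximum(a, b))        # [3.e+38]: the correct result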
@@ -54,8 +51,8 @@ ht.bfloat16 = ht.uint16
 def universal_test(a, b, dtype, op):
   if not isinstance(op, tuple): op = (op, op)
   if op[0] == operator.mod and b == 0: return
-  # lt with nan is undefined in tinygrad
-  if op[0] == operator.lt and (math.isnan(a) or math.isnan(b)): return
+  # lt and max with nan is undefined in tinygrad
+  if op[0] in (operator.lt, Tensor.maximum) and (math.isnan(a) or math.isnan(b)): return
   ta, tb = Tensor([a], dtype=dtype), Tensor([b], dtype=dtype)
   tensor_value = (op[0](ta, tb)).numpy()
   numpy_value = op[1](ta.numpy(), tb.numpy())
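The new guard mirrors the existing lt one: maximum with a NaN operand has no defined result in tinygrad, while np.maximum always propagates NaN, so comparing the two on NaN inputs would fail spuriously. Plain Python's comparison-based max illustrates how arbitrary an unspecified NaN behavior can be:

import numpy as np

nan = float("nan")
print(np.maximum(1.0, nan))          # nan: np.maximum propagates NaN unconditionally
print(max(1.0, nan), max(nan, 1.0))  # 1.0 nan: both comparisons with NaN are False,
                                     # so builtin max just returns its first argument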
@@ -87,9 +84,9 @@ def universal_test_cast(a, in_dtype, dtype):
 def universal_test_midcast(a, b, c, op1, op2, d1:DType, d2:DType):
   if not isinstance(op1, tuple): op1 = (op1, op1)
   if not isinstance(op2, tuple): op2 = (op2, op2)
-  # lt with nan is undefined in tinygrad
-  if op1[0] == operator.lt and (math.isnan(a) or math.isnan(b)): return
-  if op2[0] == operator.lt and math.isnan(c): return
+  # lt and max with nan is undefined in tinygrad
+  if op1[0] in (operator.lt, Tensor.maximum) and (math.isnan(a) or math.isnan(b)): return
+  if op2[0] in (operator.lt, Tensor.maximum) and math.isnan(c): return
   at, bt, ct = Tensor([a], dtype=d1), Tensor([b], dtype=d1), Tensor([c], dtype=d2)
   an, bn, cn = np.array([a]).astype(_to_np_dtype(d1)), np.array([b]).astype(_to_np_dtype(d1)), np.array([c]).astype(_to_np_dtype(d2))
   tensor_value = op2[0](op1[0](at, bt).cast(d2), ct).numpy()
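End to end, the harness now exercises the new op pair like any other: evaluate in tinygrad, evaluate in numpy, compare. A self-contained sketch of that check for one fixed input pair (assuming a working tinygrad install; the real test draws many random inputs via hypothesis rather than one fixed pair):

import numpy as np
from tinygrad import Tensor, dtypes

ta, tb = Tensor([2], dtype=dtypes.int32), Tensor([3], dtype=dtypes.int32)
tensor_value = Tensor.maximum(ta, tb).numpy()
numpy_value = np.maximum(ta.numpy(), tb.numpy())
np.testing.assert_equal(tensor_value, numpy_value)  # values agree
assert tensor_value.dtype == numpy_value.dtype      # and so do the dtypes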