add Swish op (#13541)

* add Swish ONNX operator

* add Swish regression test

* remove trailing whitespace

* upgrade ONNX to 1.20, add excludes for unimplemented ops

* upgrade ONNX to 1.19, add Swish op

* upgrade ONNX to 1.19, TensorFlow to 2.18, add Swish op

* exclude attention_3d and attention_4d_gqa tests

* exclude attention fp16 tests

* exclude all attention tests

* retrigger CI

* retrigger CI - worker crash
This commit is contained in:
Douglas Nyberg
2025-12-08 12:41:18 -05:00
committed by GitHub
parent dd8a1a10d4
commit 947c6eefc3
4 changed files with 15 additions and 13 deletions

View File

@@ -73,7 +73,7 @@ testing_unit = ["tinygrad[testing_minimal]", "tqdm", "safetensors", "tabulate"]
testing = [
"tinygrad[testing_minimal]",
"pillow",
"onnx==1.18.0",
"onnx==1.19.0",
"onnx2torch",
"onnxruntime",
"opencv-python",

View File

@@ -170,6 +170,19 @@ backend_test.exclude('test_scan_*')
backend_test.exclude('test_split_to_sequence_*')
backend_test.exclude('test_ai_onnx_ml_tree_ensemble_*') # https://github.com/onnx/onnx/blob/main/onnx/reference/ops/aionnxml/op_tree_ensemble.py#L121
# TODO: not yet implemented
backend_test.exclude('test_tensorscatter_*')
backend_test.exclude('test_l1normalization_*')
backend_test.exclude('test_l2normalization_*')
backend_test.exclude('test_lpnormalization_*')
backend_test.exclude('test_einsum_scalar_cpu')
backend_test.exclude('test_mod_mixed_sign_float16_cpu')
backend_test.exclude('test_qlinearmatmul_2D_uint8_float16_cpu')
backend_test.exclude('test_qlinearmatmul_3D_uint8_float16_cpu')
backend_test.exclude('test_attention_3d_*')
backend_test.exclude('test_attention_4d_*')
# rest of the failing tests
backend_test.exclude('test_resize_tf_crop_and_resize_cpu') # tf_crop_and_resize not implemented
backend_test.exclude('test_resize_tf_crop_and_resize_axes_2_3_cpu') # tf_crop_and_resize not implemented

View File

@@ -58,18 +58,6 @@ class TestOnnxModel(unittest.TestCase):
print(cls, _LABELS[cls])
assert "car" in _LABELS[cls] or _LABELS[cls] == "convertible"
def test_pad_list_value(self):
from tinygrad.nn.onnx import onnx_ops
from tinygrad import Tensor
Pad = onnx_ops['Pad']
x = Tensor([1, 2, 3])
out = Pad(x, pads=[0, 1], value=[-float('inf')])
assert out.shape == (4,)
assert out.numpy()[-1] == -float('inf')
out2 = Pad(x, pads=[1, 0], constant_value=[5.0])
assert out2.shape == (4,)
assert out2.numpy()[0] == 5.0
@unittest.skipUnless(Device.DEFAULT == "METAL", "only run on METAL")
class TestHuggingFaceOnnxModels(unittest.TestCase):
@classmethod

View File

@@ -626,6 +626,7 @@ def get_onnx_ops() -> dict[str, types.FunctionType|dict[OpSetId, types.FunctionT
def ThresholdedRelu(X:Tensor, alpha:float=1.0): return (X > alpha).where(X, 0)
def LogSoftmax(x: Tensor, axis:int=-1): return x.log_softmax(axis)
def Binarizer(x:Tensor, threshold:float=0.0): return (x > threshold).float()
def Swish(x:Tensor, alpha:float=1.0): return x * (x * alpha).sigmoid()
# ***** Unary Ops (broadcasted) *****
def Add(x:Tensor,y:Tensor, broadcast=None, axis=None): return x + y