adjust cuda ci test targets (#9014)

Author: chenyu
Date: 2025-02-10 15:29:59 -05:00
Committed by: GitHub
Parent: dfc9d6827f
Commit: 6c39aa4a6b
3 changed files with 4 additions and 3 deletions

File 1 of 3:

@@ -485,7 +485,7 @@ jobs:
      run: python -m pytest -n=auto test/ --ignore=test/models --ignore=test/unit --durations=20
    - name: Run pytest (cuda)
      if: matrix.backend=='ptx'||matrix.backend=='triton'||matrix.backend=='nv'
-      run: python -m pytest -n=auto test/ -k 'not (half or test_efficientnet_safetensors)' --ignore=test/models --ignore=test/unit --ignore test/test_gc.py --durations=20
+      run: python -m pytest -n=auto test/ --ignore=test/models --ignore=test/unit --ignore test/test_gc.py --durations=20
    - name: Run pytest (amd)
      if: matrix.backend=='amd'
      run: python -m pytest -n=auto test/test_ops.py test/test_dtype.py test/test_dtype_alu.py test/test_linearizer.py test/test_randomness.py test/imported/test_indexing.py test/test_hcq.py test/external/external_test_am.py --durations=20
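For reference, pytest's -k option deselects tests by evaluating a boolean keyword expression over their ids; below is a rough, simplified sketch of how the removed expression filters test names. The helper and the example ids are hypothetical, and real pytest also matches class names, markers, and parameters.

# Simplified, hypothetical model of pytest -k matching; not pytest's actual implementation.
def selected(test_id: str) -> bool:
  tid = test_id.lower()
  # -k 'not (half or test_efficientnet_safetensors)' keeps a test only if neither
  # keyword appears anywhere in its id.
  return not ("half" in tid or "test_efficientnet_safetensors" in tid)

# hypothetical test ids, purely for illustration
assert selected("test/test_ops.py::TestOps::test_add")
assert not selected("test/test_dtype.py::TestHalfDtype::test_half_add")
assert not selected("test/test_nn.py::test_efficientnet_safetensors")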

File 2 of 3:

@@ -38,7 +38,7 @@ def apply(tor, ten, tor_fn, ten_fn=None):
  except: ten, ok = None, not ok  # noqa: E722
  return tor, ten, ok

-@unittest.skipIf(CI and Device.DEFAULT == "CLANG", "slow")
+@unittest.skipIf(CI and Device.DEFAULT in ("CLANG", "NV"), "slow")
class TestShapeOps(unittest.TestCase):
  @settings.get_profile(__file__)
  @given(st_shape(), st_int32, st.one_of(st_int32, st.lists(st_int32)))
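A class-level skipIf like the one above is evaluated once, when the module is imported. A minimal self-contained sketch of the same pattern follows; the CI and BACKEND environment variables are hypothetical stand-ins for tinygrad's CI flag and Device.DEFAULT.

import os
import unittest

# hypothetical stand-ins for helpers.CI and Device.DEFAULT
CI = os.getenv("CI", "") != ""
BACKEND = os.getenv("BACKEND", "CLANG")

@unittest.skipIf(CI and BACKEND in ("CLANG", "NV"), "slow")  # condition fixed at import time
class TestShapeOpsSketch(unittest.TestCase):
  def test_runs_when_not_skipped(self):
    self.assertTrue(True)

if __name__ == "__main__":
  unittest.main()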

File 3 of 3:

@@ -3,7 +3,7 @@ from typing import List, cast
import numpy as np
from tinygrad.codegen.rewriter import full_graph_rewrite
from tinygrad.codegen.linearize import linearize_uop
-from tinygrad.device import Buffer, Device
+from tinygrad.device import Buffer, Device, is_dtype_supported
from tinygrad.dtype import dtypes
from tinygrad.engine.realize import CompiledRunner
from tinygrad.helpers import dedup, flatten, prod
@@ -64,6 +64,7 @@ class TestPTXFailures(unittest.TestCase):
    ret = _test_uop_result([], uops, local_size=[4, 1, 1])[0]
    np.testing.assert_equal(ret, [0, 1, 1, 1])

+  @unittest.skipUnless(is_dtype_supported(dtypes.half), "need half")
  def test_gated_define_acc_with_half_dtype(self):
    a = Tensor.randn(32, 32, dtype=dtypes.half).realize()
    b = Tensor.randn(34, 32, dtype=dtypes.half).realize()
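A per-test capability gate like the skipUnless added here can be sketched without tinygrad. In this assumed, self-contained version, numpy plays the role of the tensor library and half_supported() is a hypothetical stand-in for is_dtype_supported(dtypes.half).

import unittest
import numpy as np

def half_supported() -> bool:
  # hypothetical probe; the real is_dtype_supported asks the active backend about the dtype
  return np.dtype("float16").itemsize == 2

class TestHalfGatedSketch(unittest.TestCase):
  @unittest.skipUnless(half_supported(), "need half")
  def test_half_matmul(self):
    a = np.random.randn(32, 32).astype(np.float16)
    b = np.random.randn(32, 34).astype(np.float16)
    self.assertEqual((a @ b).shape, (32, 34))

if __name__ == "__main__":
  unittest.main()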