faster ci [pr] (#7348)

George Hotz
2024-10-29 13:01:44 +07:00
committed by GitHub
parent a5e0f59e41
commit d9d4dd6756
4 changed files with 9 additions and 9 deletions

View File

@@ -1,7 +1,7 @@
name: Unit Tests
env:
# increment this when downloads substantially change to avoid the internet
-DOWNLOAD_CACHE_VERSION: '6'
+DOWNLOAD_CACHE_VERSION: '7'
RUN_PROCESS_REPLAY: 1
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PYTHONPATH: .
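Per the comment above it, bumping DOWNLOAD_CACHE_VERSION is how this workflow busts its download cache: the value presumably participates in the cache key, so a key containing the new value has never been written and the next run re-downloads once, then caches under the new key. A minimal sketch of that keying idea (the key format here is hypothetical, not the workflow's actual one):

    # hypothetical key scheme: any bump to `version` yields a never-before-seen key
    def download_cache_key(version: str) -> str:
        return f"downloads-{version}"

    assert download_cache_key("7") != download_cache_key("6")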
@@ -285,7 +285,7 @@ jobs:
- if: ${{ matrix.task == 'optimage'}}
name: Test GPU IMAGE=2 ops + training
run: |
PYTHONPATH="." GPU=1 IMAGE=2 python -m pytest -n=auto test/test_ops.py
PYTHONPATH="." GPU=1 IMAGE=2 python -m pytest -n=auto test/test_ops.py --durations=20
PYTHONPATH="." GPU=1 IMAGE=2 python3 test/models/test_end2end.py TestEnd2End.test_linear_mnist
- if: ${{ matrix.task == 'optimage' }}
name: Test openpilot model compile and size
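`--durations=20` is pytest's built-in slow-test report: after the run it prints the 20 slowest test durations, the natural first step when hunting for what makes CI slow. The same report can be produced from Python; a sketch, assuming pytest and pytest-xdist (which provides -n=auto) are installed:

    # run the ops tests in parallel and report the 20 slowest durations
    import pytest
    raise SystemExit(pytest.main(["-n=auto", "test/test_ops.py", "--durations=20"]))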

View File

@@ -21,7 +21,7 @@ st_int32 = st.integers(-2147483648, 2147483647)
@st.composite
def st_shape(draw) -> tuple[int, ...]:
s = draw(stn.array_shapes(min_dims=0, max_dims=6,
-min_side=0, max_side=512))
+min_side=0, max_side=128))
assume(prod(s) <= 1024 ** 2)
assume(prod([d for d in s if d]) <= 1024 ** 4)
return s
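Lowering max_side from 512 to 128 shrinks the largest side hypothesis may generate per dimension, while the two assume() calls keep the overall element count bounded. A self-contained restatement of the strategy (a sketch; `prod` is taken from math here, and the aliases are assumed to match the test file's imports):

    from math import prod
    from hypothesis import assume, given, strategies as st
    import hypothesis.extra.numpy as stn

    @st.composite
    def st_shape(draw) -> tuple:
        # up to 6 dims, each side 0..128 after this change
        s = draw(stn.array_shapes(min_dims=0, max_dims=6, min_side=0, max_side=128))
        assume(prod(s) <= 1024 ** 2)                    # total element cap (0 if any dim is 0)
        assume(prod([d for d in s if d]) <= 1024 ** 4)  # also bound the non-zero dims
        return s

    @given(st_shape())
    def test_sides_bounded(s):
        assert all(d <= 128 for d in s)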
@@ -56,7 +56,7 @@ class TestShapeOps(unittest.TestCase):
def test_chunk(self, s:tuple[int, ...], dim:int, num:int):
# chunking on a 0 dim is cloning and leads to OOM if done unbounded.
assume((0 <= (actual_dim := len(s)-dim if dim < 0 else dim) < len(s) and s[actual_dim] > 0) or
-(num < 32))
+(num < 16))
tor, ten = tensors_for_shape(s)
tor, ten, ok = apply(tor, ten, lambda t: t.chunk(num, dim))
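The assume() here steers hypothesis away from the degenerate case: when the chunked dimension is missing or has size 0 there is nothing to split, so each of the `num` chunks is a full clone and memory grows linearly with `num`; dropping the bound from 32 to 16 halves that worst case. The guard restated as a standalone predicate (the helper name is made up):

    def chunk_case_is_bounded(s: tuple, dim: int, num: int, limit: int = 16) -> bool:
        actual_dim = len(s) - dim if dim < 0 else dim   # dim normalization as in the test
        dim_ok = 0 <= actual_dim < len(s) and s[actual_dim] > 0
        # a valid, non-empty chunk dim is always fine; otherwise cap num
        return dim_ok or num < limit

    assert chunk_case_is_bounded((0, 4), dim=0, num=8)        # size-0 dim, small num: kept
    assert not chunk_case_is_bounded((0, 4), dim=0, num=64)   # would clone 64x: rejected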

View File

@@ -1594,16 +1594,16 @@ class TestOps(unittest.TestCase):
lambda x,w: torch.nn.functional.conv1d(torch.nn.functional.pad(x, p),w).relu(),
lambda x,w: Tensor.conv2d(x,w,padding=p).relu())
-def _test_conv2d(self, bs=1, cin=1):
+def _test_conv2d(self, bs=1, cin=1, cout=6):
for H in [1,2,3]:
for W in [1,2,3,5]:
-for groups in [1,3] if cin == 3 and H == 3 and W == 3 else [1]:
+for groups in [1,3] if cin == 3 and cout == 6 and H == 3 and W == 3 else [1]:
with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H, width=W):
-helper_test_op([(bs,cin,11,7), (6,cin//groups,H,W)],
+helper_test_op([(bs,cin,5,7), (cout,cin//groups,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(),
lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), grad_rtol=1e-5)
def test_conv2d(self): self._test_conv2d(bs=1, cin=3)
-def test_conv2d_bs_4_cin_3(self): self._test_conv2d(bs=4, cin=3)
+def test_conv2d_bs_4_cin_3(self): self._test_conv2d(bs=4, cin=3, cout=2)
def test_conv2d_bs_1_cin_1(self): self._test_conv2d(bs=1, cin=1)
def test_conv2d_bs_4_cin_1(self): self._test_conv2d(bs=4, cin=1)
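Two things get cheaper in this sweep: the input height drops from 11 to 5 (with no padding, output height goes from 11-H+1 to 5-H+1), and the output-channel count becomes a parameter so the bs=4 case can use cout=2 instead of 6. The grouped case keeps the constraint that cout is divisible by groups. A quick check of the new shapes on the PyTorch side:

    import torch
    x = torch.randn(1, 3, 5, 7)   # (bs, cin, 5, 7), the new input shape
    w = torch.randn(6, 1, 3, 3)   # (cout=6, cin//groups=1, H=3, W=3)
    y = torch.nn.functional.conv2d(x, w, groups=3)
    print(y.shape)                # torch.Size([1, 6, 3, 5]): (5-3+1, 7-3+1)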

View File

@@ -137,7 +137,7 @@ class TestBEAM(unittest.TestCase):
lin = Kernel(ast)
bufs = bufs_from_lin(lin)
-best_lin = beam_search(lin, bufs, 3)
+best_lin = beam_search(lin, bufs, 2)
assert best_lin
# need disable_cache to trigger.
tm = time_linearizer(best_lin, bufs, allow_test_size=False, cnt=2, disable_cache=True)
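The beam width here drops from 3 to 2: beam search keeps only the `width` best candidates alive at each expansion step, so a smaller width explores fewer kernels and finishes sooner, at some cost in result quality. A generic sketch of what the width parameter controls (not tinygrad's implementation):

    def generic_beam_search(start, expand, score, width=2, steps=4):
        """Keep only the `width` lowest-score candidates at each step."""
        frontier = [start]
        for _ in range(steps):
            candidates = [n for c in frontier for n in expand(c)]
            if not candidates: break
            frontier = sorted(candidates, key=score)[:width]
        return min(frontier, key=score)

    # toy usage: grow bit-strings whose sum is closest to 2
    best = generic_beam_search(start=(), expand=lambda s: [s + (0,), s + (1,)],
                               score=lambda s: abs(sum(s) - 2))
    assert sum(best) == 2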