Mirror of https://github.com/tinygrad/tinygrad.git (synced 2026-01-09 15:08:02 -05:00)
tests: remove 2 runners, make cache reliable (#2106)
* remove 2 runners
* device.DEFAULT printing
* explain rebuild
* disable ocelot rebuild
* try again to fix workflow
* this? fix cache hash
* force no rebuild
* fix pylint
.github/workflows/test.yml (56 changed lines, vendored)
@@ -22,7 +22,7 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ${{ env.Python3_ROOT_DIR }}/lib/python3.8/site-packages
-        key: linting-packages-${{ hashFiles('*/setup.py') }}-3.8
+        key: linting-packages-${{ hashFiles('**/setup.py') }}-3.8
     - name: Install dependencies
       run: pip install -e '.[linting,testing]' --extra-index-url https://download.pytorch.org/whl/cpu
     - name: Repo line count
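Note: the cache-key change above (repeated in the hunks below) is the "fix cache hash" item from the commit message. In hashFiles(), '*/setup.py' only matches a setup.py one directory below the workspace root, so with tinygrad's setup.py at the repo root it presumably hashed nothing and the cache key never changed when dependencies did; the recursive '**/setup.py' pattern also matches the root-level file. A rough illustration of the two patterns using Python's glob module (the second path is hypothetical, just for the demo):

    # Rough sketch of the glob difference behind the cache-key fix; hashFiles()
    # in GitHub Actions behaves similarly for this case.
    import glob

    # Suppose the checkout contains ./setup.py and, hypothetically, ./examples/setup.py.
    print(glob.glob("*/setup.py"))                   # one directory deep only -> misses ./setup.py
    print(glob.glob("**/setup.py", recursive=True))  # recursive ** -> includes ./setup.py as well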
@@ -58,7 +58,7 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ${{ env.Python3_ROOT_DIR }}/lib/python3.8/site-packages
-        key: testing-packages-${{ hashFiles('*/setup.py') }}
+        key: testing-packages-${{ hashFiles('**/setup.py') }}
     - name: Install Dependencies
       run: pip install -e '.[testing]' --extra-index-url https://download.pytorch.org/whl/cpu
     - name: Test Docs
@@ -104,7 +104,7 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ${{ env.Python3_ROOT_DIR }}/lib/python3.11/site-packages
-        key: testing-packages-${{ hashFiles('*/setup.py') }}
+        key: testing-packages-${{ hashFiles('**/setup.py') }}
     - name: Install Dependencies
       run: pip install -e '.[testing]' --extra-index-url https://download.pytorch.org/whl/cpu
     - name: Run Pytest
@@ -116,8 +116,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        task: [optimage, openpilot, multigpu, realworld]
-    name: ${{ matrix.task=='optimage'&&'GPU OPT and IMAGE Tests'|| matrix.task=='openpilot'&&'openpilot (OpenCL) Tests'|| matrix.task=='multigpu'&&'MultiGPU Tests' || matrix.task=='realworld'&&'Real World Tests'}}
+        task: [optimage, openpilot]
+    name: ${{ matrix.task=='optimage'&&'GPU OPT and IMAGE Tests'|| matrix.task=='openpilot'&&'openpilot (OpenCL) Tests'}}
     runs-on: ubuntu-20.04
     timeout-minutes: 20
 
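Note: the name: expression kept here relies on the fact that GitHub Actions expressions have no if/else or ternary operator, so the workflow chains && and ||, which short-circuit and return their operand values, to pick a readable job name per matrix task. A rough Python analogue of that selection (the task value is hypothetical):

    # Sketch of the short-circuit "ternary" used in the job's name: expression above.
    task = "openpilot"  # hypothetical matrix value
    name = (task == "optimage" and "GPU OPT and IMAGE Tests"
            or task == "openpilot" and "openpilot (OpenCL) Tests"
            or "unknown task")
    print(name)  # -> openpilot (OpenCL) Tests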
@@ -137,7 +137,7 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ${{ env.Python3_ROOT_DIR }}/lib/python3.11/site-packages
-        key: testing-packages-${{ hashFiles('*/setup.py') }}
+        key: testing-packages-${{ hashFiles('**/setup.py') }}
     - name: Install Dependencies
       run: pip install -e '.[testing]' --extra-index-url https://download.pytorch.org/whl/cpu
     - if: ${{ matrix.task == 'optimage' }}
@@ -149,10 +149,11 @@ jobs:
       name: Test WINO=1
       run: GPU=1 DEBUG=2 WINO=1 python3 test/test_ops.py TestOps.test_simple_conv2d
     - if: ${{ matrix.task == 'optimage'}}
-      name: Test GPU IMAGE ops
-      run: |
-        GPU=1 IMAGE=1 python -m pytest -n=auto test/test_ops.py
-        GPU=1 IMAGE=2 python -m pytest -n=auto test/test_ops.py
+      name: Test GPU IMAGE=1 ops
+      run: GPU=1 IMAGE=1 python -m pytest -n=auto test/test_ops.py
+    - if: ${{ matrix.task == 'optimage'}}
+      name: Test GPU IMAGE=2 ops
+      run: GPU=1 IMAGE=2 python -m pytest -n=auto test/test_ops.py
     - if: ${{ matrix.task == 'openpilot' }}
       name: Test openpilot model compile and size
       run: |
@@ -167,16 +168,11 @@ jobs:
     - if: ${{ matrix.task == 'openpilot' }}
       name: Test tensor core ops
       run: GPU=1 TC=2 python -m pytest -n=auto test/test_ops.py
-    - if: ${{ matrix.task == 'multigpu' }}
+    - if: ${{ matrix.task == 'openpilot' }}
       name: Test multigpu
       run: |
         PYTHONPATH="." python test/external/dist/test_world.py
         PYTHONPATH="." python test/external/dist/test_collectives.py
-    - if: ${{ matrix.task == 'realworld' }}
-      name: Run GPT2
-      run: |
-        PYTHONPATH="." JIT=0 python examples/gpt2.py --model_size=gpt2 --prompt "Hello." --count 10 --temperature 0 --timing
-        PYTHONPATH="." JIT=1 python examples/gpt2.py --model_size=gpt2 --prompt "Hello." --count 10 --temperature 0 --timing
 
   testmetalwebgpu:
     name: Metal and WebGPU Tests
@@ -194,7 +190,7 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ${{ env.Python3_ROOT_DIR }}/lib/python3.11/site-packages
-        key: metal-webgpu-testing-packages-${{ hashFiles('*/setup.py') }}
+        key: metal-webgpu-testing-packages-${{ hashFiles('**/setup.py') }}
     - name: Install Dependencies
       run: pip install -e '.[metal,webgpu,testing]' --extra-index-url https://download.pytorch.org/whl/cpu
     - name: Test LLaMA compile speed
@@ -202,6 +198,10 @@ jobs:
     #- name: Run dtype test
     #  run: DEBUG=4 METAL=1 python -m pytest -n=auto test/test_dtype.py
     # dtype test has issues on test_half_to_int8
+    - name: Check Device.DEFAULT (METAL) and print some source
+      run: |
+        METAL=1 python -c "from tinygrad.ops import Device; assert Device.DEFAULT == 'METAL', Device.DEFAULT"
+        METAL=1 DEBUG=4 FORWARD_ONLY=1 python3 test/test_ops.py TestOps.test_add
     - name: Run metal ops test
       run: DEBUG=2 METAL=1 python -m pytest -n=auto test/test_ops.py
     - name: Run JIT test
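Note: the step added above is the "device.DEFAULT printing" item. The python -c one-liner asserts that exporting METAL=1 really makes METAL the default backend, and the DEBUG=4 FORWARD_ONLY=1 run of a single op prints the generated kernel source for that backend into the CI log. The same check is easy to run locally; a minimal sketch, assuming a tinygrad checkout from this era (where Device lives in tinygrad.ops):

    # Local equivalent of the CI check above; run as:  METAL=1 python check_default_device.py
    # (check_default_device.py is just a hypothetical name for this sketch)
    from tinygrad.ops import Device

    # Device.DEFAULT is selected from environment variables such as METAL=1, GPU=1, CLANG=1, ...
    assert Device.DEFAULT == "METAL", Device.DEFAULT
    print("default device:", Device.DEFAULT)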
@@ -212,8 +212,10 @@ jobs:
       run: METAL=1 python -m pytest -n=auto test/external/external_test_onnx_backend.py
     - name: Run whisper test
       run: METAL=1 python -m pytest test/models/test_whisper.py
-    - name: Check Device.DEFAULT
-      run: WEBGPU=1 python -c "from tinygrad.ops import Device; assert Device.DEFAULT == 'WEBGPU', Device.DEFAULT"
+    - name: Check Device.DEFAULT (WEBGPU) and print some source
+      run: |
+        WEBGPU=1 python -c "from tinygrad.ops import Device; assert Device.DEFAULT == 'WEBGPU', Device.DEFAULT"
+        WEBGPU=1 DEBUG=4 FORWARD_ONLY=1 python3 test/test_ops.py TestOps.test_add
     - name: Run linearizer and tensor core test
       run: METAL=1 python -m pytest -n=auto test/test_linearizer.py
     #- name: Run webgpu pytest
@@ -227,7 +229,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        backend: [llvm, clang, gpu, cuda, triton] #, ptx]
+        backend: [llvm, clang, gpu, cuda, triton] #, ptx]
 
     name: Tests on (${{ matrix.backend }})
     runs-on: ${{ matrix.backend == 'gpu' && 'ubuntu-20.04' || 'ubuntu-latest' }}
@@ -244,9 +246,9 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ${{ env.Python3_ROOT_DIR }}/lib/python3.11/site-packages
-        key: ${{ matrix.backend }}-packages-${{ hashFiles('*/setup.py') }}
+        key: ${{ matrix.backend }}-packages-${{ hashFiles('**/setup.py') }}
     - name: Set env
-      run: printf "${{ matrix.backend == 'llvm' && 'ENABLE_METHOD_CACHE=1\nLLVM=1' || matrix.backend == 'clang' && 'CLANG=1\nENABLED_METHOD_CACHE=1' || matrix.backend == 'gpu' && 'GPU=1' || matrix.backend == 'cuda' && 'FORWARD_ONLY=1\nJIT=1\nOPT=2\nCUDA=1\nCUDACPU=1\n' || matrix.backend == 'PTX' && 'FORWARD_ONLY=1\nJIT=1\nOPT=2\nCUDA=1\nCUDACPU=1\nPTX=1' || matrix.backend == 'triton' && 'FORWARD_ONLY=1\nJIT=1\nOPT=2\nCUDA=1\nCUDACPU=1\nTRITON=1\nTRITON_PTXAS_PATH=/usr/bin/ptxas'}}" >> $GITHUB_ENV
+      run: printf "${{ matrix.backend == 'llvm' && 'LLVM=1' || matrix.backend == 'clang' && 'CLANG=1' || matrix.backend == 'gpu' && 'GPU=1' || matrix.backend == 'cuda' && 'FORWARD_ONLY=1\nJIT=1\nOPT=2\nCUDA=1\nCUDACPU=1\n' || matrix.backend == 'PTX' && 'FORWARD_ONLY=1\nJIT=1\nOPT=2\nCUDA=1\nCUDACPU=1\nPTX=1' || matrix.backend == 'triton' && 'FORWARD_ONLY=1\nJIT=1\nOPT=2\nCUDA=1\nCUDACPU=1\nTRITON=1\nTRITON_PTXAS_PATH=/usr/bin/ptxas'}}" >> $GITHUB_ENV
     - name: Install OpenCL
       if: matrix.backend == 'gpu'
       run: |
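Note: the Set env step above selects a backend-specific block of KEY=VALUE lines with the same &&/|| expression trick and appends it to the file behind $GITHUB_ENV; every line written to that file becomes an environment variable for the remaining steps of the job. The cuda/ptx/triton blocks are untouched, while llvm and clang simply drop the method-cache variables. The same mechanism works from any language; a sketch in Python (the chosen variable is only an example):

    # Sketch: exporting variables to later GitHub Actions steps by appending to $GITHUB_ENV.
    # Only works inside an Actions run, where the runner provides GITHUB_ENV as a writable file.
    import os

    env_file = os.environ["GITHUB_ENV"]     # path supplied by the runner
    backend_env = {"LLVM": "1"}             # example: what the llvm matrix entry now sets

    with open(env_file, "a") as f:
        for key, value in backend_env.items():
            f.write(f"{key}={value}\n")     # one KEY=VALUE per line, same format the printf emits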
@@ -259,14 +261,14 @@ jobs:
         sudo apt update -y
         sudo apt install -y --no-install-recommends git g++ cmake ninja-build llvm-15-dev zlib1g-dev libglew-dev flex bison libfl-dev libboost-thread-dev libboost-filesystem-dev nvidia-cuda-toolkit-gcc
     - name: Cache gpuocelot
-      if: matrix.backend == 'cuda' || matrix.backend == 'ptx' || matrix.backend == 'triton'
+      if: matrix.backend == 'cuda' || matrix.backend == 'ptx' || matrix.backend == 'triton'
       id: cache-build
       uses: actions/cache@v3
       env:
         cache-name: cache-gpuocelot-build
       with:
         path: ${{ github.workspace }}/gpuocelot/ocelot
-        key: ubuntu22.04-gpuocelot-szymonozog-tinygrad
+        key: ubuntu22.04-gpuocelot-szymonozog-tinygrad-norebuild
     - name: Clone/compile gpuocelot
       if: (matrix.backend == 'cuda' || matrix.backend == 'ptx' || matrix.backend == 'triton') && steps.cache-build.outputs.cache-hit != 'true'
       run: |
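Note: two things interact here to give the "force no rebuild" behaviour: bumping the cache key to ...-norebuild deliberately invalidates the old cache entry, so the next run compiles gpuocelot once and stores it under the new key; after that, steps.cache-build.outputs.cache-hit is 'true' and the Clone/compile step's if: guard skips the build entirely. A toy sketch of that guard logic (not real Actions code, just the boolean it evaluates):

    # Toy model of the workflow's rebuild decision; the real check is the `if:` expression
    # (matrix.backend in cuda/ptx/triton) && steps.cache-build.outputs.cache-hit != 'true'.
    def should_build_gpuocelot(backend: str, cache_hit: bool) -> bool:
        needs_ocelot = backend in ("cuda", "ptx", "triton")
        return needs_ocelot and not cache_hit

    assert should_build_gpuocelot("cuda", cache_hit=False)        # first run after the key bump: build once
    assert not should_build_gpuocelot("cuda", cache_hit=True)     # later runs: restored from cache, skip
    assert not should_build_gpuocelot("llvm", cache_hit=False)    # non-CUDA backends never need ocelot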
@@ -274,13 +276,13 @@ jobs:
         cd ${{ github.workspace }}/gpuocelot/ocelot
         mkdir build
         cd build
-        cmake .. -Wno-dev -G Ninja -DOCELOT_BUILD_TOOLS=OFF
+        cmake .. -Wno-dev -G Ninja -DOCELOT_BUILD_TOOLS=OFF -DCMAKE_BUILD_ALWAYS=0
         ninja
     - name: Install gpuocelot
       if: matrix.backend == 'cuda' || matrix.backend == 'ptx' || matrix.backend == 'triton'
       run: |
         cd ${{ github.workspace }}/gpuocelot/ocelot/build
-        sudo ninja install
+        sudo ninja install -d explain
     - name: Install dependencies
       run: pip install -e '.[testing${{matrix.backend=='llvm'&&',llvm'||matrix.backend=='cuda'&&',cuda'||matrix.backend=='ptx'&&',cuda'||matrix.backend=='triton'&&',triton'||''}}]' --extra-index-url https://download.pytorch.org/whl/cpu --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/Triton-Nightly/pypi/simple/
     - name: Check Device.DEFAULT and print some source
@@ -319,7 +321,7 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ${{ env.Python3_ROOT_DIR }}/lib/python3.11/site-packages
-        key: testing-arm-packages-${{ hashFiles('*/setup.py') }}
+        key: testing-arm-packages-${{ hashFiles('**/setup.py') }}
     - name: Install cross-assembler
       run: |
         sudo apt update -y
setup.py (9 changed lines)
@@ -37,6 +37,8 @@ setup(name='tinygrad',
         "typing-extensions",
         "pre-commit",
         "ruff",
+        "types-PyYAML",
+        "types-tqdm",
       ],
       'testing': [
         "torch",
@@ -47,14 +49,11 @@ setup(name='tinygrad',
         "opencv-python",
         "tabulate",
         "safetensors",
-        "types-PyYAML",
-        "types-tqdm",
         "cloudpickle",
         "transformers",
         "nevergrad",
         "sentencepiece",
         "tiktoken",
-        "librosa"
-      ],
+        "librosa",
+      ]
     },
     include_package_data=True)
@@ -19,7 +19,7 @@ torch_fxn_for_op: Dict[Op, Callable] = {**base_fxn_for_op, **{
   UnaryOps.NOOP: lambda x: x.contiguous(), UnaryOps.SQRT: lambda x: x.sqrt(), UnaryOps.EXP2: lambda x: x.exp2(), UnaryOps.LOG2: lambda x: x.log2(), UnaryOps.SIN: torch.sin,
   UnaryOps.CAST: lambda x,y: (x.view if y[1] else x.type)(next(k for k,v in type_map.items() if v==y[0])),
   BinaryOps.MAX: torch.maximum, BinaryOps.CMPLT: lambda x,y: (x<y).type(torch.promote_types(x.dtype, y.dtype)), BinaryOps.SUB: lambda x,y: torch.logical_xor(x, y) if y.dtype is torch.bool else torch.sub(x, y),
-  MovementOps.PAD: lambda x, padding: torch.nn.functional.pad(x, [item for sublist in padding[::-1] for item in sublist]),
+  MovementOps.PAD: lambda x, padding: torch.nn.functional.pad(x, [item for sublist in padding[::-1] for item in sublist]), # pylint: disable=E1102
   TernaryOps.MULACC: einsum_mulacc(lambda s,a,b: torch.einsum(s, a.float(), b.float()).type(torch.promote_types(a.dtype, b.dtype)), lambda x: x.stride(), lambda x,s: x.expand(s)),
   TernaryOps.WHERE: lambda x, y, z: torch.where(x != 0, y, z),
   MovementOps.STRIDE: lambda x, arg: x[tuple(slice(None, None, abs(i)) for i in arg)].flip([i for i,a in enumerate(arg) if a < 0]),
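Note: the trailing comment added above is the "fix pylint" item. On recent torch versions pylint reports E1102 (not-callable) for torch.nn.functional.pad, apparently because the symbol is a C-extension binding pylint cannot resolve as callable, so the warning is silenced on that one line instead of repo-wide. A standalone reproduction sketch (assumes torch is installed):

    # Minimal sketch of the same inline suppression; without the comment, some
    # pylint + torch version combinations flag this call as E1102 even though it is valid.
    import torch

    x = torch.zeros(2, 3)
    y = torch.nn.functional.pad(x, [1, 1, 0, 0])  # pylint: disable=E1102
    print(y.shape)  # torch.Size([2, 5])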