From 3c3f846c45928e5074e7a21740c071a66b9ef991 Mon Sep 17 00:00:00 2001
From: chenyu
Date: Tue, 5 Mar 2024 11:03:52 -0500
Subject: [PATCH] tinybox benchmark with HSA (#3603)

* tinybox benchmark with HSA

* torch cuda init can fail

* no TORCHCUDA

* print torch version

* LD_PRELOAD="/opt/rocm/lib/libhsa-runtime64.so"
---
 .github/workflows/benchmark.yml | 28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)

diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index 7de9b57785..8e79780f57 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -140,30 +140,32 @@ jobs:
         ln -s ~/tinygrad/weights/LLaMA weights/LLaMA
         ln -s ~/tinygrad/extra/datasets/cifar-10-python.tar.gz extra/datasets/cifar-10-python.tar.gz
     - name: Run model inference benchmark
-      run: HIP=1 NOCLANG=1 python3 test/external/external_model_benchmark.py
+      run: LD_PRELOAD="/opt/rocm/lib/libhsa-runtime64.so" HSA=1 NOCLANG=1 python3 test/external/external_model_benchmark.py
     - name: Test speed vs torch
-      run: BIG=2 TORCHCUDA=1 python3 test/test_speed_v_torch.py | tee torch_speed.txt
+      run: |
+        python3 -c "import torch; print(torch.__version__)"
+        LD_PRELOAD="/opt/rocm/lib/libhsa-runtime64.so" HSA=1 BIG=2 TORCHCUDA=1 python3 test/test_speed_v_torch.py | tee torch_speed.txt
     - name: Run Tensor Core GEMM
-      run: HIP=1 HALF=1 DEBUG=2 python3 extra/gemm/simple_matmul.py | tee matmul.txt
+      run: HSA=1 HALF=1 DEBUG=2 python3 extra/gemm/simple_matmul.py | tee matmul.txt
     - name: Run Stable Diffusion
-      run: python3 examples/stable_diffusion.py --seed 0 --noshow --timing | tee sd.txt
-    - name: Run LLaMA (with HIP)
+      run: HSA=1 python3 examples/stable_diffusion.py --seed 0 --noshow --timing | tee sd.txt
+    - name: Run LLaMA
       run: |
-        HIP=1 JIT=0 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_unjitted.txt
-        HIP=1 JIT=1 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_jitted.txt
-    - name: Run GPT2 (with HIP)
+        HSA=1 JIT=0 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_unjitted.txt
+        HSA=1 JIT=1 python3 examples/llama.py --gen 1 --prompt "Hello." --count 10 --temperature 0 --timing | tee llama_jitted.txt
+    - name: Run GPT2
       run: |
-        HIP=1 JIT=0 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_unjitted.txt
-        HIP=1 JIT=1 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_jitted.txt
+        HSA=1 JIT=0 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_unjitted.txt
+        HSA=1 JIT=1 python3 examples/gpt2.py --prompt "Hello." --count 10 --temperature 0 --timing | tee gpt2_jitted.txt
     - name: Run 10 CIFAR training steps
-      run: HIP=1 STEPS=10 python3 examples/hlb_cifar10.py | tee train_cifar.txt
+      run: HSA=1 STEPS=10 python3 examples/hlb_cifar10.py | tee train_cifar.txt
     - name: Run 10 CIFAR training steps w HALF
-      run: HIP=1 STEPS=10 HALF=1 python3 examples/hlb_cifar10.py | tee train_cifar_half.txt
+      run: HSA=1 STEPS=10 HALF=1 python3 examples/hlb_cifar10.py | tee train_cifar_half.txt
     # # TODO: enable this. it took 3 minutes in CI and made the full training one more than 5 minutes
     # - name: Run 10 CIFAR training steps w 6 GPUS
     #   run: time HALF=1 STEPS=10 BS=1536 GPUS=6 python3 examples/hlb_cifar10.py
     - name: Run full CIFAR training
-      run: time HIP=1 HALF=1 LATEWINO=1 STEPS=1000 TARGET_EVAL_ACC_PCT=93 python3 examples/hlb_cifar10.py | tee train_cifar_one_gpu.txt
+      run: time HSA=1 HALF=1 LATEWINO=1 STEPS=1000 TARGET_EVAL_ACC_PCT=93 python3 examples/hlb_cifar10.py | tee train_cifar_one_gpu.txt
     - uses: actions/upload-artifact@v4
       with:
        name: Speed (AMD)
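
Note on the LD_PRELOAD line: the ROCm build of torch bundles its own copy of
libhsa-runtime64.so inside the wheel, so importing torch alongside tinygrad's
HSA backend in one process can presumably end up with two different HSA
runtimes loaded (the "torch cuda init can fail" iteration in the commit
message above). Preloading the system runtime from /opt/rocm makes both sides
resolve against a single copy. A minimal sketch of reproducing the first
benchmark step locally, assuming a ROCm install at /opt/rocm and a tinygrad
checkout; the paths and environment variables are the ones the workflow uses:

  # Pin the process to the system HSA runtime before anything else loads one.
  # HSA=1 selects tinygrad's HSA backend; NOCLANG=1 skips the clang backend.
  cd tinygrad
  LD_PRELOAD="/opt/rocm/lib/libhsa-runtime64.so" HSA=1 NOCLANG=1 \
    python3 test/external/external_model_benchmark.py

The same LD_PRELOAD is only needed on the two steps that import torch; the
pure-tinygrad steps (GEMM, Stable Diffusion, LLaMA, GPT2, CIFAR) just switch
the backend variable from HIP=1 to HSA=1.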