use default dict for external_model_benchmark (#2592)

* device default

* Device.DEFAULT

* half max for cuda

* CUDA_INCLUDE_PATH

* closer to working

* cuda fixups

* Update ops_cuda.py
This commit is contained in:
George Hotz
2023-12-03 15:25:43 -08:00
committed by GitHub
parent 550817389a
commit bbeba8ec85
7 changed files with 64 additions and 36 deletions

View File

@@ -27,7 +27,7 @@ jobs:
ln -s ~/tinygrad/weights/LLaMA weights/LLaMA
ln -s ~/tinygrad/extra/datasets/cifar-10-python.tar.gz extra/datasets/cifar-10-python.tar.gz
- name: Run model inference benchmark
- run: python3 test/external/external_model_benchmark.py
+ run: METAL=1 python3 test/external/external_model_benchmark.py
- name: Test speed vs torch
run: BIG=2 MPS=1 python3 test/test_speed_v_torch.py | tee torch_speed.txt
- name: Run Tensor Core GEMM
@@ -73,8 +73,8 @@ jobs:
steps:
- name: Checkout Code
uses: actions/checkout@v3
- #- name: Run model inference benchmark
- #  run: CUDA=1 python3 test/external/external_model_benchmark.py
+ - name: Run model inference benchmark
+   run: CUDA=1 python3 test/external/external_model_benchmark.py
- name: Test speed vs torch
run: CUDA=1 BIG=2 TORCHCUDA=1 python3 test/test_speed_v_torch.py | tee torch_speed.txt
- name: Run GPT2
@@ -109,7 +109,7 @@ jobs:
ln -s ~/tinygrad/weights/LLaMA weights/LLaMA
ln -s ~/tinygrad/extra/datasets/cifar-10-python.tar.gz extra/datasets/cifar-10-python.tar.gz
- name: Run model inference benchmark
- run: python3 test/external/external_model_benchmark.py
+ run: GPU=1 python3 test/external/external_model_benchmark.py
- name: Test speed vs torch
run: BIG=2 TORCHCUDA=1 python3 test/test_speed_v_torch.py | tee torch_speed.txt
- name: Run Tensor Core GEMM