From cb372b053ff15ce27f35ac4113ca11a6b27ed6aa Mon Sep 17 00:00:00 2001
From: George Hotz <72895+geohot@users.noreply.github.com>
Date: Thu, 25 Jan 2024 12:01:22 -0800
Subject: [PATCH] add device speed test (#3244)

---
 test/test_device_speed.py     | 27 +++++++++++++++++++++++++++
 tinygrad/runtime/ops_cuda.py  |  2 +-
 tinygrad/runtime/ops_gpu.py   |  6 +++---
 tinygrad/runtime/ops_hip.py   |  2 +-
 tinygrad/runtime/ops_metal.py |  2 +-
 5 files changed, 33 insertions(+), 6 deletions(-)
 create mode 100644 test/test_device_speed.py

diff --git a/test/test_device_speed.py b/test/test_device_speed.py
new file mode 100644
index 0000000000..56b3526928
--- /dev/null
+++ b/test/test_device_speed.py
@@ -0,0 +1,27 @@
+import unittest
+from tinygrad import Device
+from tinygrad.helpers import Timing
+from tinygrad.device import Compiled
+
+@unittest.skipIf(not isinstance(Device[Device.DEFAULT], Compiled), "only for compiled backend")
+class TestDeviceSpeed(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    cls.dev = Device[Device.DEFAULT]
+    cls.empty = Device[Device.DEFAULT].renderer("test", [])
+
+  def test_empty_compile(self):
+    with Timing("compiler "):
+      self.dev.compiler(self.empty)
+
+  def test_launch_speed(self):
+    prg_bin = self.dev.compiler(self.empty)
+    prg = self.dev.runtime("test", prg_bin)
+    prg()  # ignore first launch
+    with Timing("launch 1000x "):
+      for _ in range(1000): prg()
+    with Timing("launch 1000x with wait "):
+      for _ in range(1000): prg(wait=True)
+
+if __name__ == '__main__':
+  unittest.main()
\ No newline at end of file
diff --git a/tinygrad/runtime/ops_cuda.py b/tinygrad/runtime/ops_cuda.py
index d8a1ea2ef7..9865fb4b83 100644
--- a/tinygrad/runtime/ops_cuda.py
+++ b/tinygrad/runtime/ops_cuda.py
@@ -52,7 +52,7 @@ class CUDAProgram:
   def __del__(self):
     if hasattr(self, 'module'): check(cuda.cuModuleUnload(self.module))
 
-  def __call__(self, *bufs, global_size:Tuple[int,int,int], local_size:Tuple[int,int,int], vals:Tuple[int, ...]=(), wait=False):
+  def __call__(self, *bufs, global_size:Tuple[int,int,int]=(1,1,1), local_size:Tuple[int,int,int]=(1,1,1), vals:Tuple[int, ...]=(), wait=False):
     if not CUDACPU: check(cuda.cuCtxSetCurrent(self.device.context))
     c_kernel_input_config = encode_args_cuda_style(bufs, vals, cuda.CUdeviceptr_v2, (1,2,0))[0] if not CUDACPU else (bufs+tuple(vals))
     return cu_time_execution(lambda: check(cuda.cuLaunchKernel(self.prg, *global_size, *local_size, 0, None, None, c_kernel_input_config)), enable=wait) # noqa: E501
diff --git a/tinygrad/runtime/ops_gpu.py b/tinygrad/runtime/ops_gpu.py
index c36b33ba2e..b40e69badd 100644
--- a/tinygrad/runtime/ops_gpu.py
+++ b/tinygrad/runtime/ops_gpu.py
@@ -1,5 +1,5 @@
 from __future__ import annotations
-from typing import Tuple, Optional, List
+from typing import Tuple, Optional, List, cast
 import ctypes, functools, hashlib
 import gpuctypes.opencl as cl
 from tinygrad.helpers import init_c_var, to_char_p_p, from_mv, OSX, DEBUG
@@ -41,10 +41,10 @@ class CLProgram:
     if hasattr(self, 'kernel'): check(cl.clReleaseKernel(self.kernel))
     if hasattr(self, 'program'): check(cl.clReleaseProgram(self.program))
 
-  def __call__(self, *bufs:cl.cl_mem, global_size:Tuple[int,...], local_size:Optional[Tuple[int,...]]=None, vals:Tuple[int, ...]=(), wait=False) -> Optional[float]: # noqa: E501
+  def __call__(self, *bufs:cl.cl_mem, global_size:Tuple[int,int,int]=(1,1,1), local_size:Optional[Tuple[int,int,int]]=None, vals:Tuple[int, ...]=(), wait=False) -> Optional[float]: # noqa: E501
     for i,b in enumerate(bufs): cl.clSetKernelArg(self.kernel, i, ctypes.sizeof(b), ctypes.byref(b))
     for i,b in enumerate(vals,start=len(bufs)): cl.clSetKernelArg(self.kernel, i, 4, ctypes.byref(ctypes.c_int32(b)))
-    if local_size is not None: global_size = tuple(int(g*l) for g,l in zip(global_size, local_size))
+    if local_size is not None: global_size = cast(Tuple[int,int,int], tuple(int(g*l) for g,l in zip(global_size, local_size)))
     event = cl.cl_event() if wait else None
     check(cl.clEnqueueNDRangeKernel(self.device.queue, self.kernel, len(global_size), None, (ctypes.c_size_t * len(global_size))(*global_size), (ctypes.c_size_t * len(local_size))(*local_size) if local_size else None, 0, None, event)) # noqa: E501
     if wait:
diff --git a/tinygrad/runtime/ops_hip.py b/tinygrad/runtime/ops_hip.py
index 31fef6ed4a..f6e15b0011 100644
--- a/tinygrad/runtime/ops_hip.py
+++ b/tinygrad/runtime/ops_hip.py
@@ -35,7 +35,7 @@ class HIPProgram:
   def __del__(self):
     if hasattr(self, 'module'): check(hip.hipModuleUnload(self.module))
 
-  def __call__(self, *args, global_size:Tuple[int,int,int], local_size:Tuple[int,int,int], vals:Tuple[int, ...]=(), wait=False):
+  def __call__(self, *args, global_size:Tuple[int,int,int]=(1,1,1), local_size:Tuple[int,int,int]=(1,1,1), vals:Tuple[int, ...]=(), wait=False):
     if MOCKHIP: return float("inf")
     check(hip.hipSetDevice(self.device))
     return hip_time_execution(lambda: check(hip.hipModuleLaunchKernel(self.prg, *global_size, *local_size, 0, None, None, encode_args_cuda_style(args, vals, hip.hipDeviceptr_t, marks=(1,2,3))[0])), enable=wait) # noqa: E501
diff --git a/tinygrad/runtime/ops_metal.py b/tinygrad/runtime/ops_metal.py
index 2156f64e63..39b6a98730 100644
--- a/tinygrad/runtime/ops_metal.py
+++ b/tinygrad/runtime/ops_metal.py
@@ -30,7 +30,7 @@ class MetalProgram:
     self.fxn = self.library.newFunctionWithName_(name)
     self.pipeline_state = unwrap2(self.device.device.newComputePipelineStateWithFunction_error_(self.fxn, None))
 
-  def __call__(self, *bufs, global_size:Tuple[int,int,int], local_size:Tuple[int,int,int], vals:Tuple[int, ...]=(), wait=False):
+  def __call__(self, *bufs, global_size:Tuple[int,int,int]=(1,1,1), local_size:Tuple[int,int,int]=(1,1,1), vals:Tuple[int, ...]=(), wait=False):
     assert prod(local_size) <= self.pipeline_state.maxTotalThreadsPerThreadgroup(),f"local size {local_size} bigger than {self.pipeline_state.maxTotalThreadsPerThreadgroup()} with exec width {self.pipeline_state.threadExecutionWidth()} memory length {self.pipeline_state.staticThreadgroupMemoryLength()}" # noqa: E501
     command_buffer = self.device.mtl_queue.commandBuffer()
     encoder = command_buffer.computeCommandEncoder()
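
The runtime changes above are what make the test possible: with global_size and local_size now defaulting to (1,1,1) on every compiled backend, a program rendered from an empty uop list can be launched as bare prg(). Below is a minimal standalone sketch of the same measurement outside unittest, using only the renderer/compiler/runtime attributes and the Timing helper that appear in the patch; it assumes, like the test, that the default device is a Compiled backend.

  # standalone sketch, not part of the patch: time raw kernel launches by hand
  from tinygrad import Device
  from tinygrad.helpers import Timing

  dev = Device[Device.DEFAULT]
  empty_src = dev.renderer("test", [])            # render a kernel with no uops
  prg = dev.runtime("test", dev.compiler(empty_src))

  prg()                                           # warmup: first launch pays one-time setup cost
  with Timing("1000 async launches "):            # enqueue only, no per-call sync
    for _ in range(1000): prg()
  with Timing("1000 launches with wait "):        # wait=True blocks until the kernel finishes
    for _ in range(1000): prg(wait=True)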
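The two Timing blocks deliberately measure different things: without wait the loop times only enqueue overhead, while wait=True goes through each backend's event path (cu_time_execution / hip_time_execution / clEnqueueNDRangeKernel with an event) and so includes a round trip to the device per launch. The file is self-running via its unittest.main() guard, so it can be invoked directly as python3 test/test_device_speed.py, with the backend chosen the usual tinygrad way through an environment variable such as CUDA=1, GPU=1, or METAL=1; on a non-Compiled backend the skipIf decorator skips the whole class.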