Mirror of https://github.com/tinygrad/tinygrad.git, synced 2026-02-12 23:54:58 -05:00
move gpuctypes in tree (#3253)
* move gpuctypes in tree
* fix mypy
* regex exclude
* autogen sh
* mypy exclude
* does that fix it
* fix mypy
* add hip confirm
* verify all autogens
* build clang2py
* opencl headers
* gpu on 22.04
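For the OpenCL runtime, the practical effect of the move is an import-path change, as the diff below shows. As a minimal sketch (not part of this commit, and assuming the generated tinygrad.autogen.opencl module keeps exposing the raw OpenCL C API one-to-one over ctypes, the same way gpuctypes.opencl did), the in-tree bindings can be exercised directly:

    import ctypes
    import tinygrad.autogen.opencl as cl

    # clGetPlatformIDs(num_entries, platforms, num_platforms) returns CL_SUCCESS (0) on success.
    num_platforms = ctypes.c_uint32()
    assert cl.clGetPlatformIDs(0, None, ctypes.byref(num_platforms)) == 0
    print(f"found {num_platforms.value} OpenCL platform(s)")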
@@ -1,7 +1,7 @@
 from __future__ import annotations
 from typing import Tuple, Optional, List, cast
 import ctypes, functools, hashlib
-import gpuctypes.opencl as cl
+import tinygrad.autogen.opencl as cl
 from tinygrad.helpers import init_c_var, to_char_p_p, from_mv, OSX, DEBUG
 from tinygrad.codegen.kernel import LinearizerOptions
 from tinygrad.renderer.cstyle import OpenCLRenderer
@@ -41,13 +41,14 @@ class CLProgram:
     if hasattr(self, 'kernel'): check(cl.clReleaseKernel(self.kernel))
     if hasattr(self, 'program'): check(cl.clReleaseProgram(self.program))

-  def __call__(self, *bufs:cl.cl_mem, global_size:Tuple[int,int,int]=(1,1,1), local_size:Optional[Tuple[int,int,int]]=None, vals:Tuple[int, ...]=(), wait=False) -> Optional[float]: # noqa: E501
+  def __call__(self, *bufs:ctypes._CData, global_size:Tuple[int,int,int]=(1,1,1), local_size:Optional[Tuple[int,int,int]]=None, vals:Tuple[int, ...]=(), wait=False) -> Optional[float]: # noqa: E501
     for i,b in enumerate(bufs): cl.clSetKernelArg(self.kernel, i, ctypes.sizeof(b), ctypes.byref(b))
-    for i,b in enumerate(vals,start=len(bufs)): cl.clSetKernelArg(self.kernel, i, 4, ctypes.byref(ctypes.c_int32(b)))
+    for i,v in enumerate(vals,start=len(bufs)): cl.clSetKernelArg(self.kernel, i, 4, ctypes.byref(ctypes.c_int32(v)))
+    if local_size is not None: global_size = cast(Tuple[int,int,int], tuple(int(g*l) for g,l in zip(global_size, local_size)))
     event = cl.cl_event() if wait else None
     check(cl.clEnqueueNDRangeKernel(self.device.queue, self.kernel, len(global_size), None, (ctypes.c_size_t * len(global_size))(*global_size), (ctypes.c_size_t * len(local_size))(*local_size) if local_size else None, 0, None, event)) # noqa: E501
     if wait:
       assert event is not None
       check(cl.clWaitForEvents(1, ctypes.byref(event)))
       start = init_c_var(ctypes.c_uint64(), lambda x: check(cl.clGetEventProfilingInfo(event, cl.CL_PROFILING_COMMAND_START, ctypes.sizeof(x), ctypes.byref(x), None))) # noqa: E501
       end = init_c_var(ctypes.c_uint64(), lambda x: check(cl.clGetEventProfilingInfo(event, cl.CL_PROFILING_COMMAND_END, ctypes.sizeof(x), ctypes.byref(x), None))) # noqa: E501
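For context on the profiling reads at the end of this hunk: CL_PROFILING_COMMAND_START and CL_PROFILING_COMMAND_END are device timestamps in nanoseconds, so the wait=True path can turn the two counters into a kernel runtime roughly as follows (a hedged sketch of the conversion, not a line from the diff):

    # start and end are the ctypes.c_uint64 values filled in by clGetEventProfilingInfo above;
    # OpenCL profiling counters are in nanoseconds, so scale to seconds for the Optional[float] return.
    elapsed_seconds = float(end.value - start.value) * 1e-9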
@@ -58,19 +59,19 @@ class CLAllocator(LRUAllocator):
   def __init__(self, device:CLDevice):
     self.device = device
     super().__init__()
-  def _alloc(self, size:int) -> cl.cl_mem:
+  def _alloc(self, size:int) -> ctypes._CData:
     return checked(cl.clCreateBuffer(self.device.context, cl.CL_MEM_READ_WRITE, size, None, ctypes.byref(status := ctypes.c_int32())), status)
-  def _alloc_with_options(self, size:int, options:BufferOptions) -> cl.cl_mem:
+  def _alloc_with_options(self, size:int, options:BufferOptions) -> ctypes._CData:
     if options.image is not None:
       return checked(cl.clCreateImage2D(self.device.context, cl.CL_MEM_READ_WRITE,
                                         cl.cl_image_format(cl.CL_RGBA, {2: cl.CL_HALF_FLOAT, 4: cl.CL_FLOAT}[options.image.itemsize]),
                                         options.image.shape[1], options.image.shape[0], 0, None, ctypes.byref(status := ctypes.c_int32())), status)
     else: return self._alloc(size)
-  def _free(self, buf:cl.cl_mem): check(cl.clReleaseMemObject(buf))
-  def copyin(self, dest:cl.cl_mem, src:memoryview):
+  def _free(self, buf:ctypes._CData): check(cl.clReleaseMemObject(buf))
+  def copyin(self, dest:ctypes._CData, src:memoryview):
     check(cl.clEnqueueWriteBuffer(self.device.queue, dest, False, 0, len(src)*src.itemsize, from_mv(src), 0, None, None))
     self.device.pending_copyin.append(src) # NOTE: these can't be freed until the GPU actually executes this command
-  def copyout(self, dest:memoryview, src:cl.cl_mem):
+  def copyout(self, dest:memoryview, src:ctypes._CData):
     check(cl.clEnqueueReadBuffer(self.device.queue, src, False, 0, len(dest)*dest.itemsize, from_mv(dest), 0, None, None))
     self.device.synchronize()
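The allocator hunk leans on two small error-handling helpers, check and checked, that the diff calls but does not define here. A hedged sketch of what they amount to (assumed shapes, not code copied from tinygrad): check validates a cl_int return code, while checked validates the errcode_ret out-parameter that creator calls such as clCreateBuffer and clCreateImage2D write into.

    import ctypes

    def check(status: int):
      # OpenCL APIs return CL_SUCCESS (0) on success and a negative error code otherwise.
      if status != 0: raise RuntimeError(f"OpenCL error {status}")

    def checked(ret, status: ctypes.c_int32):
      # Creator APIs report errors through an out-parameter instead of the return value.
      check(status.value)
      return ret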