hsa flush hdp (#3591)

* hsa flush hdp

* use _alloc()
This commit is contained in:
nimlgen
2024-03-03 15:55:07 +03:00
committed by GitHub
parent 660df3cff1
commit 640dc0fc51
3 changed files with 30 additions and 35 deletions

View File

@@ -136,15 +136,12 @@ def scan_agents():
hsa.hsa_iterate_agents(__scan_agents, None)
return agents
def find_memory_pool(agent, segtyp=-1, flags=-1, location=-1):
def find_memory_pool(agent, segtyp=-1, location=-1):
@ctypes.CFUNCTYPE(hsa.hsa_status_t, hsa.hsa_amd_memory_pool_t, ctypes.c_void_p)
def __filter_amd_memory_pools(mem_pool, data):
check(hsa.hsa_amd_memory_pool_get_info(mem_pool, hsa.HSA_AMD_MEMORY_POOL_INFO_SEGMENT, ctypes.byref(segment := hsa.hsa_amd_segment_t())))
if segtyp >= 0 and segment.value != segtyp: return hsa.HSA_STATUS_SUCCESS
check(hsa.hsa_amd_memory_pool_get_info(mem_pool, hsa.HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, ctypes.byref(fgs := hsa.hsa_amd_memory_pool_global_flag_t()))) # noqa: E501
if flags >= 0 and (fgs.value & flags) == flags: return hsa.HSA_STATUS_SUCCESS
check(hsa.hsa_amd_memory_pool_get_info(mem_pool, hsa.HSA_AMD_MEMORY_POOL_INFO_LOCATION, ctypes.byref(loc:=hsa.hsa_amd_memory_pool_location_t())))
if location >= 0 and loc.value != location: return hsa.HSA_STATUS_SUCCESS

View File

@@ -48,12 +48,7 @@ class HSAGraph(MultiDeviceJITGraph):
kernargs_size: Dict[HSADevice, int] = collections.defaultdict(int)
for ji in self.jit_cache:
if isinstance(ji.prg, CompiledASTRunner): kernargs_size[cast(HSADevice, ji.prg.device)] += (ctypes.sizeof(ji.prg.clprg.args_struct_t)+15) & ~15
kernargs_ptrs: Dict[Compiled, int] = {}
for dev,sz in kernargs_size.items():
kernargs_ptrs[dev] = init_c_var(ctypes.c_void_p(),
lambda x: check(hsa.hsa_amd_memory_pool_allocate(dev.kernargs_pool, sz, 0, ctypes.byref(x)))).value
check(hsa.hsa_amd_agents_allow_access(1, ctypes.byref(dev.agent), None, kernargs_ptrs[dev]))
kernargs_ptrs: Dict[Compiled, int] = {dev:dev.allocator._alloc(sz) for dev,sz in kernargs_size.items()}
# Fill initial arguments.
self.ji_kargs_structs: Dict[int, ctypes.Structure] = {}
@@ -138,6 +133,7 @@ class HSAGraph(MultiDeviceJITGraph):
self.packets[j].grid_size_z = gl[2] * lc[2]
for dev in self.devices:
dev.flush_hdp()
dev.hw_queue.blit_packets(self.virt_aql_queues[dev].queue_base, self.virt_aql_queues[dev].packets_count)
for transfer_data in self.transfers:

View File

@@ -3,7 +3,7 @@ import ctypes, functools, subprocess, io, atexit
from typing import Tuple, TypeVar, List, Dict
import tinygrad.runtime.autogen.hsa as hsa
from tinygrad.helpers import DEBUG, init_c_var, from_mv, round_up, to_mv, init_c_struct_t
from tinygrad.device import Compiled, LRUAllocator
from tinygrad.device import Compiled, LRUAllocator, BufferOptions
from tinygrad.runtime.ops_hip import HIPCompiler
from tinygrad.runtime.driver.hsa import check, scan_agents, find_memory_pool, AQLQueue
@@ -46,6 +46,7 @@ class HSAProgram:
args_st = self.args_struct_t.from_address(kernargs)
for i in range(len(args)): args_st.__setattr__(f'f{i}', args[i])
for i in range(len(vals)): args_st.__setattr__(f'v{i}', vals[i])
self.device.flush_hdp()
signal = self.device.hw_queue.submit_kernel(self, global_size, local_size, kernargs, need_signal=wait)
if wait:
@@ -66,6 +67,13 @@ class HSAAllocator(LRUAllocator):
check(hsa.hsa_amd_agents_allow_access(len(HSADevice.agents[hsa.HSA_DEVICE_TYPE_GPU]), c_agents, None, buf))
return buf.value
def _alloc_with_options(self, size:int, options:BufferOptions):
# Allocate `size` bytes according to `options`; only host-visible allocation is supported here.
# NOTE(review): diff rendering has stripped Python indentation from this hunk — body lines below
# belong inside the method/if as in the original file.
if options.host:
# Host allocation comes from the CPU memory pool, then is made visible to both the CPU agent
# and this device's GPU agent (2 agents passed explicitly as an hsa_agent_t array).
check(hsa.hsa_amd_memory_pool_allocate(HSADevice.cpu_mempool, size, 0, ctypes.byref(mem := ctypes.c_void_p())))
check(hsa.hsa_amd_agents_allow_access(2, (hsa.hsa_agent_t*2)(HSADevice.cpu_agent, self.device.agent), None, mem))
# Returns the raw pointer as a plain int (mem.value), matching what _alloc returns.
return mem.value
# Any other BufferOptions combination is unsupported by this allocator path.
else: raise Exception("no options")
def _free(self, opaque:T):
# Free a buffer previously returned by _alloc/_alloc_with_options.
# A full system synchronize first guarantees no in-flight queue/SDMA work still references
# the buffer before it is returned to the pool.
HSADevice.synchronize_system()
check(hsa.hsa_amd_memory_pool_free(opaque))
@@ -74,9 +82,7 @@ class HSAAllocator(LRUAllocator):
# Async copyin sync model uses barriers on the main hw queue, since barriers are guaranteed to execute in order with all other packets.
copy_signal = self.device.alloc_signal(reusable=True)
sync_signal = self.device.hw_queue.submit_barrier(need_signal=True)
c_agents = (hsa.hsa_agent_t*2)(HSADevice.cpu_agent, self.device.agent)
check(hsa.hsa_amd_memory_pool_allocate(HSADevice.cpu_mempool, src.nbytes, 0, ctypes.byref(mem := ctypes.c_void_p())))
check(hsa.hsa_amd_agents_allow_access(2, c_agents, None, mem))
mem = self._alloc_with_options(src.nbytes, BufferOptions(host=True))
ctypes.memmove(mem, from_mv(src), src.nbytes)
check(hsa.hsa_amd_memory_async_copy_on_engine(dest, self.device.agent, mem, HSADevice.cpu_agent, src.nbytes,
1, ctypes.byref(sync_signal), copy_signal, hsa.HSA_AMD_SDMA_ENGINE_0, True))
@@ -87,12 +93,7 @@ class HSAAllocator(LRUAllocator):
sync_signal = self.device.hw_queue.submit_barrier(need_signal=True)
if not hasattr(self, 'hb'):
c_agents = (hsa.hsa_agent_t*2)(HSADevice.cpu_agent, self.device.agent)
self.hb = []
for _ in range(2):
check(hsa.hsa_amd_memory_pool_allocate(HSADevice.cpu_mempool, CHUNK_SIZE, 0, ctypes.byref(mem := ctypes.c_void_p())))
check(hsa.hsa_amd_agents_allow_access(2, c_agents, None, mem))
self.hb.append(mem.value)
self.hb = [self._alloc_with_options(CHUNK_SIZE, BufferOptions(host=True)) for _ in range(2)]
self.hb_signals = [self.device.alloc_signal(reusable=False) for _ in range(2)]
self.hb_polarity = 0
self.sdma = [hsa.HSA_AMD_SDMA_ENGINE_0, hsa.HSA_AMD_SDMA_ENGINE_1]
@@ -159,7 +160,6 @@ class HSADevice(Compiled):
self.device_id = int(device.split(":")[1]) if ":" in device else 0
self.agent = HSADevice.agents[hsa.HSA_DEVICE_TYPE_GPU][self.device_id]
self.gpu_mempool = find_memory_pool(self.agent, segtyp=hsa.HSA_AMD_SEGMENT_GLOBAL, location=hsa.HSA_AMD_MEMORY_POOL_LOCATION_GPU)
self.kernargs_pool = find_memory_pool(self.agent, segtyp=hsa.HSA_AMD_SEGMENT_GLOBAL, flags=hsa.HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT)
self.hw_queue = AQLQueue(self)
HSADevice.devices.append(self)
@@ -169,20 +169,19 @@ class HSADevice(Compiled):
check(hsa.hsa_system_get_info(hsa.HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY, ctypes.byref(gpu_freq := ctypes.c_uint64())))
self.clocks_to_time: float = 1 / gpu_freq.value
self.kernarg_pool_sz = 16 << 20
self.kernarg_start_addr = init_c_var(ctypes.c_void_p(), lambda x: check(hsa.hsa_amd_memory_pool_allocate(self.kernargs_pool, self.kernarg_pool_sz, 0, ctypes.byref(x)))).value # noqa: E501
self.kernarg_next_addr = self.kernarg_start_addr
check(hsa.hsa_agent_get_info(self.agent, hsa.HSA_AMD_AGENT_INFO_HDP_FLUSH, ctypes.byref(hdp_flush := hsa.hsa_amd_hdp_flush_t())))
self.hdp_flush = hdp_flush
self.delayed_free: List[ctypes.c_void_p] = []
self.signal_pool: List[hsa.hsa_signal_t] = []
self.delayed_free: List[int] = []
self.reusable_signals: List[hsa.hsa_signal_t] = []
for _ in range(4096):
check(hsa.hsa_amd_signal_create(1, 0, None, 0, ctypes.byref(signal := hsa.hsa_signal_t())))
self.signal_pool.append(signal)
from tinygrad.runtime.graph.hsa import HSAGraph
super().__init__(device, HSAAllocator(self), HSACompiler(self.arch), functools.partial(HSAProgram, self), HSAGraph)
# Finish init: preallocate some signals + space for kernargs
self.signal_pool = [init_c_var(hsa.hsa_signal_t(), lambda x: check(hsa.hsa_signal_create(1, 0, None, ctypes.byref(x)))) for _ in range(4096)]
self._new_kernargs_region(16 << 20) # initial region size is 16mb
def synchronize(self):
self.hw_queue.wait()
@@ -208,12 +207,15 @@ class HSADevice(Compiled):
return signal
def alloc_kernargs(self, sz):
if self.kernarg_next_addr + sz >= self.kernarg_start_addr + self.kernarg_pool_sz:
self.delayed_free.append(self.kernarg_start_addr)
self.kernarg_pool_sz = int(self.kernarg_pool_sz * 2)
self.kernarg_start_addr = init_c_var(ctypes.c_void_p(), lambda x: check(hsa.hsa_amd_memory_pool_allocate(self.kernargs_pool, self.kernarg_pool_sz, 0, ctypes.byref(x)))).value # noqa: E501
self.kernarg_next_addr = self.kernarg_start_addr
if self.kernarg_next_addr + sz >= self.kernarg_start_addr + self.kernarg_pool_sz: self._new_kernargs_region(int(self.kernarg_pool_sz * 2))
result = self.kernarg_next_addr
self.kernarg_next_addr = (self.kernarg_next_addr + sz + 15) & (~15) # align to 16 bytes
return result
def _new_kernargs_region(self, sz:int):
# Replace the current kernarg bump-allocator region with a fresh one of `sz` bytes.
# The old region (if any) cannot be freed immediately — kernels may still be reading it —
# so it is queued on delayed_free instead.
if hasattr(self, 'kernarg_start_addr'): self.delayed_free.append(self.kernarg_start_addr)
# New region comes from the device allocator's raw _alloc (GPU-visible memory).
self.kernarg_start_addr: int = self.allocator._alloc(sz)
# Reset the bump pointer to the start of the new region.
self.kernarg_next_addr = self.kernarg_start_addr
self.kernarg_pool_sz: int = sz
def flush_hdp(self): self.hdp_flush.HDP_MEM_FLUSH_CNTL[0] = 1