diff --git a/README.md b/README.md
index d49804fb58..bb98976b35 100644
--- a/README.md
+++ b/README.md
@@ -87,6 +87,7 @@ tinygrad already supports numerous accelerators, including:
 - [x] [CUDA](tinygrad/runtime/ops_cuda.py)
 - [x] [AMD](tinygrad/runtime/ops_amd.py)
 - [x] [NV](tinygrad/runtime/ops_nv.py)
+- [x] [QCOM](tinygrad/runtime/ops_qcom.py)
 
 And it is easy to add more! Your accelerator of choice only needs to support a total of ~25 low level ops.
 
diff --git a/tinygrad/runtime/ops_qcom.py b/tinygrad/runtime/ops_qcom.py
index 624d522c79..ca0b4b2c52 100644
--- a/tinygrad/runtime/ops_qcom.py
+++ b/tinygrad/runtime/ops_qcom.py
@@ -280,8 +280,6 @@ class QCOMProgram(HCQProgram):
     if hasattr(self, 'lib_gpu'): self.device.allocator.free(self.lib_gpu, self.lib_gpu.size, options=BufferOptions(cpu_access=True, nolru=True))
 
 class QCOMAllocator(HCQAllocator):
-  def __init__(self, device:QCOMDevice): super().__init__(device)
-
   def _alloc(self, size:int, options:BufferOptions) -> HCQBuffer:
     if options.image is not None:
       pitch = round_up(round_up(options.image.shape[1], 16) * (4 * options.image.base.itemsize), 1 << (pitchalign:=6))