mirror of https://github.com/tinygrad/tinygrad.git (synced 2026-01-10 07:28:15 -05:00)
disable disable_abstract_method [pr] (#7815)
@@ -54,7 +54,7 @@ confidence=
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use "--disable=all --enable=classes
 # --disable=W"
-disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0105,E0401
+disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0105,E0401,abstract-method
 # E1101 for function binding
 # W0221 for Function class
 # W0105 for comment strings
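A note on what this rc-level entry silences: pylint's abstract-method check (W0223) fires when a class inherits a method that pylint considers abstract (declared with @abstractmethod, or whose body only raises NotImplementedError) without overriding it. Appending abstract-method to disable= turns the check off repo-wide, which is what lets every per-class inline comment below be dropped. A minimal sketch of the warning, with hypothetical names:

class Base:
  def _copyin(self, dest, src): raise NotImplementedError("need copyin")  # pylint counts this body as abstract

class Cached(Base):  # W0223: _copyin is abstract in Base but not overridden here
  pass               # previously silenced per-class; now silenced globally via .pylintrc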
@@ -151,7 +151,7 @@ class Allocator:
   # def _offset(self, buf, size:int, offset:int):
   # def _transfer(self, dest, src, sz:int, src_dev, dest_dev):
 
-class LRUAllocator(Allocator): # pylint: disable=abstract-method
+class LRUAllocator(Allocator):
   """
   The LRU Allocator is responsible for caching buffers.
   It ensures that buffers are not freed until it is absolutely necessary, optimizing performance.
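The commented-out _offset/_transfer signatures in the context above document optional hooks: a backend's allocator defines them only if it supports the feature, and callers feature-detect at runtime. A hedged sketch of the pattern, with simplified names (not tinygrad's exact code):

class BaseAllocator:
  def _alloc(self, size:int): raise NotImplementedError("need alloc")  # required hook; pylint treats it as abstract
  # optional hooks (_offset, _transfer) are deliberately left undeclared

class CachingAllocator(BaseAllocator):  # a mid-hierarchy class like LRUAllocator still lacks _alloc,
  pass                                  # which is exactly what W0223 flagged before this change

def can_transfer(allocator) -> bool:
  return hasattr(allocator, "_transfer")  # hypothetical helper: probe optional hooks with hasattr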
@@ -69,7 +69,7 @@ def get_input_replace(jit_cache: List[ExecItem], input_rawbuffers:List[Buffer])
         input_replace[(j,i)] = input_rawbuffers.index(a)
   return input_replace
 
-class GraphRunner(Runner): # pylint: disable=abstract-method
+class GraphRunner(Runner):
   def __init__(self, jit_cache: List[ExecItem], input_rawbuffers: List[Buffer], var_vals: Dict[Variable, int]):
     self.jit_cache = jit_cache
     self.input_replace:Dict[Tuple[int, int], int] = get_input_replace(jit_cache, input_rawbuffers)
@@ -128,7 +128,7 @@ class GraphRunner(Runner): # pylint: disable=abstract-method
     return list({id(x):x for x in wait_nodes}.values())
 
 # a marker for your graph supporting multiple devices of the same type
-class MultiGraphRunner(GraphRunner): pass # pylint: disable=abstract-method
+class MultiGraphRunner(GraphRunner): pass
 
 ReturnType = TypeVar('ReturnType')
 @dataclass
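Per the marker comment above, MultiGraphRunner adds no behavior of its own; it only tags a graph implementation with a capability. A minimal sketch of the marker-class idiom (the isinstance helper is a hypothetical illustration, not tinygrad's code):

class GraphRunner: ...
class MultiGraphRunner(GraphRunner): pass  # no new methods, just a capability tag

def supports_multidevice(runner: GraphRunner) -> bool:
  return isinstance(runner, MultiGraphRunner)  # generic code can branch on the tag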
@@ -72,7 +72,7 @@ class SimpleMathTrait:
   def __le__(self, x): return self.le(x)
   # NOTE: __eq__ isn't overridden, and means the same thing as is by default
 
-class MathTrait(SimpleMathTrait): # pylint: disable=abstract-method
+class MathTrait(SimpleMathTrait):
   # TODO: move to Tensor when new backward is done
   def lshift(self, x, reverse=False): return self._binop(Ops.SHL, x, reverse)
   def rshift(self, x, reverse=False): return self._binop(Ops.SHR, x, reverse)
@@ -54,7 +54,7 @@ class AMDSignal(HCQSignal):
       kfd.AMDKFD_IOC_WAIT_EVENTS(AMDDevice.kfd, events_ptr=ctypes.addressof(self._evt_array), num_events=1, wait_for_all=1, timeout=1000)
     raise RuntimeError(f"wait_signal: not set to {value}, but {self._signal[0]}, {timeout} ms TIMEOUT!")
 
-class AMDComputeQueue(HWQueue): # pylint: disable=abstract-method
+class AMDComputeQueue(HWQueue):
   def __init__(self):
     self.cmd_idx_to_local_offset, self.cmd_idx_to_global_offset, self.cmd_idx_to_dispatch_packet = {}, {}, {}
     super().__init__()
@@ -184,7 +184,7 @@ class AMDComputeQueue(HWQueue): # pylint: disable=abstract-method
     dev.compute_queue.doorbell[0] = dev.compute_queue.put_value
 
 SDMA_MAX_COPY_SIZE = 0x400000
-class AMDCopyQueue(HWQueue): # pylint: disable=abstract-method
+class AMDCopyQueue(HWQueue):
   def __init__(self):
     self.internal_cmd_sizes, self.copy_cmds_per_copy = [], {}
     super().__init__()
@@ -2,7 +2,7 @@ import numpy as np
 from tinygrad.helpers import flat_mv
 from tinygrad.device import Compiled, Allocator
 
-class NpyAllocator(Allocator): # pylint: disable=abstract-method
+class NpyAllocator(Allocator):
   def _copyout(self, dest:memoryview, src:np.ndarray): dest[:] = flat_mv(np.require(src, requirements='C').data)
 
 class NpyDevice(Compiled):
@@ -83,7 +83,7 @@ class NVSignal(HCQSignal):
   def _get_timestamp(self) -> decimal.Decimal: return decimal.Decimal(self._signal[1]) / decimal.Decimal(1000)
   def _set_value(self, new_value:int): self._signal[0] = new_value
 
-class NVCommandQueue(HWQueue[NVSignal, 'NVDevice', 'NVProgram', 'NVArgsState']): # pylint: disable=abstract-method
+class NVCommandQueue(HWQueue[NVSignal, 'NVDevice', 'NVProgram', 'NVArgsState']):
   def __del__(self):
     if self.binded_device is not None: self.binded_device.allocator.free(self.hw_page, self.hw_page.size, BufferSpec(cpu_access=True, nolru=True))
@@ -132,7 +132,7 @@ class NVCommandQueue(HWQueue[NVSignal, 'NVDevice', 'NVProgram', 'NVArgsState']):
     dev.gpu_mmio[0x90 // 4] = gpfifo.token
     gpfifo.put_value += 1
 
-class NVComputeQueue(NVCommandQueue): # pylint: disable=abstract-method
+class NVComputeQueue(NVCommandQueue):
   def __init__(self):
     self.cmd_idx_to_qmd, self.cmd_idx_to_signal_id, self.cmd_idx_to_global_dims, self.cmd_idx_to_local_dims = {}, {}, {}, {}
     super().__init__()
@@ -187,7 +187,7 @@ class NVComputeQueue(NVCommandQueue): # pylint: disable=abstract-method
 
   def _submit(self, dev): self._submit_to_gpfifo(dev, cast(NVDevice, dev).compute_gpfifo)
 
-class NVCopyQueue(NVCommandQueue): # pylint: disable=abstract-method
+class NVCopyQueue(NVCommandQueue):
   def _copy(self, dest, src, copy_size):
     self.q += [nvmethod(4, nv_gpu.NVC6B5_OFFSET_IN_UPPER, 4), *data64(src), *data64(dest)]
     self.q += [nvmethod(4, nv_gpu.NVC6B5_LINE_LENGTH_IN, 1), copy_size]
@@ -44,7 +44,7 @@ class QCOMSignal(HCQSignal):
   def _get_timestamp(self) -> decimal.Decimal: return decimal.Decimal(self._signal[1]) / decimal.Decimal(19.2) # based on the 19.2MHz always-on timer
   def _set_value(self, new_value:int): self._signal[0] = new_value
 
-class QCOMComputeQueue(HWQueue): # pylint: disable=abstract-method
+class QCOMComputeQueue(HWQueue):
   def __init__(self):
     self.cmd_idx_to_dims = {}
     super().__init__()
@@ -474,7 +474,7 @@ class HCQCompiled(Compiled, Generic[SignalType]):
 # Protocol for hcq compatible allocators for allocated buffers to contain VA address and it's size.
 class HCQBuffer(Protocol): va_addr:int; size:int # noqa: E702
 
-class HCQAllocator(LRUAllocator, Generic[DeviceType]): # pylint: disable=abstract-method
+class HCQAllocator(LRUAllocator, Generic[DeviceType]):
   """
   A base allocator class compatible with the HCQ (Hardware Command Queue) API.
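HCQBuffer in the context above is a typing.Protocol: any object exposing an integer va_addr and size matches structurally, with no inheritance from HCQBuffer required. A small self-contained sketch of that mechanism, with hypothetical names:

from typing import Protocol

class VABuffer(Protocol):  # stand-in for a protocol like HCQBuffer
  va_addr: int
  size: int

class MyBuf:  # note: never inherits from VABuffer
  def __init__(self): self.va_addr, self.size = 0x1000, 4096

def describe(buf: VABuffer) -> str:
  return f"buffer at {buf.va_addr:#x}, {buf.size} bytes"

print(describe(MyBuf()))  # accepted: MyBuf has both attributes, so it satisfies the protocol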
@@ -99,7 +99,7 @@ def _broadcast_shape(*shapes:Tuple[sint, ...]) -> Tuple[sint, ...]:
 
 ReductionStr = Literal["mean", "sum", "none"]
 
-class Tensor(SimpleMathTrait): # pylint: disable=abstract-method
+class Tensor(SimpleMathTrait):
   """
   A `Tensor` is a multi-dimensional matrix containing elements of a single data type.