From 123ea051e60fc2151aa73f63ba4ccb9481740dca Mon Sep 17 00:00:00 2001
From: valar <68338647+76616c6172@users.noreply.github.com>
Date: Sun, 12 Nov 2023 11:04:20 -0800
Subject: [PATCH] refactor/ci: delete many `# type: ignore` (#2281)

* refactor/ci: delete many `# type: ignore`

* replace `axis.__class__ is int` with `isinstance(axis, int)` to make mypy happy

* add `--warn-unused-ignores` to mypy flag

refs #2240

* ci: move `--warn-unused-ignores` flag to mypy config

refs #2240
---
 extra/helpers.py               | 2 +-
 mypy.ini                       | 1 +
 tinygrad/graph.py              | 2 +-
 tinygrad/lazy.py               | 2 +-
 tinygrad/renderer/llvmir.py    | 2 +-
 tinygrad/renderer/triton.py    | 2 +-
 tinygrad/runtime/lib.py        | 2 +-
 tinygrad/runtime/ops_cuda.py   | 8 ++++----
 tinygrad/runtime/ops_gpu.py    | 4 ++--
 tinygrad/runtime/ops_llvm.py   | 2 +-
 tinygrad/runtime/ops_metal.py  | 2 +-
 tinygrad/runtime/ops_shm.py    | 4 ++--
 tinygrad/runtime/ops_webgpu.py | 4 ++--
 tinygrad/tensor.py             | 4 ++--
 14 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/extra/helpers.py b/extra/helpers.py
index aa11301320..c580a1a69c 100644
--- a/extra/helpers.py
+++ b/extra/helpers.py
@@ -1,5 +1,5 @@
 import multiprocessing, subprocess
-import cloudpickle # type: ignore
+import cloudpickle
 from typing import Any
 
 def _early_exec_process(qin, qout):
diff --git a/mypy.ini b/mypy.ini
index fdeddfdec8..0708002454 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -6,3 +6,4 @@ check_untyped_defs = True
 explicit_package_bases = True
 warn_unreachable = True
 warn_redundant_casts = True
+warn_unused_ignores = True
diff --git a/tinygrad/graph.py b/tinygrad/graph.py
index fd6f9c28f5..ce35c1b2de 100644
--- a/tinygrad/graph.py
+++ b/tinygrad/graph.py
@@ -1,6 +1,6 @@
 import os, atexit, functools
 try:
-  import networkx as nx # type: ignore
+  import networkx as nx
 except ImportError:
   nx = None # graph won't work
 from collections import defaultdict
diff --git a/tinygrad/lazy.py b/tinygrad/lazy.py
index bba0d08fb3..6878e8f131 100644
--- a/tinygrad/lazy.py
+++ b/tinygrad/lazy.py
@@ -229,7 +229,7 @@ class LazyBuffer:
 
     if MERGE_ELEMENTWISE_OPS:
       # remove the buffers from any (childless) BinaryOps that feed into this
-      _srcs = tuple([x.op if x.optype == BinaryOps and not x.children and not x.realized else x for x in srcs]) # type: ignore
+      _srcs = tuple([x.op if x.optype == BinaryOps and not x.children and not x.realized else x for x in srcs])
       # TODO: needs general merge limiting
       if out_device != "WEBGPU" or len(dedup([x.base for _src in _srcs for x in _src.buffers if not x.is_unrealized_const()])) < 7: srcs = _srcs # type: ignore
 
diff --git a/tinygrad/renderer/llvmir.py b/tinygrad/renderer/llvmir.py
index bfe2d75d13..7ab070500c 100644
--- a/tinygrad/renderer/llvmir.py
+++ b/tinygrad/renderer/llvmir.py
@@ -1,5 +1,5 @@
 from typing import Final, Dict, Callable, Any, List, Optional, Tuple
-from llvmlite import ir # type: ignore
+from llvmlite import ir
 from tinygrad.codegen.linearizer import UOps, UOp
 from tinygrad.helpers import dtypes
 from tinygrad.ops import Op, UnaryOps, BinaryOps, TernaryOps
diff --git a/tinygrad/renderer/triton.py b/tinygrad/renderer/triton.py
index 93f92db3c7..e06aaffa9c 100644
--- a/tinygrad/renderer/triton.py
+++ b/tinygrad/renderer/triton.py
@@ -3,7 +3,7 @@ from collections import defaultdict
 from tinygrad.ops import UnaryOps, BinaryOps, TernaryOps, Op
 from tinygrad.helpers import DType, dtypes, ImageDType, DEBUG, getenv
 from tinygrad.codegen.linearizer import UOp, UOps
-from triton.compiler import compile as triton_compile # type: ignore
+from triton.compiler import compile as triton_compile
 import linecache
 import math
 import re
diff --git a/tinygrad/runtime/lib.py b/tinygrad/runtime/lib.py
index 39513291a2..1b377ed5ca 100644
--- a/tinygrad/runtime/lib.py
+++ b/tinygrad/runtime/lib.py
@@ -38,7 +38,7 @@ class RawBufferCopyIn(RawBuffer):
 class RawBufferMapped(RawBufferCopyIn):
   def _buffer(self) -> memoryview: raise NotImplementedError("must be implemented")
   # NOTE: this metadata prevents the backing buffer from being freed. hack can be removed with PEP688
-  def buffer_view(self) -> np.ndarray: return np.frombuffer(self._buffer(), dtype=np.dtype(self.dtype.np, metadata={"backing": self}), count=self.size) # type: ignore
+  def buffer_view(self) -> np.ndarray: return np.frombuffer(self._buffer(), dtype=np.dtype(self.dtype.np, metadata={"backing": self}), count=self.size)
   def toCPU(self) -> np.ndarray: return self.buffer_view().copy() # Need a copy, since jit will write to the same buffer.
   def _copyin(self, x:np.ndarray) -> None: np.copyto(self.buffer_view(), x.reshape(-1))
 
diff --git a/tinygrad/runtime/ops_cuda.py b/tinygrad/runtime/ops_cuda.py
index 0ac8a40ad1..1e3585bb03 100644
--- a/tinygrad/runtime/ops_cuda.py
+++ b/tinygrad/runtime/ops_cuda.py
@@ -2,7 +2,7 @@ import subprocess, time, re, hashlib, tempfile
 from pathlib import Path
 from typing import Optional, Tuple
 import numpy as np
-from pycuda.compiler import compile as cuda_compile # type: ignore
+from pycuda.compiler import compile as cuda_compile
 from tinygrad.helpers import DEBUG, getenv, colored, diskcache
 from tinygrad.ops import Compiled
 from tinygrad.runtime.lib import RawBufferCopyInOut, RawMallocBuffer, LRUAllocator
@@ -42,18 +42,18 @@ if getenv("CUDACPU", 0) == 1:
     class device:
       compute_capability = lambda: (3,5) # pylint: disable=unnecessary-lambda # noqa: E731
     get_device = lambda: context.device # pylint: disable=unnecessary-lambda # noqa: E731
-  import pycuda.driver # type: ignore
+  import pycuda.driver
   pycuda.driver.Context = context
   RawCUDABuffer = RawMallocBuffer
 else:
-  import pycuda.autoprimaryctx # type: ignore # pylint: disable=unused-import # noqa: F401
+  import pycuda.autoprimaryctx # pylint: disable=unused-import # noqa: F401
   import pycuda.driver as cuda # type: ignore
   class CUDAAllocator(LRUAllocator):
     def __init__(self): super().__init__(self._get_cur_free_space(None))
     def _do_alloc(self, size, dtype, device, **kwargs): return cuda.mem_alloc(size * dtype.itemsize) # type: ignore
     def _cached_bufkey(self, size, dtype, device): return (device, size*dtype.itemsize) # Buffers of the same length could be reused, no matter what dtype.
     def _get_cur_free_space(self, device): return cuda.mem_get_info()[0] # type: ignore
-  CUDAAlloc = CUDAAllocator() # type: ignore
+  CUDAAlloc = CUDAAllocator()
   class RawCUDABuffer(RawBufferCopyInOut): # type: ignore
     def __init__(self, size, dtype): super().__init__(size, dtype, allocator=CUDAAlloc)
     def _copyin(self, x:np.ndarray, stream:Optional[cuda.Stream]=None): cuda.memcpy_htod_async(self._buf, x.ravel(), stream) # type: ignore
diff --git a/tinygrad/runtime/ops_gpu.py b/tinygrad/runtime/ops_gpu.py
index 19ee1b801b..7d4b99f580 100644
--- a/tinygrad/runtime/ops_gpu.py
+++ b/tinygrad/runtime/ops_gpu.py
@@ -3,7 +3,7 @@ import os
 os.environ['PYOPENCL_NO_CACHE'] = '1'
 import pathlib
 import numpy as np
-import pyopencl as cl # type: ignore
+import pyopencl as cl
 from typing import Optional, List, Tuple
 from tinygrad.helpers import DEBUG, getenv, prod, ImageDType, OSX, fromimport, diskcache
 from tinygrad.ops import Compiled
@@ -71,7 +71,7 @@ def compile_gpu(prg:str) -> bytes:
 
 class CLProgram:
   def __init__(self, name:str, prg:bytes, argdtypes=None, options=None):
-    self.name, self.clprograms = name, [cl.Program(ctx, ctx.devices, [prg]*len(ctx.devices)) for ctx in CL.cl_ctxs] # type: ignore
+    self.name, self.clprograms = name, [cl.Program(ctx, ctx.devices, [prg]*len(ctx.devices)) for ctx in CL.cl_ctxs]
     self._clprgs = [clprogram.build(options=options) for clprogram in self.clprograms]
     self.clprgs = [clprg.__getattr__(name) for clprg in self._clprgs]
     if DEBUG >= 5 and not OSX:
diff --git a/tinygrad/runtime/ops_llvm.py b/tinygrad/runtime/ops_llvm.py
index 9cfaa45a28..708e6b827d 100644
--- a/tinygrad/runtime/ops_llvm.py
+++ b/tinygrad/runtime/ops_llvm.py
@@ -7,7 +7,7 @@ from tinygrad.codegen.kernel import LinearizerOptions
 from tinygrad.renderer.llvmir import uops_to_llvm_ir
 from tinygrad.runtime.lib import RawMallocBuffer
 
-import llvmlite.binding as llvm # type: ignore
+import llvmlite.binding as llvm
 
 LLVMOPT = bool(getenv("LLVMOPT"))
 
diff --git a/tinygrad/runtime/ops_metal.py b/tinygrad/runtime/ops_metal.py
index 03f03b0ad1..30ab1523ef 100644
--- a/tinygrad/runtime/ops_metal.py
+++ b/tinygrad/runtime/ops_metal.py
@@ -1,6 +1,6 @@
 # pip3 install pyobjc-framework-Metal pyobjc-framework-Cocoa pyobjc-framework-libdispatch
 import os, subprocess, pathlib, ctypes, tempfile
-import Metal, Cocoa, libdispatch # type: ignore
+import Metal, Cocoa, libdispatch
 from typing import List, Any, Tuple
 from tinygrad.codegen.kernel import LinearizerOptions
 from tinygrad.helpers import prod, getenv, DEBUG, DType, dtypes, diskcache
diff --git a/tinygrad/runtime/ops_shm.py b/tinygrad/runtime/ops_shm.py
index 118656b8da..9cbf3af5c7 100644
--- a/tinygrad/runtime/ops_shm.py
+++ b/tinygrad/runtime/ops_shm.py
@@ -1,5 +1,5 @@
 import os, mmap
-try: import _posixshmem # type: ignore
+try: import _posixshmem
 except Exception: pass
 from typing import Callable, Dict
 from tinygrad.helpers import DType, OSX
@@ -16,7 +16,7 @@ class RawShmBuffer(RawBufferMapped):
       fd = _posixshmem.shm_open(device, os.O_RDWR, 0o600)
       # TODO: these flags are somewhat platform specific, but python doesn't expose the ones we need
       shm = mmap.mmap(fd, size * dtype.itemsize, flags=mmap.MAP_SHARED | 0x2000 | 0x008000)
-      shm.madvise(mmap.MADV_HUGEPAGE) # type: ignore
+      shm.madvise(mmap.MADV_HUGEPAGE)
       os.close(fd)
 
     super().__init__(size, dtype, shm)
diff --git a/tinygrad/runtime/ops_webgpu.py b/tinygrad/runtime/ops_webgpu.py
index b186bfe0dc..d9d43a33fb 100644
--- a/tinygrad/runtime/ops_webgpu.py
+++ b/tinygrad/runtime/ops_webgpu.py
@@ -1,13 +1,13 @@
 import numpy as np
 import functools
-from wgpu.utils._device import get_default_device # type: ignore
+from wgpu.utils._device import get_default_device
 from tinygrad.runtime.lib import RawBufferCopyIn, LRUAllocator
 from tinygrad.helpers import dtypes, DType
 from tinygrad.ops import Compiled
 from tinygrad.codegen.kernel import LinearizerOptions
 from tinygrad.renderer.cstyle import uops_to_cstyle
 from tinygrad.renderer.wgsl import WGSLLanguage
-import wgpu # type: ignore
+import wgpu
 
 wgpu_device = get_default_device()
 
diff --git a/tinygrad/tensor.py b/tinygrad/tensor.py
index bd8448488c..6d14c10148 100644
--- a/tinygrad/tensor.py
+++ b/tinygrad/tensor.py
@@ -109,7 +109,7 @@ class Tensor:
     # TODO: this is a hack for writing to DISK
     if self.device.startswith("DISK"):
       if x.__class__ is not Tensor: x = Tensor(x, device="CPU", dtype=self.dtype)
-      self.contiguous().realize().lazydata.realized._copyin(x.numpy()) # type: ignore
+      self.contiguous().realize().lazydata.realized._copyin(x.numpy())
       return self
     if x.__class__ is not Tensor: x = Tensor(x, device=self.device, dtype=self.dtype)
     assert self.shape == x.shape and self.device == x.device, f"assign shape mismatch {self.shape} != {x.shape} or device mismatch {self.device} != {x.device}"
@@ -427,7 +427,7 @@ class Tensor:
   # ***** reduce ops *****
 
   def _reduce(self, fxn:Type[Function], axis:Optional[Union[int, Tuple[int, ...]]]=None, keepdim=False) -> Tensor:
-    axis_: List[int] = list(range(len(self.shape))) if axis is None else ([axis] if axis.__class__ is int else list(axis)) # type: ignore
+    axis_: List[int] = list(range(len(self.shape))) if axis is None else ([axis] if isinstance(axis, int) else list(axis))
     axis_ = [x if x >= 0 else x+len(self.shape) for x in axis_]
     shape = [s for i,s in enumerate(self.shape) if i not in axis_]
     ret = fxn.apply(self, new_shape=tuple([1 if i in axis_ else s for i,s in enumerate(self.shape)]))
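
A note on the two substantive changes, as a minimal sketch rather than part of the patch itself: per the
commit message, the `axis.__class__ is int` check in `Tensor._reduce` needed a `# type: ignore`, while
`isinstance(axis, int)` is a type guard mypy understands, so the ignore can be dropped. A hypothetical,
standalone illustration (`to_axes` is an invented name, not a tinygrad function):

    from typing import List, Optional, Tuple, Union

    def to_axes(axis: Optional[Union[int, Tuple[int, ...]]], ndim: int) -> List[int]:
      # isinstance() narrows `axis` to int in this branch, so mypy accepts [axis] without an ignore
      if axis is None: return list(range(ndim))
      return [axis] if isinstance(axis, int) else list(axis)

    print(to_axes(None, 3))   # [0, 1, 2]
    print(to_axes(1, 3))      # [1]
    print(to_axes((0, 2), 3)) # [0, 2]

With `warn_unused_ignores = True` in mypy.ini, a plain `mypy` run reports any `# type: ignore` comment
that no longer suppresses an error, which is how the stale ignores deleted here surface and stay out
of the tree once the flag lives in the CI-checked config.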