use class Foo: instead of class Foo(): (#1797)

* use class Foo: instead of class Foo():

* add ruff linter, copy settings from .flake8 to ruff.toml
Author: Pavol Rusnak
Date: 2023-09-06 21:20:25 +02:00 (committed by GitHub)
Parent: fd25792c8b
Commit: 52a92bf95d
12 changed files with 57 additions and 11 deletions
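For context, the change is mechanical: in Python 3 a class with no base classes needs no parentheses, and ruff's UP039 rule (unnecessary-class-parentheses, enabled in the new ruff.toml below) flags the redundant form. A minimal before/after sketch using the Foo class from the commit title:

# before: empty parentheses are redundant in Python 3
class Foo():
  pass

# after: the form this commit standardizes on
class Foo:
  pass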


@@ -31,6 +31,8 @@ jobs:
 run: python -m pylint --disable=all -e W0311 -e C0303 --jobs=0 --indent-string=' ' **/*.py
 - name: Lint with flake8
 run: python -m flake8 . --statistics -j4
+- name: Lint with ruff
+run: ruff .
 - name: Lint tinygrad with pylint
 run: python -m pylint tinygrad/
 - name: Run mypy


@@ -210,7 +210,7 @@ class MultiHeadAttention:
 ret = self.out_proj(wv).transpose(0,1) # BxTxC -> TxBxC
 return ret
-class ConvFeatureExtractionModel():
+class ConvFeatureExtractionModel:
 def __init__(self, conv_layers, dropout=.0, mode="default", conv_bias=False):
 assert mode in {"default", "group_norm_masked", "layer_norm"}
 def block(n_in, n_out, k, stride, is_layer_norm=False, is_group_norm=False, conv_bias=False):
@@ -352,7 +352,7 @@ class Upsample:
 new_shape = (*x.shape[:-1], x.shape[-1] * self.scale)
 return x.unsqueeze(-1).repeat(repeats).reshape(new_shape)
-class SineGen():
+class SineGen:
 def __init__(self, samp_rate, harmonic_num=0, sine_amp=0.1, noise_std=0.003, voice_threshold=0, flag_for_pulse=False):
 self.sine_amp, self.noise_std, self.harmonic_num, self.sampling_rate, self.voiced_threshold, self.flag_for_pulse = sine_amp, noise_std, harmonic_num, samp_rate, voice_threshold, flag_for_pulse
 self.dim = self.harmonic_num + 1


@@ -244,7 +244,7 @@ class Upsample:
 tmp = x.reshape([b, c, -1] + [1] * _lens) * Tensor.ones(*[1, 1, 1] + [self.scale_factor] * _lens)
 return tmp.reshape(list(x.shape) + [self.scale_factor] * _lens).permute([0, 1] + list(chain.from_iterable([[y+2, y+2+_lens] for y in range(_lens)]))).reshape([b, c] + [x * self.scale_factor for x in x.shape[2:]])
-class Conv_Block():
+class Conv_Block:
 def __init__(self, c1, c2, kernel_size=1, stride=1, groups=1, dilation=1, padding=None):
 self.conv = Conv2d(c1,c2, kernel_size, stride, padding=autopad(kernel_size, padding, dilation), bias=False, groups=groups, dilation=dilation)
 self.bn = BatchNorm2d(c2, eps=0.001)


@@ -11,7 +11,7 @@ from llvmlite import ir # type: ignore
 # https://github.com/corsix/amx/blob/main/Instructions.md
 # 12 lines for AMX support
 from functools import partialmethod
-class AMX():
+class AMX:
 @staticmethod
 def nop_op_imm5(op, imm5, builder): builder.asm(ir.FunctionType(ir.VoidType(), []), f".word (0x201000 + ({op} << 5) + {imm5}); amx op {op} imm {imm5}", "", tuple(), True)
 @staticmethod

ruff.toml (new file, 43 lines)

@@ -0,0 +1,43 @@
+tab-size = 2
+select = [
+  "F",
+  "W6",
+  "E71",
+  "E72",
+  "E112",
+  "E113",
+  # "E124",
+  "E203",
+  "E272",
+  # "E303",
+  # "E304",
+  # "E502",
+  "E702",
+  "E703",
+  "E731",
+  "W191",
+  "UP039", # unnecessary-class-parentheses
+]
+exclude = [
+  "disassemblers/",
+  "docs/",
+  "examples/",
+  "extra/",
+  "models/",
+  "openpilot/",
+]
+[per-file-ignores]
+"test/*" = [
+  "F401",
+  "F403",
+  "F405",
+  "F541",
+  "E722",
+  "E731",
+  "F811",
+  "F821",
+  "F841",
+]
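The selected codes mirror the pyflakes (F) and pycodestyle (E/W) checks the existing .flake8 config enforced, plus UP039 for the class-parentheses cleanup. As a rough illustration, a hypothetical snippet (not from the repo) that a few of these rules would flag:

# E731: assigning a lambda to a name -- ruff suggests a def instead
square = lambda x: x * x

# E702 / E703: multiple statements on one line and a trailing semicolon
a = 1; b = 2;

# UP039: unnecessary parentheses after a class name with no bases
class Example():
  pass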


@@ -35,6 +35,7 @@ setup(name='tinygrad',
"pylint", "pylint",
"mypy", "mypy",
"pre-commit", "pre-commit",
"ruff",
], ],
'testing': [ 'testing': [
"torch", "torch",


@@ -11,7 +11,7 @@ from tinygrad.ops import Device
 from examples.llama import Transformer
 ALLOCATED_DEV_BUFS = 0
-class FakeDeviceBuffer():
+class FakeDeviceBuffer:
 def __init__(self, sz, dt, device):
 self.id = 1
 self.size = sz


@@ -17,7 +17,7 @@ from tinygrad.ops import GlobalCounters, MovementOps, ReduceOps
 from tinygrad.lazy import PUSH_PERMUTES
 from tinygrad.jit import CacheCollector
-class CLCache():
+class CLCache:
 def __init__(self, allowed=None, strict=False, preclear=True): self.allowed, self.strict, self.preclear = allowed, strict, preclear
 def __enter__(self):
 if self.preclear:


@@ -11,7 +11,7 @@ x_init = np.random.randn(1,4).astype(np.float32)
 W_init = np.random.randn(4,4).astype(np.float32)
 m_init = np.random.randn(1,4).astype(np.float32)
-class TinyNet():
+class TinyNet:
 def __init__(self):
 self.x = Tensor(x_init.copy(), requires_grad=True)
 self.W = Tensor(W_init.copy(), requires_grad=True)
@@ -23,7 +23,7 @@ class TinyNet():
 out = out.mul(self.m).add(self.m).sum()
 return out
-class TinyNetTF():
+class TinyNetTF:
 def __init__(self):
 self.x = tf.Variable(x_init.copy(), trainable=True)
 self.W = tf.Variable(W_init.copy(), trainable=True)


@@ -12,7 +12,7 @@ def check_gc():
 from extra.introspection import print_objects
 assert print_objects() == 0
-class FakeDeviceBuffer():
+class FakeDeviceBuffer:
 def __init__(self, sz, dt, device):
 self.id = 1
 self.size = sz


@@ -5,7 +5,7 @@ from tinygrad.helpers import dtypes
 from tinygrad.jit import CacheCollector
 from weakref import ref
-class FakeDeviceBuffer():
+class FakeDeviceBuffer:
 def __init__(self, sz, dt, device):
 self.size = sz
 self.dtype = dt


@@ -12,7 +12,7 @@ x_init = np.random.randn(1,4).astype(np.float32)
 W_init = np.random.randn(4,4).astype(np.float32)
 m_init = np.random.randn(1,4).astype(np.float32)
-class TinyNet():
+class TinyNet:
 def __init__(self, tensor):
 self.x = tensor(x_init.copy(), requires_grad=True)
 self.W = tensor(W_init.copy(), requires_grad=True)