From e194ae0c1db3522f25b48bcedd195fcffe26ad99 Mon Sep 17 00:00:00 2001
From: George Hotz
Date: Tue, 30 Aug 2022 19:52:21 -0700
Subject: [PATCH] typos

---
 accel/opencl/preprocessing.py | 2 +-
 tinygrad/helpers.py           | 2 +-
 tinygrad/ops.py               | 2 +-
 tinygrad/tensor.py            | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/accel/opencl/preprocessing.py b/accel/opencl/preprocessing.py
index 268a2d53b7..d5c6df85a5 100644
--- a/accel/opencl/preprocessing.py
+++ b/accel/opencl/preprocessing.py
@@ -8,7 +8,7 @@ def preprocessing_op(x,w,C):
   #print(x.shape, w.shape)
 
   if C.bs > 1 and C.py > 0:
-    # explictly add y-padding for batched inputs
+    # explicitly add y-padding for batched inputs
     # N C H W
     xs = [(0, 0) for _ in x.shape]
     xs[2] = (C.py, C.py)
diff --git a/tinygrad/helpers.py b/tinygrad/helpers.py
index d147043683..c3de0613ac 100644
--- a/tinygrad/helpers.py
+++ b/tinygrad/helpers.py
@@ -18,7 +18,7 @@ def get_conv_args(x_shape, w_shape, stride=1, groups=1, padding=0, dilation=1, o
   bs,cin_,iy,ix = x_shape
 
   # this can change px_ and py_ to make the out_shape right
-  # TOOD: copy padding names from http://nvdla.org/hw/v1/ias/unit_description.html
+  # TODO: copy padding names from http://nvdla.org/hw/v1/ias/unit_description.html
   if out_shape is not None:
     py_ = (out_shape[2] - 1) * sy + 1 + dy * (H-1) - iy - py
     px_ = (out_shape[3] - 1) * sx + 1 + dx * (W-1) - ix - px
diff --git a/tinygrad/ops.py b/tinygrad/ops.py
index 733533a938..64d8f2d1fe 100644
--- a/tinygrad/ops.py
+++ b/tinygrad/ops.py
@@ -141,7 +141,7 @@ def _realize_binaryops(self:LazyBuffer) -> Tuple[DeviceBuffer, List[DeviceBuffer
     earlycode = "acc"
   conv_args : Optional[ConvArgs] = None
 
-  # if there's *one* processing or reduce op in here, we can corealize it. we can corealize binary op sibilings as well
+  # if there's *one* processing or reduce op in here, we can corealize it. we can corealize binary op siblings as well
   # NOTE: if it references the same conv multiple times, they should already be merged by the dictionary
   psrcs : List[Tuple[LazyBuffer, LazyBuffer]] = [(k,x) for k,x in zip(real_srcs.keys(), map(get_movementroot_contiguous, real_srcs.keys())) if x.optype in [ProcessingOps,ReduceOps] and x.realized is None and len(x.children) <= 1 and len(k.children) <= 1]
   if len(psrcs) == 1 and MERGE_ONE_REDUCE_INTO_ELEMENTWISE:
diff --git a/tinygrad/tensor.py b/tinygrad/tensor.py
index 59ed3aebfd..42ac763da4 100644
--- a/tinygrad/tensor.py
+++ b/tinygrad/tensor.py
@@ -61,7 +61,7 @@ class Tensor:
   def detach(self): return Tensor(self.lazydata, device=self.device, requires_grad=False)
   def numpy(self): return np.array(self.lazydata.toCPU())
 
-  # TOOD: this keeps the legacy behavior working, remove it after refactor
+  # TODO: this keeps the legacy behavior working, remove it after refactor
   @property
   def data(self): return self.numpy()
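
Aside (illustrative, not part of the patch): the helpers.py hunk leaves the out_shape padding math unchanged, and it is easy to sanity-check. The usual convolution output-height relation is oy = (iy + py + py_ - dy*(H-1) - 1) // sy + 1; solving it for the trailing pad py_ gives exactly the expression in get_conv_args. The helper names below (extra_pad, out_height) are made up for this sketch and are not tinygrad API.

# Sketch only -- hypothetical helper names, arbitrary example numbers.
def extra_pad(out_y, iy, H, sy=1, dy=1, py=0):
  # same expression as the context line in get_conv_args:
  # the trailing pad that makes the output height equal out_y
  return (out_y - 1) * sy + 1 + dy * (H - 1) - iy - py

def out_height(iy, H, sy=1, dy=1, py=0, py_=0):
  # standard conv output-height formula with asymmetric padding py/py_
  return (iy + py + py_ - dy * (H - 1) - 1) // sy + 1

iy, H, sy, dy, py = 28, 3, 2, 1, 1      # 28-row input, 3-tap kernel, stride 2
py_ = extra_pad(14, iy, H, sy, dy, py)  # -> 0 extra rows needed here
assert out_height(iy, H, sy, dy, py, py_) == 14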