docs: clean up mentions of mlops (#4720)
@@ -55,7 +55,7 @@ confidence=
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
 disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0105,E0401
-# E1101 for mlops binding
+# E1101 for function binding
 # W0221 for Function class
 # W0105 for comment strings
 # E0401 for missing imports
@@ -7,7 +7,7 @@ The tinygrad framework has four pieces
 
 ## Frontend
 
-Everything in [Tensor](tensor.md) is syntactic sugar around [function.py](function.md), where the forwards and backwards passes are implemented for the different mlops. There's about 25 of them, implemented using about 20 basic ops. Those basic ops go on to construct a graph of:
+Everything in [Tensor](tensor.md) is syntactic sugar around [function.py](function.md), where the forwards and backwards passes are implemented for the different functions. There's about 25 of them, implemented using about 20 basic ops. Those basic ops go on to construct a graph of:
 
 ::: tinygrad.lazy.LazyBuffer
     options:
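The doc paragraph above describes the split between tensor.py and function.py. As a rough illustration only (toy classes, not tinygrad's real ones), the pattern looks like this: each function bundles a forward and a backward pass, and the user-facing Tensor method is a one-line wrapper that applies it.

```python
# Toy sketch of the Tensor -> Function split described above.
# ToyTensor/ToyFunction/Relu are illustrative stand-ins, not tinygrad classes.
import numpy as np

class ToyFunction:
  @classmethod
  def apply(cls, *tensors):
    ctx = cls()
    out = ToyTensor(ctx.forward(*[t.data for t in tensors]))
    out._ctx = ctx                      # kept so a backward pass could walk the graph
    return out

class Relu(ToyFunction):
  def forward(self, x):
    self.x = x
    return np.maximum(x, 0)
  def backward(self, grad_out):
    return grad_out * (self.x > 0)      # gradient only flows where the input was positive

class ToyTensor:
  def __init__(self, data):
    self.data, self._ctx = np.asarray(data, dtype=np.float32), None
  # the "syntactic sugar": the user-facing method just applies a function
  def relu(self): return Relu.apply(self)

print(ToyTensor([-2.0, 0.0, 3.0]).relu().data)   # [0. 0. 3.]
```

In tinygrad itself these live in function.py and are invoked as `F.<Name>.apply(...)`, which is visible in the tensor.py hunks below.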
@@ -672,7 +672,7 @@ class Tensor:
     del t0._ctx
     return self
 
-  # ***** movement mlops *****
+  # ***** movement low level ops *****
 
   def view(self, *shape) -> Tensor:
     """`.view` is an alias for `.reshape`."""
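For the `.view` alias shown in the context lines, a quick usage check (assuming a recent tinygrad install):

```python
from tinygrad import Tensor

t = Tensor.arange(6)             # shape (6,)
print(t.reshape(2, 3).shape)     # (2, 3)
print(t.view(2, 3).shape)        # (2, 3) -- .view simply forwards to .reshape
```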
@@ -782,7 +782,7 @@ class Tensor:
     ret = F.Pad.apply(self, arg=(narg:=tuple(x if x is not None else (0,0) for x in arg)))
     return ret if 0 == value else ret + F.Pad.apply(Tensor.ones_like(self), arg=narg).where(0, value)
 
-  # ***** movement hlops *****
+  # ***** movement high level ops *****
 
   # Supported Indexing Implementations:
   # 1. Int indexing (no copy)
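The `pad` body above composes a non-zero fill value out of two zero-pads: the data is zero-padded, a ones mask is zero-padded the same way, and `where` writes `value` exactly where the mask is 0 (the freshly added cells). A small check, assuming a recent tinygrad:

```python
from tinygrad import Tensor

t = Tensor([[1.0, 2.0], [3.0, 4.0]])
# one cell of padding on each side of both axes, filled with -1
print(t.pad(((1, 1), (1, 1)), value=-1).numpy())
# [[-1. -1. -1. -1.]
#  [-1.  1.  2. -1.]
#  [-1.  3.  4. -1.]
#  [-1. -1. -1. -1.]]
```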
@@ -1612,7 +1612,7 @@ class Tensor:
   def triu(self, k:int=0) -> Tensor: return Tensor._tri(self.shape[-2], self.shape[-1], k=k, device=self.device).where(self, 0)
   def tril(self, k:int=0) -> Tensor: return Tensor._tri(self.shape[-2], self.shape[-1], k=k+1, device=self.device).where(0, self)
 
-  # ***** mlops (unary) *****
+  # ***** unary ops *****
 
   def logical_not(self): return F.Eq.apply(*self._broadcasted(False))
   def neg(self): return F.Neg.apply(self) if self.dtype != dtypes.bool else self.logical_not()
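`triu` and `tril` above are an internal `_tri` mask plus `where`; typical usage, assuming a recent tinygrad:

```python
from tinygrad import Tensor

x = Tensor.ones(3, 3)
print(x.triu().numpy())      # keep the upper triangle (k=0), zero the rest
# [[1. 1. 1.]
#  [0. 1. 1.]
#  [0. 0. 1.]]
print(x.tril(k=-1).numpy())  # keep strictly below the main diagonal
# [[0. 0. 0.]
#  [1. 0. 0.]
#  [1. 1. 0.]]
```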
@@ -1650,7 +1650,7 @@ class Tensor:
   def cos(self): return ((math.pi/2)-self).sin()
   def tan(self): return self.sin() / self.cos()
 
-  # ***** math functions (unary) *****
+  # ***** math functions *****
 
   def trunc(self: Tensor) -> Tensor: return self.cast(dtypes.int32).cast(self.dtype)
   def ceil(self: Tensor) -> Tensor: return (self > (b := self.trunc())).where(b+1, b)
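In the hunk above, `cos` is derived from `sin` via cos(x) = sin(π/2 − x), `tan` as sin/cos, `trunc` by a round-trip cast through int32, and `ceil` as trunc plus one wherever the input exceeds its truncation. A quick numeric check, assuming a recent tinygrad (floating-point values approximate):

```python
import math
from tinygrad import Tensor

x = Tensor([0.0, math.pi / 4, math.pi / 3])
print(x.cos().numpy())    # ~[1.0, 0.7071, 0.5]
print(x.tan().numpy())    # ~[0.0, 1.0, 1.7321]

y = Tensor([-1.5, -0.5, 0.5, 1.5])
print(y.trunc().numpy())  # [-1.  0.  0.  1.]  (round toward zero)
print(y.ceil().numpy())   # [-1.  0.  1.  2.]  (trunc, then +1 where y > trunc(y))
```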
@@ -1664,7 +1664,7 @@ class Tensor:
   def abs(self): return self * self.sign()
   def reciprocal(self): return F.Reciprocal.apply(self.cast(least_upper_float(self.dtype)))
 
-  # ***** activation functions (unary) *****
+  # ***** activation functions *****
 
   def elu(self, alpha=1.0):
     """
@@ -1678,6 +1678,7 @@ class Tensor:
     ```
     """
     return self.relu() - alpha*(1-self.exp()).relu()
+
   def celu(self, alpha=1.0):
     """
     Applies the Continuously differentiable Exponential Linear Unit (CELU) function element-wise.
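The ELU one-liner above, relu(x) − alpha·relu(1 − exp(x)), is equivalent to the usual piecewise definition: for x > 0 the second term vanishes, and for x ≤ 0 it reduces to alpha·(exp(x) − 1). A small sanity check, assuming a recent tinygrad; `elu_reference` is an illustrative helper, not part of the library:

```python
from tinygrad import Tensor

def elu_reference(x: Tensor, alpha: float = 1.0) -> Tensor:
  # piecewise ELU: x where x > 0, alpha*(exp(x)-1) otherwise
  return (x > 0).where(x, alpha * (x.exp() - 1))

x = Tensor([-2.0, -0.5, 0.0, 0.5, 2.0])
print(x.elu().numpy())
print(elu_reference(x).numpy())   # matches up to float32 rounding
```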
@@ -1690,6 +1691,7 @@ class Tensor:
     ```
     """
     return self.maximum(0) + (alpha * ((self / alpha).exp() - 1)).minimum(0)
+
   def swish(self):
     """
     See `.silu()`
@@ -1701,6 +1703,7 @@ class Tensor:
     ```
     """
     return self * self.sigmoid()
+
   def silu(self):
     """
     Applies the Sigmoid Linear Unit (SiLU) function element-wise.
@@ -1713,6 +1716,7 @@ class Tensor:
     ```
     """
     return self.swish() # The SiLU function is also known as the swish function.
+
   def relu6(self):
     """
     Applies the ReLU6 function element-wise.
@@ -1725,6 +1729,7 @@ class Tensor:
     ```
     """
     return self.relu() - (self-6).relu()
+
   def hardswish(self):
     """
     Applies the Hardswish function element-wise.
@@ -1737,6 +1742,7 @@ class Tensor:
     ```
     """
     return self * (self+3).relu6() * (1/6)
+
   def tanh(self):
     """
     Applies the Hyperbolic Tangent (tanh) function element-wise.
@@ -1748,6 +1754,7 @@ class Tensor:
     ```
     """
     return 2.0 * ((2.0 * self).sigmoid()) - 1.0
+
   def sinh(self):
     """
     Applies the Hyperbolic Sine (sinh) function element-wise.
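`tanh` above is routed through `sigmoid` using the identity tanh(x) = 2·sigmoid(2x) − 1. A quick comparison against `math.tanh`, assuming a recent tinygrad:

```python
import math
from tinygrad import Tensor

xs = [-2.0, -0.5, 0.0, 0.5, 2.0]
out = Tensor(xs).tanh().numpy()          # computed as 2*sigmoid(2x) - 1
ref = [math.tanh(v) for v in xs]
print(max(abs(float(a) - b) for a, b in zip(out, ref)))   # tiny, on the order of float32 rounding
```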
@@ -1759,6 +1766,7 @@ class Tensor:
     ```
     """
     return (self.exp() - self.neg().exp()) / 2
+
   def cosh(self):
     """
     Applies the Hyperbolic Cosine (cosh) function element-wise.
@@ -1770,6 +1778,7 @@ class Tensor:
     ```
     """
     return (self.exp() + self.neg().exp()) / 2
+
   def atanh(self):
     """
     Applies the Inverse Hyperbolic Tangent (atanh) function element-wise.
@@ -1781,6 +1790,7 @@ class Tensor:
     ```
     """
     return ((1 + self)/(1 - self)).log() / 2
+
   def asinh(self):
     """
     Applies the Inverse Hyperbolic Sine (asinh) function element-wise.
@@ -1792,6 +1802,7 @@ class Tensor:
     ```
     """
     return (self + (self.square() + 1).sqrt()).log()
+
   def acosh(self):
     """
     Applies the Inverse Hyperbolic Cosine (acosh) function element-wise.
@@ -1803,6 +1814,7 @@ class Tensor:
     ```
     """
     return (self + (self.square() - 1).sqrt()).log()
+
   def hardtanh(self, min_val=-1, max_val=1):
     """
     Applies the Hardtanh function element-wise.
@@ -1814,6 +1826,7 @@ class Tensor:
     ```
     """
     return self.clip(min_val, max_val)
+
   def gelu(self):
     """
     Applies the Gaussian Error Linear Unit (GELU) function element-wise.
@@ -1826,6 +1839,7 @@ class Tensor:
     ```
     """
     return 0.5 * self * (1 + (self * 0.7978845608 * (1 + 0.044715 * self * self)).tanh())
+
   def quick_gelu(self):
     """
     Applies the Sigmoid GELU approximation element-wise.
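The constant 0.7978845608 in the `gelu` body above is √(2/π); the line is the standard tanh approximation 0.5·x·(1 + tanh(√(2/π)·(x + 0.044715·x³))). A comparison with a plain-Python version of the same approximation, assuming a recent tinygrad; `gelu_tanh_reference` is an illustrative helper, not a library function:

```python
import math
from tinygrad import Tensor

def gelu_tanh_reference(v: float) -> float:
  # same tanh approximation, written out with sqrt(2/pi) explicit
  return 0.5 * v * (1 + math.tanh(math.sqrt(2 / math.pi) * (v + 0.044715 * v ** 3)))

xs = [-3.0, -1.0, 0.0, 1.0, 3.0]
print(Tensor(xs).gelu().numpy())
print([round(gelu_tanh_reference(v), 4) for v in xs])   # agrees to a few decimals
```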
@@ -1837,6 +1851,7 @@ class Tensor:
     ```
     """
     return self * (self * 1.702).sigmoid()
+
   def leakyrelu(self, neg_slope=0.01):
     """
     Applies the Leaky ReLU function element-wise.
@@ -1851,6 +1866,7 @@ class Tensor:
     ```
     """
     return self.relu() - (-neg_slope*self).relu()
+
   def mish(self):
     """
     Applies the Mish function element-wise.
@@ -1863,6 +1879,7 @@ class Tensor:
     ```
     """
     return self * self.softplus().tanh()
+
   def softplus(self, beta=1):
     """
     Applies the Softplus function element-wise.
@@ -1874,6 +1891,7 @@ class Tensor:
     ```
     """
     return (1/beta) * (1 + (self*beta).exp()).log()
+
   def softsign(self):
     """
     Applies the Softsign function element-wise.
@@ -1886,7 +1904,7 @@ class Tensor:
     """
     return self / (1 + self.abs())
 
-  # ***** broadcasted elementwise mlops *****
+  # ***** broadcasted elementwise ops *****
   def _broadcast_to(self, shape:Tuple[sint, ...]):
     reshape_arg, _ = _pad_left(self.shape, shape)
     if self.ndim > len(shape) or not all(sh in {s,1} or (s==0 and sh==1) for sh,s in zip(reshape_arg, shape)):
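`_broadcast_to` above left-pads the lower-rank shape with 1s (`_pad_left`) and rejects the target unless every dimension already matches or is 1; size-1 axes are then expanded. The effect, assuming a recent tinygrad:

```python
from tinygrad import Tensor

a = Tensor([[1.0], [2.0], [3.0]])   # shape (3, 1)
b = Tensor([10.0, 20.0])            # shape (2,), left-padded to (1, 2) for broadcasting
print((a + b).shape)                # (3, 2): every size-1 axis is expanded
print((a + b).numpy())
# [[11. 21.]
#  [12. 22.]
#  [13. 23.]]
```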