grad -> grad_output in mlops for consistency (#2982)

chenyu
2024-01-02 18:03:55 -05:00
committed by GitHub
parent ad0d710ec4
commit c07907e644

@@ -26,19 +26,19 @@ class Cast(Function):
 class Zero(Function):
   def forward(self, x:LazyBuffer) -> LazyBuffer: return x.const(0)
-  def backward(self, grad:LazyBuffer) -> LazyBuffer: return grad.const(0)
+  def backward(self, grad_output:LazyBuffer) -> LazyBuffer: return grad_output.const(0)
 class Neg(Function):
   def forward(self, x:LazyBuffer) -> LazyBuffer: return x.e(UnaryOps.NEG)
-  def backward(self, grad:LazyBuffer) -> LazyBuffer: return grad.e(UnaryOps.NEG)
+  def backward(self, grad_output:LazyBuffer) -> LazyBuffer: return grad_output.e(UnaryOps.NEG)
 class Sin(Function):
   def forward(self, x:LazyBuffer) -> LazyBuffer:
     self.x = x
     return x.e(UnaryOps.SIN)
-  def backward(self, grad:LazyBuffer) -> LazyBuffer:
-    return self.x.const(math.pi / 2).e(BinaryOps.SUB, self.x).e(UnaryOps.SIN).e(BinaryOps.MUL, grad)
+  def backward(self, grad_output:LazyBuffer) -> LazyBuffer:
+    return self.x.const(math.pi / 2).e(BinaryOps.SUB, self.x).e(UnaryOps.SIN).e(BinaryOps.MUL, grad_output)
 # NOTE: maximum(x, 0) behaves differently where x=0
 class Relu(Function):
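
For reference, Sin.backward in this hunk relies on the identity sin(π/2 − x) = cos(x): the chain of ops const(π/2) → SUB x → SIN → MUL grad_output is the chain-rule product cos(x) · grad_output. Below is a minimal sketch with plain Python floats standing in for LazyBuffers (illustrative only, not tinygrad code; sin_backward is a hypothetical helper name):

```python
import math

# Sketch of what Sin.backward computes: the derivative of sin(x) is cos(x),
# expressed here as sin(pi/2 - x), scaled by the incoming gradient grad_output.
def sin_backward(x: float, grad_output: float) -> float:
    return math.sin(math.pi / 2 - x) * grad_output

# Quick numerical check that sin(pi/2 - x) matches cos(x).
for x in (0.0, 0.5, 1.0, 2.0):
    assert abs(sin_backward(x, 1.0) - math.cos(x)) < 1e-12
```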