add std to tensor.py (#767)

* add std

* delete comment

* edit: one liner std, add: test

* adjust

* fix: shape mismatch

* set unbiased to False

* added unbiased option

* fix unbiased option in test and clean code

* better

* generalize axis

* holly coffee molly

* generalize axes without unbiased opt.

* hopefully done

* complete unbiased true for axes

* Update test_ops.py

* fixed

* std completed without bessels correction

* fix comment

* ups
Author: Rabia Eda Yılmaz
Date: 2023-05-13 22:20:44 +03:00
Committed by: GitHub
Parent: b705510d5c
Commit: e5b4b36cba
2 changed files with 13 additions and 1 deletion

test_ops.py

@@ -214,6 +214,15 @@ class TestOps(unittest.TestCase):
  helper_test_op([(3,4,5,6)], lambda x: x.mean())
  def test_mean_axis(self):
    helper_test_op([(3,4,5,6)], lambda x: x.mean(axis=(1,2)), lambda x: Tensor.mean(x, axis=(1,2)))
  def test_std(self):
    helper_test_op([(45, 65, 85)], lambda x: torch.std(x, unbiased=False), lambda x: Tensor.std(x))
  def test_std_axis(self):
    helper_test_op([(45, 65, 85)], lambda x: torch.std(x, unbiased=False, dim=0), lambda x: Tensor.std(x, axis=0))
    helper_test_op([(45, 65, 85)], lambda x: torch.std(x, unbiased=False, dim=2), lambda x: Tensor.std(x, axis=2))
    helper_test_op([(45, 65, 85)], lambda x: torch.std(x, unbiased=False, dim=[1, 2]), lambda x: Tensor.std(x, axis=[1, 2]))
    helper_test_op([(45, 65, 85)], lambda x: torch.std(x, unbiased=False, dim=None), lambda x: Tensor.std(x, axis=None))
  def test_std_keepdim(self):
    helper_test_op([(45, 65, 85)], lambda x: torch.std(x, unbiased=False, keepdim=True), lambda x: Tensor.std(x, keepdim=True))
  def test_log_softmax(self):
    helper_test_op([(45,65)], lambda x: torch.nn.LogSoftmax(dim=1)(x), Tensor.log_softmax, atol=1e-7, grad_atol=1e-7)
  def test_log_softmax_other_axis(self):

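A quick standalone sanity check of what the tests above assert: Tensor.std computes the population (biased) standard deviation, i.e. it matches torch.std(..., unbiased=False) and NumPy's default np.std. The snippet below is not part of the diff, only an illustrative sketch; it assumes a tinygrad checkout from around this commit, where Tensor accepts NumPy arrays and exposes .numpy().

import numpy as np
from tinygrad.tensor import Tensor

data = np.random.randn(45, 65, 85).astype(np.float32)
t = Tensor(data)

# full reduction, single axis, and multi-axis reductions, mirroring test_std / test_std_axis
np.testing.assert_allclose(t.std().numpy(), data.std(), rtol=1e-3, atol=1e-4)
np.testing.assert_allclose(t.std(axis=0).numpy(), data.std(axis=0), rtol=1e-3, atol=1e-4)
np.testing.assert_allclose(t.std(axis=[1, 2]).numpy(), data.std(axis=(1, 2)), rtol=1e-3, atol=1e-4)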
tensor.py

@@ -309,7 +309,10 @@ class Tensor:
  def mean(self, axis=None, keepdim=False):
    out = self.sum(axis=axis, keepdim=keepdim)
    return out * (prod(out.shape)/prod(self.shape))
  # TODO: implement the unbiased=True option (Bessel's correction); subtracting 1 from the divisor currently causes ~0.01 error
  def std(self, axis=None, keepdim=False):
    square_sum = ((self - self.mean(axis=axis, keepdim=True)).square()).sum(axis=axis, keepdim=keepdim)
    return (square_sum * (prod(square_sum.shape)/prod(self.shape))).sqrt()
  def _softmax(self, axis):
    m = self - self.max(axis=axis, keepdim=True)
    e = m.exp()
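For the TODO above, one possible shape of the unbiased option is sketched below. It is not part of this commit: it rescales by N-1 instead of N, where N is the number of elements reduced over, and reuses the same prod helper the committed mean and std already rely on.

  def std(self, axis=None, keepdim=False, unbiased=False):
    square_sum = ((self - self.mean(axis=axis, keepdim=True)).square()).sum(axis=axis, keepdim=keepdim)
    n = prod(self.shape) / prod(square_sum.shape)   # number of elements reduced over
    divisor = n - 1 if unbiased else n              # Bessel's correction when unbiased=True
    return (square_sum * (1 / divisor)).sqrt()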