Mirror of https://github.com/tinygrad/tinygrad.git (synced 2026-01-08 22:48:25 -05:00)
* Update all devices to be tested

ANE, CPU, and OCL now support all tests.

However, tests are not currently passing on GPU, and I cannot test on CPU.
The failing GPU tests are not caused by this update; they have not been
passing due to a missing required "six" installation.

OpenCL tests have not been run since commit 1a1c63a08b.

Devices now have three types, handled by a new DeviceTypes enum. (The goal
is to revert to Tensor.<type>, but the current setup allows for keyword
argument defaults: `device=DeviceTypes.CPU`.)

All references to Tensor.GPU/CPU/ANE have been converted to the
corresponding `DeviceTypes` enum.

The conversion code has been refactored to allow any-device-to-any-device
conversion. (A minimal sketch of the enum default and device conversion
appears after this list.)
* Add six dependency in requirements.txt
* Resolve failure to run tests

Move six into the GPU required installs. Remove six from the standard
installation.
* Remove repeated data conversion
* Refactor method names

Also reduce code with .to and .to_ (see the sketch after this list).
* Dynamic device handlers (see the handler-registry sketch after this list)
* Refactor DeviceTypes -> Device
* Add mem copy profiling back
* test_backward_pass_diamond_model passing
* Resolve Sum issue on GPU
* Revert batchnorm2d tests
* Update README with updated API
* ANE testing with
* Last minute line gains
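
A minimal sketch of the keyword-argument default described above. This is
illustrative only: the CPU/GPU/ANE members mirror the backends named in this
commit, but the enum values and the `zeros` helper are made up here, not
tinygrad's actual code.

from enum import Enum

class DeviceTypes(Enum):
  CPU = 0
  GPU = 1
  ANE = 2

def zeros(*shape, device=DeviceTypes.CPU):
  # A default device can be expressed directly in the signature,
  # which the old Tensor.GPU/CPU/ANE flags could not do.
  print(f"allocating {shape} on {device.name}")

zeros(4, 4)                          # defaults to CPU
zeros(4, 4, device=DeviceTypes.GPU)  # explicit device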
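The .to/.to_ pair mentioned under "Refactor method names" can be sketched as
below, assuming the usual out-of-place vs. in-place split; the MiniTensor
class is invented for illustration, and Device is the post-rename name of the
enum.

from enum import Enum

class Device(Enum):
  CPU = 0
  GPU = 1
  ANE = 2

# Hypothetical illustration: .to returns a copy on the target device,
# .to_ moves the tensor in place, so any device can convert to any other.
class MiniTensor:
  def __init__(self, data, device=Device.CPU):
    self.data, self.device = data, device

  def to(self, device):
    return MiniTensor(self.data, device=device)  # out-of-place copy

  def to_(self, device):
    self.device = device                         # in-place move
    return self

t = MiniTensor([1, 2, 3])
g = t.to(Device.GPU)    # t stays on CPU, g lives on GPU
t.to_(Device.ANE)       # t itself now lives on ANE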
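One way to picture "Dynamic device handlers" is a registry keyed by Device
member and looked up at runtime instead of an if/elif chain. This is an
assumed shape, not the code from this commit; register_handler and the
handler functions are hypothetical.

from enum import Enum

class Device(Enum):
  CPU = 0
  GPU = 1
  ANE = 2

# Hypothetical registry mapping a Device member to the function that
# knows how to hold data for that backend.
HANDLERS = {}

def register_handler(device):
  def wrap(fn):
    HANDLERS[device] = fn
    return fn
  return wrap

@register_handler(Device.CPU)
def cpu_buffer(data):
  return list(data)

@register_handler(Device.GPU)
def gpu_buffer(data):
  return list(data)  # stand-in for an OpenCL buffer upload

def allocate(data, device=Device.CPU):
  return HANDLERS[device](data)  # dispatch dynamically, no if/elif

print(allocate([1, 2, 3], device=Device.GPU))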
47 lines | 1.2 KiB | Python
#!/usr/bin/env python
import gc
import unittest
from tinygrad.tensor import Tensor, GPU, Device
from .config import ANE

def tensors_allocated():
  # Count every live Tensor that the garbage collector still tracks.
  return sum(isinstance(x, Tensor) for x in gc.get_objects())

class TestGC(unittest.TestCase):
  device = Device.CPU

  def test_gc(self):
    a = Tensor.zeros(4, 4, device=self.device)
    b = Tensor.zeros(4, 4, device=self.device)
    (a*b).mean().backward()
    assert tensors_allocated() > 0
    del a, b
    # With both roots gone, the whole graph should be collectable.
    assert tensors_allocated() == 0

  def test_gc_complex(self):
    a = Tensor.zeros(4, 4, device=self.device)
    b = Tensor.zeros(4, 4, device=self.device)
    assert tensors_allocated() == 2
    # After backward, a and b plus their gradients remain: 4 tensors.
    (a*b).mean().backward()
    assert tensors_allocated() == 4
    del b
    assert tensors_allocated() == 2
    b = Tensor.zeros(4, 4, device=self.device)
    print(tensors_allocated())
    (a*b).mean().backward()
    print(tensors_allocated())
    assert tensors_allocated() == 4
    del b
    assert tensors_allocated() == 2

@unittest.skipUnless(GPU, "Requires GPU")
class TestGCGPU(TestGC):
  device = Device.GPU

@unittest.skipUnless(ANE, "Requires ANE")
class TestGCANE(TestGC):
  device = Device.ANE

if __name__ == '__main__':
  unittest.main()