Mirror of https://github.com/tinygrad/tinygrad.git (synced 2026-01-08 22:48:25 -05:00)
don't use tons of memory for tests non CI [pr] (#7209)
* don't use tons of memory for tests
* fix import and clean up pre-commit
* use pathlib
* no shm on windows
* Revert "use pathlib"
This reverts commit 7c38489820.
* run pre-commit hooks in test
* ugh, fix later
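The "run pre-commit hooks in test" change is not part of the diff shown below. As a rough, stdlib-only sketch of the idea, a test can shell out to the same "python3 -m <tool>" entries the hooks use and fail if any of them report problems; the class name and the choice of hooks here are illustrative assumptions, not the implementation from this commit.

import subprocess
import sys
import unittest

class TestPreCommitHooks(unittest.TestCase):  # illustrative name, not from this commit
  def _run_module(self, *args):
    # Invoke a tool as a module of the current interpreter, mirroring the
    # "python3 -m ..." hook entries in the config change below.
    return subprocess.run([sys.executable, "-m", *args], capture_output=True, text=True)

  def test_ruff(self):
    proc = self._run_module("ruff", "check", ".")
    self.assertEqual(proc.returncode, 0, proc.stdout + proc.stderr)

if __name__ == "__main__":
  unittest.main()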
@@ -1,21 +1,15 @@
 repos:
 - repo: local
   hooks:
-  - id: whitespace
-    name: strip whitespace
-    entry: find tinygrad -type f -name "*.py" -exec sed -i '' 's/ *$//' '{}' ';'
-    language: system
-    always_run: true
-    pass_filenames: false
   - id: ruff
     name: ruff
-    entry: ruff check .
+    entry: python3 -m ruff check .
     language: system
     always_run: true
     pass_filenames: false
   - id: mypy
     name: mypy
-    entry: mypy tinygrad/ --strict-equality
+    entry: python3 -m mypy tinygrad/ --strict-equality
     language: system
     always_run: true
     pass_filenames: false
@@ -27,7 +21,7 @@ repos:
     pass_filenames: false
   - id: devicetests
     name: select GPU tests
-    entry: env GPU=1 PYTHONPATH="." pytest test/test_uops.py test/test_search.py
+    entry: env GPU=1 PYTHONPATH="." python3 -m pytest test/test_uops.py test/test_search.py
     language: system
     always_run: true
     pass_filenames: false
@@ -45,7 +39,7 @@ repos:
     pass_filenames: false
   - id: pylint
     name: pylint
-    entry: env PYTHONPATH="." python3 -m pylint tinygrad/
+    entry: python3 -m pylint tinygrad/
     language: system
     always_run: true
     pass_filenames: false
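Note on the config change above: the macOS-only whitespace hook is dropped (its sed -i '' syntax only works with BSD sed), every remaining hook now invokes its tool as "python3 -m <tool>" so the linters run from whatever environment python3 resolves to rather than from a console script that happens to be on PATH, and the pylint hook drops its env PYTHONPATH="." prefix as well. A small illustrative check (not part of the commit) that those tools are importable from the active interpreter:

import importlib.util
import sys

# Illustrative only: verify each tool the hooks call via "python3 -m <tool>"
# is installed in this interpreter's environment.
for tool in ("ruff", "mypy", "pylint", "pytest"):
  spec = importlib.util.find_spec(tool)
  print(f"{tool}: {'ok' if spec else 'missing'} ({sys.executable})")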
@@ -10,7 +10,7 @@ from test.helpers import derandomize_model, is_dtype_supported
 
 from examples.gpt2 import Transformer as GPT2Transformer, MODEL_PARAMS as GPT2_MODEL_PARAMS
 from examples.hlb_cifar10 import SpeedyResNet, hyp
-from examples.llama import Transformer as LLaMaTransformer, MODEL_PARAMS as LLAMA_MODEL_PARAMS
+from examples.llama import Transformer as LLaMaTransformer
 from examples.stable_diffusion import UNetModel, unet_params
 from extra.models.unet import ResBlock
 
@@ -50,16 +50,15 @@ class TestRealWorld(unittest.TestCase):
   @unittest.skipUnless(is_dtype_supported(dtypes.float16), "need dtypes.float16")
   def test_stable_diffusion(self):
     params = unet_params
-    if CI:
-      params["model_ch"] = 16
-      params["ctx_dim"] = 16
-      params["num_res_blocks"] = 1
-      params["n_heads"] = 2
+    params["model_ch"] = 16
+    params["ctx_dim"] = 16
+    params["num_res_blocks"] = 1
+    params["n_heads"] = 2
     model = UNetModel(**params)
     derandomize_model(model)
     @TinyJit
     def test(t, t2): return model(t, Tensor([801]), t2).realize()
-    helper_test("test_sd", lambda: (Tensor.randn(1, 4, 64, 64),Tensor.randn(1, 77, params["ctx_dim"])), test, 18.0, 513 if CI else 839)
+    helper_test("test_sd", lambda: (Tensor.randn(1, 4, 64, 64),Tensor.randn(1, 77, params["ctx_dim"])), test, 18.0, 513)
 
   def test_unet_resblock(self):
     model = [ResBlock(16, 24, 16) for _ in range(4)]
@@ -75,12 +74,12 @@ class TestRealWorld(unittest.TestCase):
     dtypes.default_float = dtypes.float16
 
     args_tiny = {"dim": 1024, "hidden_dim": 2048, "n_heads": 8, "n_layers": 8, "norm_eps": 1e-05, "vocab_size": 1000}
-    model = LLaMaTransformer(**(args_tiny if CI else LLAMA_MODEL_PARAMS["1"]["7B"]["args"]))
+    model = LLaMaTransformer(**args_tiny)
     derandomize_model(model)
     @TinyJit
     def test(t): return model(t, 0).realize()
     # TODO: test first token vs rest properly
-    helper_test("test_llama", lambda: (Tensor([[1,2,3,4]]),), test, 0.27 if CI else 14.9, 192 if CI else 719, all_jitted=True)
+    helper_test("test_llama", lambda: (Tensor([[1,2,3,4]]),), test, 0.27, 192, all_jitted=True)
 
   @unittest.skipUnless(is_dtype_supported(dtypes.float16), "need dtypes.float16")
   def test_gpt2(self):
@@ -117,8 +116,7 @@ class TestRealWorld(unittest.TestCase):
     with Tensor.train():
       model = SpeedyResNet(Tensor.ones((12,3,2,2)))
       optimizer = optim.SGD(get_parameters(model), lr=0.01, momentum=0.8, nesterov=True, weight_decay=0.15)
-
-      BS = 32 if CI else 512
+      BS = 32
 
       @TinyJit
       def train(X):
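With the large non-CI configurations removed, the tests above keep only the smaller CI-sized budgets in their helper_test calls (513 instead of 839, 192 instead of 719); the exact semantics of those arguments are not shown in this diff. As a generic, stdlib-only sketch of the same idea of failing a test that allocates more than a budget, one could use tracemalloc; this is an assumption-level illustration of the pattern, not how tinygrad's helper_test actually measures usage.

import tracemalloc
import unittest

def assert_peak_mb(fn, max_mb):
  # Illustrative budget check: run fn and fail if peak Python allocations exceed max_mb.
  tracemalloc.start()
  try:
    fn()
    _, peak = tracemalloc.get_traced_memory()
  finally:
    tracemalloc.stop()
  assert peak / 1e6 <= max_mb, f"peak {peak / 1e6:.1f} MB over budget {max_mb} MB"

class TestMemoryBudget(unittest.TestCase):  # illustrative, not from this commit
  def test_small_alloc_fits_budget(self):
    assert_peak_mb(lambda: bytearray(1_000_000), 32)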
@@ -1,9 +1,10 @@
 import unittest
 import multiprocessing.shared_memory as shared_memory
-from tinygrad.helpers import CI
+from tinygrad.helpers import CI, WIN
 from tinygrad.tensor import Tensor, Device
 import numpy as np
 
+@unittest.skipIf(WIN, "no shm on Windows")
 class TestRawShmBuffer(unittest.TestCase):
   def test_e2e(self):
     t = Tensor.randn(2, 2, 2).realize()
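For reference, a self-contained sketch of the skip-on-Windows pattern added above. The WIN definition here is an assumption standing in for tinygrad.helpers.WIN, and the round-trip body is illustrative rather than the actual TestRawShmBuffer test.

import multiprocessing.shared_memory as shared_memory
import sys
import unittest

import numpy as np

WIN = sys.platform == "win32"  # assumed stand-in for tinygrad.helpers.WIN

@unittest.skipIf(WIN, "no shm on Windows")
class TestShmRoundTrip(unittest.TestCase):  # illustrative, not the tinygrad test
  def test_roundtrip(self):
    data = np.arange(8, dtype=np.float32)
    shm = shared_memory.SharedMemory(create=True, size=data.nbytes)
    try:
      # Write through one view of the shared block, read back through another.
      np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf)[:] = data
      readback = np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf).copy()
      np.testing.assert_array_equal(readback, data)
    finally:
      shm.close()
      shm.unlink()

if __name__ == "__main__":
  unittest.main()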