Mirror of https://github.com/tinygrad/tinygrad.git (synced 2026-01-06 21:53:53 -05:00)
move frontend dir to nn [pr] (#12470)
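Everything that lived under tinygrad.frontend now lives under tinygrad.nn; the change is purely a move of import paths, the classes and call sites themselves are untouched. A minimal migration sketch for downstream code (illustrative only, not part of this diff):

    # old locations, removed by this commit:
    #   from tinygrad.frontend.onnx import OnnxRunner
    #   import tinygrad.frontend.torch
    # new locations:
    from tinygrad.nn.onnx import OnnxRunner  # ONNX runner now under tinygrad.nn
    import tinygrad.nn.torch                 # noqa: F401  # torch "tiny" backend now under tinygrad.nn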
@@ -1,6 +1,6 @@
 import sys, time
 from tinygrad import TinyJit, GlobalCounters, fetch, getenv
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 from extra.onnx_helpers import get_example_inputs, validate

 def load_onnx_model(onnx_file):
@@ -8,7 +8,7 @@ import numpy as np
 import subprocess
 import tensorflow as tf
 import tf2onnx
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 from tinygrad.tensor import Tensor
 from tinygrad.helpers import to_mv
 from extra.export_model import export_model_clang, compile_net, jit_model
@@ -10,7 +10,7 @@ from tinygrad.helpers import DEBUG, getenv
 from tinygrad.engine.realize import CompiledRunner

 import onnx
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner

 OPENPILOT_MODEL = sys.argv[1] if len(sys.argv) > 1 else "https://github.com/commaai/openpilot/raw/v0.9.7/selfdrive/modeld/models/supercombo.onnx"
 OUTPUT = sys.argv[2] if len(sys.argv) > 2 else "/tmp/openpilot.pkl"
@@ -1,7 +1,7 @@
 import sys
 from tinygrad import Tensor, fetch, GlobalCounters, dtypes
 from tinygrad.uop.ops import UOp
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 from tinygrad.schedule.kernelize import get_kernelize_map
 from tinygrad.schedule.rangeify import get_rangeify_map
 from tinygrad.helpers import RANGEIFY
@@ -27,7 +27,7 @@ class Model(nn.Module):

 if __name__ == "__main__":
   if getenv("TINY_BACKEND"):
-    import tinygrad.frontend.torch # noqa: F401
+    import tinygrad.nn.torch # noqa: F401
     device = torch.device("tiny")
   else:
     device = torch.device({"METAL":"mps","NV":"cuda"}.get(Device.DEFAULT, "cpu"))
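For context, a sketch of what the TINY_BACKEND branch above enables, assuming torch is installed and that importing tinygrad.nn.torch registers the "tiny" device with PyTorch, as this hunk and the test hunks below suggest (the tensor shapes are made up):

    import torch
    import tinygrad.nn.torch  # noqa: F401  # assumed to register the "tiny" torch device

    device = torch.device("tiny")        # same device selection as above when TINY_BACKEND is set
    x = torch.ones(4, 4, device=device)  # allocated and computed through tinygrad
    y = (x @ x).cpu()                    # bring the result back to the host for inspection
    print(y)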
@@ -2,7 +2,7 @@
 import os
 from ultralytics import YOLO
 from pathlib import Path
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 from extra.onnx_helpers import get_example_inputs

 os.chdir("/tmp")
@@ -1,7 +1,7 @@
 import onnx, yaml, tempfile, time, argparse, json
 from pathlib import Path
 from typing import Any
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 from extra.onnx_helpers import validate, get_example_inputs
 from extra.huggingface_onnx.huggingface_manager import DOWNLOADS_DIR, snapshot_download_with_retry

@@ -1,6 +1,6 @@
 from tinygrad import Tensor
 from tinygrad.tensor import _to_np_dtype
-from tinygrad.frontend.onnx import OnnxRunner, OnnxValue
+from tinygrad.nn.onnx import OnnxRunner, OnnxValue
 import numpy as np
 import onnxruntime as ort

@@ -1,6 +1,6 @@
 import unittest
 import torch
-import tinygrad.frontend.torch
+import tinygrad.nn.torch
 torch.set_default_device("tiny")
 import numpy as np

@@ -1,7 +1,7 @@
 import unittest
 from tinygrad.helpers import getenv
 import torch
-import tinygrad.frontend.torch
+import tinygrad.nn.torch
 torch.set_default_device("tiny")
 import numpy as np

setup.py
@@ -31,7 +31,6 @@ setup(name='tinygrad',
 'tinygrad.codegen.opt',
 'tinygrad.codegen.late',
 'tinygrad.engine',
-'tinygrad.frontend',
 'tinygrad.nn',
 'tinygrad.renderer',
 'tinygrad.runtime',
@@ -1,6 +1,6 @@
 import time, sys, hashlib
 from pathlib import Path
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 from tinygrad import Tensor, dtypes, TinyJit
 from tinygrad.helpers import IMAGE, GlobalCounters, fetch, colored, getenv, trange
 import numpy as np
test/external/external_model_benchmark.py
@@ -4,7 +4,7 @@ import torch
 torch.set_num_threads(1)
 import onnxruntime as ort
 from onnx2torch import convert
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 from tinygrad.helpers import OSX, DEBUG, fetch, getenv
 from tinygrad.dtype import _to_np_dtype
 from tinygrad import Tensor, Device, dtypes
test/external/external_test_onnx_backend.py
@@ -6,7 +6,7 @@ import numpy as np
 from tinygrad import Tensor, Device, dtypes
 from tinygrad.helpers import getenv, OSX
 from tinygrad.device import is_dtype_supported
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner

 # pip3 install tabulate
 pytest_plugins = 'onnx.backend.test.report',
test/external/external_test_onnx_ops.py
@@ -5,7 +5,7 @@
 from typing import Any
 import unittest, onnx, tempfile
 from tinygrad import dtypes, Tensor
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 import numpy as np
 from extra.onnx_helpers import validate
 from onnx.defs import ONNX_DOMAIN, AI_ONNX_PREVIEW_TRAINING_DOMAIN
test/external/external_test_onnx_runner.py
@@ -3,7 +3,7 @@ import numpy as np
 from tinygrad import dtypes, Tensor
 from tinygrad.uop.ops import Ops
 from tinygrad.device import is_dtype_supported
-from tinygrad.frontend.onnx import OnnxRunner, OnnxDataType
+from tinygrad.nn.onnx import OnnxRunner, OnnxDataType
 from hypothesis import given, strategies as st

 # copied from test_const_folding.py
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 import unittest
 import numpy as np
-from tinygrad.frontend.onnx import OnnxRunner
+from tinygrad.nn.onnx import OnnxRunner
 from tinygrad.device import Device
 from tinygrad.helpers import fetch, Context

@@ -8,7 +8,7 @@ from tinygrad.tensor import _to_np_dtype
 from tinygrad.device import is_dtype_supported

 if getenv("TINY_BACKEND"):
-  import tinygrad.frontend.torch # noqa: F401 # pylint: disable=unused-import
+  import tinygrad.nn.torch # noqa: F401 # pylint: disable=unused-import
   torch.set_default_device("tiny")

 if CI:
@@ -68,7 +68,7 @@ class TestQuantizeOnnxCPU(unittest.TestCase):
       import onnx # noqa: F401 # pylint: disable=unused-import
     except ImportError:
       raise unittest.SkipTest()
-    from tinygrad.frontend.onnx import OnnxRunner
+    from tinygrad.nn.onnx import OnnxRunner
     out_file = get_quantized_model(sz)
     run_onnx = OnnxRunner(out_file)
     inp = Tensor(np.random.uniform(size=(sz, sz)).astype(np.float32))
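For reference, a usage sketch against the new import path, modeled on the quantize test above; the model file name, input name, and shape here are made up, and the call convention (a dict of named input Tensors in, a dict of named output Tensors back) is assumed from how that test drives the runner:

    import numpy as np
    from tinygrad import Tensor
    from tinygrad.nn.onnx import OnnxRunner  # moved from tinygrad.frontend.onnx

    run_onnx = OnnxRunner("model.onnx")  # hypothetical ONNX file, as with out_file above
    x = Tensor(np.random.uniform(size=(1, 4)).astype(np.float32))
    out = run_onnx({"input": x})         # assumed: inputs keyed by ONNX input name
    print({k: v.numpy() for k, v in out.items()})  # assumed: outputs keyed by ONNX output name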