Mirror of https://github.com/tinygrad/tinygrad.git, synced 2026-01-06 21:53:53 -05:00
move files into uop dir (#10399)
* move files into uop dir [pr]
* tinygrad.uop is a thing
* fix uop docs, no pr
* fix viz
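For downstream code, the whole change reduces to new import paths. A minimal compatibility sketch (not part of this commit) that runs on either side of the move:

# hypothetical shim: assumes only the module location changed in this commit
try:
    from tinygrad.uop.ops import UOp, Ops, UPat, PatternMatcher  # new location
except ImportError:
    from tinygrad.ops import UOp, Ops, UPat, PatternMatcher      # pre-move releases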
@@ -39,7 +39,7 @@ DEVICE = "CPU" # NOTE: you can change this!
 import struct
 from tinygrad.dtype import dtypes
 from tinygrad.device import Buffer, Device
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.shape.shapetracker import ShapeTracker

 # allocate some buffers + load in values
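A tiny usage sketch (not from the diff) of the new path; Variable and sym_infer appear under these names elsewhere in this commit, and the exact printed result is an assumption:

# assumes a tinygrad checkout that already contains this commit
from tinygrad import Variable
from tinygrad.uop.ops import sym_infer   # was: from tinygrad.ops import sym_infer

x = Variable("x", 1, 10)        # a symbolic variable is itself a UOp
expr = x * 4 + 2                # arithmetic on UOps builds a small graph
print(sym_infer(expr, {x: 3}))  # substitute x=3 and fold constants -> expected 14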
@@ -1,10 +1,10 @@
-::: tinygrad.ops.UOp
+::: tinygrad.uop.ops.UOp
 options:
 members: false
 members_order: source
 show_labels: false

-::: tinygrad.ops.Ops
+::: tinygrad.uop.ops.Ops
 options:
 members: true
 members_order: source
@@ -3,7 +3,7 @@ import os, argparse, contextlib
 from typing import Optional, Union
 with contextlib.suppress(ImportError): import tiktoken
 from tinygrad import Tensor, TinyJit, Device, GlobalCounters, Variable, dtypes
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp
 from tinygrad.helpers import Timing, DEBUG, JIT, getenv, fetch, colored, trange
 from tinygrad.nn import Embedding, Linear, LayerNorm
 from tinygrad.nn.state import gguf_load, torch_load, load_state_dict, get_state_dict
@@ -4,7 +4,7 @@ from examples.mlperf.helpers import get_mlperf_bert_model
 from tinygrad import Tensor, Device, dtypes, nn
 from tinygrad.codegen.kernel import Kernel
 from tinygrad.codegen.heuristic import hand_coded_optimizations
-from tinygrad.ops import Ops, sym_infer
+from tinygrad.uop.ops import Ops, sym_infer
 from tinygrad.device import Compiled
 from tinygrad.engine.search import beam_search, bufs_from_lin
 from tinygrad.helpers import DEBUG, ansilen, getenv, colored, TRACEMETA
@@ -7,7 +7,7 @@ from train_gpt2 import GPT, GPTConfig
 from tinygrad.helpers import dedup, to_function_name, flatten, getenv, GlobalCounters, ansilen, to_function_name
 from tinygrad.engine.realize import get_kernel, run_schedule
 from tinygrad.engine.memory import memory_planner
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops

 TIMING = getenv("TIMING")

@@ -1,9 +1,9 @@
 from typing import Tuple, List, NamedTuple, Any, Dict, Optional, Union, DefaultDict, cast
 from tinygrad.codegen.kernel import Ops, MemOp, UOp
-from tinygrad.ops import BinaryOps, UnaryOps
+from tinygrad.uop.ops import BinaryOps, UnaryOps
 from tinygrad.dtype import DType, dtypes
 from tinygrad.helpers import DEBUG
-from tinygrad.ops import Variable, NumNode, MulNode, DivNode, ModNode, LtNode, SumNode, AndNode
+from tinygrad.uop.ops import Variable, NumNode, MulNode, DivNode, ModNode, LtNode, SumNode, AndNode
 import functools
 import math
 from collections import defaultdict
@@ -2,7 +2,7 @@ import struct
 from platform import system
 from typing import Tuple, Dict, List, Optional
 from tinygrad import dtypes
-from tinygrad.ops import BinaryOps, UnaryOps, TernaryOps
+from tinygrad.uop.ops import BinaryOps, UnaryOps, TernaryOps
 from tinygrad.codegen.kernel import Ops, UOp
 from tinygrad.helpers import CI
 from tinygrad.codegen.assembly import uops_to_asmstyle, AssemblyLanguage
@@ -3,7 +3,7 @@ import struct
 from tinygrad.codegen.assembly import uops_to_asmstyle, AssemblyLanguage
 from tinygrad.codegen.kernel import Ops, UOp
 from tinygrad import dtypes
-from tinygrad.ops import BinaryOps, UnaryOps, TernaryOps
+from tinygrad.uop.ops import BinaryOps, UnaryOps, TernaryOps
 from tinygrad.runtime.ops_cuda import arch

 dtype_to_nvtype = {dtypes.float32: "f32", dtypes.float16: "f16", dtypes.int64: "s64", dtypes.int32: "s32", dtypes.int8: "s8", dtypes.bool: "pred", dtypes.uint64: "u64", dtypes.uint32: "u32", dtypes.uint16: "u16", dtypes.uint8: "u8", "bits16": "b16", dtypes.float64: "f64"}
@@ -3,7 +3,7 @@ from typing import Tuple, Set, Dict
 from tinygrad import dtypes
 from tinygrad.codegen.assembly import AssemblyCodegen, Register
 from tinygrad.codegen.kernel import Ops
-from tinygrad.ops import BinaryOps, UnaryOps, TernaryOps
+from tinygrad.uop.ops import BinaryOps, UnaryOps, TernaryOps
 from tinygrad.runtime.ops_gpu import ROCM_LLVM_PATH

 # ugh, is this really needed?
@@ -4,7 +4,7 @@ from tinygrad.helpers import dedup, cpu_time_execution, DEBUG
 from tinygrad.engine.jit import GraphRunner, GraphException
 from tinygrad.device import Buffer, Device
 from tinygrad.engine.realize import ExecItem, CompiledRunner
-from tinygrad.ops import Variable
+from tinygrad.uop.ops import Variable
 from tinygrad.runtime.ops_cpu import ClangProgram
 from tinygrad.renderer.cstyle import ClangRenderer
 render_dtype = ClangRenderer().render_dtype
@@ -3,7 +3,7 @@ from typing import List, Any, Dict, cast, Optional, Tuple
 from tinygrad.helpers import init_c_var, round_up
 from tinygrad.device import Buffer, BufferSpec
 from tinygrad.device import Compiled, Device
-from tinygrad.ops import Variable
+from tinygrad.uop.ops import Variable
 from tinygrad.runtime.ops_hsa import HSADevice, PROFILE, Profiler
 from tinygrad.engine.realize import ExecItem, BufferXfer, CompiledRunner
 from tinygrad.engine.jit import MultiGraphRunner, GraphException
@@ -1,7 +1,7 @@
 from typing import Dict, Set
 import yaml
 from tinygrad.codegen.uops import UOpGraph, UOps, UOp
-from tinygrad.ops import BinaryOps
+from tinygrad.uop.ops import BinaryOps
 from tinygrad.dtype import dtypes

 def uops_to_rdna(function_name:str, uops:UOpGraph) -> str:
@@ -1,6 +1,6 @@
 from typing import Dict, List, Final, Callable, DefaultDict
 from collections import defaultdict
-from tinygrad.ops import UnaryOps, BinaryOps, TernaryOps, Op
+from tinygrad.uop.ops import UnaryOps, BinaryOps, TernaryOps, Op
 from tinygrad.helpers import DType, PtrDType, dtypes, ImageDType, DEBUG, getenv
 from tinygrad.codegen.kernel import UOp, Ops
 from triton.compiler import compile as triton_compile
@@ -6,7 +6,7 @@ from tinygrad.engine.jit import TinyJit
 from tinygrad.nn.state import get_state_dict
 from tinygrad.helpers import Context
 from tinygrad.dtype import dtypes
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops
 import json
 from collections import OrderedDict

@@ -6,7 +6,7 @@ from tinygrad import Tensor, Device, Context
 from tinygrad.helpers import getenv
 from tinygrad.codegen.kernel import Kernel, Opt, OptOps
 from tinygrad.engine.realize import CompiledRunner, ExecItem
-from tinygrad.ops import graph_rewrite, PatternMatcher, UPat, Ops, UOp
+from tinygrad.uop.ops import graph_rewrite, PatternMatcher, UPat, Ops, UOp

 # TODO: on METAL for `DEBUG=4 python3 extra/gemm/amd_matmul.py`
 # * fix load grouping (like float4). idk why it's not working, need new devectorizer (this is a Monday project)
@@ -5,7 +5,7 @@ from typing import Optional, List, Tuple, cast, Dict, Final, DefaultDict, Self

 # for copied uops
 from tinygrad.codegen.kernel import Kernel, KernelOptError
-from tinygrad.ops import UOp, Ops, BinaryOps, UnaryOps, TernaryOps, KernelInfo
+from tinygrad.uop.ops import UOp, Ops, BinaryOps, UnaryOps, TernaryOps, KernelInfo
 from tinygrad.engine.search import Opt, OptOps
 from tinygrad import Device, dtypes, Tensor
 from tinygrad.dtype import PtrDType, DType, DTYPES_DICT
@@ -1,6 +1,6 @@
 import gc
 from tinygrad.helpers import prod
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp
 from tinygrad.device import Buffer
 from tinygrad import Tensor, GlobalCounters

@@ -11,11 +11,11 @@ from tinygrad.codegen.kernel import Kernel
 from tinygrad.helpers import getenv

 # stuff needed to unpack a kernel
-from tinygrad.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
+from tinygrad.uop.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
 from tinygrad.dtype import dtypes
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View
-from tinygrad.ops import Variable
+from tinygrad.uop.ops import Variable
 inf, nan = float('inf'), float('nan')
 from tinygrad.codegen.kernel import Opt, OptOps

@@ -4,11 +4,11 @@ from tqdm import tqdm, trange
 import numpy as np

 # stuff needed to unpack a kernel
-from tinygrad.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
+from tinygrad.uop.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
 from tinygrad.dtype import dtypes
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View
-from tinygrad.ops import Variable
+from tinygrad.uop.ops import Variable
 inf, nan = float('inf'), float('nan')
 from tinygrad.codegen.kernel import Opt, OptOps

@@ -1,7 +1,7 @@
 # stuff needed to unpack a kernel
 from tinygrad import Variable
 from tinygrad.codegen.kernel import Opt, OptOps
-from tinygrad.ops import UOp, Ops, KernelInfo
+from tinygrad.uop.ops import UOp, Ops, KernelInfo
 from tinygrad.dtype import dtypes, PtrDType
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View
@@ -8,11 +8,11 @@ from tinygrad.nn.optim import Adam
 from tinygrad.nn.state import get_parameters, get_state_dict, safe_save, safe_load, load_state_dict

 # stuff needed to unpack a kernel
-from tinygrad.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
+from tinygrad.uop.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
 from tinygrad.dtype import dtypes
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View
-from tinygrad.ops import Variable
+from tinygrad.uop.ops import Variable
 inf, nan = float('inf'), float('nan')
 from tinygrad.codegen.kernel import Opt, OptOps

@@ -4,9 +4,9 @@ from collections import defaultdict
 from typing import List, Tuple, DefaultDict
 from extra.optimization.helpers import load_worlds, ast_str_to_ast
 from tinygrad.helpers import prod, tqdm
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.shape.shapetracker import ShapeTracker
-from tinygrad.ops import sym_infer
+from tinygrad.uop.ops import sym_infer
 from tinygrad.tensor import Tensor

 class MovementOps(Enum): RESHAPE = auto(); PERMUTE = auto(); EXPAND = auto(); PAD = auto(); SHRINK = auto(); STRIDE = auto(); AS_STRIDED = auto() # noqa: E702
@@ -3,7 +3,7 @@
 # A002 Function argument `input` is shadowing a Python builtin
 # A006 Lambda argument `input` is shadowing a Python builtin
 from tinygrad import Tensor, dtypes, Device
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops
 from tinygrad.helpers import getenv, prod
 import torch.lib
 TORCH_DEBUG = getenv("TORCH_DEBUG")
setup.py
@@ -26,7 +26,7 @@ setup(name='tinygrad',
 long_description_content_type='text/markdown',
 packages = ['tinygrad', 'tinygrad.runtime.autogen', 'tinygrad.runtime.autogen.am', 'tinygrad.codegen', 'tinygrad.nn',
 'tinygrad.renderer', 'tinygrad.engine', 'tinygrad.viz', 'tinygrad.runtime', 'tinygrad.runtime.support',
-'tinygrad.runtime.support.am', 'tinygrad.runtime.graph', 'tinygrad.shape'],
+'tinygrad.runtime.support.am', 'tinygrad.runtime.graph', 'tinygrad.shape', 'tinygrad.uop'],
 package_data = {'tinygrad': ['py.typed'], 'tinygrad.viz': ['index.html', 'perfetto.html', 'assets/**/*', 'lib/**/*']},
 classifiers=[
 "Programming Language :: Python :: 3",
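Because setup.py now ships the new 'tinygrad.uop' package, a quick hypothetical post-install check is sketched below; the module names come from this diff, the check itself does not:

import importlib
for mod in ("tinygrad.uop.ops", "tinygrad.uop.spec", "tinygrad.uop.upat"):
    importlib.import_module(mod)  # raises ImportError if the subpackage was not packaged
print("tinygrad.uop subpackage is importable")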
test/external/external_benchmark_schedule.py
@@ -2,7 +2,7 @@ from typing import List
 from extra.models.resnet import ResNet50
 from tinygrad import Tensor, nn
 from tinygrad.helpers import Profiling, Timing, getenv, BEAM, NOOPT, DEBUG, Context, ansilen
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops
 from tinygrad.codegen.kernel import Kernel
 from tinygrad.codegen.heuristic import hand_coded_optimizations
 from tinygrad.codegen import get_rewrites_for_renderer, apply_rewrites
@@ -3,7 +3,7 @@ from tinygrad.codegen.kernel import Kernel, Opt, OptOps
 from tinygrad.dtype import dtypes
 from tinygrad.engine.realize import CompiledRunner
 from tinygrad.engine.search import bufs_from_lin
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View

@@ -6,7 +6,7 @@ import unittest, random
 import numpy as np
 from tinygrad.codegen.kernel import Kernel, KernelOptError
 from tinygrad.device import is_dtype_supported
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.engine.search import Opt, OptOps
 from tinygrad import Device, dtypes, Tensor
 from test.external.fuzz_linearizer import compare_linearizer, compare_states, get_fuzz_rawbuf_like
test/external/external_test_nv.py
@@ -8,7 +8,7 @@ from tinygrad.engine.realize import get_runner, CompiledRunner
 from test.external.fuzz_linearizer import get_fuzz_rawbufs

 from tinygrad.codegen.kernel import Kernel
-from tinygrad.ops import LazyOp, Ops, ReduceOps, BufferOps, MemBuffer
+from tinygrad.uop.ops import LazyOp, Ops, ReduceOps, BufferOps, MemBuffer
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View

test/external/external_test_train_gpt2.py
@@ -1,7 +1,7 @@
 # ruff: noqa: E501
 import unittest

-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.engine.search import Opt, OptOps
 from tinygrad.dtype import dtypes
 from tinygrad.shape.shapetracker import ShapeTracker
test/external/external_test_valid_remove.py
@@ -2,7 +2,7 @@
 import unittest

 from tinygrad import Device
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.engine.search import Opt, OptOps
 from tinygrad.dtype import dtypes
 from tinygrad.shape.shapetracker import ShapeTracker
test/external/fuzz_fast_idiv.py
@@ -1,7 +1,7 @@
 import random
 from z3 import Int, Solver, sat
 from tinygrad import dtypes, Device
-from tinygrad.ops import UOp, Ops, UPat, graph_rewrite, PatternMatcher
+from tinygrad.uop.ops import UOp, Ops, UPat, graph_rewrite, PatternMatcher
 from tinygrad.codegen.devectorizer import fast_idiv
 random.seed(42)

test/external/fuzz_linearizer.py
@@ -25,7 +25,7 @@ from tinygrad.codegen.kernel import Opt, OptOps
 from tinygrad.engine.search import get_kernel_actions, bufs_from_lin
 from tinygrad.engine.realize import CompiledRunner
 from tinygrad.helpers import getenv, from_mv, prod, colored, Context, DEBUG, Timing
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.device import is_dtype_supported

 def on_linearizer_will_run(): pass
test/external/fuzz_schedule.py
@@ -5,7 +5,7 @@ from tinygrad.device import Buffer
 from tinygrad.engine.realize import capturing, lower_schedule_item
 from tinygrad.helpers import DEBUG, MULTIOUTPUT, colored, getenv
 from tinygrad.engine.schedule import LBScheduleItem, _graph_schedule, ScheduleItem
-from tinygrad.ops import Ops, UOp
+from tinygrad.uop.ops import Ops, UOp
 from tinygrad.tensor import Tensor, _to_np_dtype

 ctx_vars = { MULTIOUTPUT: (0, 1) }
test/external/fuzz_symbolic.py
@@ -1,7 +1,7 @@
 import itertools
 import random
 from tinygrad import Variable, dtypes
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp
 from tinygrad.helpers import DEBUG
 random.seed(42)

@@ -42,7 +42,7 @@ def gt(expr, rng=None):
 return expr > rng, rng

 # NOTE: you have to replace these for this test to pass
-from tinygrad.ops import python_alu, Ops
+from tinygrad.uop.ops import python_alu, Ops
 python_alu[Ops.MOD] = lambda x,y: x%y
 python_alu[Ops.IDIV] = lambda x,y: x//y

test/external/fuzz_uops.py
@@ -3,11 +3,11 @@ from collections import defaultdict
 import numpy as np
 from dataclasses import replace
 from typing import DefaultDict, Dict, List, Tuple
-from tinygrad.ops import UOp, print_uops, Ops
+from tinygrad.uop.ops import UOp, print_uops, Ops
 from tinygrad.device import Buffer, Device
 from tinygrad.engine.realize import CompiledRunner
 from tinygrad.helpers import DEBUG, colored
-from tinygrad.ops import Variable
+from tinygrad.uop.ops import Variable
 from tinygrad.tensor import _to_np_dtype
 from test.external.fuzz_schedule import FUZZ_SCHEDULE_MAX_PATHS, find_all_toposorts

@@ -6,7 +6,7 @@ from tinygrad.helpers import VERSION, Context, ContextVar, colored, db_connectio
 from tinygrad.engine.grouper import get_becomes_map
 from tinygrad.codegen.kernel import Kernel, Opt
 from tinygrad.renderer import Renderer
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops

 # *** process replay settings

@@ -2,7 +2,7 @@ import time, struct
 from typing import Any, Callable, Optional
 import numpy as np
 from tinygrad import Tensor, dtypes, Device
-from tinygrad.ops import UOp, Ops, sint
+from tinygrad.uop.ops import UOp, Ops, sint
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.tensor import _to_np_dtype
 from tinygrad.engine.realize import Runner
@@ -6,7 +6,7 @@ from tinygrad.engine.realize import run_schedule
 from tinygrad.codegen.kernel import Opt, OptOps, Kernel, KernelOptError
 from tinygrad.engine.realize import CompiledRunner, ExecItem
 from tinygrad.engine.search import get_kernel_actions
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops

 class TestArange(unittest.TestCase):
 def _get_flops(self, N, opts=None):
@@ -2,7 +2,7 @@ import unittest, itertools, math
 from typing import Any
 from tinygrad import Tensor, Device, dtypes
 from tinygrad.dtype import DType
-from tinygrad.ops import Ops, UOp
+from tinygrad.uop.ops import Ops, UOp
 from tinygrad.codegen import full_rewrite_to_sink
 import numpy as np
 from tinygrad.device import is_dtype_supported
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 import unittest
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops
 from tinygrad.tensor import Tensor
 from tinygrad.nn import Conv2d
 from tinygrad.shape.shapetracker import ShapeTracker, View
@@ -7,7 +7,7 @@ from hypothesis import given, strategies as strat, settings, HealthCheck
 from tinygrad.dtype import DType
 from tinygrad.helpers import CI, getenv
 from tinygrad.engine.realize import run_schedule
-from tinygrad.ops import GroupOp
+from tinygrad.uop.ops import GroupOp
 from tinygrad.tensor import _to_np_dtype
 from tinygrad.device import is_dtype_supported
 import pytest, math
@@ -4,7 +4,7 @@ import unittest
 import numpy as np
 from tinygrad.device import Buffer
 from tinygrad.engine.realize import run_schedule
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp
 from tinygrad.tensor import Tensor

 def tensors_allocated():
@@ -6,7 +6,7 @@ from dataclasses import replace
 from test.helpers import ast_const
 from tinygrad.codegen.kernel import Opt, OptOps, KernelOptError, Kernel
 from tinygrad.codegen.lowerer import get_grouped_dims
-from tinygrad.ops import UOp, Ops, GroupOp
+from tinygrad.uop.ops import UOp, Ops, GroupOp
 from tinygrad.device import Device, Buffer, is_dtype_supported
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View
@@ -6,7 +6,7 @@ import unittest
 from test.helpers import ast_const
 from tinygrad import Device, dtypes
 from tinygrad.device import is_dtype_supported
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.helpers import getenv
 from tinygrad.shape.shapetracker import ShapeTracker, View
 from tinygrad.engine.search import Opt, OptOps
@@ -3,7 +3,7 @@ import unittest, random
 import numpy as np
 from tinygrad.codegen.kernel import Kernel, KernelOptError
 from tinygrad.device import is_dtype_supported
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.engine.search import Opt, OptOps
 from tinygrad import Device, dtypes, Tensor
 from tinygrad.helpers import CI, Context
@@ -8,7 +8,7 @@ from tinygrad.engine.search import Opt, OptOps, bufs_from_lin
 from extra.optimization.helpers import time_linearizer

 # stuff needed to unpack a kernel
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View

@@ -1,7 +1,7 @@
 import unittest, functools, random
 from typing import List
 from tinygrad import Tensor, Device, nn, GlobalCounters, TinyJit, dtypes, Variable
-from tinygrad.ops import Ops, UOp
+from tinygrad.uop.ops import Ops, UOp
 from tinygrad.helpers import CI, getenv, prod, Context, OSX
 from tinygrad.nn.state import get_parameters, get_state_dict
 from tinygrad.engine.realize import lower_schedule, BufferCopy, CompiledRunner, run_schedule
@@ -3,7 +3,7 @@ import unittest
 import numpy as np
 import torch
 from tinygrad import Tensor, Device, TinyJit
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops
 from tinygrad.helpers import GlobalCounters, CI, Context, OSX
 from tinygrad.nn import Conv1d, ConvTranspose1d, Conv2d, ConvTranspose2d, Linear, Embedding
 from tinygrad.nn import BatchNorm, LayerNorm, LayerNorm2d, GroupNorm, InstanceNorm, RMSNorm, LSTMCell
@@ -2,7 +2,7 @@ import unittest, pickle, types
 import numpy as np
 from tinygrad import Tensor, TinyJit, Variable, dtypes
 from tinygrad.helpers import GlobalCounters, ContextVar, Context
-from tinygrad.ops import PatternMatcher, UPat, UOp, Ops
+from tinygrad.uop.ops import PatternMatcher, UPat, UOp, Ops

 class TestPickle(unittest.TestCase):
 def test_pickle_code_object(self):
@@ -3,7 +3,7 @@ import numpy as np
 import unittest
 from dataclasses import replace
 from tinygrad import Tensor, Context, Device, dtypes
-from tinygrad.ops import Ops, UOp # noqa: F401 # pylint: disable=unused-import
+from tinygrad.uop.ops import Ops, UOp # noqa: F401 # pylint: disable=unused-import
 from tinygrad.codegen.kernel import Kernel, Opt, OptOps
 from tinygrad.engine.realize import CompiledRunner, ExecItem, lower_schedule_item
 from tinygrad.engine.search import bufs_from_lin
@@ -9,7 +9,7 @@ from tinygrad.renderer.cstyle import CStyleLanguage
 from tinygrad.renderer.ptx import PTXRenderer
 from tinygrad.renderer.wgsl import WGSLRenderer
 from tinygrad.runtime.ops_python import PythonRenderer
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.renderer import ProgramSpec
 from tinygrad.tensor import Tensor, _to_np_dtype
 from tinygrad.codegen import full_rewrite
@@ -1,6 +1,6 @@
 import unittest
 from tinygrad import Tensor
-from tinygrad.ops import PatternMatcher, Ops, UPat, graph_rewrite, RewriteContext, UOp
+from tinygrad.uop.ops import PatternMatcher, Ops, UPat, graph_rewrite, RewriteContext, UOp
 from tinygrad.engine.grouper import sym, merge_views

 class TestRewriteTrackedChildren(unittest.TestCase):
@@ -11,9 +11,9 @@ from tinygrad import nn, dtypes, Device, Tensor
 from tinygrad.device import is_dtype_supported
 from tinygrad.dtype import DType, ImageDType
 from tinygrad.shape.shapetracker import ShapeTracker
-from tinygrad.ops import PatternMatcher, UOp, Ops, GroupOp, UPat, graph_rewrite, track_rewrites
+from tinygrad.uop.ops import PatternMatcher, UOp, Ops, GroupOp, UPat, graph_rewrite, track_rewrites
 from tinygrad.codegen.symbolic import symbolic_simple
-from tinygrad.spec import type_verify, shape_spec
+from tinygrad.uop.spec import type_verify, shape_spec
 from tinygrad.helpers import CI, DEBUG, FUSE_ARANGE, SPLIT_REDUCEOP, GlobalCounters, Context, getenv, all_same, temp
 from tinygrad.engine.grouper import view_left, view_right, sym, get_becomes_map, Kernel, create_ast, merge_views, create_kernels
 from tinygrad.engine.schedule import ScheduleItem, create_schedule_with_vars
@@ -3,7 +3,7 @@ import unittest
 from test.helpers import ast_const
 from tinygrad.codegen.kernel import Opt, OptOps
 from tinygrad.codegen.kernel import Kernel
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.engine.search import bufs_from_lin, actions, beam_search
 from tinygrad.device import Device, Buffer
 from tinygrad.tensor import Tensor
@@ -2,7 +2,7 @@ import unittest
 from tinygrad import Tensor, Variable
 from tinygrad.shape.shapetracker import View
 from tinygrad.helpers import Context, GlobalCounters
-from tinygrad.ops import sym_infer
+from tinygrad.uop.ops import sym_infer
 from examples.gpt2 import Attention
 import numpy as np

@@ -8,7 +8,7 @@ from tinygrad.helpers import getenv, temp, mv_address
 from extra.gradcheck import numerical_jacobian, jacobian, gradcheck
 from hypothesis import given, settings, strategies as strat
 from tinygrad.device import is_dtype_supported
-from tinygrad.ops import Ops, UOp
+from tinygrad.uop.ops import Ops, UOp
 from tinygrad.runtime.support.compiler_cuda import PTX
 from tinygrad.codegen import full_rewrite
 from tinygrad.dtype import DType
@@ -3,7 +3,7 @@ import numpy as np
 import unittest
 from tinygrad import Tensor, Device, dtypes
 from tinygrad.engine.realize import run_schedule
-from tinygrad.ops import Ops, UOp, UPat
+from tinygrad.uop.ops import Ops, UOp, UPat

 class TestTensorUOp(unittest.TestCase):
 def test_fromcpu_shape_tracker(self):
@@ -2,7 +2,7 @@ from typing import List
 import unittest, pytest
 from tinygrad import dtypes, Variable
 from tinygrad.helpers import DEBUG, Context
-from tinygrad.ops import Ops, UOp, UPat, PatternMatcher, track_rewrites, graph_rewrite
+from tinygrad.uop.ops import Ops, UOp, UPat, PatternMatcher, track_rewrites, graph_rewrite
 from tinygrad.codegen.symbolic import sym
 from tinygrad.codegen import full_rewrite, full_rewrite_to_sink
 from tinygrad.codegen.expander import expander
@@ -7,8 +7,8 @@ from tinygrad.tensor import Tensor, _to_np_dtype
 from tinygrad.helpers import CI, DEBUG, getenv, Context, Timing
 from tinygrad.dtype import dtypes, DType
 from tinygrad.device import Buffer, Device
-from tinygrad.ops import Ops, UOp, UPat, KernelInfo, exec_alu # noqa F401
-from tinygrad.spec import spec
+from tinygrad.uop.ops import Ops, UOp, UPat, KernelInfo, exec_alu # noqa F401
+from tinygrad.uop.spec import spec
 from tinygrad.renderer import ProgramSpec
 from tinygrad.engine.grouper import fix_kernel_ops
 from tinygrad.engine.realize import CompiledRunner, get_kernel
@@ -4,7 +4,7 @@ from tinygrad.helpers import getenv, GlobalCounters
 from tinygrad.engine.realize import lower_schedule_item, ProgramSpec
 from tinygrad.renderer import Estimates
 from tinygrad.codegen import full_rewrite
-from tinygrad.ops import Ops, UOp
+from tinygrad.uop.ops import Ops, UOp
 from tinygrad.dtype import dtypes
 from tinygrad.codegen.kernel import Kernel, Opt, OptOps, KernelOptError
 from tinygrad.device import Device
@@ -1,6 +1,6 @@
 import unittest
 from tinygrad import Tensor, GlobalCounters, dtypes
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops
 from tinygrad.helpers import Timing, CI, Profiling, WINO, DEBUG, getenv
 from tinygrad.codegen.kernel import Kernel
 from tinygrad.codegen.heuristic import hand_coded_optimizations
@@ -1,7 +1,7 @@
 import unittest
 from tinygrad import Tensor
 from tinygrad.helpers import Context
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops

 class TestRingAllReduce(unittest.TestCase):
 @unittest.skip("still broken")
@@ -1,6 +1,6 @@
 import unittest, random
 from tinygrad.dtype import dtypes
-from tinygrad.ops import print_uops, UOp, Ops
+from tinygrad.uop.ops import print_uops, UOp, Ops
 from tinygrad.codegen.linearize import block_reorder
 from tinygrad.renderer.cstyle import OpenCLRenderer

@@ -3,7 +3,7 @@ import unittest, math
 import torch
 from tinygrad import Tensor
 from tinygrad.dtype import dtypes
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp
 from tinygrad.gradient import compute_gradient

 class TestGradient(unittest.TestCase):
@@ -1,7 +1,7 @@
 import unittest, math
 from tinygrad import dtypes
 from tinygrad.helpers import all_same
-from tinygrad.ops import GroupOp, UOp, Ops, exec_alu
+from tinygrad.uop.ops import GroupOp, UOp, Ops, exec_alu
 from tinygrad.codegen import full_rewrite_to_sink

 # Helper function to apply the graph rewrite
@@ -203,7 +203,7 @@ class TestGEPAndVectorizeRewrite(unittest.TestCase):


 import inspect
-from tinygrad.ops import graph_rewrite, _substitute, track_rewrites
+from tinygrad.uop.ops import graph_rewrite, _substitute, track_rewrites
 from tinygrad.codegen.symbolic import symbolic_simple

 class TestBottomUpRewrite(unittest.TestCase):
@@ -290,7 +290,7 @@ class TestPolyN(unittest.TestCase):

 def test_uop(self):
 from tinygrad.dtype import dtypes
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp
 from test.helpers import eval_uop
 np.testing.assert_allclose(eval_uop(polyN(UOp.const(dtypes.float, 1.0), [1.0, -2.0, 1.0])), 0.0)
 np.testing.assert_allclose(eval_uop(polyN(UOp.const(dtypes.float, 2.0), [1.0, -2.0, 1.0])), 1.0)
@@ -1,5 +1,5 @@
 import unittest, time
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp
 from tinygrad.dtype import dtypes

 # it's about 1 ms per 1k UOps on M3
@@ -1,7 +1,7 @@
 import unittest, itertools
 from tinygrad.dtype import dtypes
-from tinygrad.ops import Ops, UOp, GroupOp # noqa: F401
-from tinygrad.ops import PatternMatcher, UPat
+from tinygrad.uop.ops import Ops, UOp, GroupOp # noqa: F401
+from tinygrad.uop.ops import PatternMatcher, UPat

 class TestPatternMatcher(unittest.TestCase):
 def test_simple_match(self):
@@ -1,6 +1,6 @@
 import unittest
 from tinygrad import dtypes
-from tinygrad.ops import UOp, graph_rewrite_map, _substitute
+from tinygrad.uop.ops import UOp, graph_rewrite_map, _substitute
 from tinygrad.codegen.symbolic import symbolic

 class TestRewriteMap(unittest.TestCase):
@@ -5,7 +5,7 @@ from tinygrad.dtype import dtypes
 from tinygrad.helpers import prod
 from tinygrad.shape.shapetracker import ShapeTracker, View
 from tinygrad import Variable
-from tinygrad.ops import UOp, Ops, graph_rewrite
+from tinygrad.uop.ops import UOp, Ops, graph_rewrite
 from tinygrad.codegen.devectorizer import sym
 from itertools import product

@@ -2,7 +2,7 @@ import unittest, itertools

 from tinygrad.codegen import full_rewrite_to_sink
 from tinygrad.dtype import dtypes
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.codegen.symbolic import simplify_valid

 def get_gated_load_uop(valid:UOp, idx:UOp):
@@ -1,6 +1,6 @@
 import unittest
 from tinygrad import Tensor
-from tinygrad.ops import UPat, Ops, UOp
+from tinygrad.uop.ops import UPat, Ops, UOp

 # NOTE: unlike before base for a realized tensor is always a BUFFER
 realized_pattern = UPat(Ops.BUFFER)
@@ -1,7 +1,7 @@
 import unittest, math
 import numpy as np
 from tinygrad import dtypes
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.codegen.transcendental import TRANSCENDENTAL_SUPPORTED_DTYPES, payne_hanek_reduction, cody_waite_reduction
 from tinygrad.codegen.transcendental import frexp, rintk, xpow, xexp2, xlog2, trig_poly, pow2if
 from test.helpers import eval_uop
@@ -1,6 +1,6 @@
 import unittest
 from tinygrad.dtype import dtypes
-from tinygrad.ops import UOp, resolve
+from tinygrad.uop.ops import UOp, resolve

 class TestUOpResolve(unittest.TestCase):
 def test_simple_int(self):
@@ -4,7 +4,7 @@ import unittest, pickle, functools
 from tinygrad.dtype import dtypes, ConstType
 from tinygrad.codegen import full_rewrite
 from tinygrad.codegen.devectorizer import sym
-from tinygrad.ops import UOp, Ops, graph_rewrite, sym_infer
+from tinygrad.uop.ops import UOp, Ops, graph_rewrite, sym_infer
 from tinygrad import Variable

 def render(self) -> tuple[str, ConstType, ConstType]:
@@ -1,5 +1,5 @@
 import unittest, math
-from tinygrad.ops import UOp, Ops
+from tinygrad.uop.ops import UOp, Ops
 from tinygrad.dtype import dtypes

 class TestVminVmaxProperties(unittest.TestCase):
@@ -1,8 +1,8 @@
 import unittest
 from tinygrad.helpers import DEBUG
 from tinygrad.dtype import dtypes
-from tinygrad.ops import UPat, track_rewrites, GroupOp, Ops
-from tinygrad.upat import _get_code, upat_compile
+from tinygrad.uop.ops import UPat, track_rewrites, GroupOp, Ops
+from tinygrad.uop.upat import _get_code, upat_compile
 import dis

 @track_rewrites()
@@ -4,8 +4,8 @@ import unittest
 from tinygrad import Tensor
 from tinygrad.codegen.kernel import Kernel
 from tinygrad.helpers import DEBUG
-from tinygrad.ops import UOp, Ops, print_uops
-from tinygrad.spec import type_verify, shape_spec
+from tinygrad.uop.ops import UOp, Ops, print_uops
+from tinygrad.uop.spec import type_verify, shape_spec
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad import dtypes
 from tinygrad.shape.view import View
@@ -1,8 +1,8 @@
 import unittest, decimal, json
 from tinygrad.dtype import dtypes
-from tinygrad.ops import TRACK_MATCH_STATS, TrackedPatternMatcher, UOp, graph_rewrite, track_rewrites, UPat, Ops
+from tinygrad.uop.ops import TRACK_MATCH_STATS, TrackedPatternMatcher, UOp, graph_rewrite, track_rewrites, UPat, Ops
 from tinygrad.codegen.symbolic import symbolic
-from tinygrad.ops import tracked_ctxs as contexts, tracked_keys as keys, _name_cnt, _substitute
+from tinygrad.uop.ops import tracked_ctxs as contexts, tracked_keys as keys, _name_cnt, _substitute
 from tinygrad.device import ProfileDeviceEvent, ProfileRangeEvent, ProfileGraphEvent, ProfileGraphEntry
 from tinygrad.viz.serve import get_metadata, get_details, uop_to_json, to_perfetto

@@ -4,7 +4,7 @@ if int(os.getenv("TYPED", "0")):
 install_import_hook(__name__)
 from tinygrad.tensor import Tensor # noqa: F401
 from tinygrad.engine.jit import TinyJit # noqa: F401
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp
 Variable = UOp.variable
 from tinygrad.dtype import dtypes # noqa: F401
 from tinygrad.helpers import GlobalCounters, fetch, Context, getenv # noqa: F401
@@ -2,7 +2,7 @@ from typing import Any, Callable
 import functools
 from dataclasses import dataclass
 from tinygrad.helpers import QUANTIZE, DEVECTORIZE, TRANSCENDENTAL
-from tinygrad.ops import PatternMatcher, graph_rewrite, UOp
+from tinygrad.uop.ops import PatternMatcher, graph_rewrite, UOp
 from tinygrad.renderer import Renderer

 # import all pattern matchers here
@@ -4,7 +4,7 @@ from collections import defaultdict
 from dataclasses import dataclass
 from tinygrad.device import is_dtype_supported
 from tinygrad.dtype import dtypes, ImageDType, PtrDType, promo_lattice, DType
-from tinygrad.ops import UOp, Ops, UPat, PatternMatcher, resolve, graph_rewrite, GroupOp, identity_element
+from tinygrad.uop.ops import UOp, Ops, UPat, PatternMatcher, resolve, graph_rewrite, GroupOp, identity_element
 from tinygrad.codegen.symbolic import split_uop, uop_given_valid, parse_valid, simplify_valid, sym, symbolic_flat
 from tinygrad.helpers import getenv, flatten, AMX, prod, partition
 from tinygrad.codegen.transcendental import xexp2, xlog2, xsin, xpow, TRANSCENDENTAL_SUPPORTED_DTYPES
@@ -2,7 +2,7 @@

 import functools, itertools, operator
 from tinygrad.helpers import AMX, dedup, flatten, all_same, prod
-from tinygrad.ops import UOp, Ops, UPat, PatternMatcher, GroupOp
+from tinygrad.uop.ops import UOp, Ops, UPat, PatternMatcher, GroupOp

 def _expand_arg_to_idx(args:tuple[tuple[int, int], ...], rpk:dict[int, int]) -> int:
 idx, mul = 0, 1
@@ -2,7 +2,7 @@ import itertools
 from tinygrad.codegen.kernel import Kernel, Opt, OptOps, KernelOptError
 from tinygrad.helpers import getenv, DEBUG, all_int, prod
 from tinygrad.dtype import ImageDType
-from tinygrad.ops import Ops, resolve
+from tinygrad.uop.ops import Ops, resolve

 def hand_coded_optimizations(k:Kernel) -> list[Opt]:
 # make a copy so it does not mutate the input
@@ -4,9 +4,9 @@ from dataclasses import dataclass
 from collections import defaultdict
 from typing import Optional, cast, Final, Callable, Sequence

-from tinygrad.ops import GroupOp, KernelInfo, UOp, Ops, can_pad, resolve, Variable, sint, graph_rewrite, track_rewrites, print_uops, PatternMatcher
-from tinygrad.ops import smax
-from tinygrad.spec import type_verify, shape_spec
+from tinygrad.uop.ops import GroupOp, KernelInfo, UOp, Ops, can_pad, resolve, Variable, sint, graph_rewrite, track_rewrites, print_uops
+from tinygrad.uop.ops import PatternMatcher, smax
+from tinygrad.uop.spec import type_verify, shape_spec
 from tinygrad.device import Device
 from tinygrad.renderer import Renderer, TensorCore, ProgramSpec, Opt, OptOps
 from tinygrad.dtype import ImageDType
@@ -2,9 +2,9 @@ from __future__ import annotations
 import heapq
 from collections import defaultdict
 from dataclasses import dataclass, replace
-from tinygrad.ops import UOp, Ops, PatternMatcher, UPat, GroupOp
+from tinygrad.uop.ops import UOp, Ops, PatternMatcher, UPat, GroupOp
 from tinygrad.helpers import dedup, partition, all_same, flatten
-from tinygrad.spec import type_verify
+from tinygrad.uop.spec import type_verify

 # NOTE: any toposort should be valid here, unlike last time this isn't required, it's just for speed
 def block_reorder(lst:list[UOp]) -> list[UOp]:
@@ -3,7 +3,7 @@ import itertools, operator, math
 from dataclasses import dataclass
 from typing import cast
 from tinygrad.dtype import dtypes, PtrDType, least_upper_dtype
-from tinygrad.ops import KernelInfo, UOp, Ops, PatternMatcher, UPat, sint, sint_to_uop
+from tinygrad.uop.ops import KernelInfo, UOp, Ops, PatternMatcher, UPat, sint, sint_to_uop
 from tinygrad.renderer import Renderer
 from tinygrad.helpers import all_int, prod, partition, flatten, unwrap
 from tinygrad.codegen.symbolic import symbolic
@@ -2,7 +2,7 @@
 from typing import Any, Literal, cast
 import math, operator, struct, functools
 from collections import defaultdict
-from tinygrad.ops import Ops, PatternMatcher, UPat, UOp, GroupOp, exec_alu
+from tinygrad.uop.ops import Ops, PatternMatcher, UPat, UOp, GroupOp, exec_alu
 from tinygrad.dtype import ConstType, dtypes, PtrDType
 from tinygrad.helpers import partition, all_same, prod, flatten, get_single_element, cdiv, cmod
 from tinygrad.codegen.transcendental import xpow
@@ -1,7 +1,7 @@
 import math
 from tinygrad.dtype import dtypes, DType
 from tinygrad.helpers import polyN
-from tinygrad.ops import UOp
+from tinygrad.uop.ops import UOp

 TRANSCENDENTAL_SUPPORTED_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64)

@@ -382,7 +382,7 @@ if PROFILE:
 with open(fn:=temp("profile.pkl", append_user=True), "wb") as f: pickle.dump(Compiled.profile_events, f)

 if not getenv("SQTT", 0):
-from tinygrad.ops import launch_viz
+from tinygrad.uop.ops import launch_viz
 launch_viz("PROFILE", fn)

 if __name__ == "__main__":
@@ -1,7 +1,7 @@
 from collections import defaultdict, deque
 from dataclasses import dataclass
-from tinygrad.ops import UOp, Ops, GroupOp, PatternMatcher, UPat, graph_rewrite, graph_rewrite_map, identity_element, resolve
-from tinygrad.ops import can_pad, sint, track_rewrites, _substitute
+from tinygrad.uop.ops import UOp, Ops, GroupOp, PatternMatcher, UPat, graph_rewrite, graph_rewrite_map, identity_element, resolve
+from tinygrad.uop.ops import can_pad, sint, track_rewrites, _substitute
 from tinygrad.codegen.lowerer import get_contraction_with_reduce, get_contraction
 from tinygrad.codegen.symbolic import symbolic_simple
 from tinygrad.helpers import Metadata, all_int, all_same, colored, prod, dedup, unwrap, getenv, pluralize, ContextVar, Context, diskcache_put
@@ -10,7 +10,7 @@ from tinygrad.dtype import ImageDType
 from tinygrad.engine.multi import replace_allreduce
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View, strides_for_shape
-from tinygrad.spec import type_verify, sched_spec
+from tinygrad.uop.spec import type_verify, sched_spec

 # creation can recurse a lot
 import sys
@@ -4,7 +4,7 @@ from tinygrad.tensor import Tensor
 from tinygrad.helpers import flatten, merge_dicts, DEBUG, Context, BEAM, getenv, colored, JIT, dedup, partition, unwrap
 from tinygrad.device import Buffer, Compiled, Device, MultiBuffer
 from tinygrad.dtype import DType
-from tinygrad.ops import UOp, Variable, sym_infer, Ops
+from tinygrad.uop.ops import UOp, Variable, sym_infer, Ops
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.engine.realize import ExecItem, capturing, ViewOp, BufferCopy, BufferXfer, CompiledRunner, Runner, Estimates
 from tinygrad.engine.memory import _internal_memory_planner
@@ -3,7 +3,7 @@ from collections import defaultdict
 from tinygrad.engine.schedule import ScheduleItem
 from tinygrad.device import Device, Buffer
 from tinygrad.helpers import NO_MEMORY_PLANNER, dedup, DEBUG, round_up
-from tinygrad.ops import Ops
+from tinygrad.uop.ops import Ops
 from tinygrad.dtype import dtypes, ImageDType
 from tinygrad.runtime.support.allocator import TLSFAllocator

@@ -1,6 +1,6 @@
 import functools, itertools, operator
 from tinygrad.helpers import all_same, all_int, prod, DEBUG, RING, getenv
-from tinygrad.ops import Ops, UOp, sint, PatternMatcher, UPat, GroupOp, graph_rewrite_map, track_rewrites
+from tinygrad.uop.ops import Ops, UOp, sint, PatternMatcher, UPat, GroupOp, graph_rewrite_map, track_rewrites

 # *** allreduce implementation ***

@@ -3,7 +3,7 @@ import time, pprint
 from dataclasses import dataclass, replace, field
 from tinygrad.helpers import all_same, colored, getenv, DEBUG, GlobalCounters, ansilen, BEAM, NOOPT, all_int, CAPTURING, Metadata, TRACEMETA
 from tinygrad.helpers import DEVECTORIZE, time_to_str, VALIDATE_WITH_CPU
-from tinygrad.ops import Ops, PatternMatcher, UOp, UPat, Variable, sym_infer
+from tinygrad.uop.ops import Ops, PatternMatcher, UOp, UPat, Variable, sym_infer
 from tinygrad.device import Device, Buffer
 from tinygrad.renderer import Renderer, ProgramSpec, Estimates
 from tinygrad.codegen.kernel import Kernel
@@ -1,7 +1,7 @@
 from typing import cast
 from dataclasses import dataclass, field
 from collections import deque, defaultdict
-from tinygrad.ops import UOp, Variable, Ops, UPat, PatternMatcher, graph_rewrite, buffers
+from tinygrad.uop.ops import UOp, Variable, Ops, UPat, PatternMatcher, graph_rewrite, buffers
 from tinygrad.device import Buffer, MultiBuffer
 from tinygrad.helpers import Metadata, unwrap, merge_dicts

@@ -2,7 +2,7 @@ from typing import cast, Optional, Callable
 import itertools, functools, random, math, time, multiprocessing, traceback, signal, atexit
 from collections import defaultdict
 from dataclasses import replace
-from tinygrad.ops import UOp, Ops, Variable, sym_infer
+from tinygrad.uop.ops import UOp, Ops, Variable, sym_infer
 from tinygrad.device import Device, Buffer, Compiler
 from tinygrad.helpers import prod, flatten, DEBUG, CACHELEVEL, diskcache_get, diskcache_put, getenv, Context, colored, time_to_str
 from tinygrad.helpers import IGNORE_BEAM_CACHE, TC_SEARCH_OVER_SHAPE
@@ -1,7 +1,7 @@
 from typing import cast
 import math, dataclasses
 from tinygrad.dtype import dtypes, sum_acc_dtype
-from tinygrad.ops import UOp, PatternMatcher, UPat, Ops, all_metadata
+from tinygrad.uop.ops import UOp, PatternMatcher, UPat, Ops, all_metadata
 from tinygrad.helpers import argsort

 def reduce_gradient(ctx:UOp, ret:UOp):
@@ -4,7 +4,7 @@ import functools, math
 from enum import Enum, auto
 from dataclasses import dataclass, field, replace
 from tinygrad.helpers import to_function_name, dedup, prod
-from tinygrad.ops import Ops, UOp, sym_infer, sint, Variable, ssimplify, GroupOp, PatternMatcher
+from tinygrad.uop.ops import Ops, UOp, sym_infer, sint, Variable, ssimplify, GroupOp, PatternMatcher
 from tinygrad.dtype import DType

 class OptOps(Enum):
@@ -1,7 +1,7 @@
 from typing import Literal, Callable, cast
 import os, math, sys
 from collections import defaultdict, Counter
-from tinygrad.ops import GroupOp, Ops, UOp, PatternMatcher, UPat
+from tinygrad.uop.ops import GroupOp, Ops, UOp, PatternMatcher, UPat
 from tinygrad.helpers import strip_parens, getenv, prod, dedup, AMX
 from tinygrad.dtype import ImageDType, dtypes, DType, PtrDType
 from tinygrad.renderer import Renderer, TensorCore

Some files were not shown because too many files have changed in this diff.
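For projects that track tinygrad, the import churn above can be applied mechanically. A hypothetical migration helper, with the rename map taken from this diff (the script itself is not part of the commit and rewrites files in place, so run it on a clean checkout):

import pathlib, re

# old module path -> new module path, as renamed in this commit
RENAMES = [
    (re.compile(r"\btinygrad\.ops\b"),  "tinygrad.uop.ops"),
    (re.compile(r"\btinygrad\.spec\b"), "tinygrad.uop.spec"),
    (re.compile(r"\btinygrad\.upat\b"), "tinygrad.uop.upat"),
]

for path in pathlib.Path(".").rglob("*.py"):
    text = original = path.read_text()
    for pattern, new in RENAMES:
        text = pattern.sub(new, text)
    if text != original:
        path.write_text(text)
        print("rewrote", path)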