mirror of https://github.com/tinygrad/tinygrad.git (synced 2026-01-09 15:08:02 -05:00)
cleanup stale examples/extra (#13764)
* cleanup stale files
* examples
* move those back
* old
* delete more
@@ -1,189 +0,0 @@
|
||||
from typing import Tuple, List, NamedTuple, Any, Dict, Optional, Union, DefaultDict, cast
|
||||
from tinygrad.codegen.opt.kernel import Ops, MemOp, UOp
|
||||
from tinygrad.uop.ops import BinaryOps, UnaryOps
|
||||
from tinygrad.dtype import DType, dtypes
|
||||
from tinygrad.helpers import DEBUG
|
||||
from tinygrad.uop.ops import Variable, NumNode, MulNode, DivNode, ModNode, LtNode, SumNode, AndNode
|
||||
import functools
|
||||
import math
|
||||
from collections import defaultdict
|
||||
|
||||
_type_to_letter = {dtypes.float32: 'f', dtypes.bool: 'p', dtypes.int32: 'i', dtypes.int64: 'a', dtypes.uint32: 'u', dtypes.uint64: 'b', dtypes.float.vec(4): 'x', dtypes.uint8: 'uc', dtypes.float16: 'h',
|
||||
dtypes.int8: 'c', dtypes.uint16: 'us', dtypes.float64: 'd'}
|
||||
|
||||
class Register(NamedTuple):
|
||||
nm:str
|
||||
dtype:DType
|
||||
scalar:bool
|
||||
off:Optional[int] = None
|
||||
def __repr__(self): return self.nm if self.off is None else f"{self.nm}:{self.off}"
|
||||
def subregs(self):
|
||||
if self.dtype == dtypes.float.vec(4):
|
||||
return [Register(self.nm, dtypes.float, False, off=off) for off in range(4)]
|
||||
return []
|
||||
|
||||
class AssemblyInstruction(NamedTuple):
|
||||
op: Ops
|
||||
out: Optional[Register]
|
||||
vin: List[Union[Register, int, float]]
|
||||
arg: Any = None
|
||||
|
||||
# warp size of 32, s registers are shared across the warp, v are 32-wide vectors
|
||||
class AssemblyLanguage:
|
||||
supports_load3: bool = False
|
||||
sin_is_sin2pi: bool = False
|
||||
no_div: bool = False
|
||||
#TODO: these should be global vars
|
||||
cnts:DefaultDict[Tuple[DType, bool], int] = defaultdict(int)
|
||||
tor: Dict[Any, Register] = {}
|
||||
ins: List[AssemblyInstruction] = []
|
||||
|
||||
def type_to_letter(self,x): return _type_to_letter[x[0]].upper() if x[1] else _type_to_letter[x[0]]
|
||||
def newreg(self, tok, dtype=dtypes.float32, scalar=False) -> Register:
|
||||
self.tor[tok] = ret = Register(f"%{self.type_to_letter((dtype, scalar))}{self.cnts[(dtype, scalar)]}", dtype, scalar)
|
||||
if dtype == dtypes.float.vec(4):
|
||||
for off in range(4):
|
||||
self.tor[tok] = Register(ret.nm, dtypes.float, ret.scalar, off)
|
||||
self.cnts[(dtype, scalar)] += 1
|
||||
return ret
|
||||
|
||||
def render_numnode(self, b) -> Register:
|
||||
key = ("num", b)
|
||||
if key not in self.tor: self.ins.append(AssemblyInstruction(Ops.LOAD, self.newreg(key, scalar=True, dtype=dtypes.int32), [], b))
|
||||
return self.tor[key]
|
||||
|
||||
def render_alu(self, op, a:Register, b:Union[Register, int, float], dtype=dtypes.int32) -> Register:
|
||||
key = (op, a, b)
|
||||
if key not in self.tor:
|
||||
#if not isinstance(b, Register): b = render_numnode(b)
|
||||
self.ins.append(AssemblyInstruction(Ops.ALU, self.newreg(key, dtype=dtype, scalar=a.scalar and (not isinstance(b, Register) or b.scalar)), [a, b], op))
|
||||
return self.tor[key]
|
||||
|
||||
def render_cast(self, a:Register, new_dtype:DType) -> Register:
|
||||
if a.dtype == new_dtype: return a
|
||||
key = (a, new_dtype)
|
||||
if key not in self.tor:
|
||||
self.ins.append(AssemblyInstruction(Ops.CAST, self.newreg(key, dtype=new_dtype), [a]))
|
||||
return self.tor[key]
|
||||
|
||||
render_ops: Any = { Variable: lambda self, ops, ctx: ctx.tor[self], NumNode: lambda self, ops, ctx: ctx.render_numnode(self.b),
|
||||
MulNode: lambda self, ops, ctx: ctx.render_alu(BinaryOps.MUL, self.a.render(ops, ctx), self.b),
|
||||
DivNode: lambda self, ops, ctx: ctx.render_alu(BinaryOps.DIV, self.a.render(ops, ctx), self.b),
|
||||
ModNode: lambda self, ops, ctx: ctx.render_alu(BinaryOps.MOD, self.a.render(ops, ctx), self.b),
|
||||
LtNode: lambda self, ops, ctx: ctx.render_alu(BinaryOps.CMPLT, self.a.render(ops, ctx), self.b, dtype=dtypes.bool),
|
||||
SumNode: lambda self,ops,ctx: functools.reduce(lambda a,b: ctx.render_alu(BinaryOps.ADD, a, b.render(ops,ctx)), self.nodes[1:], self.nodes[0].render(ops,ctx)),
|
||||
AndNode: lambda self,ops,ctx: functools.reduce(lambda a,b: ctx.render_alu(BinaryOps.MUL, a, b.render(ops,ctx), dtype=dtypes.bool), self.nodes[1:], self.nodes[0].render(ops,ctx)) }
|
||||
|
||||
def addr_w_offset(self, args):
|
||||
assert isinstance(args, MemOp)
|
||||
idx = args.idx*args.memory_dtype.itemsize
|
||||
off = 0 # TODO: should this be None?
|
||||
if isinstance(idx, SumNode):
|
||||
nums = [n.b for n in idx.nodes if isinstance(n, NumNode)]
|
||||
if nums and nums[0] < 4096 and (idx-nums[0]).min >= 0: # TODO: different for each GPU?
|
||||
idx -= nums[0]
|
||||
off = cast(int, nums[0])
|
||||
reg = idx.render(self.render_ops, self)
|
||||
if self.supports_load3:
|
||||
if reg.scalar:
|
||||
new_reg = self.newreg((reg.nm, 'vec'), dtype=reg.dtype)
|
||||
self.ins.append(AssemblyInstruction(Ops.ALU, new_reg, [reg], UnaryOps.NOOP))
|
||||
reg = new_reg
|
||||
return self.tor[args.name], reg, off
|
||||
reg = self.render_alu(BinaryOps.ADD, self.render_cast(reg, dtypes.uint64), self.tor[args.name], dtype=dtypes.uint64)
|
||||
return reg, None, off
|
||||
|
||||
def uops_to_asmstyle(lang, function_name:str, uops:List[UOp]):
|
||||
#TODO: Do not use clear()
|
||||
lang.ins.clear()
|
||||
lang.tor.clear()
|
||||
lang.cnts.clear()
|
||||
buf_to_dtype = {args:dtype for uop,dtype,_,args,_ in uops if uop == Ops.DEFINE_GLOBAL}
|
||||
global_size, local_size = [], []
|
||||
skipload_branch = 0
|
||||
lang.ins += [AssemblyInstruction(Ops.SPECIAL, lang.newreg(buf, dtype=dtypes.uint64, scalar=True), [], buf) for buf in buf_to_dtype]
|
||||
for u in uops:
|
||||
uop,dtype,vin,args,_ = u
|
||||
if uop == Ops.DEFINE_LOCAL:
|
||||
lang.ins.append(AssemblyInstruction(Ops.DEFINE_LOCAL, None, [], args))
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, lang.newreg(args[0], dtype=dtypes.uint64), [args[0]], UnaryOps.NOOP))
|
||||
elif uop == Ops.LOOP:
|
||||
if args[1] == "global":
|
||||
for i,var in enumerate(args[0]):
|
||||
global_size.append(var.max+1)
|
||||
lang.ins.append(AssemblyInstruction(Ops.SPECIAL, lang.newreg(var, dtype=dtypes.int32), [], f"gid{len(args[0])-1-i}"))
|
||||
elif args[1] == "local":
|
||||
for i,var in enumerate(args[0]):
|
||||
local_size.append(var.max+1)
|
||||
lang.ins.append(AssemblyInstruction(Ops.SPECIAL, lang.newreg(var, dtype=dtypes.int32), [], f"lid{len(args[0])-1-i}"))
|
||||
else:
|
||||
for var in args[0]:
|
||||
if not isinstance(var, NumNode): # TODO: why is this coming through?
|
||||
lang.ins.append(AssemblyInstruction(Ops.LOAD, lang.newreg(var, dtype=dtypes.int32, scalar=True), [], 0))
|
||||
lang.ins.append(AssemblyInstruction(Ops.LABEL, None, [], "$loop_"+var.expr))
|
||||
elif uop == Ops.ENDLOOP:
|
||||
if args[1] not in ["global", "local", "global+local"]:
|
||||
for var in reversed(args[0]):
|
||||
if not isinstance(var, NumNode): # TODO: why is this coming through?
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, lang.tor[var], [lang.tor[var], 1], BinaryOps.ADD))
|
||||
pred = lang.render_alu(BinaryOps.CMPLT, lang.tor[var], var.max+1, dtypes.bool)
|
||||
lang.ins.append(AssemblyInstruction(Ops.COND_BRANCH, None, [pred], ("$loop_"+var.expr, True)))
|
||||
elif args[1] == "global+local":
|
||||
for i, var in enumerate(reversed(args[0])):
|
||||
lang.ins.append(AssemblyInstruction(Ops.ENDLOOP, None, [lang.tor[var]], (var.max+1, f"gid{i}")))
|
||||
elif args[1] == 'local':
|
||||
for i, var in enumerate(reversed(args[0])):
|
||||
lang.ins.append(AssemblyInstruction(Ops.ENDLOOP, None, [lang.tor[var]], (var.max+1, f"lid{i}")))
|
||||
elif uop == Ops.CAST:
|
||||
# TODO: we should reconsider outputting CAST in the linearizer. these are needless copies
|
||||
out = lang.newreg(u, dtype)
|
||||
for i,sr in enumerate(out.subregs()):
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, sr, [lang.tor[vin[i]]], UnaryOps.NOOP))
|
||||
elif uop == Ops.ALU:
|
||||
out = lang.newreg(u, dtype) if u not in lang.tor else lang.tor[u]
|
||||
# this is the only thing that can violate SSA
|
||||
if args in [BinaryOps.CMPLT]:
|
||||
pred_reg = lang.newreg((u, 'pred'), dtype=dtypes.bool)
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, pred_reg, [lang.tor[x] for x in vin], args))
|
||||
lang.ins.append(AssemblyInstruction(Ops.CAST, out, [pred_reg], args))
|
||||
elif args == BinaryOps.DIV and lang.no_div:
|
||||
tmp = lang.newreg((u, "rcp"))
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, tmp, [lang.tor[vin[1]]], UnaryOps.RECIP))
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, out, [lang.tor[vin[0]], tmp], BinaryOps.MUL))
|
||||
elif args == UnaryOps.SIN and lang.sin_is_sin2pi:
|
||||
tmp = lang.newreg((u, "2pi"))
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, tmp, [lang.tor[vin[0]], 1/(math.pi*2)], BinaryOps.MUL))
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, out, [tmp], args))
|
||||
else:
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, out, [lang.tor[x] for x in vin], args))
|
||||
elif uop == Ops.DEFINE_REG:
|
||||
reg = lang.newreg(u, dtype=dtype)
|
||||
lang.ins.append(AssemblyInstruction(Ops.LOAD, reg, [], args))
|
||||
elif uop == Ops.SPECIAL:
|
||||
lang.tor[u] = lang.tor[args]
|
||||
elif uop == Ops.CONST:
|
||||
lang.ins.append(AssemblyInstruction(Ops.LOAD, lang.newreg(u, dtype=dtype), [], args))
|
||||
elif uop == Ops.LOAD:
|
||||
idx, treg, off = lang.addr_w_offset(args)
|
||||
reg = lang.newreg(u, dtype=dtype, scalar=(idx.scalar and (not isinstance(treg, Register) or treg.scalar)))
|
||||
if args.valid.min == 0:
|
||||
lang.ins.append(AssemblyInstruction(Ops.LOAD, reg, [], 0))
|
||||
if args.valid.max == 1:
|
||||
pred = args.valid.render(lang.render_ops, lang)
|
||||
lang.ins.append(AssemblyInstruction(Ops.COND_BRANCH, None, [pred], (f"$skipload_{skipload_branch}", False)))
|
||||
if args.valid.max == 1:
|
||||
# NOTE: you can't compute the index in here, because it assumes it's all available later
|
||||
lang.ins.append(AssemblyInstruction(Ops.LOAD, reg, [idx] + ([treg] if treg is not None else []), (off, 'global' if not args.local else 'shared', args.memory_dtype if args.memory_dtype != dtypes.float else None)))
|
||||
if args.valid.min == 0 and args.valid.max == 1:
|
||||
lang.ins.append(AssemblyInstruction(Ops.LABEL, None, [], f"$skipload_{skipload_branch}"))
|
||||
skipload_branch += 1
|
||||
elif uop == Ops.STORE:
|
||||
if args is None:
|
||||
lang.ins.append(AssemblyInstruction(Ops.ALU, lang.tor[vin[0]], [lang.tor[vin[1]]], UnaryOps.NOOP))
|
||||
else:
|
||||
idx, treg, off = lang.addr_w_offset(args)
|
||||
lang.ins.append(AssemblyInstruction(Ops.STORE, None, [idx, lang.tor[vin[0]]] + ([treg] if treg is not None else []), (off, 'global' if not args.local else 'shared', args.memory_dtype if args.memory_dtype != dtypes.float else None)))
|
||||
|
||||
if DEBUG >= 4:
|
||||
for tins in lang.ins: print(tins)
|
||||
return global_size, local_size
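For reference, a minimal standalone sketch of the register-naming scheme that type_to_letter/newreg implement above; it is not part of the deleted file, and the letter table below is only a small subset of _type_to_letter. One counter is kept per (dtype, scalar) pair, and the letter is upper-cased for scalar (warp-shared) registers.

from collections import defaultdict
_letters = {'float32': 'f', 'int32': 'i', 'bool': 'p'}  # subset of _type_to_letter above
_cnts = defaultdict(int)
def newreg_name(dtype_name: str, scalar: bool = False) -> str:
  letter = _letters[dtype_name].upper() if scalar else _letters[dtype_name]
  name = f"%{letter}{_cnts[(dtype_name, scalar)]}"
  _cnts[(dtype_name, scalar)] += 1
  return name
assert newreg_name('float32') == '%f0' and newreg_name('float32') == '%f1'
assert newreg_name('int32', scalar=True) == '%I0'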
|
||||
@@ -1,177 +0,0 @@
|
||||
import struct
|
||||
from platform import system
|
||||
from typing import Tuple, Dict, List, Optional
|
||||
from tinygrad import dtypes
|
||||
from tinygrad.uop.ops import BinaryOps, UnaryOps, TernaryOps
|
||||
from tinygrad.codegen.opt.kernel import Ops, UOp
|
||||
from tinygrad.helpers import CI
|
||||
from tinygrad.codegen.assembly import uops_to_asmstyle, AssemblyLanguage
|
||||
|
||||
def float_to_hex(x): return "%02X%02X%02X%02X" % tuple(struct.pack("f",x)[::-1])
|
||||
def compute_offsets(total):
|
||||
quotient, remainder = divmod(total, 4096)
|
||||
return [4096]*quotient + [remainder] if remainder else [4096]*quotient
|
||||
|
||||
#NOTE: Darwin needs names to start with a "_"
|
||||
def get_name(name): return ('_' if system() == 'Darwin' else '') + name
|
||||
|
||||
class ARM64Language(AssemblyLanguage): pass
|
||||
|
||||
def specialize_to_arm64(fn_nm, asm):
|
||||
var_size = 16
|
||||
prev_uop:Optional[Ops] = None
|
||||
ins = []
|
||||
x_regs = ['x' + str(i) for i in reversed(range(12))]
|
||||
s_regs = ['s' + str(i) for i in reversed(range(3,32)) if i <= 7 or i >= 16]
|
||||
type_to_reg = {dtypes.double: "d", dtypes.half: 'h', dtypes.float32: 's', dtypes.bool: 'w', dtypes.int8:'w', dtypes.int32: 'w', dtypes.int64: 'x', dtypes.uint8:'w', dtypes.uint32: 'w', dtypes.uint64: 'x'}
|
||||
alu = {BinaryOps.ADD: "add", BinaryOps.SUB: "sub", BinaryOps.MUL: "mul", BinaryOps.DIV: "div", BinaryOps.MAX: "max",
|
||||
BinaryOps.MOD: "", BinaryOps.CMPLT: "subs",
|
||||
UnaryOps.NOOP: "mov", UnaryOps.NEG: "neg",
|
||||
UnaryOps.SIN:'bl ' + get_name('sinf'), UnaryOps.LOG2: 'bl ' + get_name("log2f"), UnaryOps.EXP2: 'bl ' + get_name("exp2f"), UnaryOps.SQRT: 'bl ' + get_name("sqrtf"),
|
||||
TernaryOps.MULACC: "madd", TernaryOps.WHERE: "fcsel"}
|
||||
|
||||
def mov_imm(value, reg):
|
||||
# Manually move value into reg if value can't fit
|
||||
if value.__class__ is not float and abs(value) > abs(65535):
|
||||
ins.append(f"movz w15, #{value & 0xffff}")
|
||||
ins.append(f"movk w15, #{(value >> 16) & 0xffff}, lsl #16")
|
||||
ins.append(f"sxtw {reg}, w15")
|
||||
elif reg[0] == 's':
|
||||
ins.append(f"movz x15, 0x{float_to_hex(value)[4:]}")
|
||||
ins.append(f"movk x15, 0x{float_to_hex(value)[:4]}, lsl #16")
|
||||
ins.append("str x15, [sp, 16]")
|
||||
ins.append(f"ldr {reg}, [sp, 16]")
|
||||
else:
|
||||
ins.append(f"mov {reg}, #{value}")
|
||||
|
||||
# Get the variables' live intervals
|
||||
live_range:Dict[str, List[int]] = {}
|
||||
for i, (uop, out, vin, arg) in enumerate(asm):
|
||||
for var in ([v for v in [out] + vin if v is not None and v.__class__ is not int]):
|
||||
live_range[var.nm] = [i,i] if var.nm not in live_range else [live_range[var.nm][0], i]
|
||||
|
||||
mem_vars:Dict[str, int] = {}
|
||||
rtor:Dict[str, str] = {}
|
||||
def allocate_regs(mvars):
|
||||
nonlocal var_size
|
||||
for v in [v for v in mvars if v is not None and v.__class__ is not int and v.nm not in rtor]:
|
||||
available_regs = s_regs if dtypes.is_float(v[1]) else x_regs
|
||||
#NOTE: Very simple spill, everything that doesn't fit in regs goes to mem
|
||||
if not available_regs:
|
||||
# ARM needs the stack 16-byte aligned
|
||||
var_size += 16
|
||||
available_regs.append('s0' if dtypes.is_float(out[1]) else 'x12')
|
||||
mem_vars[v.nm] = var_size
|
||||
rtor[v.nm] = available_regs.pop()
|
||||
|
||||
temp_floats = ['s0', 's1', 's2']
|
||||
temp_ints = ['x12', 'x13', 'x16']
|
||||
for i, (uop, out, vin, arg) in enumerate(asm):
|
||||
# Clear regs out of interval
|
||||
for var, reg in list(rtor.items()):
|
||||
available_regs = s_regs if reg[0] == 's' else x_regs
|
||||
if var[1] not in 'B' and var not in mem_vars and i > live_range[var][1]:
|
||||
available_regs.append(rtor.pop(var))
|
||||
# Assign registers to the variables using live ranges.
|
||||
allocate_regs([out] + vin)
|
||||
# Assign temp regs to vin and load them before direct use
|
||||
for i, v in enumerate([v for v in vin if v.__class__ is not int and v.nm in mem_vars]):
|
||||
rtor[v.nm] = temp_floats[i] if dtypes.is_float(v[1]) else temp_ints[i]
|
||||
# ARM64 addressing constraints https://devblogs.microsoft.com/oldnewthing/20220728-00/?p=106912
|
||||
ins.append(f"mov x15, {mem_vars[v.nm]}")
|
||||
ins.append(f"ldr {rtor[v.nm]}, [sp, x15]")
|
||||
|
||||
if uop == Ops.SPECIAL:
|
||||
if arg.startswith('data'):
|
||||
# data args 8 to n are passed on the stack
|
||||
if int(arg[4:]) >= 8:
|
||||
ins.append(f"ldr x15, [x17, #{(int(arg[4:]) - 8) * 8}]")
|
||||
ins.append(f"mov {rtor[out.nm]}, x15")
|
||||
else:
|
||||
ins.append(f"mov {rtor[out.nm]}, #0")
|
||||
ins.append(f"loop_{arg}:")
|
||||
elif uop == Ops.CAST:
|
||||
if arg == BinaryOps.CMPLT:
|
||||
if rtor[out.nm][0] == 's':
|
||||
mov_imm(0.0, 's0')
|
||||
mov_imm(1.0, 's1')
|
||||
ins.append(f"fcsel {rtor[out.nm]}, s1, s0, lt")
|
||||
if rtor[out.nm][0] == 'x':
|
||||
mov_imm(0, 'x14')
|
||||
mov_imm(1, 'x15')
|
||||
ins.append(f"csel {rtor[out.nm]}, x15, x14, lt")
|
||||
else:
|
||||
ins.append(f"sxtw {rtor[out.nm]}, w{rtor[vin[0].nm][1:]}")
|
||||
elif uop == Ops.ALU:
|
||||
if len(vin)==2 and vin[1].__class__ is int: mov_imm(vin[1], 'x15')
|
||||
if arg == BinaryOps.MUL and out.dtype == dtypes.bool:
|
||||
ins.append(f"ands {','.join('x15' if v.__class__ is int else rtor[v.nm] for v in [out] + vin)}")
|
||||
elif arg == TernaryOps.WHERE:
|
||||
ins.append(f"fcmp {rtor[vin[0].nm]}, #0.0" if rtor[vin[0].nm][0] == 's' else f"cmp {rtor[vin[0].nm]}, #0")
|
||||
ins.append(f"{alu[arg]} {rtor[out.nm]}, {rtor[vin[1].nm]}, {rtor[vin[2].nm]}, ne")
|
||||
elif arg in [UnaryOps.LOG2, UnaryOps.SIN, UnaryOps.EXP2, UnaryOps.SQRT]:
|
||||
#NOTE: Not a real instruction, used to emulate an ext call in unicorn
|
||||
if CI: ins.append(f"{alu[arg]} {rtor[out.nm]} {rtor[vin[0].nm]}")
|
||||
else:
|
||||
save_regs = [k for k in rtor.keys() if k != out.nm and k not in mem_vars]
|
||||
ins.append(f"sub sp, sp, #{(len(save_regs))*16}")
|
||||
# Save the registers before they are cleared by func call
|
||||
for i,k in enumerate(save_regs,1):
|
||||
ins.append(f"str {rtor[k]}, [sp, #{16*i}]")
|
||||
ins.append("stp x29, x30, [sp, #0]!")
|
||||
ins.append("mov x29, sp")
|
||||
ins.append(f"fmov s0, {rtor[vin[0].nm]}")
|
||||
ins.append(alu[arg])
|
||||
ins.append(f"fmov {rtor[out.nm]}, s0")
|
||||
ins.append("mov sp, x29")
|
||||
ins.append("ldp x29, x30, [sp], #0")
|
||||
for i,k in enumerate(save_regs,1):
|
||||
ins.append(f"ldr {rtor[k]}, [sp, #{16*i}]")
|
||||
ins.append(f"add sp, sp, #{len(save_regs)*16}")
|
||||
elif arg == BinaryOps.CMPLT:
|
||||
ins.append(f"{alu[arg]} {','.join('x15' if v.__class__ is int else rtor[v.nm] for v in [out] + vin)}" if not dtypes.is_float(vin[0][1]) else f"fcmp {rtor[vin[0].nm]}, {rtor[vin[1].nm]}")
|
||||
elif arg == BinaryOps.MOD:
|
||||
rhs = 'x15' if vin[1].__class__ is int else rtor[vin[1].nm]
|
||||
ins.append(f"udiv x14, {rtor[vin[0].nm]}, {rhs}")
|
||||
ins.append(f"msub {rtor[out.nm]}, x14, {rhs}, {rtor[vin[0].nm]}")
|
||||
else:
|
||||
ins.append(f"{'f' if dtypes.is_float(vin[0][1]) else 's' if arg == BinaryOps.DIV else ''}{alu[arg]} {', '.join('x15' if v.__class__ is int else rtor[v.nm] for v in [out] + vin)}")
|
||||
elif uop == Ops.LOAD:
|
||||
if arg.__class__ in (int, float):
|
||||
mov_imm(arg, rtor[out.nm])
|
||||
else:
|
||||
#NOTE: if casting is needed, load the var into the s/h0 or x/w12 temp regs
|
||||
reg_in = type_to_reg[arg[2]] + ('0' if dtypes.is_float(arg[2]) else '12') if arg[2] is not None else rtor[out.nm]
|
||||
mov_imm(arg[0], "x15")
|
||||
ins.append(f"add x15, {rtor[vin[0].nm]}, x15")
|
||||
ins.append(f"ldr{'sb' if arg[2] is not None and arg[2] in (dtypes.int8, dtypes.uint8, dtypes.bool) else ''} {reg_in}, [x15]")
|
||||
if arg[2] is not None: ins.append(f"{'fcvt' if arg[2] in [dtypes.half, dtypes.double] else 'scvtf'} {rtor[out.nm]}, {reg_in}")
|
||||
elif uop == Ops.STORE:
|
||||
#NOTE: if casting is needed, load the var into the s/h0 or x/w12 temp regs
|
||||
reg_out = (type_to_reg[arg[2]] + ('0' if dtypes.is_float(arg[2]) else '12') if arg[2] is not None else rtor[vin[1].nm])
|
||||
if arg[2] is not None: ins.append(f"fcvt{'zs' if arg[2] not in [dtypes.half, dtypes.double] else '' } {reg_out}, {rtor[vin[1].nm]}")
|
||||
ins.append(f"mov x15, #{arg[0]}")
|
||||
ins.append(f"str {reg_out}, [{rtor[vin[0].nm]}, x15, lsl #0]")
|
||||
elif uop == Ops.COND_BRANCH:
|
||||
#TODO: this is a hack; it shouldn't be assumed there's always a cmp before a cond branch
|
||||
if prev_uop == Ops.LOAD:
|
||||
ins.append(f"cmp {rtor[vin[0].nm]}, #0")
|
||||
ins.append(f"b.{'lt' if arg[1] else 'ge'} {arg[0][1:]}")
|
||||
elif uop == Ops.LABEL:
|
||||
ins.append(f"{arg[1:]}:")
|
||||
elif uop == Ops.ENDLOOP:
|
||||
mov_imm(arg[0], "x15")
|
||||
ins.append(f"add {rtor[vin[0].nm]}, {rtor[vin[0].nm]}, #1")
|
||||
ins.append(f"cmp {rtor[vin[0].nm]}, x15")
|
||||
ins.append(f"b.lt loop_{arg[1]}")
|
||||
prev_uop = uop
|
||||
# store regs into memory if needed
|
||||
if out is not None and out.nm in mem_vars:
|
||||
ins.append(f"mov x15, {mem_vars[out.nm]}")
|
||||
ins.append(f"str {rtor[out.nm]}, [sp, x15]")
|
||||
return "\n".join([f"//varsize {var_size}",".arch armv8-a",".text", f".global {get_name(fn_nm)}",".p2align 2", f"{get_name(fn_nm)}:", "mov x17, sp"] + [f"sub sp, sp, #{offset}" for offset in compute_offsets(var_size)]+ ins + [f"add sp, sp, #{offset}" for offset in compute_offsets(var_size)] +["ret", "\n"])
|
||||
|
||||
def uops_to_arm64_asm(fn_nm:str, uops:List[UOp]) -> Tuple[str, List[int], List[int], bool]:
|
||||
lang = ARM64Language()
|
||||
global_size, local_size = uops_to_asmstyle(lang, fn_nm, uops)
|
||||
return specialize_to_arm64(fn_nm, lang.ins), global_size[::-1], local_size[::-1], True
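A quick standalone illustration of the two small helpers this backend relies on; float_to_hex and compute_offsets are copied verbatim from the file above, and the asserted values were checked by hand.

import struct
def float_to_hex(x): return "%02X%02X%02X%02X" % tuple(struct.pack("f",x)[::-1])
def compute_offsets(total):
  quotient, remainder = divmod(total, 4096)
  return [4096]*quotient + [remainder] if remainder else [4096]*quotient
assert float_to_hex(2.0) == "40000000"               # big-endian hex of the IEEE-754 bits
assert compute_offsets(10000) == [4096, 4096, 1808]  # sp is adjusted in steps of at most 4096 bytes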
|
||||
@@ -1,105 +0,0 @@
|
||||
from typing import List
|
||||
import struct
|
||||
from tinygrad.codegen.assembly import uops_to_asmstyle, AssemblyLanguage
|
||||
from tinygrad.codegen.opt.kernel import Ops, UOp
|
||||
from tinygrad import dtypes
|
||||
from tinygrad.uop.ops import BinaryOps, UnaryOps, TernaryOps
|
||||
from tinygrad.runtime.ops_cuda import arch
|
||||
|
||||
dtype_to_nvtype = {dtypes.float32: "f32", dtypes.float16: "f16", dtypes.int64: "s64", dtypes.int32: "s32", dtypes.int8: "s8", dtypes.bool: "pred", dtypes.uint64: "u64", dtypes.uint32: "u32", dtypes.uint16: "u16", dtypes.uint8: "u8", "bits16": "b16", dtypes.float64: "f64"}
|
||||
def float_to_hex(x): return "%02X%02X%02X%02X" % tuple(struct.pack("f",x)[::-1])
|
||||
|
||||
def ptx_needs_cast(dest_dtype, src_dtype): return dtypes.is_float(dest_dtype) and dtypes.is_int(src_dtype) or dtypes.is_int(dest_dtype) and dtypes.is_float(src_dtype) or (dtypes.is_float(src_dtype) and dtypes.is_float(dest_dtype) and dest_dtype.itemsize != src_dtype.itemsize)
|
||||
|
||||
def render_cast(ins, inp, out):
|
||||
if inp.dtype == dtypes.bool and (dtypes.is_float(out.dtype) or dtypes.is_int(out.dtype)):
|
||||
ins.append(f"selp.{dtype_to_nvtype[out.dtype]} {out}, {'0f3F800000, 0f00000000' if dtypes.is_float(out.dtype) else '1, 0'}, {inp};")
|
||||
elif out.dtype == dtypes.bool:
|
||||
if inp.dtype == dtypes.bool:
|
||||
ins.append(f"mov.pred {out}, {inp};")
|
||||
else:
|
||||
ins.append(f"setp.ne.{dtype_to_nvtype[inp.dtype]} {out}, {'0f00000000' if dtypes.is_float(inp.dtype) else '0'}, {inp};")
|
||||
else:
|
||||
round_mod = ".rzi" if dtypes.is_int(out.dtype) and dtypes.is_float(inp.dtype) else '.rz' if dtypes.is_float(out.dtype) and (dtypes.is_int(inp.dtype) or dtypes.is_float(inp.dtype) and inp.dtype.itemsize > out.dtype.itemsize) else ''
|
||||
ins.append(f"cvt{round_mod}.{dtype_to_nvtype[out.dtype]}.{dtype_to_nvtype[inp.dtype]} {out}, {inp};")
|
||||
|
||||
# https://docs.nvidia.com/cuda/parallel-thread-execution/#
|
||||
|
||||
class PTXLanguage(AssemblyLanguage):
|
||||
supports_constant_folding: bool = True
|
||||
|
||||
def specialize_to_ptx(lang, function_name):
|
||||
param_cnt = 0
|
||||
ins = []
|
||||
alu = {BinaryOps.ADD: "add", BinaryOps.SUB: "sub", BinaryOps.MUL: "mul", BinaryOps.DIV: "div", BinaryOps.MAX: "max",
|
||||
BinaryOps.MOD: "rem", BinaryOps.CMPLT: "setp.lt", UnaryOps.SQRT: "sqrt.approx",
|
||||
UnaryOps.NOOP: "mov", UnaryOps.NEG: "neg",
|
||||
UnaryOps.SIN: "sin.approx", UnaryOps.LOG2: "lg2.approx", UnaryOps.EXP2: "ex2.approx.ftz",
|
||||
TernaryOps.MULACC: "fma.rn", TernaryOps.WHERE: "selp"}
|
||||
for uop, out, vin, arg in lang.ins:
|
||||
if uop == Ops.ENDLOOP:
|
||||
ins.append("bar.sync 0;")
|
||||
elif uop == Ops.DEFINE_LOCAL:
|
||||
ins.append(f".shared .align 4 .b8 {arg[0]}[{arg[1]*4}];")
|
||||
elif uop == Ops.SPECIAL:
|
||||
if arg.startswith('data'):
|
||||
param_cnt += 1
|
||||
ins.append(f"ld.param.u64 {out}, [{arg}];")
|
||||
# TODO: we sometimes want this to be local, nvcc converts to global most of the time, not sure when we would need to?
|
||||
# ins.append(f"cvta.to.global.u64 {out}, {out};")
|
||||
elif arg.startswith('gid'):
|
||||
ins.append(f"mov.u32 {out}, %ctaid.{'xyz'[int(arg[3:])]};")
|
||||
elif arg.startswith('lid'):
|
||||
ins.append(f"mov.u32 {out}, %tid.{'xyz'[int(arg[3:])]};")
|
||||
elif uop == Ops.ALU:
|
||||
if arg == BinaryOps.MUL and out.dtype == dtypes.bool:
|
||||
ins.append(f"and.pred {out}, {', '.join(str(x) for x in vin)};")
|
||||
else:
|
||||
otype = vin[0].dtype if arg in [BinaryOps.CMPLT] else out.dtype
|
||||
if arg == TernaryOps.WHERE:
|
||||
if vin[0].dtype == dtypes.bool:
|
||||
reg = vin[0]
|
||||
else:
|
||||
reg = lang.newreg((vin[0], 'bool'), dtypes.bool)
|
||||
ins.append(f"setp.ne.{dtype_to_nvtype[vin[0].dtype]} {reg}, {'0f00000000' if dtypes.is_float(vin[0].dtype) else '0'}, {vin[0]};")
|
||||
vin = vin[1:] + [reg]
|
||||
ins.append(f"{alu[arg]}{'.lo' if arg == BinaryOps.MUL and out.dtype != dtypes.float32 else ''}{'.rn' if arg == BinaryOps.DIV and out.dtype == dtypes.float32 else ''}.{dtype_to_nvtype[otype]} {out}, {', '.join(str(x) for x in vin)};")
|
||||
elif uop == Ops.LOAD:
|
||||
if arg.__class__ in (int, float):
|
||||
ins.append(f"mov.{dtype_to_nvtype[out.dtype]} {out}, {'0f'+float_to_hex(arg) if dtypes.is_float(out.dtype) else int(arg)};")
|
||||
elif arg[2] is not None and (arg[2] == dtypes.bool or arg[2] != out.dtype):
|
||||
dt = ('u16', dtypes.uint16) if arg[2] == dtypes.bool == out.dtype else ('u8', dtypes.uint8) if arg[2] == dtypes.bool else ('b16', dtypes.float16) if arg[2] == dtypes.half else (dtype_to_nvtype[arg[2]], arg[2])
|
||||
reg = lang.newreg((out, dt[0]), dtype=dt[1])
|
||||
ins.append(f"ld.{arg[1]}.{dt[0]} {reg}, [{vin[0]}{f'+{arg[0]}' if arg[0] is not None else ''}];")
|
||||
render_cast(ins, reg, out)
|
||||
else:
|
||||
ins.append(f"ld.{arg[1]}.{dtype_to_nvtype[dtypes.float if arg[2] is None else arg[2]]} {out}, [{vin[0]}{f'+{arg[0]}' if arg[0] is not None else ''}];")
|
||||
elif uop == Ops.STORE:
|
||||
if ptx_needs_cast(dtypes.float if arg[2] is None else arg[2], vin[1].dtype) or arg[2] == dtypes.bool:
|
||||
if arg[2] == dtypes.bool != vin[1].dtype:
|
||||
prereg = lang.newreg((vin[1],'bool'), dtype=dtypes.bool)
|
||||
render_cast(ins, vin[1], prereg)
|
||||
else: prereg = vin[1]
|
||||
reg = lang.newreg((prereg, dtypes.uint16 if arg[2] == dtypes.bool else arg[2]), dtype=dtypes.uint16 if arg[2] == dtypes.bool else dtypes.float if arg[2] is None else arg[2])
|
||||
render_cast(ins, prereg, reg)
|
||||
ins.append(f"st.{arg[1]}.{dtype_to_nvtype['bits16' if arg[2] == dtypes.float16 else dtypes.uint8 if arg[2] == dtypes.bool else dtypes.float if arg[2] is None else arg[2]]} [{vin[0]}{f'+{arg[0]}' if arg[0] is not None else ''}], {reg};")
|
||||
else:
|
||||
ins.append(f"st.{arg[1]}.{dtype_to_nvtype[dtypes.float if arg[2] is None else arg[2]]} [{vin[0]}{f'+{arg[0]}' if arg[0] is not None else ''}], {vin[1]};")
|
||||
elif uop == Ops.CAST:
|
||||
render_cast(ins, vin[0], out)
|
||||
elif uop == Ops.LABEL:
|
||||
ins.append(f"{arg}:")
|
||||
elif uop == Ops.COND_BRANCH:
|
||||
ins.append(f"@{'!' if not arg[1] else ''}{vin[0]} bra {arg[0]};")
|
||||
|
||||
ins_prefix = [".version 7.8", ".target " + arch(), ".address_size 64",
|
||||
f".visible .entry {function_name}({', '.join(f'.param .u64 data{i}' for i in range(param_cnt))}) {{"]
|
||||
for arg in [(dtype, lang.type_to_letter(dtype), c) for dtype,c in lang.cnts.items()]: ins_prefix.append(f".reg .{dtype_to_nvtype[arg[0][0]]} %{arg[1]}<{arg[2]}>;",)
|
||||
ins = ins_prefix + ins
|
||||
ins += ["ret;", "}"]
|
||||
return '\n'.join(ins)
|
||||
|
||||
def uops_to_ptx_asm(function_name:str, uops:List[UOp]):
|
||||
lang = PTXLanguage()
|
||||
global_size, local_size = uops_to_asmstyle(lang, function_name, uops)
|
||||
return specialize_to_ptx(lang, function_name), global_size[::-1], local_size[::-1], True
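A simplified standalone sketch of how the ALU branch above formats a PTX instruction; the function name, the string-keyed dtype table, and the example registers are assumptions for illustration, not the file's API.

dtype_to_nvtype = {"float32": "f32", "int32": "s32"}  # small subset of the table above
def render_alu_ptx(op: str, out_dtype: str, out: str, srcs: list) -> str:
  suffix = ".lo" if op == "mul" and out_dtype != "float32" else ""  # integer mul keeps the low half
  return f"{op}{suffix}.{dtype_to_nvtype[out_dtype]} {out}, {', '.join(srcs)};"
assert render_alu_ptx("add", "float32", "%f2", ["%f0", "%f1"]) == "add.f32 %f2, %f0, %f1;"
assert render_alu_ptx("mul", "int32", "%i2", ["%i0", "%i1"]) == "mul.lo.s32 %i2, %i0, %i1;"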
|
||||
@@ -1,203 +0,0 @@
|
||||
import yaml
|
||||
from typing import Tuple, Set, Dict
|
||||
from tinygrad import dtypes
|
||||
from tinygrad.codegen.assembly import AssemblyCodegen, Register
|
||||
from tinygrad.codegen.opt.kernel import Ops
|
||||
from tinygrad.uop.ops import BinaryOps, UnaryOps, TernaryOps
|
||||
from tinygrad.runtime.ops_cl import ROCM_LLVM_PATH
|
||||
|
||||
# ugh, is this really needed?
|
||||
from extra.helpers import enable_early_exec
|
||||
early_exec = enable_early_exec()
|
||||
|
||||
boilerplate_start = """
|
||||
.global _start
|
||||
_start:
|
||||
.rodata
|
||||
.align 0x10
|
||||
.global code.kd
|
||||
.type code.kd,STT_OBJECT
|
||||
.amdhsa_kernel code"""
|
||||
|
||||
code_start = """.end_amdhsa_kernel
|
||||
.text
|
||||
code:
|
||||
"""
|
||||
|
||||
# https://github.com/RadeonOpenCompute/ROCm_Documentation/blob/master/ROCm_Compiler_SDK/ROCm-Codeobj-format.rst
|
||||
# https://github.com/ROCm-Developer-Tools/ROCm-ComputeABI-Doc/blob/master/AMDGPU-ABI.md#initial-kernel-register-state
|
||||
# RDNA3 is actually a SIMD machine!
|
||||
class RDNACodegen(AssemblyCodegen):
|
||||
supports_float4: bool = True
|
||||
supports_float4_alu: bool = True
|
||||
supports_load3: bool = True
|
||||
sin_is_sin2pi: bool = True
|
||||
no_div: bool = True
|
||||
|
||||
def specialize(self, asm) -> Tuple[str, str]:
|
||||
args = []
|
||||
for i,b in enumerate(self.bufs): args.append({'.address_space': 'global', '.name': f'buf_{i}', '.offset': i*8, '.size': 8, '.type_name': b.dtype.name+"*", '.value_kind': 'global_buffer'})
|
||||
ins = []
|
||||
|
||||
v_cnt = 3 # v[0:2] is local_xyz
|
||||
s_cnt = 5 # s[0:1] is the address, s[2:4] is global_xyz
|
||||
|
||||
dtype_to_rdnatype = {dtypes.float32: "f32", dtypes.int64: "i64", dtypes.int32: "i32", dtypes.uint64: "u64", dtypes.bool: "i32"}
|
||||
alu = {BinaryOps.ADD: "add", BinaryOps.SUB: "sub", BinaryOps.MUL: "mul", TernaryOps.MULACC: "fma",
|
||||
BinaryOps.MAX: "max", UnaryOps.RECIP: "rcp",
|
||||
UnaryOps.NOOP: "mov", UnaryOps.SIN: "sin", UnaryOps.LOG2: "log", UnaryOps.EXP2: "exp",
|
||||
BinaryOps.CMPLT: "cmp_lt"}
|
||||
|
||||
pend_regs:Set[Register] = set()
|
||||
rtor:Dict[Register, str] = {}
|
||||
def reg_in(x):
|
||||
nonlocal pend_regs
|
||||
#print("reg_in", x, rtor[x], pend_regs)
|
||||
if x in pend_regs:
|
||||
#print("clear")
|
||||
ins.append('s_waitcnt lgkmcnt(0), vmcnt(0)')
|
||||
pend_regs.clear()
|
||||
return rtor[x]
|
||||
def reg_out(x):
|
||||
return rtor[x]
|
||||
for uop, out, vin, arg in asm:
|
||||
if uop == Ops.DEFINE_REGISTER:
|
||||
if arg[0][0] in [dtypes.uint32, dtypes.uint64, dtypes.int64, dtypes.int32, dtypes.float32, dtypes.float.vec(4)]:
|
||||
for i in range(arg[2]):
|
||||
# TODO: Re-use gaps created by this to avoid wasting registers
|
||||
align = int(arg[0][0].itemsize / 4)
|
||||
if arg[0][1]:
|
||||
s_cnt += s_cnt % align
|
||||
reg_name = f"s[{s_cnt}:{s_cnt + align - 1}]" if align > 1 else f"s{s_cnt}"
|
||||
s_cnt += align
|
||||
else:
|
||||
v_cnt += v_cnt % align
|
||||
reg_name = f"v[{v_cnt}:{v_cnt + align - 1}]" if align > 1 else f"v{v_cnt}"
|
||||
v_cnt += align
|
||||
rtor[Register(f"%{arg[1]}{i}", *arg[0])] = reg_name
|
||||
|
||||
if arg[0][0] == dtypes.float.vec(4):
|
||||
for off in range(4):
|
||||
reg_name = f"s{s_cnt-align+off}" if arg[0][1] else f"v{v_cnt-align+off}"
|
||||
rtor[Register(f"%{arg[1]}{i}", dtypes.float, False, off=off)] = reg_name
|
||||
elif arg[0][0] == dtypes.bool:
|
||||
for i in range(arg[2]):
|
||||
reg_name = "scc" if arg[0][1] else "vcc_lo" # `_lo` suffix since we're running wavefront_size=32
|
||||
rtor[Register(f"%{arg[1]}{i}", *arg[0])] = reg_name
|
||||
else:
|
||||
raise NotImplementedError("DEFINE_REGISTER not implemented for arg: ", arg)
|
||||
elif uop == Ops.SPECIAL:
|
||||
if arg.startswith('buf'):
|
||||
i = int(arg[3:])
|
||||
ins.append(f's_load_b64 {reg_out(out)}, s[0:1], {i*8}')
|
||||
pend_regs.add(out)
|
||||
for r in out.subregs(): pend_regs.add(r)
|
||||
elif arg.startswith('gid'):
|
||||
ins.append(f'v_mov_b32 {reg_out(out)}, s{2+int(arg[3])}')
|
||||
# the docs lied, this is actually y
|
||||
if int(arg[3]) == 2: ins.append("v_bfe_u32 v2, v0, 20, 10") # untested
|
||||
if int(arg[3]) == 1: ins.append("v_bfe_u32 v1, v0, 10, 10")
|
||||
elif int(arg[3]) == 0: ins.append("v_and_b32_e32 v0, 0x3ff, v0")
|
||||
# get local size
|
||||
offset = len(args)*8
|
||||
args.append({".offset": offset, ".value_kind": f"hidden_group_size_{'xyz'[int(arg[3])]}", ".size": 8})
|
||||
ins.append(f's_load_b32 s{2+int(arg[3])}, s[0:1], {offset}')
|
||||
ins.append('s_waitcnt vmcnt(0) lgkmcnt(0)')
|
||||
pend_regs.clear()
|
||||
ins.append(f'v_mul_i32_i24 {reg_out(out)}, {reg_out(out)}, s{2+int(arg[3])}')
|
||||
ins.append(f'v_add_nc_u32 {reg_out(out)}, v{int(arg[3])}, {reg_out(out)}')
|
||||
elif uop == Ops.CONST:
|
||||
if arg == float('inf'): arg = "0x7f800000"
|
||||
elif arg == float('-inf'): arg = "0xff800000"
|
||||
if out.dtype == dtypes.float.vec(4):
|
||||
for off in range(4):
|
||||
ins.append(f"{'s_' if out.scalar else 'v_'}mov_b32 {reg_out(Register(out.nm, dtypes.float, False, off=off))}, {arg}")
|
||||
else:
|
||||
ins.append(f"{'s_' if out.scalar else 'v_'}mov_b32 {reg_out(out)}, {arg}")
|
||||
elif uop == Ops.ALU:
|
||||
if arg in [BinaryOps.CMPLT]:
|
||||
ins.append(f"{'s' if out.scalar else 'v'}_{alu[arg]}_{dtype_to_rdnatype[out.dtype]} {', '.join(reg_in(x) if x.__class__ is Register else str(x) for x in vin)}")
|
||||
else:
|
||||
alu_arg = alu[arg]
|
||||
if arg == TernaryOps.MULACC and out == vin[2]:
|
||||
alu_arg = "fmac"
|
||||
vin = vin[0:2]
|
||||
if out.dtype == dtypes.float.vec(4):
|
||||
for rr in zip(*[x.subregs() if x.dtype == dtypes.float.vec(4) else [x,x,x,x] for x in [out]+vin]):
|
||||
ins.append(f"{'s_' if rr[0].scalar else 'v_'}{alu_arg}_{dtype_to_rdnatype[rr[0].dtype]} {reg_out(rr[0])}, {', '.join(reg_in(x) if x.__class__ is Register else str(x) for x in rr[1:])}")
|
||||
else:
|
||||
ins.append(f"{'s_' if out.scalar else 'v_'}{alu_arg}_{dtype_to_rdnatype[out.dtype] if arg != UnaryOps.NOOP else 'b32'}{'_i24' if arg == BinaryOps.MUL and out.dtype != dtypes.float32 and not out.scalar else ''} {reg_out(out)}, {', '.join(reg_in(x) if x.__class__ is Register else str(x) for x in vin)}")
|
||||
elif uop == Ops.LOAD:
|
||||
if out.scalar:
|
||||
# swap arg order
|
||||
ins.append(f's_load_b32 {reg_out(out)}, {reg_in(vin[0])}, {reg_in(vin[1])} offset:{arg[0]}')
|
||||
else:
|
||||
ins.append(f'global_load_{"b128" if out.dtype == dtypes.float.vec(4) else "b32"} {reg_out(out)}, {reg_in(vin[1])}, {reg_in(vin[0])} offset:{arg[0]}')
|
||||
pend_regs.add(out)
|
||||
for r in out.subregs(): pend_regs.add(r)
|
||||
elif uop == Ops.STORE:
|
||||
ins.append(f'global_store_{"b128" if vin[1].dtype == dtypes.float.vec(4) else "b32"} {reg_in(vin[2])}, {reg_in(vin[1])}, {reg_in(vin[0])} offset:{arg[0]}')
|
||||
elif uop == Ops.LABEL:
|
||||
ins.append(f"{arg}:")
|
||||
elif uop == Ops.COND_BRANCH:
|
||||
ins.append(f"s_cbranch_scc{'1' if arg[1] else '0'} {arg[0]}")
|
||||
elif uop == Ops.CAST:
|
||||
if vin[0].dtype == dtypes.bool:
|
||||
if out.dtype == dtypes.float32:
|
||||
ins.append(f"v_cndmask_b32 {reg_out(out)}, 0.0, 1.0, {reg_in(vin[0])}")
|
||||
else:
|
||||
raise NotImplementedError(f"cast {vin[0].dtype} -> {out.dtype}")
|
||||
else:
|
||||
raise NotImplementedError(uop)
|
||||
|
||||
ins += ['s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)', 's_endpgm', 's_code_end']
|
||||
|
||||
# dual alu group
|
||||
seen = set()
|
||||
new_ins = []
|
||||
for i,tins in enumerate(ins):
|
||||
if tins in seen: continue
|
||||
if tins.startswith("v_fmac_f32"):
|
||||
for gins in reversed(ins[i+1:]):
|
||||
if gins in seen: continue
|
||||
if gins.startswith("v_fmac_f32"):
|
||||
r0 = [int(x[1:].strip(',')) for x in tins.split(" ")[1:]]
|
||||
r1 = [int(x[1:].strip(',')) for x in gins.split(" ")[1:]]
|
||||
if r0[0]%2 == r1[0]%2: continue
|
||||
if r0[1]%2 == r1[1]%2: continue
|
||||
if r0[2]%2 == r1[2]%2: continue
|
||||
new_ins.append(tins.replace("v_", "v_dual_")+" :: " + gins.replace("v_", "v_dual_"))
|
||||
seen.add(tins)
|
||||
seen.add(gins)
|
||||
break
|
||||
if tins not in seen:
|
||||
new_ins.append(tins)
|
||||
ins = new_ins
|
||||
|
||||
return 'code', self.assemble(args, ins, v_cnt, s_cnt)
|
||||
|
||||
def assemble(self, args, ins, v_cnt, s_cnt):
|
||||
kernel_desc = {'.amdhsa_group_segment_fixed_size': 0, '.amdhsa_private_segment_fixed_size': 0, '.amdhsa_kernarg_size': 0,
|
||||
'.amdhsa_next_free_vgpr': v_cnt, # this matters!
|
||||
'.amdhsa_reserve_vcc': 0, '.amdhsa_reserve_xnack_mask': 0,
|
||||
'.amdhsa_next_free_sgpr': s_cnt,
|
||||
'.amdhsa_float_round_mode_32': 0, '.amdhsa_float_round_mode_16_64': 0, '.amdhsa_float_denorm_mode_32': 3, '.amdhsa_float_denorm_mode_16_64': 3, '.amdhsa_dx10_clamp': 1, '.amdhsa_ieee_mode': 1,
|
||||
'.amdhsa_fp16_overflow': 0, '.amdhsa_workgroup_processor_mode': 1, '.amdhsa_memory_ordered': 1, '.amdhsa_forward_progress': 0, '.amdhsa_enable_private_segment': 0,
|
||||
'.amdhsa_system_sgpr_workgroup_id_x': 1, '.amdhsa_system_sgpr_workgroup_id_y': 1, '.amdhsa_system_sgpr_workgroup_id_z': 1,
|
||||
'.amdhsa_system_sgpr_workgroup_info': 0, '.amdhsa_system_vgpr_workitem_id': 2, # is amdhsa_system_vgpr_workitem_id real?
|
||||
'.amdhsa_exception_fp_ieee_invalid_op': 0, '.amdhsa_exception_fp_denorm_src': 0, '.amdhsa_exception_fp_ieee_div_zero': 0, '.amdhsa_exception_fp_ieee_overflow': 0, '.amdhsa_exception_fp_ieee_underflow': 0,
|
||||
'.amdhsa_exception_fp_ieee_inexact': 0, '.amdhsa_exception_int_div_zero': 0, '.amdhsa_user_sgpr_dispatch_ptr': 0, '.amdhsa_user_sgpr_queue_ptr': 0, '.amdhsa_user_sgpr_kernarg_segment_ptr': 1,
|
||||
'.amdhsa_user_sgpr_dispatch_id': 0, '.amdhsa_user_sgpr_private_segment_size': 0, '.amdhsa_wavefront_size32': 1, '.amdhsa_uses_dynamic_stack': 0}
|
||||
|
||||
metadata = {'amdhsa.kernels': [{'.args': args,
|
||||
'.group_segment_fixed_size': 0, '.kernarg_segment_align': 8, '.kernarg_segment_size': args[-1][".offset"] + args[-1][".size"],
|
||||
'.language': 'OpenCL C', '.language_version': [1, 2], '.max_flat_workgroup_size': 256,
|
||||
'.name': 'code', '.private_segment_fixed_size': 0, '.sgpr_count': s_cnt, '.sgpr_spill_count': 0,
|
||||
'.symbol': 'code.kd', '.uses_dynamic_stack': False, '.vgpr_count': v_cnt, '.vgpr_spill_count': 0,
|
||||
'.wavefront_size': 32}],
|
||||
'amdhsa.target': 'amdgcn-amd-amdhsa--gfx1100', 'amdhsa.version': [1, 2]}
|
||||
|
||||
code = boilerplate_start + "\n" + '\n'.join("%s %d" % x for x in kernel_desc.items()) + "\n" + code_start + '\n'.join(ins) + "\n.amdgpu_metadata\n" + yaml.dump(metadata) + ".end_amdgpu_metadata"
|
||||
obj = early_exec(([ROCM_LLVM_PATH / "llvm-mc", '--arch=amdgcn', '--mcpu=gfx1100', '--triple=amdgcn-amd-amdhsa', '--filetype=obj', '-'], code.encode("utf-8")))
|
||||
asm = early_exec(([ROCM_LLVM_PATH / "ld.lld", "/dev/stdin", "-o", "/dev/stdout", "--pie"], obj))
|
||||
return asm
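The dual-ALU grouping above fuses two v_fmac_f32 instructions into one v_dual_fmac line only when every operand pair lands in different (odd/even) register banks; a standalone sketch of that check, with the operand parsing copied from the loop above and can_dual_issue as an assumed name.

def can_dual_issue(a: str, b: str) -> bool:
  r0 = [int(x[1:].strip(',')) for x in a.split(" ")[1:]]
  r1 = [int(x[1:].strip(',')) for x in b.split(" ")[1:]]
  return all(x % 2 != y % 2 for x, y in zip(r0, r1))
assert can_dual_issue("v_fmac_f32 v4, v6, v8", "v_fmac_f32 v5, v7, v9")      # all operands in opposite banks
assert not can_dual_issue("v_fmac_f32 v4, v6, v8", "v_fmac_f32 v6, v7, v9")  # both destinations are even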
|
||||
@@ -1,23 +0,0 @@
#!/usr/bin/env python3
import numpy as np
from tinygrad.runtime.ops_cuda import CUDAProgram, RawCUDABuffer

if __name__ == "__main__":
  test = RawCUDABuffer.fromCPU(np.zeros(10, np.float32))
  prg = CUDAProgram("test", """
.version 7.8
.target sm_86
.address_size 64
.visible .entry test(.param .u64 x) {
.reg .b32 %r<2>;
.reg .b64 %rd<3>;

ld.param.u64 %rd1, [x];
cvta.to.global.u64 %rd2, %rd1;
mov.u32 %r1, 0x40000000; // 2.0 in float
st.global.u32 [%rd2], %r1;
ret;
}""", binary=True)
  prg([1], [1], test)
  print(test.toCPU())
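Why the kernel stores 0x40000000: that bit pattern is IEEE-754 single-precision 2.0, so the zero-initialized buffer reads back with 2.0 in its first element. A one-line standalone check:

import struct
assert struct.unpack("<f", (0x40000000).to_bytes(4, "little"))[0] == 2.0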
@@ -1,42 +0,0 @@
import numpy as np
from PIL import Image
from pathlib import Path
import sys
cwd = Path.cwd()
sys.path.append(cwd.as_posix())
sys.path.append((cwd / 'test').as_posix())
from extra.datasets import fetch_mnist
from tqdm import trange

def augment_img(X, rotate=10, px=3):
  Xaug = np.zeros_like(X)
  for i in trange(len(X)):
    im = Image.fromarray(X[i])
    im = im.rotate(np.random.randint(-rotate,rotate), resample=Image.BICUBIC)
    w, h = X.shape[1:]
    #upper left, lower left, lower right, upper right
    quad = np.random.randint(-px,px,size=(8)) + np.array([0,0,0,h,w,h,w,0])
    im = im.transform((w, h), Image.QUAD, quad, resample=Image.BICUBIC)
    Xaug[i] = im
  return Xaug

if __name__ == "__main__":
  import matplotlib.pyplot as plt
  X_train, Y_train, X_test, Y_test = fetch_mnist()
  X_train = X_train.reshape(-1, 28, 28).astype(np.uint8)
  X_test = X_test.reshape(-1, 28, 28).astype(np.uint8)
  X = np.vstack([X_train[:1]]*10+[X_train[1:2]]*10)
  fig, a = plt.subplots(2,len(X))
  Xaug = augment_img(X)
  for i in range(len(X)):
    a[0][i].imshow(X[i], cmap='gray')
    a[1][i].imshow(Xaug[i],cmap='gray')
    a[0][i].axis('off')
    a[1][i].axis('off')
  plt.show()

  #create some nice gifs for doc?!
  for i in range(10):
    im = Image.fromarray(X_train[7353+i])
    im_aug = [Image.fromarray(x) for x in augment_img(np.array([X_train[7353+i]]*100))]
    im.save(f"aug{i}.gif", save_all=True, append_images=im_aug, duration=100, loop=0)
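The perspective jitter above builds PIL's QUAD argument: 8 values giving the source quadrilateral's corners in (upper-left, lower-left, lower-right, upper-right) order. A tiny standalone sketch with assumed 28x28 size and a few pixels of jitter:

import numpy as np
w = h = 28
base = np.array([0, 0, 0, h, w, h, w, 0])       # unjittered corners of a w x h image
quad = np.random.randint(-3, 3, size=8) + base  # each coordinate nudged by a few pixels
assert quad.shape == (8,)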
@@ -1,39 +0,0 @@
from typing import List, Dict, cast
import ctypes
from tinygrad.helpers import dedup, cpu_time_execution, DEBUG
from tinygrad.engine.jit import GraphRunner, GraphException
from tinygrad.device import Buffer, Device
from tinygrad.engine.realize import ExecItem, CompiledRunner
from tinygrad.uop.ops import Variable
from tinygrad.runtime.ops_cpu import ClangProgram
from tinygrad.renderer.cstyle import ClangRenderer
render_dtype = ClangRenderer().render_dtype

class ClangGraph(GraphRunner):
  def __init__(self, jit_cache: List[ExecItem], input_rawbuffers: List[Buffer], var_vals: Dict[str, int]):
    super().__init__(jit_cache, input_rawbuffers, var_vals)
    if not all(isinstance(ji.prg, CompiledRunner) for ji in jit_cache): raise GraphException

    prgs = '\n'.join(dedup([cast(CompiledRunner, ji.prg).p.src for ji in jit_cache]))
    args = [f"{render_dtype(x.dtype)}* arg{i}" for i,x in enumerate(input_rawbuffers)]
    args += sorted([f"int {v}" for v in var_vals])
    code = ["void batched("+','.join(args)+") {"]
    for ji in jit_cache:
      args = []
      for buf in ji.bufs:
        assert buf is not None
        if buf in input_rawbuffers:
          args.append(f"arg{input_rawbuffers.index(buf)}")
        else:
          args.append(f"({render_dtype(buf.dtype)}*)0x{ctypes.addressof(buf._buf):X}")
      args += [x.expr for x in cast(CompiledRunner, ji.prg).p.vars]
      code.append(f" {cast(CompiledRunner, ji.prg).p.function_name}({','.join(args)});")
    code.append("}")
    if DEBUG >= 4: print("\n".join(code))
    compiler = Device["CPU"].compiler
    assert compiler is not None
    self._prg = ClangProgram("batched", compiler.compile(prgs+"\n"+"\n".join(code))) # no point in caching the pointers

  def __call__(self, rawbufs: List[Buffer], var_vals: Dict[str, int], wait=False):
    return cpu_time_execution(
      lambda: self._prg(*[x._buf for x in rawbufs], *[x[1] for x in sorted(var_vals.items(), key=lambda x: x[0])]), enable=wait)
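A standalone mock of the codegen loop above, showing the shape of the C it emits: one batched() wrapper that calls every jitted kernel in order, with JIT input buffers passed as arguments and all other buffers baked in as raw pointers. The kernel names, the variable name start_pos, and the address below are made up for illustration.

input_args = ["float* arg0"]
calls = [("E_16_16", ["arg0", "(float*)0x7F0012340000", "start_pos"]),
         ("r_4_4", ["(float*)0x7F0012340000", "arg0"])]
code = ["void batched(" + ','.join(input_args + ["int start_pos"]) + ") {"]
for name, args in calls: code.append(f" {name}({','.join(args)});")
code.append("}")
print("\n".join(code))  # roughly what gets handed to ClangProgram("batched", ...)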
@@ -1,27 +0,0 @@
import ctypes
from typing import Tuple
import tinygrad.runtime.autogen.hip as hip
from tinygrad.helpers import init_c_var, time_execution_cuda_style
from tinygrad.runtime.ops_hip import check, hip_set_device
from tinygrad.runtime.graph.cuda import CUDAGraph

# TODO: this is only used in graph
def hip_time_execution(cb, enable=False): return time_execution_cuda_style(cb, hip.hipEvent_t, hip.hipEventCreate, hip.hipEventRecord, hip.hipEventSynchronize, hip.hipEventDestroy, hip.hipEventElapsedTime, enable=enable) # noqa: E501

class HIPGraph(CUDAGraph):
  def __del__(self):
    if hasattr(self, 'graph'): check(hip.hipGraphDestroy(self.graph))
    if hasattr(self, 'instance'): check(hip.hipGraphExecDestroy(self.instance))
  def set_device(self): hip_set_device(self.dev)
  def encode_args_info(self): return (hip.hipDeviceptr_t, (1,2,3))
  def graph_create(self): return init_c_var(hip.hipGraph_t(), lambda x: check(hip.hipGraphCreate(ctypes.byref(x), 0)))
  def graph_instantiate(self, graph):
    return init_c_var(hip.hipGraphExec_t(), lambda x: check(hip.hipGraphInstantiate(ctypes.byref(x), graph, None, None, 0)))
  def graph_add_kernel_node(self, graph, c_deps, c_params):
    return init_c_var(hip.hipGraphNode_t(), lambda x: check(hip.hipGraphAddKernelNode(ctypes.byref(x), graph, c_deps, ctypes.sizeof(c_deps)//8 if c_deps else 0, ctypes.byref(c_params)))) # noqa: E501
  def graph_launch(self, *args, wait=False): return hip_time_execution(lambda: check(hip.hipGraphLaunch(*args)), enable=wait)
  def graph_exec_kernel_node_set_params(self, *args): return check(hip.hipGraphExecKernelNodeSetParams(*args))
  def build_kernel_node_params(self, prg, global_size, local_size, c_config):
    return hip.hipKernelNodeParams(hip.dim3(*local_size), c_config, ctypes.cast(prg.clprg.prg, ctypes.c_void_p), hip.dim3(*global_size), None, 0)
  def set_kernel_node_launch_dims(self, node, global_size: Tuple[int, int, int], local_size: Tuple[int, int, int]):
    node.blockDim.x, node.blockDim.y, node.blockDim.z, node.gridDim.x, node.gridDim.y, node.gridDim.z = *local_size, *global_size
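set_kernel_node_launch_dims above fills all six dims with one tuple unpacking: local_size becomes blockDim and global_size becomes gridDim. A standalone sketch with a dummy node object (SimpleNamespace stands in for the real hipKernelNodeParams):

from types import SimpleNamespace
node = SimpleNamespace(blockDim=SimpleNamespace(x=0, y=0, z=0), gridDim=SimpleNamespace(x=0, y=0, z=0))
local_size, global_size = (8, 8, 1), (32, 16, 1)
node.blockDim.x, node.blockDim.y, node.blockDim.z, node.gridDim.x, node.gridDim.y, node.gridDim.z = *local_size, *global_size
assert (node.blockDim.x, node.gridDim.x) == (8, 32)  # local -> blockDim, global -> gridDim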
@@ -1,143 +0,0 @@
|
||||
import ctypes, collections
|
||||
import tinygrad.runtime.autogen.hsa as hsa
|
||||
from tinygrad.helpers import init_c_var
|
||||
|
||||
def check(status):
|
||||
if status != 0:
|
||||
hsa.hsa_status_string(status, ctypes.byref(status_str := ctypes.POINTER(ctypes.c_char)()))
|
||||
raise RuntimeError(f"HSA Error {status}: {ctypes.string_at(status_str).decode()}")
|
||||
|
||||
# Precalculated AQL info
|
||||
AQL_PACKET_SIZE = ctypes.sizeof(hsa.hsa_kernel_dispatch_packet_t)
|
||||
EMPTY_SIGNAL = hsa.hsa_signal_t()
|
||||
|
||||
DISPATCH_KERNEL_SETUP = 3 << hsa.HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS
|
||||
DISPATCH_KERNEL_HEADER = 1 << hsa.HSA_PACKET_HEADER_BARRIER
|
||||
DISPATCH_KERNEL_HEADER |= hsa.HSA_FENCE_SCOPE_SYSTEM << hsa.HSA_PACKET_HEADER_SCACQUIRE_FENCE_SCOPE
|
||||
DISPATCH_KERNEL_HEADER |= hsa.HSA_FENCE_SCOPE_SYSTEM << hsa.HSA_PACKET_HEADER_SCRELEASE_FENCE_SCOPE
|
||||
DISPATCH_KERNEL_HEADER |= hsa.HSA_PACKET_TYPE_KERNEL_DISPATCH << hsa.HSA_PACKET_HEADER_TYPE
|
||||
|
||||
BARRIER_HEADER = 1 << hsa.HSA_PACKET_HEADER_BARRIER
|
||||
BARRIER_HEADER |= hsa.HSA_FENCE_SCOPE_SYSTEM << hsa.HSA_PACKET_HEADER_SCACQUIRE_FENCE_SCOPE
|
||||
BARRIER_HEADER |= hsa.HSA_FENCE_SCOPE_SYSTEM << hsa.HSA_PACKET_HEADER_SCRELEASE_FENCE_SCOPE
|
||||
BARRIER_HEADER |= hsa.HSA_PACKET_TYPE_BARRIER_AND << hsa.HSA_PACKET_HEADER_TYPE
|
||||
|
||||
class AQLQueue:
|
||||
def __init__(self, device, sz=-1):
|
||||
self.device = device
|
||||
|
||||
check(hsa.hsa_agent_get_info(self.device.agent, hsa.HSA_AGENT_INFO_QUEUE_MAX_SIZE, ctypes.byref(max_queue_size := ctypes.c_uint32())))
|
||||
queue_size = min(max_queue_size.value, sz) if sz != -1 else max_queue_size.value
|
||||
|
||||
null_func = ctypes.CFUNCTYPE(None, hsa.hsa_status_t, ctypes.POINTER(hsa.struct_hsa_queue_s), ctypes.c_void_p)()
|
||||
self.hw_queue = init_c_var(ctypes.POINTER(hsa.hsa_queue_t)(), lambda x: check(
|
||||
hsa.hsa_queue_create(self.device.agent, queue_size, hsa.HSA_QUEUE_TYPE_SINGLE, null_func, None, (1<<32)-1, (1<<32)-1, ctypes.byref(x))))
|
||||
|
||||
self.next_doorbell_index = 0
|
||||
self.queue_base = self.hw_queue.contents.base_address
|
||||
self.queue_size = self.hw_queue.contents.size * AQL_PACKET_SIZE # in bytes
|
||||
self.write_addr = self.queue_base
|
||||
self.write_addr_end = self.queue_base + self.queue_size - 1 # precalc saves some time
|
||||
self.available_packet_slots = self.hw_queue.contents.size
|
||||
|
||||
check(hsa.hsa_amd_queue_set_priority(self.hw_queue, hsa.HSA_AMD_QUEUE_PRIORITY_HIGH))
|
||||
check(hsa.hsa_amd_profiling_set_profiler_enabled(self.hw_queue, 1))
|
||||
|
||||
def __del__(self):
|
||||
if hasattr(self, 'hw_queue'): check(hsa.hsa_queue_destroy(self.hw_queue))
|
||||
|
||||
def submit_kernel(self, prg, global_size, local_size, kernargs, completion_signal=None):
|
||||
if self.available_packet_slots == 0: self._wait_queue()
|
||||
|
||||
packet = hsa.hsa_kernel_dispatch_packet_t.from_address(self.write_addr)
|
||||
packet.workgroup_size_x = local_size[0]
|
||||
packet.workgroup_size_y = local_size[1]
|
||||
packet.workgroup_size_z = local_size[2]
|
||||
packet.reserved0 = 0
|
||||
packet.grid_size_x = global_size[0] * local_size[0]
|
||||
packet.grid_size_y = global_size[1] * local_size[1]
|
||||
packet.grid_size_z = global_size[2] * local_size[2]
|
||||
packet.private_segment_size = prg.private_segment_size
|
||||
packet.group_segment_size = prg.group_segment_size
|
||||
packet.kernel_object = prg.handle
|
||||
packet.kernarg_address = kernargs
|
||||
packet.reserved2 = 0
|
||||
packet.completion_signal = completion_signal if completion_signal else EMPTY_SIGNAL
|
||||
packet.setup = DISPATCH_KERNEL_SETUP
|
||||
packet.header = DISPATCH_KERNEL_HEADER
|
||||
self._submit_packet()
|
||||
|
||||
def submit_barrier(self, wait_signals=None, completion_signal=None):
|
||||
assert wait_signals is None or len(wait_signals) <= 5
|
||||
if self.available_packet_slots == 0: self._wait_queue()
|
||||
|
||||
packet = hsa.hsa_barrier_and_packet_t.from_address(self.write_addr)
|
||||
packet.reserved0 = 0
|
||||
packet.reserved1 = 0
|
||||
for i in range(5):
|
||||
packet.dep_signal[i] = wait_signals[i] if wait_signals and len(wait_signals) > i else EMPTY_SIGNAL
|
||||
packet.reserved2 = 0
|
||||
packet.completion_signal = completion_signal if completion_signal else EMPTY_SIGNAL
|
||||
packet.header = BARRIER_HEADER
|
||||
self._submit_packet()
|
||||
|
||||
def blit_packets(self, packet_addr, packet_cnt):
|
||||
if self.available_packet_slots < packet_cnt: self._wait_queue(packet_cnt)
|
||||
|
||||
tail_blit_packets = min((self.queue_base + self.queue_size - self.write_addr) // AQL_PACKET_SIZE, packet_cnt)
|
||||
rem_packet_cnt = packet_cnt - tail_blit_packets
|
||||
ctypes.memmove(self.write_addr, packet_addr, AQL_PACKET_SIZE * tail_blit_packets)
|
||||
if rem_packet_cnt > 0: ctypes.memmove(self.queue_base, packet_addr + AQL_PACKET_SIZE * tail_blit_packets, AQL_PACKET_SIZE * rem_packet_cnt)
|
||||
|
||||
self._submit_packet(packet_cnt)
|
||||
|
||||
def wait(self):
|
||||
self.submit_barrier([], finish_signal := self.device.alloc_signal(reusable=True))
|
||||
hsa.hsa_signal_wait_scacquire(finish_signal, hsa.HSA_SIGNAL_CONDITION_LT, 1, (1 << 64) - 1, hsa.HSA_WAIT_STATE_ACTIVE)
|
||||
self.available_packet_slots = self.queue_size // AQL_PACKET_SIZE
|
||||
|
||||
def _wait_queue(self, need_packets=1):
|
||||
while self.available_packet_slots < need_packets:
|
||||
rindex = hsa.hsa_queue_load_read_index_relaxed(self.hw_queue)
|
||||
self.available_packet_slots = self.queue_size // AQL_PACKET_SIZE - (self.next_doorbell_index - rindex)
|
||||
|
||||
def _submit_packet(self, cnt=1):
|
||||
self.available_packet_slots -= cnt
|
||||
self.next_doorbell_index += cnt
|
||||
hsa.hsa_queue_store_write_index_relaxed(self.hw_queue, self.next_doorbell_index)
|
||||
hsa.hsa_signal_store_screlease(self.hw_queue.contents.doorbell_signal, self.next_doorbell_index-1)
|
||||
|
||||
self.write_addr += AQL_PACKET_SIZE * cnt
|
||||
if self.write_addr > self.write_addr_end:
|
||||
self.write_addr = self.queue_base + (self.write_addr - self.queue_base) % self.queue_size
|
||||
|
||||
def scan_agents():
|
||||
agents = collections.defaultdict(list)
|
||||
|
||||
@ctypes.CFUNCTYPE(hsa.hsa_status_t, hsa.hsa_agent_t, ctypes.c_void_p)
|
||||
def __scan_agents(agent, data):
|
||||
status = hsa.hsa_agent_get_info(agent, hsa.HSA_AGENT_INFO_DEVICE, ctypes.byref(device_type := hsa.hsa_device_type_t()))
|
||||
if status == 0: agents[device_type.value].append(agent)
|
||||
return hsa.HSA_STATUS_SUCCESS
|
||||
|
||||
hsa.hsa_iterate_agents(__scan_agents, None)
|
||||
return agents
|
||||
|
||||
def find_memory_pool(agent, segtyp=-1, location=-1):
|
||||
@ctypes.CFUNCTYPE(hsa.hsa_status_t, hsa.hsa_amd_memory_pool_t, ctypes.c_void_p)
|
||||
def __filter_amd_memory_pools(mem_pool, data):
|
||||
check(hsa.hsa_amd_memory_pool_get_info(mem_pool, hsa.HSA_AMD_MEMORY_POOL_INFO_SEGMENT, ctypes.byref(segment := hsa.hsa_amd_segment_t())))
|
||||
if segtyp >= 0 and segment.value != segtyp: return hsa.HSA_STATUS_SUCCESS
|
||||
|
||||
check(hsa.hsa_amd_memory_pool_get_info(mem_pool, hsa.HSA_AMD_MEMORY_POOL_INFO_LOCATION, ctypes.byref(loc:=hsa.hsa_amd_memory_pool_location_t())))
|
||||
if location >= 0 and loc.value != location: return hsa.HSA_STATUS_SUCCESS
|
||||
|
||||
check(hsa.hsa_amd_memory_pool_get_info(mem_pool, hsa.HSA_AMD_MEMORY_POOL_INFO_SIZE, ctypes.byref(sz := ctypes.c_size_t())))
|
||||
if sz.value == 0: return hsa.HSA_STATUS_SUCCESS
|
||||
|
||||
ret = ctypes.cast(data, ctypes.POINTER(hsa.hsa_amd_memory_pool_t))
|
||||
ret[0] = mem_pool
|
||||
return hsa.HSA_STATUS_INFO_BREAK
|
||||
|
||||
hsa.hsa_amd_agent_iterate_memory_pools(agent, __filter_amd_memory_pools, ctypes.byref(region := hsa.hsa_amd_memory_pool_t()))
|
||||
return region
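A standalone sketch of the write-pointer arithmetic _submit_packet uses above: the write address advances by whole 64-byte AQL packets and wraps back into the ring buffer once it runs past the end. The toy queue size below is an assumption for illustration.

AQL_PACKET_SIZE = 64                                  # sizeof(hsa_kernel_dispatch_packet_t)
queue_base, queue_size = 0x1000, 4 * AQL_PACKET_SIZE  # a toy 4-slot queue
write_addr = queue_base + 3 * AQL_PACKET_SIZE         # currently at the last slot
write_addr += 2 * AQL_PACKET_SIZE                     # submit two packets...
if write_addr > queue_base + queue_size - 1:
  write_addr = queue_base + (write_addr - queue_base) % queue_size
assert write_addr == queue_base + AQL_PACKET_SIZE     # ...and wrap around to slot 1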
|
||||
@@ -1,171 +0,0 @@
|
||||
import ctypes, collections, time, itertools
|
||||
from typing import List, Any, Dict, cast, Optional, Tuple
|
||||
from tinygrad.helpers import init_c_var, round_up
|
||||
from tinygrad.device import Buffer, BufferSpec
|
||||
from tinygrad.device import Compiled, Device
|
||||
from tinygrad.uop.ops import Variable
|
||||
from tinygrad.runtime.ops_hsa import HSADevice, PROFILE, Profiler
|
||||
from tinygrad.engine.realize import ExecItem, BufferXfer, CompiledRunner
|
||||
from tinygrad.engine.jit import MultiGraphRunner, GraphException
|
||||
import tinygrad.runtime.autogen.hsa as hsa
|
||||
from tinygrad.runtime.support.hsa import check, AQLQueue, AQL_PACKET_SIZE, EMPTY_SIGNAL
|
||||
|
||||
def dedup_signals(signals): return [hsa.hsa_signal_t(hndl) for hndl in set([x.handle for x in signals if isinstance(x, hsa.hsa_signal_t)])]
|
||||
|
||||
class VirtAQLQueue(AQLQueue):
|
||||
def __init__(self, device, sz):
|
||||
self.device = device
|
||||
self.virt_queue = (hsa.hsa_kernel_dispatch_packet_t * sz)()
|
||||
self.queue_base = self.write_addr = ctypes.addressof(self.virt_queue)
|
||||
self.packets_count = 0
|
||||
self.available_packet_slots = sz
|
||||
def _wait_queue(self, need_packets=1): assert False, f"VirtQueue is too small to handle {self.packets_count+need_packets} packets!"
|
||||
def _submit_packet(self):
|
||||
self.write_addr += AQL_PACKET_SIZE
|
||||
self.packets_count += 1
|
||||
self.available_packet_slots -= 1
|
||||
|
||||
class HSAGraph(MultiGraphRunner):
|
||||
def __init__(self, jit_cache: List[ExecItem], input_rawbuffers: List[Buffer], var_vals: Dict[str, int]):
|
||||
super().__init__(jit_cache, input_rawbuffers, var_vals)
|
||||
|
||||
# Check all jit items are compatible.
|
||||
compiled_devices = set()
|
||||
for ji in self.jit_cache:
|
||||
if isinstance(ji.prg, CompiledRunner): compiled_devices.add(ji.prg.dev)
|
||||
elif isinstance(ji.prg, BufferXfer):
|
||||
for x in ji.bufs[0:2]: compiled_devices.add(Device[cast(Buffer, x).device])
|
||||
else: raise GraphException
|
||||
if any(not isinstance(d, HSADevice) for d in compiled_devices): raise GraphException
|
||||
|
||||
self.devices: List[HSADevice] = list(compiled_devices) #type:ignore
|
||||
|
||||
# Allocate kernel args.
|
||||
kernargs_size: Dict[Compiled, int] = collections.defaultdict(int)
|
||||
for ji in self.jit_cache:
|
||||
if isinstance(ji.prg, CompiledRunner): kernargs_size[ji.prg.dev] += round_up(ctypes.sizeof(ji.prg._prg.args_struct_t), 16)
|
||||
kernargs_ptrs: Dict[Compiled, int] = {dev:dev.allocator._alloc(sz, BufferSpec()) for dev,sz in kernargs_size.items()}
|
||||
|
||||
# Fill initial arguments.
|
||||
self.ji_kargs_structs: Dict[int, ctypes.Structure] = {}
|
||||
for j,ji in enumerate(self.jit_cache):
|
||||
if not isinstance(ji.prg, CompiledRunner): continue
|
||||
self.ji_kargs_structs[j] = ji.prg._prg.args_struct_t.from_address(kernargs_ptrs[ji.prg.dev])
|
||||
kernargs_ptrs[ji.prg.dev] += round_up(ctypes.sizeof(ji.prg._prg.args_struct_t), 16)
|
||||
for i in range(len(ji.bufs)): self.ji_kargs_structs[j].__setattr__(f'f{i}', cast(Buffer, ji.bufs[i])._buf)
|
||||
for i in range(len(ji.prg.p.vars)): self.ji_kargs_structs[j].__setattr__(f'v{i}', var_vals[ji.prg.p.vars[i].expr])
|
||||
|
||||
# Build queues.
|
||||
self.virt_aql_queues: Dict[Compiled, VirtAQLQueue] = {dev:VirtAQLQueue(dev, 2*len(self.jit_cache)+16) for dev in self.devices}
|
||||
self.packets = {}
|
||||
self.transfers = []
|
||||
self.ji_to_transfer: Dict[int, int] = {} # faster to store transfers as list and update using this mapping table.
|
||||
self.signals_to_reset: List[hsa.hsa_signal_t] = []
|
||||
self.signals_to_devices: Dict[ctypes.c_uint64, List[HSADevice]] = {}
|
||||
self.profile_info: Dict[Compiled, List[Tuple[Any, ...]]] = collections.defaultdict(list)
|
||||
|
||||
# Special packet to wait for the world.
|
||||
self.kickoff_signals: Dict[HSADevice, hsa.hsa_signal_t] = {dev:self.alloc_signal(reset_on_start=True) for dev in self.devices}
|
||||
for dev in self.devices: self.virt_aql_queues[dev].submit_barrier([], self.kickoff_signals[dev])
|
||||
|
||||
for j,ji in enumerate(self.jit_cache):
|
||||
if isinstance(ji.prg, CompiledRunner):
|
||||
wait_signals = self.access_resources(ji.bufs, ji.prg.p.outs, new_dependency=j, sync_with_aql_packets=False)
|
||||
for i in range(0, len(wait_signals), 5):
|
||||
self.virt_aql_queues[ji.prg.dev].submit_barrier(wait_signals[i:i+5])
|
||||
self.packets[j] = hsa.hsa_kernel_dispatch_packet_t.from_address(self.virt_aql_queues[ji.prg.dev].write_addr)
|
||||
|
||||
sync_signal = self.alloc_signal(reset_on_start=True) if PROFILE else None
|
||||
self.virt_aql_queues[ji.prg.dev].submit_kernel(ji.prg._prg, *ji.prg.p.launch_dims(var_vals), #type:ignore
|
||||
ctypes.addressof(self.ji_kargs_structs[j]), completion_signal=sync_signal)
|
||||
if PROFILE: self.profile_info[ji.prg.dev].append((sync_signal, ji.prg._prg.name, False))
|
||||
elif isinstance(ji.prg, BufferXfer):
|
||||
dest, src = [cast(Buffer, x) for x in ji.bufs[0:2]]
|
||||
dest_dev, src_dev = cast(HSADevice, Device[dest.device]), cast(HSADevice, Device[src.device])
|
||||
sync_signal = self.alloc_signal(reset_on_start=True, wait_on=[dest_dev, src_dev])
|
||||
|
||||
wait_signals = self.access_resources([dest, src], write=[0], new_dependency=sync_signal, sync_with_aql_packets=True)
|
||||
self.transfers.append([dest._buf, dest_dev.agent, src._buf, src_dev.agent, dest.nbytes, len(wait_signals),
|
||||
(hsa.hsa_signal_t*len(wait_signals))(*wait_signals), sync_signal, hsa.HSA_AMD_SDMA_ENGINE_0, True])
|
||||
self.ji_to_transfer[j] = len(self.transfers) - 1
|
||||
if PROFILE: self.profile_info[src_dev].append((sync_signal, f"transfer: HSA:{src_dev.device_id} -> HSA:{dest_dev.device_id}", True))
|
||||
|
||||
# Wait on all still-active signals so the graph only completes after everything has finished.
|
||||
wait_signals_to_finish: Dict[HSADevice, List[hsa.hsa_signal_t]] = collections.defaultdict(list)
|
||||
for v in dedup_signals(list(self.w_dependency_map.values()) + list(itertools.chain.from_iterable(self.r_dependency_map.values()))):
|
||||
for dev in self.signals_to_devices[v.handle]:
|
||||
wait_signals_to_finish[dev].append(v)
|
||||
|
||||
self.finish_signal = init_c_var(hsa.hsa_signal_t(), lambda x: check(hsa.hsa_amd_signal_create(1, 0, None, 0, ctypes.byref(x))))
|
||||
for dev in self.devices:
|
||||
wait_signals = wait_signals_to_finish[dev]
|
||||
for i in range(0, max(1, len(wait_signals)), 5):
|
||||
self.virt_aql_queues[dev].submit_barrier(wait_signals[i:i+5], completion_signal=self.finish_signal if i+5>=len(wait_signals) else None)
|
||||
|
||||
# Zero signals to allow graph to start and execute.
|
||||
for sig in self.signals_to_reset: hsa.hsa_signal_silent_store_relaxed(sig, 0)
|
||||
hsa.hsa_signal_silent_store_relaxed(self.finish_signal, 0)
|
||||
|
||||
def __call__(self, input_rawbuffers: List[Buffer], var_vals: Dict[str, int], wait=False) -> Optional[float]:
|
||||
# Wait and restore signals
|
||||
hsa.hsa_signal_wait_scacquire(self.finish_signal, hsa.HSA_SIGNAL_CONDITION_LT, 1, (1 << 64) - 1, hsa.HSA_WAIT_STATE_ACTIVE)
|
||||
for sig in self.signals_to_reset: hsa.hsa_signal_silent_store_relaxed(sig, 1)
|
||||
hsa.hsa_signal_silent_store_relaxed(self.finish_signal, len(self.devices))
|
||||
|
||||
# Update rawbuffers
|
||||
for (j,i),input_idx in self.input_replace.items():
|
||||
if j in self.ji_kargs_structs:
|
||||
self.ji_kargs_structs[j].__setattr__(f'f{i}', input_rawbuffers[input_idx]._buf)
|
||||
else:
|
||||
if i == 0: self.transfers[self.ji_to_transfer[j]][0] = input_rawbuffers[input_idx]._buf # dest
|
||||
elif i == 1: self.transfers[self.ji_to_transfer[j]][2] = input_rawbuffers[input_idx]._buf # src
|
||||
|
||||
# Update var_vals
|
||||
for j in self.jc_idx_with_updatable_var_vals:
|
||||
for i,v in enumerate(cast(CompiledRunner, self.jit_cache[j].prg).p.vars):
|
||||
self.ji_kargs_structs[j].__setattr__(f'v{i}', var_vals[v.expr])
|
||||
|
||||
# Update launch dims
|
||||
for j in self.jc_idx_with_updatable_launch_dims:
|
||||
gl, lc = cast(CompiledRunner, self.jit_cache[j].prg).p.launch_dims(var_vals)
|
||||
self.packets[j].workgroup_size_x = lc[0]
|
||||
self.packets[j].workgroup_size_y = lc[1]
|
||||
self.packets[j].workgroup_size_z = lc[2]
|
||||
self.packets[j].grid_size_x = gl[0] * lc[0]
|
||||
self.packets[j].grid_size_y = gl[1] * lc[1]
|
||||
self.packets[j].grid_size_z = gl[2] * lc[2]
|
||||
|
||||
for dev in self.devices:
|
||||
dev.flush_hdp()
|
||||
dev.hw_queue.blit_packets(self.virt_aql_queues[dev].queue_base, self.virt_aql_queues[dev].packets_count)
|
||||
|
||||
for transfer_data in self.transfers:
|
||||
check(hsa.hsa_amd_memory_async_copy_on_engine(*transfer_data))
|
||||
|
||||
et = None
|
||||
if wait:
|
||||
st = time.perf_counter()
|
||||
hsa.hsa_signal_wait_scacquire(self.finish_signal, hsa.HSA_SIGNAL_CONDITION_LT, 1, (1 << 64) - 1, hsa.HSA_WAIT_STATE_ACTIVE)
|
||||
et = time.perf_counter() - st
|
||||
|
||||
for profdev,profdata in self.profile_info.items(): Profiler.tracked_signals[profdev] += profdata
|
||||
return et
|
||||
|
||||
def alloc_signal(self, reset_on_start=False, wait_on=None):
|
||||
sync_signal = init_c_var(hsa.hsa_signal_t(), lambda x: check(hsa.hsa_amd_signal_create(1, 0, None, 0, ctypes.byref(x))))
|
||||
if reset_on_start: self.signals_to_reset.append(sync_signal)
|
||||
if wait_on is not None: self.signals_to_devices[sync_signal.handle] = wait_on
|
||||
return sync_signal
|
||||
|
||||
def dependency_as_signal(self, dep, sync_with_aql_packets) -> Optional[hsa.hsa_signal_t]:
|
||||
if isinstance(dep, hsa.hsa_signal_t): return dep
|
||||
elif sync_with_aql_packets and isinstance(packet := self.packets.get(dep), hsa.hsa_kernel_dispatch_packet_t):
|
||||
if packet.completion_signal.handle == EMPTY_SIGNAL.handle: packet.completion_signal = self.alloc_signal(reset_on_start=True)
|
||||
return packet.completion_signal
|
||||
return None
|
||||
|
||||
def access_resources(self, rawbufs, write, new_dependency, sync_with_aql_packets=False):
|
||||
rdeps = self._access_resources(rawbufs, write, new_dependency)
|
||||
wait_signals = [self.dependency_as_signal(dep, sync_with_aql_packets=sync_with_aql_packets) for dep in rdeps]
|
||||
if sync_with_aql_packets: wait_signals += [self.kickoff_signals[cast(HSADevice, Device[rawbuf.device])] for rawbuf in rawbufs]
|
||||
return dedup_signals(wait_signals)
|
||||
@@ -1,275 +0,0 @@
|
||||
from __future__ import annotations
|
||||
import ctypes, functools, subprocess, io, atexit, collections, json
|
||||
from typing import Tuple, TypeVar, List, Dict, Any
|
||||
import tinygrad.runtime.autogen.hsa as hsa
|
||||
from tinygrad.helpers import DEBUG, init_c_var, from_mv, round_up, to_mv, init_c_struct_t, getenv, PROFILE
|
||||
from tinygrad.device import Compiled, Compiler, CompileError, BufferSpec, LRUAllocator
|
||||
from tinygrad.renderer.cstyle import HIPRenderer
|
||||
from tinygrad.runtime.support.hsa import check, scan_agents, find_memory_pool, AQLQueue
|
||||
from tinygrad.runtime.support.hip_comgr import compile_hip
|
||||
if getenv("IOCTL"): import extra.hip_gpu_driver.hip_ioctl # noqa: F401
|
||||
|
||||
class HSAProfiler:
|
||||
def __init__(self):
|
||||
self.tracked_signals = collections.defaultdict(list)
|
||||
self.collected_events: List[Tuple[Any, ...]] = []
|
||||
self.copy_timings = hsa.hsa_amd_profiling_async_copy_time_t()
|
||||
self.disp_timings = hsa.hsa_amd_profiling_dispatch_time_t()
|
||||
|
||||
def track(self, signal, device, name, is_copy=False): self.tracked_signals[device].append((signal, name, is_copy))
|
||||
def process(self, device):
|
||||
# Process all tracked signals; this should be called before any of the tracked signals are reused.
|
||||
for sig,name,is_copy in self.tracked_signals[device]:
|
||||
if is_copy: check(hsa.hsa_amd_profiling_get_async_copy_time(sig, ctypes.byref(timings := self.copy_timings)))
|
||||
else: check(hsa.hsa_amd_profiling_get_dispatch_time(device.agent, sig, ctypes.byref(timings := self.disp_timings))) #type:ignore
|
||||
self.collected_events.append((device.device_id, 1 if is_copy else 0, name, timings.start, timings.end))
|
||||
self.tracked_signals.pop(device)
|
||||
|
||||
def save(self, path):
|
||||
mjson = []
|
||||
for i in range(len(HSADevice.devices)):
|
||||
mjson.append({"name": "process_name", "ph": "M", "pid": i, "args": {"name": "HSA"}})
|
||||
mjson.append({"name": "thread_name", "ph": "M", "pid": i, "tid": 0, "args": {"name": "AQL"}})
|
||||
mjson.append({"name": "thread_name", "ph": "M", "pid": i, "tid": 1, "args": {"name": "SDMA"}})
|
||||
|
||||
for dev_id,queue_id,name,st,et in self.collected_events:
|
||||
mjson.append({"name": name, "ph": "B", "pid": dev_id, "tid": queue_id, "ts": st*1e-3})
|
||||
mjson.append({"name": name, "ph": "E", "pid": dev_id, "tid": queue_id, "ts": et*1e-3})
|
||||
with open(path, "w") as f: f.write(json.dumps({"traceEvents": mjson}))
|
||||
print(f"Saved HSA profile to {path}")
|
||||
Profiler = HSAProfiler()
|
||||
|
||||
class HSACompiler(Compiler):
|
||||
def __init__(self, arch:str):
|
||||
self.arch = arch
|
||||
super().__init__(f"compile_hip_{self.arch}")
|
||||
def compile(self, src:str) -> bytes:
|
||||
try: return compile_hip(src, self.arch)
|
||||
except RuntimeError as e: raise CompileError(e)
|
||||
|
||||
class HSAProgram:
|
||||
def __init__(self, device:HSADevice, name:str, lib:bytes):
|
||||
self.device, self.name, self.lib = device, name, lib
|
||||
|
||||
if DEBUG >= 6:
|
||||
asm = subprocess.check_output(["/opt/rocm/llvm/bin/llvm-objdump", '-d', '-'], input=lib)
|
||||
print('\n'.join([x for x in asm.decode('utf-8').split("\n") if 's_code_end' not in x]))
|
||||
|
||||
self.exec = init_c_var(hsa.hsa_executable_t(), lambda x: check(hsa.hsa_executable_create_alt(hsa.HSA_PROFILE_FULL, hsa.HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT, None, ctypes.byref(x)))) # noqa: E501
|
||||
self.code_reader = init_c_var(hsa.hsa_code_object_reader_t(),
|
||||
lambda x: check(hsa.hsa_code_object_reader_create_from_memory(lib, len(lib), ctypes.byref(x))))
|
||||
check(hsa.hsa_executable_load_agent_code_object(self.exec, self.device.agent, self.code_reader, None, None))
|
||||
check(hsa.hsa_executable_freeze(self.exec, None))
|
||||
|
||||
self.kernel = init_c_var(hsa.hsa_executable_symbol_t(), lambda x: check(hsa.hsa_executable_get_symbol_by_name(self.exec, (name+".kd").encode("utf-8"), ctypes.byref(self.device.agent), ctypes.byref(x)))) # noqa: E501
|
||||
self.handle = init_c_var(ctypes.c_uint64(), lambda x: check(hsa.hsa_executable_symbol_get_info(self.kernel, hsa.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT, ctypes.byref(x)))) # noqa: E501
|
||||
self.kernargs_segment_size = init_c_var(ctypes.c_uint32(), lambda x: check(hsa.hsa_executable_symbol_get_info(self.kernel, hsa.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE, ctypes.byref(x)))).value # noqa: E501
|
||||
self.group_segment_size = init_c_var(ctypes.c_uint32(), lambda x: check(hsa.hsa_executable_symbol_get_info(self.kernel, hsa.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE, ctypes.byref(x)))).value # noqa: E501
|
||||
self.private_segment_size = init_c_var(ctypes.c_uint32(), lambda x: check(hsa.hsa_executable_symbol_get_info(self.kernel, hsa.HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE, ctypes.byref(x)))).value # noqa: E501
|
||||
|
||||
def __del__(self):
|
||||
self.device.synchronize()
|
||||
if hasattr(self, 'code_reader'): check(hsa.hsa_code_object_reader_destroy(self.code_reader))
|
||||
if hasattr(self, 'exec'): check(hsa.hsa_executable_destroy(self.exec))
|
||||
|
||||
def __call__(self, *args, global_size:Tuple[int,int,int]=(1,1,1), local_size:Tuple[int,int,int]=(1,1,1), vals:Tuple[int, ...]=(), wait=False):
|
||||
if not hasattr(self, "args_struct_t"):
|
||||
self.args_struct_t = init_c_struct_t(tuple([(f'f{i}', ctypes.c_void_p) for i in range(len(args))] +
|
||||
[(f'v{i}', ctypes.c_int) for i in range(len(vals))]))
|
||||
if ctypes.sizeof(self.args_struct_t) != self.kernargs_segment_size:
|
||||
raise RuntimeError(f"HSAProgram.__call__: incorrect args struct size {ctypes.sizeof(self.args_struct_t)} != {self.kernargs_segment_size}")
|
||||
|
||||
kernargs = None
|
||||
if self.kernargs_segment_size > 0:
|
||||
kernargs = self.device.alloc_kernargs(self.kernargs_segment_size)
|
||||
args_st = self.args_struct_t.from_address(kernargs)
|
||||
for i in range(len(args)): args_st.__setattr__(f'f{i}', args[i])
|
||||
for i in range(len(vals)): args_st.__setattr__(f'v{i}', vals[i])
|
||||
self.device.flush_hdp()
|
||||
|
||||
signal = self.device.alloc_signal(reusable=True) if wait or PROFILE else None
|
||||
self.device.hw_queue.submit_kernel(self, global_size, local_size, kernargs, completion_signal=signal)
|
||||
if PROFILE: Profiler.track(signal, self.device, self.name)
|
||||
if wait:
|
||||
hsa.hsa_signal_wait_scacquire(signal, hsa.HSA_SIGNAL_CONDITION_LT, 1, (1 << 64) - 1, hsa.HSA_WAIT_STATE_ACTIVE)
|
||||
check(hsa.hsa_amd_profiling_get_dispatch_time(self.device.agent, signal, ctypes.byref(timings := hsa.hsa_amd_profiling_dispatch_time_t())))
|
||||
return (timings.end - timings.start) * self.device.clocks_to_time
|
||||
|
||||
T = TypeVar("T")
|
||||
CHUNK_SIZE, PAGE_SIZE = 256*1024*1024, 0x1000
|
||||
class HSAAllocator(LRUAllocator):
|
||||
def __init__(self, device:HSADevice):
|
||||
self.device = device
|
||||
super().__init__()
|
||||
|
||||
def _alloc(self, size:int, options:BufferSpec):
|
||||
if options.host:
|
||||
check(hsa.hsa_amd_memory_pool_allocate(HSADevice.cpu_mempool, size, 0, ctypes.byref(mem := ctypes.c_void_p())))
|
||||
check(hsa.hsa_amd_agents_allow_access(2, (hsa.hsa_agent_t*2)(HSADevice.cpu_agent, self.device.agent), None, mem))
|
||||
return mem.value
|
||||
c_agents = (hsa.hsa_agent_t * len(HSADevice.agents[hsa.HSA_DEVICE_TYPE_GPU]))(*HSADevice.agents[hsa.HSA_DEVICE_TYPE_GPU])
|
||||
check(hsa.hsa_amd_memory_pool_allocate(self.device.gpu_mempool, size, 0, ctypes.byref(buf := ctypes.c_void_p())))
|
||||
check(hsa.hsa_amd_agents_allow_access(len(HSADevice.agents[hsa.HSA_DEVICE_TYPE_GPU]), c_agents, None, buf))
|
||||
return buf.value
|
||||
|
||||
def _free(self, opaque:T, options:BufferSpec):
|
||||
HSADevice.synchronize_system()
|
||||
check(hsa.hsa_amd_memory_pool_free(opaque))
|
||||
|
||||
def _copyin(self, dest:T, src: memoryview):
|
||||
# Async copyin sync model uses barriers on the main hw queue, since barriers are guaranteed to execute in order with all other packets.
|
||||
self.device.hw_queue.submit_barrier([], sync_signal := self.device.alloc_signal(reusable=True))
|
||||
mem = self._alloc(src.nbytes, BufferSpec(host=True))
|
||||
ctypes.memmove(mem, from_mv(src), src.nbytes)
|
||||
check(hsa.hsa_amd_memory_async_copy_on_engine(dest, self.device.agent, mem, HSADevice.cpu_agent, src.nbytes, 1, ctypes.byref(sync_signal),
|
||||
copy_signal := self.device.alloc_signal(reusable=True), hsa.HSA_AMD_SDMA_ENGINE_0, True))
|
||||
self.device.hw_queue.submit_barrier([copy_signal])
|
||||
self.device.delayed_free.append(mem)
|
||||
if PROFILE: Profiler.track(copy_signal, self.device, f"copyin: CPU -> HSA:{self.device.device_id}", is_copy=True)
|
||||
|
||||
def copy_from_fd(self, dest, fd, offset, size):
|
||||
self.device.hw_queue.submit_barrier([], sync_signal := self.device.alloc_signal(reusable=True))
|
||||
|
||||
if not hasattr(self, 'hb'):
|
||||
self.hb = [self._alloc(CHUNK_SIZE, BufferSpec(host=True)) for _ in range(2)]
|
||||
self.hb_signals = [self.device.alloc_signal(reusable=False) for _ in range(2)]
|
||||
self.hb_polarity = 0
|
||||
self.sdma = [hsa.HSA_AMD_SDMA_ENGINE_0, hsa.HSA_AMD_SDMA_ENGINE_1]
|
||||
for sig in self.hb_signals: hsa.hsa_signal_store_relaxed(sig, 0)
|
||||
|
||||
fo = io.FileIO(fd, "a+b", closefd=False)
|
||||
fo.seek(offset - (minor_offset:=offset % PAGE_SIZE))
|
||||
|
||||
copies_called = 0
|
||||
copied_in = 0
|
||||
for local_offset in range(0, size+minor_offset, CHUNK_SIZE):
|
||||
local_size = min(round_up(size+minor_offset, PAGE_SIZE)-local_offset, CHUNK_SIZE)
|
||||
copy_size = min(local_size-minor_offset, size-copied_in)
|
||||
if copy_size == 0: break
|
||||
|
||||
hsa.hsa_signal_wait_scacquire(self.hb_signals[self.hb_polarity], hsa.HSA_SIGNAL_CONDITION_LT, 1, (1 << 64) - 1, hsa.HSA_WAIT_STATE_ACTIVE)
|
||||
self.device.reusable_signals.append(self.hb_signals[self.hb_polarity]) # it's free now and can be reused
|
||||
self.hb_signals[self.hb_polarity] = self.device.alloc_signal(reusable=False)
|
||||
|
||||
fo.readinto(to_mv(self.hb[self.hb_polarity], local_size))
|
||||
check(hsa.hsa_amd_memory_async_copy_on_engine(dest+copied_in, self.device.agent, self.hb[self.hb_polarity]+minor_offset, HSADevice.cpu_agent,
|
||||
copy_size, 1, ctypes.byref(sync_signal), self.hb_signals[self.hb_polarity],
|
||||
self.sdma[self.hb_polarity], True))
|
||||
copied_in += copy_size
|
||||
self.hb_polarity = (self.hb_polarity + 1) % len(self.hb)
|
||||
minor_offset = 0 # the minor offset only applies to the first chunk
|
||||
copies_called += 1
|
||||
|
||||
wait_signals = [self.hb_signals[self.hb_polarity - 1]]
|
||||
if copies_called > 1: wait_signals.append(self.hb_signals[self.hb_polarity])
|
||||
self.device.hw_queue.submit_barrier(wait_signals)
|
||||
|
||||
def _copyout(self, dest:memoryview, src:T):
|
||||
HSADevice.synchronize_system()
|
||||
copy_signal = self.device.alloc_signal(reusable=True)
|
||||
c_agents = (hsa.hsa_agent_t*2)(self.device.agent, HSADevice.cpu_agent)
|
||||
check(hsa.hsa_amd_memory_lock_to_pool(from_mv(dest), dest.nbytes, c_agents, 2, HSADevice.cpu_mempool, 0, ctypes.byref(addr:=ctypes.c_void_p())))
|
||||
check(hsa.hsa_amd_memory_async_copy(addr, HSADevice.cpu_agent, src, self.device.agent, dest.nbytes, 0, None, copy_signal))
|
||||
hsa.hsa_signal_wait_scacquire(copy_signal, hsa.HSA_SIGNAL_CONDITION_LT, 1, (1 << 64) - 1, hsa.HSA_WAIT_STATE_ACTIVE)
|
||||
check(hsa.hsa_amd_memory_unlock(from_mv(dest)))
|
||||
if PROFILE: Profiler.track(copy_signal, self.device, f"copyout: HSA:{self.device.device_id} -> CPU", is_copy=True)
|
||||
|
||||
def transfer(self, dest:T, src:T, sz:int, src_dev=None, dest_dev=None):
|
||||
src_dev.hw_queue.submit_barrier([], sync_signal_1 := src_dev.alloc_signal(reusable=True))
|
||||
dest_dev.hw_queue.submit_barrier([], sync_signal_2 := dest_dev.alloc_signal(reusable=True))
|
||||
c_wait_signal = (hsa.hsa_signal_t*2)(sync_signal_1, sync_signal_2)
|
||||
check(hsa.hsa_amd_memory_async_copy_on_engine(dest, dest_dev.agent, src, src_dev.agent, sz, 2, c_wait_signal,
|
||||
copy_signal := dest_dev.alloc_signal(reusable=False), hsa.HSA_AMD_SDMA_ENGINE_0, True))
|
||||
src_dev.hw_queue.submit_barrier([copy_signal])
|
||||
dest_dev.hw_queue.submit_barrier([copy_signal])
|
||||
if PROFILE: Profiler.track(copy_signal, src_dev, f"transfer: HSA:{src_dev.device_id} -> HSA:{dest_dev.device_id}", is_copy=True)
|
||||
|
||||
class HSADevice(Compiled):
|
||||
devices: List[HSADevice] = []
|
||||
agents: Dict[int, List[hsa.hsa_agent_t]] = {}
|
||||
cpu_agent: hsa.hsa_agent_t
|
||||
cpu_mempool: hsa.hsa_amd_memory_pool_t
|
||||
def __init__(self, device:str=""):
|
||||
if not HSADevice.agents:
|
||||
check(hsa.hsa_init())
|
||||
atexit.register(hsa_terminate)
|
||||
HSADevice.agents = scan_agents()
|
||||
HSADevice.cpu_agent = HSADevice.agents[hsa.HSA_DEVICE_TYPE_CPU][0]
|
||||
HSADevice.cpu_mempool = find_memory_pool(HSADevice.cpu_agent, segtyp=hsa.HSA_AMD_SEGMENT_GLOBAL, location=hsa.HSA_AMD_MEMORY_POOL_LOCATION_CPU)
|
||||
if PROFILE: check(hsa.hsa_amd_profiling_async_copy_enable(1))
|
||||
|
||||
self.device_id = int(device.split(":")[1]) if ":" in device else 0
|
||||
self.agent = HSADevice.agents[hsa.HSA_DEVICE_TYPE_GPU][self.device_id]
|
||||
self.gpu_mempool = find_memory_pool(self.agent, segtyp=hsa.HSA_AMD_SEGMENT_GLOBAL, location=hsa.HSA_AMD_MEMORY_POOL_LOCATION_GPU)
|
||||
self.hw_queue = AQLQueue(self)
|
||||
HSADevice.devices.append(self)
|
||||
|
||||
check(hsa.hsa_agent_get_info(self.agent, hsa.HSA_AGENT_INFO_NAME, ctypes.byref(agent_name_buf := ctypes.create_string_buffer(256))))
|
||||
self.arch = ctypes.string_at(agent_name_buf).decode()
|
||||
|
||||
check(hsa.hsa_system_get_info(hsa.HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY, ctypes.byref(gpu_freq := ctypes.c_uint64())))
|
||||
self.clocks_to_time: float = 1 / gpu_freq.value
|
||||
|
||||
check(hsa.hsa_agent_get_info(self.agent, hsa.HSA_AMD_AGENT_INFO_HDP_FLUSH, ctypes.byref(hdp_flush := hsa.hsa_amd_hdp_flush_t())))
|
||||
self.hdp_flush = hdp_flush
|
||||
|
||||
self.delayed_free: List[int] = []
|
||||
self.reusable_signals: List[hsa.hsa_signal_t] = []
|
||||
|
||||
from tinygrad.runtime.graph.hsa import HSAGraph
|
||||
super().__init__(device, HSAAllocator(self), HIPRenderer(), HSACompiler(self.arch), functools.partial(HSAProgram, self), HSAGraph)
|
||||
|
||||
# Finish init: preallocate some signals + space for kernargs
|
||||
self.signal_pool = [init_c_var(hsa.hsa_signal_t(), lambda x: check(hsa.hsa_signal_create(1, 0, None, ctypes.byref(x)))) for _ in range(4096)]
|
||||
self._new_kernargs_region(16 << 20) # initial region size is 16mb
|
||||
|
||||
def synchronize(self):
|
||||
self.hw_queue.wait()
|
||||
|
||||
for sig in self.reusable_signals: hsa.hsa_signal_silent_store_relaxed(sig, 1)
|
||||
self.signal_pool.extend(self.reusable_signals)
|
||||
self.reusable_signals.clear()
|
||||
|
||||
for opaque_to_free in self.delayed_free: check(hsa.hsa_amd_memory_pool_free(opaque_to_free))
|
||||
self.delayed_free.clear()
|
||||
|
||||
self.kernarg_next_addr = self.kernarg_start_addr
|
||||
Profiler.process(self)
|
||||
|
||||
@staticmethod
|
||||
def synchronize_system():
|
||||
for d in HSADevice.devices: d.synchronize()
|
||||
|
||||
def alloc_signal(self, reusable=False):
|
||||
if len(self.signal_pool): signal = self.signal_pool.pop()
|
||||
else: check(hsa.hsa_amd_signal_create(1, 0, None, 0, ctypes.byref(signal := hsa.hsa_signal_t())))
|
||||
|
||||
# reusable means the signal can be reused once synchronize() is called on the device it was allocated from.
|
||||
if reusable: self.reusable_signals.append(signal)
|
||||
return signal
|
||||
|
||||
def alloc_kernargs(self, sz):
|
||||
if self.kernarg_next_addr + sz >= self.kernarg_start_addr + self.kernarg_pool_sz: self._new_kernargs_region(int(self.kernarg_pool_sz * 2))
|
||||
result = self.kernarg_next_addr
|
||||
self.kernarg_next_addr = round_up(self.kernarg_next_addr + sz, 16)
|
||||
return result
|
||||
|
||||
def _new_kernargs_region(self, sz:int):
|
||||
if hasattr(self, 'kernarg_start_addr'): self.delayed_free.append(self.kernarg_start_addr)
|
||||
self.kernarg_start_addr: int = self.allocator._alloc(sz, BufferSpec())
|
||||
self.kernarg_next_addr = self.kernarg_start_addr
|
||||
self.kernarg_pool_sz: int = sz
|
||||
|
||||
def flush_hdp(self): self.hdp_flush.HDP_MEM_FLUSH_CNTL[0] = 1
|
||||
|
||||
def hsa_terminate():
|
||||
# Need to stop/delete the aql queues before hsa shuts down, otherwise this leads to gpu hangs.
|
||||
for dev in HSADevice.devices:
|
||||
Profiler.process(dev)
|
||||
del dev.hw_queue
|
||||
|
||||
# hsa_shut_down cleans up all hsa-related resources.
|
||||
hsa.hsa_shut_down()
|
||||
HSADevice.synchronize = lambda: None #type:ignore
|
||||
HSAProgram.__del__ = lambda _: None #type:ignore
|
||||
if Profiler.collected_events: Profiler.save("/tmp/profile.json")
|
||||
@@ -1,127 +0,0 @@
|
||||
from typing import Dict, Set
|
||||
import yaml
|
||||
from tinygrad.codegen.uops import UOpGraph, UOps, UOp
|
||||
from tinygrad.uop.ops import BinaryOps
|
||||
from tinygrad.dtype import dtypes
|
||||
|
||||
def uops_to_rdna(function_name:str, uops:UOpGraph) -> str:
|
||||
replace: Dict[UOp, UOp] = {}
|
||||
seen: Set[UOp] = set()
|
||||
for u in uops:
|
||||
if u in seen: continue
|
||||
seen.add(u)
|
||||
for o,n in replace.items():
|
||||
if o in u.vin and u is not n:
|
||||
u.vin = tuple(n if x == o else x for x in u.vin)
|
||||
# pointer indexing
|
||||
if u.uop in {UOps.LOAD, UOps.STORE} and u.vin[0].dtype.itemsize > 1:
|
||||
val = UOp(UOps.CONST, dtypes.int, tuple(), arg=u.vin[0].dtype.itemsize, insert_at=uops.uops.index(u))
|
||||
ptr = UOp(UOps.ALU, dtypes.int, (u.vin[1], val), arg=BinaryOps.MUL, insert_at=uops.uops.index(u))
|
||||
u.vin = (u.vin[0], ptr) + u.vin[2:]
|
||||
#uops.print()
|
||||
|
||||
args = []
|
||||
ins = []
|
||||
|
||||
v_cnt = 3 # v[0:2] is local_xyz
|
||||
s_cnt = 5 # s[0:1] is the address, s[2:4] is global_xyz
|
||||
|
||||
r: Dict[UOp, str] = {}
|
||||
for u in uops:
|
||||
if u.uop == UOps.SPECIAL:
|
||||
if u.arg.startswith("lidx"):
|
||||
r[u] = f'v{u.src[0].arg}'
|
||||
elif u.arg.startswith("gidx"):
|
||||
r[u] = f's{2+u.src[0].arg}'
|
||||
else:
|
||||
raise NotImplementedError
|
||||
elif u.uop == UOps.CONST:
|
||||
#r[u] = u.arg
|
||||
|
||||
# TODO: sometimes we can use s
|
||||
#r[u] = f"s{s_cnt}"
|
||||
#s_cnt += 1
|
||||
#ins.append(f"s_mov_b32 {r[u]}, {u.arg}")
|
||||
|
||||
r[u] = f"v{v_cnt}"
|
||||
v_cnt += 1
|
||||
ins.append(f"v_mov_b32 {r[u]}, {u.arg}")
|
||||
elif u.uop == UOps.ALU:
|
||||
if u.arg == BinaryOps.ADD:
|
||||
r[u] = f"v{v_cnt}"
|
||||
v_cnt += 1
|
||||
ins.append(f"v_add_f32_e32 {r[u]}, {r[u.vin[0]]}, {r[u.vin[1]]}")
|
||||
elif u.arg == BinaryOps.MUL:
|
||||
r[u] = f"v{v_cnt}"
|
||||
v_cnt += 1
|
||||
if dtypes.is_float(u.dtype):
|
||||
ins.append(f"v_mul_f32_e32 {r[u]}, {r[u.vin[0]]}, {r[u.vin[1]]}")
|
||||
else:
|
||||
ins.append(f"v_mul_u32_u24 {r[u]}, {r[u.vin[0]]}, {r[u.vin[1]]}")
|
||||
else:
|
||||
raise NotImplementedError
|
||||
elif u.uop == UOps.LOAD:
|
||||
r[u] = f"v{v_cnt}"
|
||||
v_cnt += 1
|
||||
ins.append(f"global_load_b32 {r[u]}, {r[u.vin[1]]}, {r[u.vin[0]]}")
|
||||
ins.append("s_waitcnt vmcnt(0)")
|
||||
elif u.uop == UOps.STORE:
|
||||
ins.append(f"global_store_b32 {r[u.vin[1]]}, {r[u.vin[2]]}, {r[u.vin[0]]}")
|
||||
elif u.uop == UOps.DEFINE_GLOBAL:
|
||||
i = u.arg[0]
|
||||
args.append({'.address_space': 'global', '.name': f'buf_{i}', '.offset': i*8, '.size': 8,
|
||||
'.type_name': u.dtype.name+"*", '.value_kind': 'global_buffer'})
|
||||
s_cnt += s_cnt%2 # align to an even sgpr so the 64-bit pair s[n:n+1] is valid
|
||||
r[u] = f"s[{s_cnt}:{s_cnt+1}]"
|
||||
s_cnt += 2
|
||||
ins.append(f"s_load_b64 {r[u]}, s[0:1], {i*8}")
|
||||
ins.append("s_waitcnt lgkmcnt(0)")
|
||||
else:
|
||||
raise NotImplementedError(f"can't render {u.uop}")
|
||||
|
||||
# *** boilerplate rendering ***
|
||||
|
||||
metadata = {
|
||||
'amdhsa.kernels': [{'.args': args,
|
||||
'.group_segment_fixed_size': 0, '.kernarg_segment_align': 8, '.kernarg_segment_size': args[-1][".offset"] + args[-1][".size"],
|
||||
'.language': 'OpenCL C', '.language_version': [1, 2], '.max_flat_workgroup_size': 256,
|
||||
'.name': function_name, '.private_segment_fixed_size': 0, '.sgpr_count': s_cnt, '.sgpr_spill_count': 0,
|
||||
'.symbol': f'{function_name}.kd', '.uses_dynamic_stack': False, '.vgpr_count': v_cnt, '.vgpr_spill_count': 0,
|
||||
'.wavefront_size': 32}],
|
||||
'amdhsa.target': 'amdgcn-amd-amdhsa--gfx1100', 'amdhsa.version': [1, 2]}
|
||||
|
||||
boilerplate_start = f"""
|
||||
.rodata
|
||||
.global {function_name}.kd
|
||||
.type {function_name}.kd,STT_OBJECT
|
||||
.align 0x10
|
||||
.amdhsa_kernel {function_name}"""
|
||||
|
||||
kernel_desc = {
|
||||
'.amdhsa_group_segment_fixed_size': 0, '.amdhsa_private_segment_fixed_size': 0, '.amdhsa_kernarg_size': 0,
|
||||
'.amdhsa_next_free_vgpr': v_cnt, # this matters!
|
||||
'.amdhsa_reserve_vcc': 0, '.amdhsa_reserve_xnack_mask': 0,
|
||||
'.amdhsa_next_free_sgpr': s_cnt,
|
||||
'.amdhsa_float_round_mode_32': 0, '.amdhsa_float_round_mode_16_64': 0, '.amdhsa_float_denorm_mode_32': 3, '.amdhsa_float_denorm_mode_16_64': 3,
|
||||
'.amdhsa_dx10_clamp': 1, '.amdhsa_ieee_mode': 1, '.amdhsa_fp16_overflow': 0,
|
||||
'.amdhsa_workgroup_processor_mode': 1, '.amdhsa_memory_ordered': 1, '.amdhsa_forward_progress': 0, '.amdhsa_enable_private_segment': 0,
|
||||
'.amdhsa_system_sgpr_workgroup_id_x': 1, '.amdhsa_system_sgpr_workgroup_id_y': 1, '.amdhsa_system_sgpr_workgroup_id_z': 1,
|
||||
'.amdhsa_system_sgpr_workgroup_info': 0, '.amdhsa_system_vgpr_workitem_id': 2, # is amdhsa_system_vgpr_workitem_id real?
|
||||
'.amdhsa_exception_fp_ieee_invalid_op': 0, '.amdhsa_exception_fp_denorm_src': 0,
|
||||
'.amdhsa_exception_fp_ieee_div_zero': 0, '.amdhsa_exception_fp_ieee_overflow': 0, '.amdhsa_exception_fp_ieee_underflow': 0,
|
||||
'.amdhsa_exception_fp_ieee_inexact': 0, '.amdhsa_exception_int_div_zero': 0,
|
||||
'.amdhsa_user_sgpr_dispatch_ptr': 0, '.amdhsa_user_sgpr_queue_ptr': 0, '.amdhsa_user_sgpr_kernarg_segment_ptr': 1,
|
||||
'.amdhsa_user_sgpr_dispatch_id': 0, '.amdhsa_user_sgpr_private_segment_size': 0, '.amdhsa_wavefront_size32': 1, '.amdhsa_uses_dynamic_stack': 0}
|
||||
|
||||
code_start = f""".end_amdhsa_kernel
|
||||
.text
|
||||
.global {function_name}
|
||||
.type {function_name},@function
|
||||
.p2align 8
|
||||
{function_name}:
|
||||
"""
|
||||
|
||||
ins += ['s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)', 's_endpgm', 's_code_end']
|
||||
return ".amdgpu_metadata\n" + yaml.dump(metadata) + ".end_amdgpu_metadata" + \
|
||||
boilerplate_start + "\n" + '\n'.join("%s %d" % x for x in kernel_desc.items()) + "\n" + code_start + \
|
||||
'\n'.join(ins) + f"\n.size {function_name}, .-{function_name}"
|
||||
@@ -1,131 +0,0 @@
|
||||
from typing import Dict, List, Final, Callable, DefaultDict
|
||||
from collections import defaultdict
|
||||
from tinygrad.uop.ops import UnaryOps, BinaryOps, TernaryOps, Op
|
||||
from tinygrad.helpers import DType, PtrDType, dtypes, ImageDType, DEBUG, getenv
|
||||
from tinygrad.codegen.opt.kernel import UOp, Ops
|
||||
from triton.compiler import compile as triton_compile
|
||||
import linecache
|
||||
import math
|
||||
import re
|
||||
|
||||
triton_dtypes = {dtypes.double: "tl.float64", dtypes.float32: "tl.float32", dtypes.float16: "tl.float16", dtypes.bool: "tl.int1", dtypes.int8: "tl.int8", dtypes.uint8: "tl.uint8", dtypes.int32: "tl.int32", dtypes.int64: "tl.int64", dtypes.uint32: "tl.uint32", dtypes.uint64: "tl.uint64", dtypes.int16: "tl.int16", dtypes.uint16: "tl.uint16"}
|
||||
signature_dtypes = {dtypes.double: "fp64",dtypes.float32: "fp32", dtypes.float16: "fp16", dtypes.bool: "i8", dtypes.int8: "i1", dtypes.uint8: "u8", dtypes.int32: "i32", dtypes.int64: "i64", dtypes.uint32: "u32", dtypes.uint64: "u64", dtypes.int16: "i16", dtypes.uint16: "u16"}
|
||||
|
||||
def next_power_of_2(x):
|
||||
return 1 << (x - 1).bit_length()
|
||||
|
||||
def render_valid(valid):
|
||||
return '(' * (len(valid) -1) + ') and '.join(valid) if len(valid) else 'True'
|
||||
|
||||
#NOTE: Triton requires matching dimensions for load/store; disable this and TestOps::test_output_padded_conv_transpose2d fails to compile
|
||||
def fill_dims_for_idx(idx, dims):
|
||||
return "(" + idx + "+ (" + (f"0*({'+'.join(d for d in dims)})))") if len(dims) else idx
|
||||
|
||||
def get_max(var):
|
||||
if isinstance(var, int): return var
|
||||
return re.sub(r'\[(.*?)\]', '', str(var))[1:-1]
|
||||
|
||||
#NOTE can be removed after https://github.com/gpuocelot/gpuocelot/issues/8 gets resolved
|
||||
def remove_single_scalar_curly_braces(ptx_code):
|
||||
return '\n'.join([re.sub(r'\{\s*(%\w+)\s*\}', r'\1', line) for line in ptx_code.split('\n')])
|
||||
|
||||
def render_const(args,dtype:DType):
|
||||
return (('-' if args<0 else '') + 'tl.where(1,float("inf"),0)') if math.isinf(args) else ('tl.where(1,float("nan"),0)' if math.isnan(args) else f"{int(args)}" if dtypes.is_int(dtype) else str(args))
|
||||
|
||||
def render_cast(x:str, dtype:DType, bitcast=False):
|
||||
return f"{x}.to({triton_dtypes[dtype]}, bitcast={bitcast})"
|
||||
|
||||
def define_scalar(local_size, dtype, args):
|
||||
if len(local_size) > 0: return f"tl.full(({','.join([str(next_power_of_2(x)) for x in local_size])},),{render_const(args,dtype)}, dtype={triton_dtypes[dtype]})"
|
||||
return render_const(args,dtype)
|
||||
|
||||
def uops_to_triton(function_name:str, uops:List[UOp]):
|
||||
local_size: List[int] = []
|
||||
depth = 1
|
||||
signatures, dims, bufs, kernel, valid = [], [], [], [], [] #type: ignore
|
||||
|
||||
c: DefaultDict[str, int] = defaultdict(int)
|
||||
r: Dict[UOp, str] = {}
|
||||
def ssa(u, prefix="t"):
|
||||
nonlocal c, r
|
||||
c[prefix] += 1
|
||||
r[u]=f"{prefix}{c[prefix]-1}"
|
||||
return r[u]
|
||||
|
||||
child_count: DefaultDict[UOp, int] = defaultdict(int)
|
||||
for ru in uops:
|
||||
for v in ru.vin:
|
||||
child_count[v] += 1
|
||||
|
||||
def kk(s): kernel.append(" "*depth+s)
|
||||
code_for_op: Final[Dict[Op, Callable]] = {
|
||||
UnaryOps.EXP2: lambda x,dtype,: f"tl.math.exp2({x})",
|
||||
UnaryOps.LOG2: lambda x,dtype,: f"tl.math.log2({x})",
|
||||
UnaryOps.SIN: lambda x,dtype: f"tl.sin({x})",
|
||||
UnaryOps.SQRT: lambda x,dtype: f"tl.sqrt({x})",
|
||||
UnaryOps.NEG: lambda x,dtype: f"-{x}",
|
||||
BinaryOps.ADD: lambda x,y,dtype: f"({x}+{y})", BinaryOps.SUB: lambda x,y,: f"({x}-{y})",
|
||||
BinaryOps.MUL: lambda x,y,dtype: f"({x}*{y})", BinaryOps.DIV: lambda x,y,: f"({x}/{y})" if y != '0.0' else f"{x}*tl.where({x}==0.0, float('nan'), float('inf'))",
|
||||
BinaryOps.MAX: lambda x,y,dtype: f"tl.maximum({x},{y})",
|
||||
BinaryOps.CMPLT: lambda x,y,dtype: f"({x}<{y})",
|
||||
BinaryOps.MOD: lambda x,y,dtype: f"tl.abs({x})%tl.abs({y})*tl.where({x}<0,-1,1)",
|
||||
TernaryOps.MULACC: lambda x,y,z,dtype: f"(({x}*{y})+{z})",
|
||||
TernaryOps.WHERE: lambda x,y,z,dtype: f"tl.where({x},{y},{z})",
|
||||
}
|
||||
def int_div(x,y): return f"({x}//{y})" if y != '0' else f"{x}*tl.where({x}==0, float('nan'), float('inf'))"
|
||||
for u in uops:
|
||||
uop,dtype,vin,args = u.uop,u.dtype,u.vin,u.arg
|
||||
if uop == Ops.LOOP:
|
||||
kk(f"for {ssa(u, 'ridx')} in range({vin[0].arg}, {r[vin[1]]}):")
|
||||
depth += 1
|
||||
elif uop == Ops.END: depth -= 1
|
||||
elif uop == Ops.ALU:
|
||||
assert dtype is not None
|
||||
val = code_for_op[args](*[r[x] for x in vin])
|
||||
if child_count[u] <=1 or dtypes.is_int(dtype): r[u] = int_div(*[r[x] for x in vin]) if args == BinaryOps.DIV and dtypes.is_int(dtype) else val
|
||||
else: kk(f"{ssa(u, 'alu')} = ({val})")
|
||||
elif uop == Ops.LOAD:
|
||||
assert dtype is not None
|
||||
if len(vin) == 2: kk(f"{ssa(u, 'val')} = {render_cast(f'tl.load({r[vin[0]]} + { fill_dims_for_idx(r[vin[1]], dims)}, mask = {render_valid(valid)})', dtype)}")
|
||||
else: kk(f"{ssa(u, 'val')} = {render_cast(f'tl.where({r[vin[2]]}, tl.load({r[vin[0]]}+{fill_dims_for_idx(r[vin[1]],dims)} , mask={render_valid(valid+[r[vin[2]]])}), 0.0)', dtype)}")
|
||||
elif uop == Ops.DEFINE_REG: kk(f"{ssa(u, 'acc')} = {define_scalar(local_size, dtype, args).replace('//', '/')}")
|
||||
elif uop == Ops.CONST: r[u] = define_scalar([], dtype, args)
|
||||
elif uop == Ops.ASSIGN:
|
||||
kk(f"{r[vin[0]]} = {r[vin[1]].replace('//', '/')}")
|
||||
r[u] = r[vin[0]]
|
||||
elif uop == Ops.STORE:
|
||||
assert not isinstance(dtype, ImageDType), "unimplemented: image store"
|
||||
kk(f"{'if '+r[vin[3]]+': ' if len(vin)>3 else ''}tl.store({r[vin[0]]} + {r[vin[1]]}, {r[vin[2]].replace('//', '/')}, mask = {render_valid(valid)}) ")
|
||||
elif uop == Ops.DEFINE_GLOBAL:
|
||||
bufs.append(args)
|
||||
signatures.append("*" if isinstance(dtype, PtrDType) else "" + signature_dtypes[dtype])
|
||||
r[u] = args
|
||||
elif uop == Ops.SPECIAL:
|
||||
dims.append(args[1])
|
||||
valid.append(f"{args[1]}<{get_max(args[2])}")
|
||||
if args[1].startswith("g"): kk(f"{args[1]} = tl.program_id({args[0]}) # {args[2]}")
|
||||
elif args[1].startswith("l"):
|
||||
kk(f"{args[1]} = tl.arange({0}, {next_power_of_2(args[2])})")
|
||||
local_size.append(args[2])
|
||||
r[u] = args[1]
|
||||
elif uop == Ops.CAST and dtype is not None: r[u] = render_cast(r[vin[0]], dtype, isinstance(args, tuple) and args[1])
|
||||
else: raise NotImplementedError(f"unimplemented: {uop}")
|
||||
|
||||
prg = f"import triton\nimport triton.language as tl\ntl.core.TRITON_MAX_TENSOR_NUMEL = float('inf')\n@triton.jit\ndef {function_name}("+','.join(bufs)+"):\n"
|
||||
for i, line in enumerate(list(filter(lambda line: "tl.arange" in line, kernel))): kernel[kernel.index(line)] += f"[{', '.join([':' if i == j else 'None' for j in range(len(local_size))])}]"
|
||||
prg += "\n".join(kernel)
|
||||
|
||||
acc_local_size = 1
|
||||
for x in local_size: acc_local_size *= next_power_of_2(x)
|
||||
local_size = [acc_local_size] + [1] * (len(local_size) - 1)
|
||||
|
||||
if DEBUG >= 4: print(prg)
|
||||
getlines = linecache.getlines
|
||||
linecache.getlines = lambda filename, module_globals=None: prg.splitlines(keepends=True) if "<triton>" == filename else getlines(filename, module_globals)
|
||||
exec(compile(prg, "<triton>", "exec"), globals()) # pylint: disable=W0122\
|
||||
compiled = triton_compile(globals()[function_name], signature=",".join(signatures), device_type="cuda", debug=False, cc=(35 if getenv("CUDACPU", 0) else None))
|
||||
prg = remove_single_scalar_curly_braces(compiled.asm["ptx"].split(".file")[0].split(".visible .func")[0])
|
||||
max_local_size = [int(x) for x in prg.split(".maxntid ")[1].split("\n")[0].split(", ")]
|
||||
for i in range(len(local_size)): local_size[i] = min(local_size[i], max_local_size[i])
|
||||
|
||||
return prg, {"shared":compiled.metadata["shared"], "local_size":local_size + [1]*(3-len(local_size))}
|
||||
@@ -1,34 +0,0 @@
#!/usr/bin/env python3
from tinygrad import Tensor, Device, GlobalCounters, Context, dtypes
from tinygrad.helpers import getenv, colored

SZ = 8_000_000_000
GPUS = getenv("GPUS", 4) # TODO: expose a way in tinygrad to access this

if __name__ == "__main__":
  # create tensors
  tens = [Tensor.ones(SZ, dtype=dtypes.uint8, device=f"{Device.DEFAULT}:{i}").contiguous() for i in range(GPUS)]
  Tensor.realize(*tens)

  bw = [[0.0]*GPUS for _ in range(GPUS)]
  for i in range(GPUS):
    for j in range(GPUS):
      GlobalCounters.reset()
      with Context(DEBUG=2):
        if i == j:
          # this copy would be optimized out, just add 1
          (tens[i]+1).realize()
        else:
          tens[i].to(f"{Device.DEFAULT}:{j}").realize()
      t = max(GlobalCounters.time_sum_s, 1e-9)
      bw[i][j] = SZ / t / 1e9 # GB/s

  def fmt(x):
    c = "green" if x > 50 else "yellow" if x > 20 else "red"
    return colored(f"{x:6.1f}", c)

  # header
  print(" " * 8 + " ".join(f"{'d'+str(j):>6}" for j in range(GPUS)))
  # rows
  for i in range(GPUS):
    print(f"{'s'+str(i):>6} -> " + " ".join(fmt(x) for x in bw[i]))
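A minimal sketch of the same GlobalCounters-based measurement for a single device pair (an illustration, not part of the deleted file; it assumes at least two devices are visible and uses a smaller buffer):

from tinygrad import Tensor, Device, GlobalCounters, Context, dtypes

SZ_SMALL = 1_000_000_000  # 1 GB is enough for a quick sanity check
src = Tensor.ones(SZ_SMALL, dtype=dtypes.uint8, device=f"{Device.DEFAULT}:0").contiguous().realize()

GlobalCounters.reset()
with Context(DEBUG=2):
  src.to(f"{Device.DEFAULT}:1").realize()  # device-to-device copy, timed via GlobalCounters
print(f"{SZ_SMALL / max(GlobalCounters.time_sum_s, 1e-9) / 1e9:.1f} GB/s")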
@@ -1,22 +0,0 @@
import ctypes
import os
import pathlib
import struct
from hexdump import hexdump

fxn = None
def disasm_raw(buf):
  global fxn
  if fxn is None:
    shared = pathlib.Path(__file__).parent / "disasm.so"
    if not shared.is_file():
      os.system(f'cd {pathlib.Path(__file__).parent} && gcc -shared disasm-a3xx.c -o disasm.so')
    fxn = ctypes.CDLL(shared.as_posix())['disasm']
  fxn(buf, len(buf))

def disasm(buf):
  def _read_lib(off): return struct.unpack("I", buf[off:off+4])[0]

  image_offset = _read_lib(0xc0)
  image_size = _read_lib(0x100)
  disasm_raw(buf[image_offset:image_offset+image_size])
@@ -1,120 +0,0 @@
#!/usr/bin/env python3
import os, ctypes, ctypes.util, io, mmap, pathlib
from tinygrad import Tensor, dtypes, Device
from tinygrad.helpers import Timing, from_mv
libc = ctypes.CDLL(ctypes.util.find_library("c"))

#from extra.hip_gpu_driver import hip_ioctl

# sudo su -c "echo 3 > /proc/sys/vm/drop_caches"

# sudo su -c 'echo 8 > /proc/sys/kernel/printk'
# sudo su -c "echo 'module amdgpu +p' > /sys/kernel/debug/dynamic_debug/control"

libc.memcpy.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]

libc.read.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_size_t]
libc.read.restype = ctypes.c_size_t

libc.malloc.argtypes = [ctypes.c_size_t]
libc.malloc.restype = ctypes.c_void_p

def read_direct(fd, sz):
  with Timing("mmap: ", lambda x: f", {sz/x:.2f} GB/s"):
    buf = mmap.mmap(-1, sz, flags=mmap.MAP_SHARED|mmap.MAP_POPULATE)
  with Timing("read: ", lambda x: f", {sz/x:.2f} GB/s"):
    ret = libc.read(fd, from_mv(buf), sz)
    assert ret == sz

def read_mmap(fd, sz):
  with Timing("mmfd: ", lambda x: f", {sz/x:.2f} GB/s"):
    buf = mmap.mmap(fd, sz, flags=mmap.MAP_SHARED|mmap.MAP_POPULATE) #|MAP_LOCKED)
    t = 0
    for i in range(0, sz, 0x1000): t += buf[i]

# def _copyin_async(self, dest:T, src:T, size:int): check(hip.hipMemcpyAsync(dest, src, size, hip.hipMemcpyHostToDevice, None))

def read_to_gpu_mmap(fd, sz, gpubuf):
  with Timing("gpu copyin: ", lambda x: f", {sz/x:.2f} GB/s"):
    with Timing("mmfd: ", lambda x: f", {sz/x:.2f} GB/s"):
      buf = mmap.mmap(fd, sz, flags=mmap.MAP_SHARED|mmap.MAP_POPULATE) #|MAP_LOCKED)
    dev.allocator._copyin_async(gpubuf, from_mv(buf), sz)
    dev.synchronize()

def read_to_gpu_single(fd, sz, gpubuf):
  os.lseek(fd, 0, os.SEEK_SET)
  with Timing("total: ", lambda x: f", {sz/x:.2f} GB/s"):
    with Timing("gpu host alloc: ", lambda x: f", {sz/x:.2f} GB/s"):
      hst = dev.allocator._hostalloc(sz)
    with Timing("read to host: ", lambda x: f", {sz/x:.2f} GB/s"):
      ret = libc.read(fd, hst, sz)
    with Timing("gpu host copy: ", lambda x: f", {sz/x:.2f} GB/s"):
      dev.allocator._copyin_async(gpubuf, hst, sz)
    dev.synchronize()

def read_to_gpu_pingpong(fd, sz, gpubuf):
  psz = 256*1024*1024
  print(f"piece size {psz/(1024*1024):.2f} MB")
  with Timing("gpu host alloc: ", lambda x: f", {sz/x:.2f} GB/s"):
    hst1 = dev.allocator._hostalloc(psz)
    hst2 = dev.allocator._hostalloc(psz)

  os.lseek(fd, 0, os.SEEK_SET)
  with Timing("total: ", lambda x: f", {sz/x:.2f} GB/s"):
    for i in range(sz//(psz*2)):
      with Timing("tfer(0): ", lambda x: f", {psz/x:.2f} GB/s"):
        ret = libc.read(fd, hst1, psz)
        dev.synchronize()
        dev.allocator._copyin_async(gpubuf, hst1, psz)
      with Timing("tfer(1): ", lambda x: f", {psz/x:.2f} GB/s"):
        ret = libc.read(fd, hst2, psz)
        dev.synchronize()
        dev.allocator._copyin_async(gpubuf, hst2, psz)
    dev.synchronize()

MAP_LOCKED = 0x2000
MAP_HUGETLB = 0x40000

if __name__ == "__main__":
  dev = Device[Device.DEFAULT]

  warm = (Tensor.ones(1024, device=Device.DEFAULT).contiguous() + Tensor.ones(1024, device=Device.DEFAULT).contiguous()).realize()
  #fn = "/home/tiny/tinygrad/weights/rng"
  fn = pathlib.Path(__file__).parents[1] / "weights/LLaMA-2/70B/consolidated.00.pth"
  sz = os.stat(fn).st_size
  t = Tensor.empty(sz, dtype=dtypes.uint8, device=f"disk:{fn}")
  with Timing("copy: ", lambda x: f", {sz/x:.2f} GB/s"):
    on_dev = t.to(Device.DEFAULT).realize()

  exit(0)

  # 4GB of random numbers
  #fd = os.open("/home/tiny/tinygrad/weights/rng", os.O_RDWR|os.O_DIRECT)
  #sz = os.fstat(fd).st_size // 4
  fd = os.open("/home/tiny/tinygrad/weights/LLaMA/7B/consolidated.00.pth", os.O_RDWR|os.O_DIRECT)
  sz = os.fstat(fd).st_size
  print(f"read {sz} from {fd}")

  with Timing("gpu alloc: ", lambda x: f", {sz/x:.2f} GB/s"):
    gpubuf = dev.allocator._alloc(sz)
  # warmup
  dev.allocator._copyin_async(gpubuf, from_mv(bytearray(b"\x00\x00\x00\x00"*0x1000)), 0x4000)
  print("copying, is warm")

  print("****** read to gpu pingpong")
  read_to_gpu_pingpong(fd, sz, gpubuf)
  exit(0)

  print("****** read direct")
  read_direct(fd, sz)

  print("****** read mmap")
  read_mmap(fd, sz)

  print("****** read to gpu single")
  read_to_gpu_single(fd, sz, gpubuf)

  print("****** read to gpu mmap")
  read_to_gpu_mmap(fd, sz, gpubuf)

  os._exit(0)
@@ -1,21 +0,0 @@
import sys, sqlite3, pickle
from tinygrad.helpers import CACHEDB

if __name__ == "__main__":
  fn = sys.argv[1] if len(sys.argv) > 1 else CACHEDB
  conn = sqlite3.connect(fn)
  cur = conn.cursor()
  cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
  for f in cur.fetchall():
    table = f[0]
    cur2 = conn.cursor()
    cur2.execute(f"SELECT COUNT(*) FROM {table}")
    cnt = cur2.fetchone()[0]
    print(f"{table:20s} : {cnt}")

    cur3 = conn.cursor()
    cur3.execute(f"SELECT * FROM {table} LIMIT 10")
    for f in cur3.fetchall():
      v = pickle.loads(f[-1])
      print(" ", len(f[0]) if isinstance(f[0], str) else f[0], f[1:-1], str(v)[0:50])
      #print(f"{len(k):10d}, {sk} -> {v}")
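Entries like the ones dumped above are typically written through tinygrad's diskcache helpers; a minimal sketch (the table name and key here are made up for illustration):

from tinygrad.helpers import diskcache_put, diskcache_get

diskcache_put("example_table", {"key": 1}, {"some": "value"})  # value is pickled into CACHEDB
print(diskcache_get("example_table", {"key": 1}))  # -> {'some': 'value'}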
@@ -1,27 +0,0 @@
#!/usr/bin/env python3
import time
import jax
import jax.numpy as jnp

print(jax.devices())
DEVICES = len(jax.devices())
BS = 32
N = 4096
dtype = jnp.float16
A = jnp.zeros((DEVICES, BS, N, N), dtype)
B = jnp.zeros((1, 1, N, N), dtype)
A = jax.device_put_sharded([A[i] for i in range(DEVICES)], jax.devices())
B = jax.device_put_sharded([B for i in range(DEVICES)], jax.devices())

OPS = DEVICES*BS*N*N*N*2
def matmul(A,B): return jnp.matmul(A,B,preferred_element_type=jnp.float32)
pmatmul = jax.pmap(matmul)

MAX_TFLOPS = 123*DEVICES # Peak FP16 Tensor TFLOPS with FP32 Acc (7900XTX)
for i in range(10):
  st = time.perf_counter()
  C = pmatmul(A,B).block_until_ready()
  et = time.perf_counter()-st
  tflops = (OPS*1e-12)/et
  print(f"time {et*1e3:.2f} ms, TFLOPS {tflops:6.2f}, MFU {(tflops/MAX_TFLOPS)*100:4.2f}% out shape {C.shape} dtype {C.dtype}")
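For scale, a worked number using the constants above: if DEVICES is 4, OPS = 4*32*2*4096^3 ≈ 1.76e13 FLOPs per pmatmul call, so at the assumed peak of 123*4 = 492 TFLOPS a perfectly efficient call would take roughly 36 ms.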
@@ -1,10 +0,0 @@
import mlx.core as mx
from tinygrad.helpers import Timing
N = 4096
x = mx.random.normal((N,N))
w = mx.random.normal((N,N))

FLOPS = N*N*N*2
for i in range(10):
  with Timing("", lambda x: f" {FLOPS/x:.2f} GFLOPS"):
    mx.eval(x@w)
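For comparison, a rough tinygrad version of the same timing loop (a sketch on the default device, not part of the deleted file; as above, the Timing callback receives nanoseconds, so FLOPS/t is GFLOPS):

from tinygrad import Tensor
from tinygrad.helpers import Timing

N = 4096
x, w = Tensor.rand(N, N).realize(), Tensor.rand(N, N).realize()

FLOPS = N*N*N*2
for _ in range(10):
  with Timing("", lambda t: f" {FLOPS/t:.2f} GFLOPS"):  # t is in nanoseconds
    (x @ w).realize()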
@@ -1,33 +0,0 @@
import time
import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
if gpus:
  try:
    # Currently, memory growth needs to be the same across GPUs
    for gpu in gpus:
      tf.config.experimental.set_memory_growth(gpu, True)
    logical_gpus = tf.config.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
  except RuntimeError as e:
    # Memory growth must be set before GPUs have been initialized
    print(e)

for dtype in [tf.float16, tf.float32]:
  for N in [256, 512, 1024, 2048, 4096, 8192]:
    FLOPS = N*N*N*2

    b = tf.random.uniform((N, N), dtype=dtype)
    c = tf.random.uniform((N, N), dtype=dtype)

    b = tf.Variable(b)
    c = tf.Variable(c)

    def tf_prog(b, c):
      st = time.perf_counter()
      a = tf.matmul(b, c)
      tf.debugging.check_numerics(a, "Nan or Inf in result") # Ensures that the calculation is done.
      return time.perf_counter() - st

    tm = min([tf_prog(b, c) for _ in range(20)])
    print(f"{N*N:10d} {tm*1e6:9.2f} us, would be {FLOPS*1e-9/tm:9.2f} GFLOPS {N:4d}x{N:4d}x{N:4d} matmul in {dtype}")
@@ -1,16 +0,0 @@
from tinygrad import Tensor, Device, TinyJit, dtypes
from tinygrad.helpers import getenv

GPUS = getenv("GPUS", 4) # TODO: expose a way in tinygrad to access this
N = 6144

@TinyJit
def many_matmul(A, B):
  out = A
  for _ in range(8): out = out@B
  return out

if __name__ == "__main__":
  A = Tensor.ones(GPUS, N, N, dtype=dtypes.half).shard(devices=tuple([f"{Device.DEFAULT}:{i}" for i in range(GPUS)]), axis=0).contiguous()
  B = Tensor.ones(GPUS, N, N, dtype=dtypes.half).shard(devices=tuple([f"{Device.DEFAULT}:{i}" for i in range(GPUS)]), axis=0).contiguous()
  while 1: many_matmul(A, B)
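A small sketch of what .shard(axis=0) does here (sizes shrunk and two visible devices assumed; not part of the deleted file): each device ends up holding one slice along axis 0, and ops on the sharded tensors run per device.

from tinygrad import Tensor, Device, dtypes

devs = tuple(f"{Device.DEFAULT}:{i}" for i in range(2))
A = Tensor.ones(2, 64, 64, dtype=dtypes.half).shard(devices=devs, axis=0).contiguous().realize()
print(A.shape, A.device)  # (2, 64, 64), with A.device being the tuple of devices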
@@ -1,12 +0,0 @@
import ctypes
import tinygrad.runtime.autogen.hip as hip
from tinygrad.runtime.ops_hip import check
from tinygrad.helpers import init_c_var

if __name__ == "__main__":
  check(hip.hipSetDevice(0))
  evt = init_c_var(hip.hipEvent_t(), lambda x: check(hip.hipEventCreate(ctypes.byref(x))))
  check(hip.hipSetDevice(1))
  check(hip.hipStreamWaitEvent(None, evt, 0))
  check(hip.hipSetDevice(0))
  check(hip.hipEventRecord(evt, None))
@@ -1,45 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: sentencepiece_model.proto
|
||||
# Protobuf Python Version: 4.25.1
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
from google.protobuf.internal import builder as _builder
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03')
|
||||
|
||||
_globals = globals()
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
_globals['DESCRIPTOR']._options = None
|
||||
_globals['DESCRIPTOR']._serialized_options = b'H\003'
|
||||
_globals['_TRAINERSPEC'].fields_by_name['mining_sentence_size']._options = None
|
||||
_globals['_TRAINERSPEC'].fields_by_name['mining_sentence_size']._serialized_options = b'\030\001'
|
||||
_globals['_TRAINERSPEC'].fields_by_name['training_sentence_size']._options = None
|
||||
_globals['_TRAINERSPEC'].fields_by_name['training_sentence_size']._serialized_options = b'\030\001'
|
||||
_globals['_TRAINERSPEC']._serialized_start=45
|
||||
_globals['_TRAINERSPEC']._serialized_end=1581
|
||||
_globals['_TRAINERSPEC_MODELTYPE']._serialized_start=1517
|
||||
_globals['_TRAINERSPEC_MODELTYPE']._serialized_end=1570
|
||||
_globals['_NORMALIZERSPEC']._serialized_start=1584
|
||||
_globals['_NORMALIZERSPEC']._serialized_end=1793
|
||||
_globals['_SELFTESTDATA']._serialized_start=1795
|
||||
_globals['_SELFTESTDATA']._serialized_end=1916
|
||||
_globals['_SELFTESTDATA_SAMPLE']._serialized_start=1864
|
||||
_globals['_SELFTESTDATA_SAMPLE']._serialized_end=1905
|
||||
_globals['_MODELPROTO']._serialized_start=1919
|
||||
_globals['_MODELPROTO']._serialized_end=2429
|
||||
_globals['_MODELPROTO_SENTENCEPIECE']._serialized_start=2208
|
||||
_globals['_MODELPROTO_SENTENCEPIECE']._serialized_end=2418
|
||||
_globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start=2323
|
||||
_globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end=2407
|
||||
# @@protoc_insertion_point(module_scope)
|
||||
@@ -1,176 +0,0 @@
from __future__ import annotations
from typing import List, Optional, Dict, cast
import numpy as np
np.set_printoptions(suppress=True)
import math, functools, time, random, statistics
from tinygrad.helpers import DEBUG, getenv, CACHELEVEL, diskcache_get, diskcache_put, colored, Profiling
from tinygrad.codegen.opt.kernel import Kernel
from tinygrad.device import Buffer, Device, CompileError
from tinygrad.codegen.opt.search import _ensure_buffer_alloc, get_kernel_actions, _time_program
from tinygrad.engine.realize import get_program

class MCTSNode:
  def __init__(self, kernel:Kernel, parent=None):
    self.kernel:Kernel = kernel
    self.t = math.inf
    self.n = 0
    self.tm = math.inf
    self.i = -1
    self.parents: List[MCTSNode] = [parent] if parent is not None else []
    self.children: Optional[List[MCTSNode]] = None
    self.removed_children: List[MCTSNode] = []

def expand_node(node:MCTSNode):
  assert node.children is None
  node.children = [MCTSNode(x, node) for x in get_kernel_actions(node.kernel, include_0=False).values()]

def remove_node(node:MCTSNode):
  for parent in node.parents:
    assert parent.children is not None
    parent.children.remove(node)
    parent.removed_children.append(node)

C = math.sqrt(2)
TEMP = 0.5
def _sample_tree(node:MCTSNode, best_tm:float) -> MCTSNode:
  if node.children is None or len(node.children) == 0: return node
  unexplored_children = []
  explored_children = []
  ucb_explored_children: List[float] = []
  for child in node.children:
    if child.n == 0: unexplored_children.append(child)
    else:
      ucb = -child.t/best_tm + C*math.sqrt(math.log(node.n)/child.n)
      if not math.isinf(ucb):
        explored_children.append(child)
        ucb_explored_children.append(ucb)
  if len(unexplored_children): return random.choice(unexplored_children)
  if not len(explored_children): return node
  # safe softmax
  ucb_exp = np.exp((np.array(ucb_explored_children)-max(ucb_explored_children))/TEMP)
  return _sample_tree(explored_children[np.random.choice(len(ucb_exp), p=ucb_exp/np.sum(ucb_exp))], best_tm)

# this will expand/remove sometimes
def sample_tree(root:MCTSNode, best_tm:float) -> Optional[MCTSNode]:
  if root.children is None: expand_node(root)
  while root.children:
    # tree traversal
    node = _sample_tree(root, best_tm)

    if node.children is not None and len(node.children) == 0:
      remove_node(node)
      continue

    # node expansion
    if node.n != 0:
      if node.children is None: expand_node(node)
      assert node.children is not None
      if len(node.children) == 0:
        remove_node(node)
        continue
      node = random.choice(node.children)
    return node
  return None

def backprop(bnode:MCTSNode, tm, strength=1.0):
  if bnode.t > tm: bnode.t = tm
  bnode.n += strength
  for parent in bnode.parents: backprop(parent, tm, strength/len(bnode.parents))

graph_mcts_cnt = 0
def mcts_search(lin:Kernel, rawbufs:List[Buffer], amt:int) -> Kernel:
  global graph_mcts_cnt
  # TODO: copied from BEAM
  key = {"ast": lin.ast.key, "amt": amt, "device": lin.opts.device, "suffix": lin.opts.suffix}
  if not getenv("IGNORE_MCTS_CACHE") and CACHELEVEL >= 1 and (val:=diskcache_get("mcts_search", key)) is not None:
    ret = lin.copy()
    for o in val[len(lin.applied_opts):]: ret.apply_opt(o)
    return ret

  rawbufs = _ensure_buffer_alloc(rawbufs)
  var_vals = {k.expr:(k.vmax+k.vmin)//2 for k in lin.ast.variables()}
  dev = Device[lin.opts.device]
  root = MCTSNode(lin)

  st = time.perf_counter()
  best, best_idx, best_tm = lin, 0, math.inf
  seen_libs: Dict[bytes, MCTSNode] = {}
  seen_asts: Dict[bytes, MCTSNode] = {}
  compile_time, runtime_time = 0.0, 0.0
  for i in range(amt):
    node = sample_tree(root, best_tm) # sample and expand
    if node is None: break # finished the whole tree
    node.i = i # when was node explored

    opt_ast = node.kernel.get_optimized_ast()
    if (sibling_node:=seen_asts.get(opt_ast.key, None)) is not None:
      # early check for same optimized AST hit
      remove_node(node)
      tm = sibling_node.t
    else:
      seen_asts[opt_ast.key] = node

      # lowering (50% of the time)
      p = get_program(node.kernel.get_optimized_ast(name_override="test"), node.kernel.opts)

      # rollout
      tm1 = time.perf_counter()
      try:
        lib = dev.compiler.compile(p.src)
      except CompileError:
        # NOTE: many of these "compiler errors" are caused by bad code output from the lowerer
        lib = None
      tm2 = time.perf_counter()
      if lib is None:
        tm = math.inf
      else:
        if (sibling_node:=seen_libs.get(lib, None)) is not None:
          # NOTE: these should all be caught by the AST check, need to canonicalize
          # remove this node, it's a duplicate
          remove_node(node)
          tm = sibling_node.t
        else:
          seen_libs[lib] = node
          try: tm = statistics.median(_time_program(p, lib, var_vals, rawbufs, cnt=3, early_stop=best_tm*5/1e6))*1e6
          except RuntimeError: tm = math.inf
          node.tm = tm
      tm3 = time.perf_counter()
      compile_time += tm2-tm1
      runtime_time += tm3-tm2

    # mock rollout
    #node.tm = tm = random.random() + 0.1

    if tm < best_tm: best, best_idx, best_tm = node.kernel, i, tm
    et = time.perf_counter() - st
    if DEBUG>=2: print(f"\r{et:7.2f}s {colored(f'{compile_time*100/et:3.0f}%', 'cyan')} {colored(f'{runtime_time*100/et:3.0f}%', 'red')}: {tm:12.2f} us best: {best_tm:12.2f} us @ {best_idx+1:4d} {i+1:4d}/{amt:4d} {int(round((i+1)/et)):4d}/s {node.kernel.colored_shape()}\033[K", end="") # noqa: E501

    # backprop
    backprop(node, tm)
  if DEBUG>=2: print()

  if getenv("MCTSGRAPH"):
    import networkx as nx
    import os
    GRAPHPATH = "/tmp/net"
    def save_graph(G, fn, opt=""):
      print("saving", G, f"to {fn}.svg")
      nx.drawing.nx_pydot.write_dot(G, f'{fn}.dot')
      os.system(f'dot {opt} -Tsvg {fn}.dot -o {fn}.svg')

    G = nx.DiGraph()
    def add_node(node:MCTSNode):
      if node.n == 0: return
      for parent in node.parents: G.add_edge(parent, node)
      gopts = node.kernel.applied_opts
      edge_lbl = f"{str(gopts[-1].op)[7:]} {gopts[-1].axis} {gopts[-1].arg}" if len(gopts) else "ROOT"
      G.add_node(node, label=f"{node.i+1}\n{node.tm:.2f} us\n{edge_lbl}\nt {node.t:.2f}\nn {node.n}",
                 fillcolor="#80ff8080" if node.tm == best_tm else "#ffff8080", style='filled' if node.t == best_tm else '')
      if node.children is not None:
        for child in node.children+node.removed_children: add_node(child)
    add_node(root)
    save_graph(G, f"{GRAPHPATH}.{graph_mcts_cnt}.mcts", '-Grankdir=LR')
    graph_mcts_cnt += 1

  if CACHELEVEL >= 1: diskcache_put("mcts_search", key, best.applied_opts)
  return best
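# A standalone sketch (assumption, not from the original file) of the UCB1-style
# score used by _sample_tree above: exploitation is the negated child best time
# normalized by the global best, exploration grows for rarely visited children.
import math

def ucb_score(child_t: float, child_n: int, parent_n: int, best_tm: float, c: float = math.sqrt(2)) -> float:
  # child_t: best time seen under this child, child_n / parent_n: visit counts
  return -child_t/best_tm + c*math.sqrt(math.log(parent_n)/child_n)

# e.g. a child matching the global best time, visited once out of ten parent visits:
print(ucb_score(child_t=10.0, child_n=1, parent_n=10, best_tm=10.0))  # ~= 1.15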
@@ -1,75 +0,0 @@
import pickle, sys
from dataclasses import replace
from tinygrad import Device, Context, Tensor, GlobalCounters
from tinygrad.device import Buffer
from tinygrad.helpers import getenv, BEAM
from tinygrad.engine.jit import TinyJit
from tinygrad.engine.realize import CompiledRunner, ExecItem, ScheduleItem, lower_schedule_item, get_program
from tinygrad.renderer import ProgramSpec
from tinygrad.codegen.opt.kernel import Kernel, Opt, OptOps
from tinygrad.codegen.opt.heuristic import hand_coded_optimizations
import numpy as np

def move_jit_captured_to_dev(captured, device="DSP"):
  captured.expected_st_vars_dtype_device = [x[:3] + (device,) for x in captured.expected_st_vars_dtype_device]

  assign = {}
  def move_buffer(b):
    if b in assign: return assign[b]

    if b._base is not None:
      newbuf = Buffer(device, b.size, b.dtype, base=move_buffer(b._base), offset=b.offset)
    else:
      newbuf = Buffer(device, b.size, b.dtype)
      if b.is_allocated(): newbuf.ensure_allocated().copyin(b.as_buffer())
    assign[b] = newbuf
    return assign[b]

  for item in captured.jit_cache:
    for b in item.bufs:
      if b is not None: move_buffer(b)
  captured.jit_cache = [ExecItem(item.prg, [assign.get(b,b) for b in item.bufs]) for item in captured.jit_cache]
  return captured

if __name__ == "__main__":
  with Context(DEBUG=0):
    with open(sys.argv[1], "rb") as f:
      fxn: TinyJit = pickle.load(f)
      print(f"{f.tell()/1e6:.2f}M loaded")
    print(type(fxn))

  # Move all buffers to DSP device.
  fxn.captured = move_jit_captured_to_dev(fxn.captured, "DSP")
  new_jit = []

  knum = 1
  for ei in fxn.captured.jit_cache:
    # skip the copy and the first kernel
    if isinstance(ei.prg, CompiledRunner) and all(x is not None for x in ei.bufs):
      if knum == (pknum:=getenv("KNUM", 0)) or pknum == 0:
        p: ProgramSpec = ei.prg.p
        k = Kernel(p.ast, Device["DSP"].renderer)

        if getenv("VALIDATE"):
          with Context(NOOPT=1):
            lower_schedule_item(ScheduleItem(p.ast, ei.bufs)).run()
            correct = ei.bufs[0].numpy()
            ei.bufs[0].copyin(memoryview(bytearray(b'\x00'*ei.bufs[0].nbytes)))
            GlobalCounters.kernel_count -= 1

        if not getenv("NOOPT"): k.apply_opts(hand_coded_optimizations(k))
        p2 = get_program(k.ast, k.opts, k.applied_opts)
        new_ei = replace(ei, prg=CompiledRunner(p2))
        new_ei.run()
        new_jit.append(new_ei)
        test = ei.bufs[0].numpy()

        if getenv("VALIDATE"):
          import numpy as np
          np.testing.assert_allclose(correct, test, rtol=1e-3, atol=1e-3)
      knum += 1

  if getenv("RUN_JIT", 0):
    fxn.captured.free_intermediates()
    fxn.captured.jit_cache = new_jit
    fxn(input=Tensor(np.zeros((1, 3, 224, 224), dtype=np.float32), device="DSP"))
@@ -1,114 +0,0 @@
# code from https://x.com/awnihannun/status/1832511021602500796
from huggingface_hub import snapshot_download
import mlx.core as mx
import mlx.nn as nn
import time


class Block(nn.Module):
    def __init__(self, in_dims, dims, stride=1):
        super().__init__()

        self.conv1 = nn.Conv2d(
            in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm(dims)

        self.conv2 = nn.Conv2d(
            dims, dims, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm(dims)

        self.downsample = []
        if stride != 1:
            self.downsample = [
                nn.Conv2d(in_dims, dims, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm(dims)
            ]

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        for l in self.downsample:
            x = l(x)
        out += x
        out = nn.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm(64)

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 64, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 128, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 256, 512, num_blocks[3], stride=2)

        self.fc = nn.Linear(512, num_classes)

    def _make_layer(self, block, in_dims, dims, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(in_dims, dims, stride))
            in_dims = dims
        return layers

    def __call__(self, x):
        x = nn.relu(self.bn1(self.conv1(x)))
        x = self.maxpool(x)
        for l in self.layer1 + self.layer2 + self.layer3 + self.layer4:
            x = l(x)
        x = mx.mean(x, axis=[1, 2])
        x = self.fc(x)
        return x



def load():
    model = ResNet(Block, [2, 2, 2, 2], num_classes=1000)
    file = "model.safetensors"
    model_path = snapshot_download(
        repo_id="awni/resnet18-mlx",
        allow_patterns=[file],
    )
    model.load_weights(model_path + "/" + file)
    model.eval()
    mx.eval(model)
    return model

if __name__ == "__main__":

    resnet18 = load()

    @mx.compile
    def forward(im):
        return resnet18(im)

    batch_sizes = [1, 2, 4, 8, 16, 32, 64]
    #its = 200
    #batch_sizes = [64]
    its = 20
    print(f"Batch Size | Images-per-second | Milliseconds-per-image")
    print(f"---- | ---- | ---- ")
    for N in batch_sizes:
        image = mx.random.uniform(shape=(N, 288, 288, 3))

        # Warmup
        for _ in range(5):
            output = forward(image)
            mx.eval(output)

        tic = time.time()
        for _ in range(its):
            output = forward(image)
            mx.async_eval(output)
        mx.eval(output)
        toc = time.time()
        ims_per_sec = N * its / (toc - tic)
        ms_per_im = 1e3 / ims_per_sec
        print(f"{N} | {ims_per_sec:.3f} | {ms_per_im:.3f}")
@@ -1,109 +0,0 @@
from huggingface_hub import snapshot_download
from tinygrad import nn, Tensor, TinyJit, Device, GlobalCounters, Context
import time

class Block:
  def __init__(self, in_dims, dims, stride=1):
    super().__init__()

    self.conv1 = nn.Conv2d(
      in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
    )
    self.bn1 = nn.BatchNorm(dims)

    self.conv2 = nn.Conv2d(
      dims, dims, kernel_size=3, stride=1, padding=1, bias=False
    )
    self.bn2 = nn.BatchNorm(dims)

    self.downsample = []
    if stride != 1:
      self.downsample = [
        nn.Conv2d(in_dims, dims, kernel_size=1, stride=stride, bias=False),
        nn.BatchNorm(dims)
      ]

  def __call__(self, x):
    out = self.bn1(self.conv1(x)).relu()
    out = self.bn2(self.conv2(out))
    for l in self.downsample:
      x = l(x)
    out += x
    return out.relu()


class ResNet:
  def __init__(self, block, num_blocks, num_classes=10):
    super().__init__()
    self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    self.bn1 = nn.BatchNorm(64)

    self.layer1 = self._make_layer(block, 64, 64, num_blocks[0], stride=1)
    self.layer2 = self._make_layer(block, 64, 128, num_blocks[1], stride=2)
    self.layer3 = self._make_layer(block, 128, 256, num_blocks[2], stride=2)
    self.layer4 = self._make_layer(block, 256, 512, num_blocks[3], stride=2)

    self.fc = nn.Linear(512, num_classes)

  def _make_layer(self, block, in_dims, dims, num_blocks, stride):
    strides = [stride] + [1] * (num_blocks - 1)
    layers = []
    for stride in strides:
      layers.append(block(in_dims, dims, stride))
      in_dims = dims
    return layers

  def __call__(self, x:Tensor):
    x = self.bn1(self.conv1(x)).relu().max_pool2d()
    x = x.sequential(self.layer1)
    with Context(WINO=1): x = x.sequential(self.layer2 + self.layer3 + self.layer4)
    x = x.mean([2, 3])
    x = self.fc(x)
    return x



def load():
  model = ResNet(Block, [2, 2, 2, 2], num_classes=1000)
  file = "model.safetensors"
  model_path = snapshot_download(
    repo_id="awni/resnet18-mlx",
    allow_patterns=[file],
  )
  state = nn.state.safe_load(model_path + "/" + file)
  # mlx is NHWC, tinygrad is NCHW
  nn.state.load_state_dict(model, {k:v if len(v.shape) != 4 else v.to(None).permute(0,3,1,2).contiguous() for k,v in state.items()}, strict=False)
  return model

if __name__ == "__main__":

  resnet18 = load()

  def _forward(im): return resnet18(im)
  forward = TinyJit(_forward, prune=True)

  batch_sizes = [1, 2, 4, 8, 16, 32, 64]
  #its = 200
  #batch_sizes = [64]
  its = 20
  print(f"Batch Size | Images-per-second | Milliseconds-per-image")
  print(f"---- | ---- | ---- ")
  for N in batch_sizes:
    forward.reset() # reset the JIT for a new batch size (could make automatic)
    image = Tensor.uniform(N, 3, 288, 288)

    # Warmup
    for _ in range(5):
      GlobalCounters.reset()
      output = forward(image)
      Device.default.synchronize()

    tic = time.time()
    for _ in range(its):
      GlobalCounters.reset()
      output = forward(image)
      Device.default.synchronize()
    toc = time.time()
    ims_per_sec = N * its / (toc - tic)
    ms_per_im = 1e3 / ims_per_sec
    print(f"{N} | {ims_per_sec:.3f} | {ms_per_im:.3f}")
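# A tiny sketch (assumption, not from the original file) of the NHWC -> NCHW
# weight conversion done in load() above: MLX stores conv weights as
# (out, kH, kW, in), tinygrad's nn.Conv2d expects (out, in, kH, kW).
from tinygrad import Tensor

w_nhwc = Tensor.rand(64, 7, 7, 3)                 # e.g. the first conv as stored by MLX
w_nchw = w_nhwc.permute(0, 3, 1, 2).contiguous()  # reorder channel axis to position 1
print(w_nchw.shape)                               # (64, 3, 7, 7)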
@@ -1,15 +0,0 @@
from tinygrad import Tensor, Device, GlobalCounters
from tinygrad.helpers import Timing

N = 512
GPUS = 5
ds = tuple([f"{Device.DEFAULT}:{i+1}" for i in range(GPUS)])
t = [Tensor.ones(N, N, N, device=d).contiguous().realize() for d in ds]

for _ in range(10):
  GlobalCounters.reset()
  with Timing():
    for ti in t:
      ti.to_(ds[(ds.index(ti.device)+1+len(ds))%len(ds)])
      # ti.to_(ds[(ds.index(ti.device)-1+len(ds))%len(ds)]) # reversed order
      ti.realize()
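# A quick worked example (not in the original file) of the ring rotation index
# used above: each tensor moves to the next device, with the last wrapping to the first.
# Device names here are hypothetical.
ds = ("GPU:1", "GPU:2", "GPU:3", "GPU:4", "GPU:5")
for cur in ds:
  nxt = ds[(ds.index(cur)+1+len(ds)) % len(ds)]
  print(cur, "->", nxt)  # GPU:5 wraps back to GPU:1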
@@ -1,47 +0,0 @@
import os, pathlib, argparse
from examples.llama3 import Tokenizer
from tabulate import tabulate
from tinygrad import fetch
from tinygrad.helpers import flatten, getenv
from sz import NONCORE_DIRS

# llama 3 tokenizer
tokenizer = Tokenizer(fetch("https://huggingface.co/bofenghuang/Meta-Llama-3-8B/resolve/main/original/tokenizer.model").as_posix())

def read_code(base_path, full=False):
  ret = []
  for path, _, files in os.walk(os.path.join(base_path, "tinygrad")):
    if not full and any(path.split("./")[1].startswith(x) for x in NONCORE_DIRS): continue
    for name in files:
      if not name.endswith(".py"): continue
      if 'tinygrad/runtime/autogen' in path.replace('\\', '/'): continue
      fullpath = os.path.join(path, name)
      code = pathlib.Path(fullpath).read_text()
      ret.append((fullpath.split("tinygrad/", 1)[1], code))
  return ret

if __name__ == "__main__":
  parser = argparse.ArgumentParser(description="Analyze and optionally save tinygrad code.")
  parser.add_argument("--output", help="Output file to write the combined code to.")
  parser.add_argument("--full", action="store_true", help="All directories")
  args = parser.parse_args()

  ret = read_code(".", args.full)

  table = []
  for name,code in ret:
    table.append([name, len(tokenizer.encode(code))])
  print(tabulate([["name", "llm tokens"]]+sorted(table, key=lambda x: -x[1]), headers="firstrow"))

  banner = "#"*40
  code_str = ''.join([f"{banner}\n# {name}\n{banner}\n\n{code}\n" for name,code in ret])
  print(f"code has {len(code_str)} chars")
  newline_count = code_str.count('\n')
  print(f"code has {newline_count} newlines")

  encoded = tokenizer.encode(code_str)
  print(f"code has {len(encoded)} tokens")

  if args.output:
    with open(args.output, 'w') as f: f.write(code_str)
    print(f"Combined code written to {args.output}")
@@ -1,16 +0,0 @@
if __name__ == "__main__":
  import os
  if "DEBUG" not in os.environ: os.environ["DEBUG"] = "2"

  from tinygrad import Tensor, GlobalCounters
  from tinygrad.helpers import getenv

  if (seed := getenv("SEED", 0)) != 0:
    Tensor.manual_seed(seed)
    print(f"using seed {Tensor._seed}")

  for N in [10_000_000, 100_000_000, 1_000_000_000]:
    GlobalCounters.reset()
    t = Tensor.rand(N)
    t.realize()
    print(f"N {N:>20_}, global_ops {GlobalCounters.global_ops:>20_}, global_mem {GlobalCounters.global_mem:>20_}")
@@ -1,154 +0,0 @@
import itertools
from enum import Enum, auto
from collections import defaultdict
from typing import List, Tuple, DefaultDict
from tinygrad.helpers import prod, tqdm
from tinygrad.uop.ops import UOp, Ops
from tinygrad.shape.shapetracker import ShapeTracker
from tinygrad.uop.ops import sym_infer
from tinygrad.tensor import Tensor

class MovementOps(Enum): RESHAPE = auto(); PERMUTE = auto(); EXPAND = auto(); PAD = auto(); SHRINK = auto(); STRIDE = auto(); AS_STRIDED = auto() # noqa: E702

def apply_mop(st: Tensor|ShapeTracker, mop_arg: Tuple[MovementOps, Tuple]) -> ShapeTracker:
  mop, arg = mop_arg
  if mop == MovementOps.RESHAPE:
    # shapetracker doesn't allow flattening with -1 but required for MovementOps.RESHAPE
    if arg == (-1,): return st.reshape((prod(st.shape),))
    return st.reshape(arg)
  if mop == MovementOps.PERMUTE: return st.permute(arg)
  if mop == MovementOps.EXPAND:
    if len(arg) != len(st.shape): st = st.reshape((1,*st.shape))
    return st.expand(arg)
  if mop == MovementOps.PAD: return st.pad(arg)
  if mop == MovementOps.SHRINK: return st.shrink(arg)
  if mop == MovementOps.STRIDE:
    assert all(x in [-1, 1] for x in arg)
    return st.flip(tuple(i for i,x in enumerate(arg) if x == -1))
  raise ValueError("invalid mop")

def make_scratch_st(st: ShapeTracker) -> ShapeTracker:
  return ShapeTracker.from_shape((get_buffer_size(st.views[0].shape, st.views[0].strides, st.views[0].offset, st.views[0].mask),))

# ShapeTracker to an equivalent series of MovementOps (https://github.com/tinygrad/tinygrad/pull/2216)
def to_movement_ops(st: ShapeTracker) -> List[Tuple[MovementOps, Tuple]]:
  to_apply:List[Tuple[MovementOps, Tuple]] = []
  for i, v in enumerate(st.views):
    real_shape = tuple(y-x for x,y in v.mask) if v.mask else v.shape
    offset = (v.offset or 0) + sum(st*(s-1) for s,st in zip(real_shape, v.strides) if st<0)
    real_offset = offset + (sum(x*st for (x,_),st in zip(v.mask, v.strides)) if v.mask else 0)
    real_real_shape = [s for s,st in zip(real_shape, v.strides) if st]
    strides: List[int] = [abs(st) if isinstance(st,int) else st for st in v.strides if st]
    buffer_size = sum((s-1)*st for s,st in zip(real_real_shape,strides)) + 1
    if i: buffer_size = prod(st.views[i-1].shape) - real_offset if real_shape else 1
    def sort_by_strides(shape, strides): return sorted(zip(shape, strides), key=lambda k: (k[1],-k[0]), reverse=True), sorted(range(len(strides)), key=lambda k: (strides[k],-real_real_shape[k]), reverse=True)
    ordered_shape_strides, order = sort_by_strides(real_real_shape, strides)
    to_apply.extend([(MovementOps.RESHAPE, (-1,)), (MovementOps.SHRINK, ((real_offset, real_offset+buffer_size),))])
    if strides:
      if (ordered_shape_strides[0][0]*ordered_shape_strides[0][1])-buffer_size>0: to_apply.append((MovementOps.PAD, ((0, (ordered_shape_strides[0][0] * ordered_shape_strides[0][1]) - buffer_size),)))
      for i, shape_stride in enumerate(ordered_shape_strides):
        if i<len(ordered_shape_strides)-1 and shape_stride[1] < ordered_shape_strides[i+1][0]*ordered_shape_strides[i+1][1]:
          remaining_buffer = ordered_shape_strides[i-1][1] if i>0 else buffer_size
          to_apply.append((MovementOps.EXPAND, (shape_stride[0], *(s[0] for s in ordered_shape_strides[:i]), remaining_buffer)))
          to_apply.append((MovementOps.PERMUTE, (*range(1,i+1), 0, i+1)))
          to_apply.append((MovementOps.RESHAPE, (*(s[0] for s in ordered_shape_strides[:i]), shape_stride[0]*remaining_buffer)))
          to_apply.append((MovementOps.PAD, (*((0,0) for _ in range(i)), (0, shape_stride[0]*shape_stride[1]))))
          to_apply.append((MovementOps.RESHAPE, (*(s[0] for s in ordered_shape_strides[:i+1]), remaining_buffer+shape_stride[1])))
          ordered_shape_strides[i] = (ordered_shape_strides[i][0], remaining_buffer+shape_stride[1])
        else:
          to_apply.append((MovementOps.SHRINK, (*((0, s[0]) for s in ordered_shape_strides[:i]), (0, shape_stride[0]*shape_stride[1]))))
          to_apply.append((MovementOps.RESHAPE, (*[s[0] for s in ordered_shape_strides[:i+1]], shape_stride[1])))
      to_apply.extend([(MovementOps.SHRINK, (*[(0, s[0]) for s in ordered_shape_strides], (0,1))), (MovementOps.RESHAPE, tuple(s[0] for s in ordered_shape_strides))])
      if order != list(range(len(order))): to_apply.append((MovementOps.PERMUTE, tuple(order.index(i) for i in range(len(strides)))))
    to_apply.append((MovementOps.RESHAPE, tuple(s if st else 1 for s,st in zip(real_shape, v.strides))))
    if any(i<0 for i in v.strides): to_apply.append((MovementOps.STRIDE, tuple(-1 if st<0 else 1 for st in v.strides)))
    # then, we apply pre expand pads
    if v.mask is not None:
      pre_expand_pads = tuple((x,s-y) if st != 0 else (0,0) for (x,y),s,st in zip(v.mask, v.shape, v.strides))
      post_expand_pads = tuple((x,s-y) if st == 0 else (0,0) for (x,y),s,st in zip(v.mask, v.shape, v.strides))
      if any(x != (0,0) for x in pre_expand_pads):
        to_apply.append((MovementOps.PAD, pre_expand_pads))
        real_shape = tuple(x+s[0]+s[1] for x,s in zip(real_shape, pre_expand_pads))
    # then, we do any expands
    if any(s != 1 and st == 0 for s,st in zip(real_shape, v.strides)): to_apply.append((MovementOps.EXPAND, real_shape))
    # lastly, we apply post expand pads
    if v.mask is not None and any(x != (0,0) for x in post_expand_pads): to_apply.append((MovementOps.PAD, post_expand_pads))

  scratch_st = make_scratch_st(st)
  ret = []
  seen = {} # {shapetracker: list of mops to generate that shapetracker}
  for mop_arg in to_apply:
    scratch_st = apply_mop(scratch_st, mop_arg)
    if scratch_st in seen:
      ret = seen[scratch_st][:]
    else:
      if len(ret) and ret[-1][0] == MovementOps.RESHAPE and mop_arg[0] == MovementOps.RESHAPE:
        ret[-1] = mop_arg
      else:
        if mop_arg == (MovementOps.RESHAPE, -1): mop_arg = (MovementOps.RESHAPE, (prod(st.shape),))
        ret.append(mop_arg)
      seen[scratch_st] = ret[:]
  return ret

def get_real_view(shape, strides, offset, mask):
  real_shape = tuple(y-x for x,y in mask) if mask else shape
  offset = offset + sum(st * (s-1) for s,st in zip(real_shape, strides) if st<0)
  real_offset = offset + (sum(x*st for (x,_),st in zip(mask, strides)) if mask else 0)
  real_real_shape = [s for s,st in zip(real_shape, strides) if st]
  strides = [abs(st) if isinstance(st,int) else st for st in strides if st]
  return real_real_shape, strides, real_offset

def get_buffer_size(shape, strides, offset, mask):
  real_real_shape, strides, real_offset = get_real_view(shape, strides, offset, mask)
  return real_offset + sum((s-1)*st for s, st in zip(real_real_shape,strides)) + 1

def st_equivalent(st1: ShapeTracker, st2: ShapeTracker):
  if (idxs1:=st1.expr_idxs()) == (idxs2:=st2.expr_idxs()): return True
  idx1, valid1 = idxs1
  idx2, valid2 = idxs2
  # always invalid
  if valid1 == 0 and valid2 == 0: return True

  var1 = idx1.vars() | valid1.vars()
  var2 = idx2.vars() | valid2.vars()
  # Maybe there are cases that vars are different yet the sts are the same?
  if var1 != var2: return False

  # brute force over the vars range
  vs = list(var1)
  for i, ranges in enumerate(itertools.product(*[range(v.min, v.max+1) for v in vs])):
    if i > 1000:
      print("WARNING: did not search all possible combinations")
      break
    var_vals = {k.expr:v for k,v in zip(vs, ranges)}
    r1 = sym_infer(idx1, var_vals) if sym_infer(valid1, var_vals) else 0
    r2 = sym_infer(idx2, var_vals) if sym_infer(valid2, var_vals) else 0
    if r1 != r2: return False

  return True

c: DefaultDict[int,int] = defaultdict(int)
def test_rebuild(st: ShapeTracker):
  rebuilt_st = make_scratch_st(st)
  mops = to_movement_ops(st)
  c[len(mops)] += 1
  for mop_arg in mops: rebuilt_st = apply_mop(rebuilt_st, mop_arg)
  rebuilt_st = rebuilt_st.simplify()
  # why is the "all(x == 0 for x in rebuilt_st.views[-1].strides)" hack needed?
  assert st_equivalent(st, rebuilt_st) or all(x == 0 for x in rebuilt_st.views[-1].strides), f"mismatch {st} {rebuilt_st}"
  last_v1 = st.views[-1]
  last_v2 = rebuilt_st.views[-1]
  assert last_v1.shape == last_v2.shape, f"{last_v1.shape} != {last_v2.shape}"

def test_rebuild_bufferop_st(ast:UOp):
  if ast.op is Ops.SHAPETRACKER:
    test_rebuild(ast.arg)
  for src in ast.src: test_rebuild_bufferop_st(src)

if __name__ == "__main__":
  from extra.optimization.helpers import load_worlds, ast_str_to_ast
  ast_strs = load_worlds(False, False, True)[:2000]
  for ast_str in tqdm(ast_strs):
    test_rebuild_bufferop_st(ast_str_to_ast(ast_str))

  print(f"avg length of mop = {sum(k*v for k,v in c.items()) / sum(c.values()):.2f}")
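# A small usage sketch (assumption, not part of the original file) using the helpers
# above: round-trip a simple permuted and shrunk view through to_movement_ops and
# check that the rebuild is equivalent.
from tinygrad.shape.shapetracker import ShapeTracker

st = ShapeTracker.from_shape((4, 6)).permute((1, 0)).shrink(((1, 5), (0, 4)))
mops = to_movement_ops(st)
print(mops)       # the MovementOps sequence that reproduces this view
test_rebuild(st)  # asserts the rebuilt ShapeTracker matches the original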
@@ -1,18 +0,0 @@
from tinygrad import Tensor, Device

#N = 1024
N = 32
t = Tensor.rand(N, N, N, device="CPU").realize()
d1 = Device.DEFAULT + ":1"
d2 = Device.DEFAULT + ":2"
d3 = Device.DEFAULT + ":3"

for i in range(3):
  t.to_(d1)
  t.realize()
  # t.to_("CPU")
  # t.realize()
  t.to_(d2)
  t.realize()
  t.to_(d3)
  t.realize()