Mirror of https://github.com/tinygrad/tinygrad.git, synced 2026-01-09 23:18:04 -05:00
use Buffer.ensure_allocated in search _ensure_buffer_alloc (#4132)
@@ -20,9 +20,7 @@ class Buffer:
     if initial_value is not None:
       self.allocate()
       self.copyin(memoryview(initial_value))
-  def ensure_allocated(self) -> Buffer:
-    if not hasattr(self, '_buf'): self.allocate()
-    return self
+  def ensure_allocated(self) -> Buffer: return self.allocate() if not hasattr(self, '_buf') else self
   def allocate(self, opaque=None) -> Buffer:
     assert not hasattr(self, '_buf'), "can't allocate already allocated buffer"
     from tinygrad.device import Device
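The replacement one-liner keeps the removed method's behavior: allocate only when `_buf` is missing, and always return the buffer so the call can be chained. A minimal standalone sketch of that pattern, using a hypothetical MiniBuffer stand-in rather than tinygrad's real Buffer:

# Hypothetical stand-in for tinygrad's Buffer; only the allocation pattern is modeled.
from __future__ import annotations

class MiniBuffer:
  def __init__(self, size:int): self.size = size
  def allocate(self) -> MiniBuffer:
    assert not hasattr(self, '_buf'), "can't allocate already allocated buffer"
    self._buf = bytearray(self.size)  # tinygrad's Buffer would ask its device allocator here
    return self
  # idempotent: allocate on first call, no-op afterwards; always returns self
  def ensure_allocated(self) -> MiniBuffer: return self.allocate() if not hasattr(self, '_buf') else self

b = MiniBuffer(16)
assert b.ensure_allocated() is b and hasattr(b, '_buf')
assert b.ensure_allocated() is b  # second call does not re-allocate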
@@ -240,7 +240,7 @@ class Compiled:
         k = beam_search(kb, rawbufs, BEAM.value, bool(getenv("BEAM_ESTIMATE", 1)))
         if getenv("BEAM_COMPARE", 1):
           # TODO: move the HC/TC/BEAM compare to beam_search so it can be optionally cached which choice is better
-          lins = [(f"beam{BEAM.value}", k), (("tc" if used_tensor_cores else "hc"), k_opt)]
+          lins: List[Tuple[str, Linearizer]] = [(f"beam{BEAM.value}", k), (("tc" if used_tensor_cores else "hc"), k_opt)]
           if used_tensor_cores:
             lins.append(("hc", Linearizer(*ast, opts=self.compiler.compiler_opts)))
             lins[-1][1].hand_coded_optimizations()
@@ -64,7 +64,7 @@ def _try_compile_linearized_w_idx(x:Tuple[int,Linearizer], compiler:Compiler):
 # workers should ignore ctrl c
 def _init_worker(): signal.signal(signal.SIGINT, signal.SIG_IGN)
 
-def _ensure_buffer_alloc(bufs:List[Buffer]) -> List[Buffer]: return [buf if hasattr(buf, "_buf") else buf.allocate() for buf in bufs]
+def _ensure_buffer_alloc(bufs:List[Buffer]) -> List[Buffer]: return [buf.ensure_allocated() for buf in bufs]
 
 # *** external API ***
 
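Since ensure_allocated returns the buffer, the rewritten helper is a drop-in replacement for the old comprehension, which relied on allocate() returning the buffer. A small self-contained check of that equivalence, again using a hypothetical FakeBuf class that mirrors only the _buf / allocate / ensure_allocated protocol, not tinygrad's real Buffer:

# Hypothetical stand-in; both helper variants should return the same buffer objects.
from __future__ import annotations
from typing import List

class FakeBuf:
  def allocate(self) -> FakeBuf: self._buf = object(); return self
  def ensure_allocated(self) -> FakeBuf: return self.allocate() if not hasattr(self, '_buf') else self

def _ensure_buffer_alloc_old(bufs:List[FakeBuf]) -> List[FakeBuf]: return [buf if hasattr(buf, "_buf") else buf.allocate() for buf in bufs]
def _ensure_buffer_alloc_new(bufs:List[FakeBuf]) -> List[FakeBuf]: return [buf.ensure_allocated() for buf in bufs]

bufs = [FakeBuf(), FakeBuf().allocate()]
assert _ensure_buffer_alloc_new(bufs) == _ensure_buffer_alloc_old(bufs) == bufs  # same objects either way
assert all(hasattr(b, '_buf') for b in bufs)  # everything ends up allocated exactly once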