s/tinytqdm/tqdm (#5103)

except in the unit test, where the real tqdm is imported
chenyu
2024-06-22 14:18:26 -04:00
committed by GitHub
parent 9f875123b6
commit 8080298739
8 changed files with 15 additions and 15 deletions

View File

@@ -6,7 +6,7 @@ from tiktoken.load import load_tiktoken_bpe
from extra.models.llama import Transformer, convert_from_huggingface, fix_bf16
from tinygrad.nn.state import safe_load, torch_load, load_state_dict, get_parameters
from tinygrad import Tensor, dtypes, nn, Context, Device, GlobalCounters
-from tinygrad.helpers import Profiling, Timing, DEBUG, colored, fetch, tinytqdm
+from tinygrad.helpers import Profiling, Timing, DEBUG, colored, fetch, tqdm
class Tokenizer:
pat_str = r"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"
@@ -196,7 +196,7 @@ def prefill(model, toks, start_pos=0):
toks = toks[i:]
# prefill the model
-for tok in tinytqdm(toks):
+for tok in tqdm(toks):
GlobalCounters.reset()
model(Tensor([[tok]], device=device), start_pos, TEMPERATURE, TOP_K, TOP_P, ALPHA_F, ALPHA_P).realize()
start_pos += 1
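
In its simplest form the renamed helper wraps any sized iterable, as in the prefill loop above; the bar infers its total from len(). A minimal, hedged sketch (the token list is made up):

from tinygrad.helpers import tqdm

toks = [128000, 9906, 1917, 0]   # hypothetical token ids
for tok in tqdm(toks):           # total inferred from len(toks)
  pass                           # per-token model step would go here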

View File

@@ -15,7 +15,7 @@ from extra.onnx import get_run_onnx
from tinygrad import Tensor, Device, GlobalCounters, dtypes
from tinygrad.dtype import ImageDType
from tinygrad.device import Buffer
-from tinygrad.helpers import partition, Context, fetch, getenv, DEBUG, tinytqdm
+from tinygrad.helpers import partition, Context, fetch, getenv, DEBUG, tqdm
from tinygrad.engine.realize import run_schedule, lower_schedule, ExecItem, CompiledRunner
from tinygrad.engine.schedule import ScheduleItem, create_schedule, memory_planner
from tinygrad.ops import LoadOps
@@ -117,7 +117,7 @@ if __name__ == "__main__":
assert not b.is_allocated(), "output should not be allocated"
image_count = sum(isinstance(out.dtype, ImageDType) for si in schedule for out in si.outputs)
print(f"**** compiling real kernels {image_count}/{len(schedule)} images ****")
-eis = list(tinytqdm(lower_schedule(schedule), total=len(schedule)))
+eis = list(tqdm(lower_schedule(schedule), total=len(schedule)))
print("kernel count:", len(eis))
assert len(eis) <= getenv("ALLOWED_KERNEL_COUNT", 0) or getenv("ALLOWED_KERNEL_COUNT", 0) == 0, "too many kernels!"
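
lower_schedule returns a generator, so the bar cannot call len() on it; the call above therefore passes total=len(schedule) explicitly. A minimal sketch of the same pattern, with a made-up generator standing in for the schedule:

from tinygrad.helpers import tqdm

items = (i * 2 for i in range(1000))    # a generator: len(items) would raise
eis = list(tqdm(items, total=1000))     # explicit total keeps the bar accurate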

View File

@@ -2,7 +2,7 @@ import random
from extra.optimization.helpers import load_worlds, ast_str_to_lin
from tinygrad.engine.search import actions
from tinygrad.codegen.linearizer import Linearizer
-from tinygrad.helpers import tinytqdm
+from tinygrad.helpers import tqdm
tactions = set()
def test_rebuild(lin):
@@ -20,7 +20,7 @@ if __name__ == "__main__":
ast_strs = load_worlds(False, False, False)
random.shuffle(ast_strs)
ast_strs = ast_strs[:2000]
-for ast_str in tinytqdm(ast_strs):
+for ast_str in tqdm(ast_strs):
lin = ast_str_to_lin(ast_str)
#if not lin.apply_tensor_cores():
lin.hand_coded_optimizations()

View File

@@ -4,7 +4,7 @@ from collections import defaultdict
from typing import List, Tuple, DefaultDict
from extra.optimization.helpers import load_worlds, ast_str_to_ast
from tinygrad.ops import BufferOps, LazyOp
-from tinygrad.helpers import prod, tinytqdm
+from tinygrad.helpers import prod, tqdm
from tinygrad.shape.shapetracker import ShapeTracker
from tinygrad.shape.symbolic import sym_infer, Node
@@ -142,7 +142,7 @@ def test_rebuild_bufferop_st(ast:LazyOp):
if __name__ == "__main__":
ast_strs = load_worlds(False, False, True)[:2000]
-for ast_str in tinytqdm(ast_strs):
+for ast_str in tqdm(ast_strs):
for ast in ast_str_to_ast(ast_str):
test_rebuild_bufferop_st(ast)

View File

@@ -2,13 +2,13 @@
# compare kernels created by HEAD against master
import difflib, pickle
from tinygrad.codegen.linearizer import Linearizer
-from tinygrad.helpers import colored, db_connection, VERSION, getenv, to_function_name, tinytqdm
+from tinygrad.helpers import colored, db_connection, VERSION, getenv, to_function_name, tqdm
page_size = 100
conn = db_connection()
cur = conn.cursor()
row_count = cur.execute(f"select count(*) from 'process_replay_{VERSION}'").fetchone()[0]
-for offset in tinytqdm(range(0, row_count, page_size)):
+for offset in tqdm(range(0, row_count, page_size)):
cur.execute(f"SELECT val FROM 'process_replay_{VERSION}' LIMIT ? OFFSET ?", (page_size, offset))
for row in cur.fetchall():
compare_k, compare_src = pickle.loads(row[0])
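
The loop above ticks once per page rather than once per row, which keeps bar overhead negligible for large replay tables. A hedged sketch of the paging pattern against an in-memory sqlite3 table (table name and contents are invented):

import sqlite3
from tinygrad.helpers import tqdm

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE kernels (val TEXT)")
conn.executemany("INSERT INTO kernels VALUES (?)", [(f"k{i}",) for i in range(250)])
page_size = 100
row_count = conn.execute("SELECT count(*) FROM kernels").fetchone()[0]
for offset in tqdm(range(0, row_count, page_size)):   # one tick per page
  rows = conn.execute("SELECT val FROM kernels LIMIT ? OFFSET ?", (page_size, offset)).fetchall()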

View File

@@ -2,7 +2,7 @@ import time, random, unittest
from tqdm import tqdm
from unittest.mock import patch
from io import StringIO
-from tinygrad.helpers import tinytqdm
+from tinygrad.helpers import tqdm as tinytqdm
from collections import namedtuple
class TestProgressBar(unittest.TestCase):
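
This file is the exception called out in the commit message: it imports the real tqdm for comparison, so tinygrad's clone keeps its old name locally via an alias. A hedged sketch of that coexistence:

from tqdm import tqdm                          # reference implementation
from tinygrad.helpers import tqdm as tinytqdm  # tinygrad's clone under test

# both behave as transparent iterable wrappers
for bar in (tqdm, tinytqdm):
  assert list(bar(range(5))) == [0, 1, 2, 3, 4]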

View File

@@ -211,7 +211,7 @@ def fetch(url:str, name:Optional[Union[pathlib.Path, str]]=None, subdir:Optional
with urllib.request.urlopen(url, timeout=10) as r:
assert r.status == 200
total_length = int(r.headers.get('content-length', 0))
-progress_bar = tinytqdm(total=total_length, unit='B', unit_scale=True, desc=f"{url}: ")
+progress_bar = tqdm(total=total_length, unit='B', unit_scale=True, desc=f"{url}: ")
(path := fp.parent).mkdir(parents=True, exist_ok=True)
with tempfile.NamedTemporaryFile(dir=path, delete=False) as f:
while chunk := r.read(16384): progress_bar.update(f.write(chunk))
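
fetch drives the bar manually: it is constructed with a byte total, then update(n) is fed the number of bytes written per chunk. A minimal sketch of that manual mode with a fake in-memory download:

from tinygrad.helpers import tqdm

data, chunk_size = b"x" * 100_000, 16384
progress_bar = tqdm(total=len(data), unit='B', unit_scale=True, desc="fake download: ")
for i in range(0, len(data), chunk_size):
  progress_bar.update(len(data[i:i+chunk_size]))   # advance by bytes consumed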
@@ -249,7 +249,7 @@ def init_c_struct_t(fields: Tuple[Tuple[str, ctypes._SimpleCData], ...]):
def init_c_var(ctypes_var, creat_cb): return (creat_cb(ctypes_var), ctypes_var)[1]
def flat_mv(mv:memoryview): return mv if len(mv) == 0 else mv.cast("B", shape=(mv.nbytes,))
-class tinytqdm:
+class tqdm:
def __init__(self, iterable=None, desc:str='', disable:bool=False, unit:str='it', unit_scale=False, total:int=-1, rate:int=100):
self.iter, self.desc, self.dis, self.unit, self.unit_scale, self.rate = iterable, desc, disable, unit, unit_scale, rate
self.st, self.i, self.n, self.skip, self.t = time.perf_counter(), -1, 0, 1, len(iterable) if total==-1 else total
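
The constructor above lists every knob the call sites in this commit use. A minimal sketch exercising the common ones (values are illustrative):

from tinygrad.helpers import tqdm

# desc= prefixes the line, unit= labels throughput, disable= silences the bar
for _ in tqdm(range(500), desc="demo: ", unit='it', disable=False):
  pass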

View File

@@ -2,7 +2,7 @@ import os, json, pathlib, zipfile, pickle, tarfile, struct
from typing import Dict, Union, List, Optional, Any, Tuple
from tinygrad.tensor import Tensor
from tinygrad.dtype import dtypes
-from tinygrad.helpers import prod, argsort, DEBUG, Timing, CI, unwrap, GlobalCounters, tinytqdm
+from tinygrad.helpers import prod, argsort, DEBUG, Timing, CI, unwrap, GlobalCounters, tqdm
from tinygrad.shape.view import strides_for_shape
from tinygrad.multi import MultiLazyBuffer
@@ -118,7 +118,7 @@ def load_state_dict(model, state_dict:Dict[str, Tensor], strict=True, verbose=Tr
model_state_dict = get_state_dict(model)
if DEBUG >= 1 and len(state_dict) > len(model_state_dict):
print("WARNING: unused weights in state_dict", sorted(list(state_dict.keys() - model_state_dict.keys())))
-for k,v in (t := tinytqdm(model_state_dict.items(), disable=CI or not verbose)):
+for k,v in (t := tqdm(model_state_dict.items(), disable=CI or not verbose)):
t.desc = f"ram used: {GlobalCounters.mem_used/1e9:5.2f} GB, {k:50s}: "
if k not in state_dict and not strict:
if DEBUG >= 1: print(f"WARNING: not loading {k}")
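
The walrus assignment above keeps a handle on the bar so each iteration can rewrite its description with the current key and memory usage. A hedged sketch of the same pattern with a made-up weights dict:

from tinygrad.helpers import tqdm

weights = {f"layer{i}.weight": i for i in range(10)}   # hypothetical state dict
for k, v in (t := tqdm(weights.items())):
  t.desc = f"loading {k}: "   # shown on the bar's next refresh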