mirror of https://github.com/tinygrad/tinygrad.git
llm is the only app (#15779)
* tinygrad/llm is the only app
* upd pyproject
* claude refs
* scoping
* min diff
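The change is mechanical: everything that lived in tinygrad.apps.llm is now imported from tinygrad.llm.cli. For downstream code the migration is a one-line import swap, sketched here with names taken from the hunks below:

# before this commit
from tinygrad.apps.llm import SimpleTokenizer, Transformer, TransformerConfig
# after this commit
from tinygrad.llm.cli import SimpleTokenizer, Transformer, TransformerConfig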
test/external/external_llm_eval.py (vendored, 2 lines changed)
@@ -1,4 +1,4 @@
-# eval for tinygrad.apps.llm -- hits the server via OpenAI API
+# eval for OpenAI API server
 # uses Meta's exact ARC-Challenge prompt template from lm-evaluation-harness llama3 tasks
 import argparse, re, pyarrow.parquet as pq
 from openai import OpenAI
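This eval drives the server through the stock openai client. A minimal sketch of such a call; the base_url, port, and model name are placeholders, not values from this diff (the real script presumably reads them from its argparse flags):

from openai import OpenAI

# hypothetical endpoint and model name, for illustration only
client = OpenAI(base_url="http://127.0.0.1:8000/v1", api_key="unused")
resp = client.chat.completions.create(model="llm", messages=[{"role": "user", "content": "ping"}])
print(resp.choices[0].message.content)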
@@ -1,7 +1,7 @@
 import functools, multiprocessing
 from transformers import AutoTokenizer
 from datasets import load_dataset
-from tinygrad.apps.llm import SimpleTokenizer
+from tinygrad.llm.cli import SimpleTokenizer
 from tinygrad.helpers import tqdm, getenv, partition
 
 @functools.cache
@@ -1,6 +1,6 @@
 import unittest
 from tinygrad import Tensor, dtypes, TinyJit, UOp
-from tinygrad.apps.llm import apply_rope as apply_rope_new, precompute_freqs_cis
+from tinygrad.llm.cli import apply_rope as apply_rope_new, precompute_freqs_cis
 from test.helpers import assert_jit_cache_len
 
 def apply_rope(x:Tensor, start_pos:int):
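This test compares the imported apply_rope_new against a locally defined reference apply_rope. For intuition, a minimal NumPy sketch of the rotation RoPE applies; the interleaved pair layout here is an assumption, not necessarily how tinygrad.llm.cli arranges the dimensions:

import numpy as np

def rope_reference(x: np.ndarray, start_pos: int, theta: float = 10000.0) -> np.ndarray:
  # x: (seq_len, head_dim) with head_dim even; rotate consecutive pairs by position-dependent angles
  seq_len, head_dim = x.shape
  freqs = theta ** (-np.arange(0, head_dim, 2) / head_dim)             # (head_dim//2,)
  angles = np.outer(np.arange(start_pos, start_pos + seq_len), freqs)  # (seq_len, head_dim//2)
  cos, sin = np.cos(angles), np.sin(angles)
  x0, x1 = x[:, 0::2], x[:, 1::2]
  out = np.empty_like(x)
  out[:, 0::2] = x0 * cos - x1 * sin  # real part of (x0 + i*x1) * e^(i*angle)
  out[:, 1::2] = x0 * sin + x1 * cos  # imaginary part
  return out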
@@ -22,18 +22,15 @@ class TestLLMServer(unittest.TestCase):
     cls.bos_id = 1
     cls.eos_id = 999
 
-    import tinygrad.apps.llm as llm_module
-    llm_module.model = cls.mock_model
-    llm_module.model_name = "test-model"
-    llm_module.tok = cls.mock_tok
-    llm_module.bos_id = cls.bos_id
-    llm_module.eos_id = cls.eos_id
-    llm_module.eot_id = None
-
-    from tinygrad.apps.llm import Handler
-    from tinygrad.viz.serve import TCPServerWithReuse
-
-    cls.server = TCPServerWithReuse(('127.0.0.1', 0), Handler)
+    from tinygrad.llm.cli import Handler, LLMServer
+
+    cls.server = LLMServer(('127.0.0.1', 0), Handler)
+    cls.server.model = cls.mock_model
+    cls.server.model_name = "test-model"
+    cls.server.tok = cls.mock_tok
+    cls.server.bos_id = cls.bos_id
+    cls.server.eos_id = cls.eos_id
+    cls.server.eot_id = None
     cls.port = cls.server.server_address[1]
     cls.server_thread = threading.Thread(target=cls.server.serve_forever, daemon=True)
     cls.server_thread.start()
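The refactor replaces module-level globals with state held on the server instance. Standalone, the new wiring the test exercises looks roughly like this; the constructor and attribute names come straight from the hunk above, while the stub values stand in for the test's mocks:

import threading
from tinygrad.llm.cli import Handler, LLMServer

mock_model, mock_tok = None, None              # stand-ins; the test installs mock objects here
server = LLMServer(('127.0.0.1', 0), Handler)  # port 0 lets the OS pick a free port
server.model, server.model_name, server.tok = mock_model, "test-model", mock_tok
server.bos_id, server.eos_id, server.eot_id = 1, 999, None
port = server.server_address[1]                # recover the assigned port
threading.Thread(target=server.serve_forever, daemon=True).start()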
@@ -1,5 +1,5 @@
 import unittest, base64, functools, sys
-from tinygrad.apps.llm import SimpleTokenizer
+from tinygrad.llm.cli import SimpleTokenizer
 from tinygrad.helpers import fetch
 
 @unittest.skipIf(sys.platform == 'win32', "fetch race condition on Windows")
@@ -1,7 +1,7 @@
 import unittest
 import numpy as np
 from tinygrad import Tensor, dtypes
-from tinygrad.apps.llm import (
+from tinygrad.llm.cli import (
   GatedDeltaNetBlock, SSMConfig, TransformerBlock, TransformerConfig,
   apply_rope as apply_rope_new, precompute_freqs_cis, pairwise_topk,
 )
@@ -1,7 +1,7 @@
 import unittest
 import numpy as np
 from tinygrad import Tensor
-from tinygrad.apps.llm import Transformer, TransformerConfig, apply_rope
+from tinygrad.llm.cli import Transformer, TransformerConfig, apply_rope
 
 class TestMLA(unittest.TestCase):
   def _make_config(self, **kwargs):
@@ -13,7 +13,7 @@ class TestMLA(unittest.TestCase):
 
   def test_mla_attention_matches_naive(self):
     config = self._make_config(max_context=16)
-    from tinygrad.apps.llm import MLATransformerBlock, precompute_freqs_cis
+    from tinygrad.llm.cli import MLATransformerBlock, precompute_freqs_cis
 
     block = MLATransformerBlock(config)
     c = config
@@ -2,7 +2,7 @@ import unittest
 import numpy as np
 from dataclasses import replace
 from tinygrad import Tensor
-from tinygrad.apps.llm import TransformerBlock, TransformerConfig
+from tinygrad.llm.cli import TransformerBlock, TransformerConfig
 
 def _moe_config(dim=8, hidden=16, n_heads=2, num_experts=4, num_experts_per_tok=2):
   return TransformerConfig(
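_moe_config pins num_experts=4 and num_experts_per_tok=2, i.e. top-2 routing. A generic NumPy illustration of that routing step (the textbook formulation, not necessarily what tinygrad's pairwise_topk computes):

import numpy as np

def route_topk(logits: np.ndarray, k: int = 2):
  # logits: (tokens, num_experts) router scores; pick k experts per token
  idx = np.argsort(-logits, axis=-1)[:, :k]                # (tokens, k) expert ids, best first
  picked = np.take_along_axis(logits, idx, axis=-1)
  w = np.exp(picked - picked.max(axis=-1, keepdims=True))  # softmax over the selected experts only
  return idx, w / w.sum(axis=-1, keepdims=True)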
@@ -2,7 +2,7 @@ import unittest
 from unittest.mock import patch
 from tinygrad import Tensor, UOp
 from tinygrad.schedule import schedule_cache
-from tinygrad.apps.llm import Transformer, TransformerConfig
+from tinygrad.llm.cli import Transformer, TransformerConfig
 
 TEST_CONFIG = TransformerConfig(num_blocks=1, dim=64, hidden_dim=128, n_heads=2, n_kv_heads=2,
   norm_eps=1e-5, vocab_size=100, head_dim=32, rope_theta=10000.0, rope_dim=32, v_head_dim=32, max_context=32)