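# Simple GEMM benchmark: multiply an MxK tensor by a KxN tensor CNT times in
# tinygrad, then check the last result against a float32 numpy reference.
# Configuration comes from environment variables (read with getenv):
#   HALF / BFLOAT16           input dtype (default: float32)
#   ACC_HALF / ACC_BFLOAT16   accumulation dtype (default: None)
#   INT                       use int8 inputs with int32 accumulation
#   N, M, K                   matrix sizes for (M, K) @ (K, N) (default: 4096)
#   CNT                       number of matmuls to run (default: 10)
#   ATOL / RTOL               tolerances for the correctness check
#   RAND                      regenerate the inputs on every iteration
#   DEBUG_VALUES              print mismatching elements when the check fails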
import numpy as np
from tinygrad.helpers import getenv
from tinygrad import dtypes, Tensor
dtype_in = dtypes.half if getenv("HALF") else dtypes.bfloat16 if getenv("BFLOAT16") else dtypes.float
acc_dtype = dtypes.half if getenv("ACC_HALF") else dtypes.bfloat16 if getenv("ACC_BFLOAT16") else None
if getenv("INT"):
  dtype_in = dtypes.int8
  acc_dtype = dtypes.int32
N = getenv("N", 4096)
M = getenv("M", N)
K = getenv("K", N)
CNT = getenv("CNT", 10)
ATOL = getenv("ATOL", 1e-4)
RTOL = getenv("RTOL", 3e-2)

if __name__ == "__main__":
  a, b = Tensor.rand(M, K, dtype=dtype_in).realize(), Tensor.rand(K, N, dtype=dtype_in).realize()
  for i in range(CNT):
    if i > 0 and getenv("RAND", 0) != 0:
      a, b = Tensor.rand(M, K, dtype=dtype_in).realize(), Tensor.rand(K, N, dtype=dtype_in).realize()
    c = a.matmul(b, acc_dtype=acc_dtype).realize()
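  # verify the last result against a float32 numpy reference; DEBUG_VALUES=1
  # dumps the mismatching elements if the tolerance check fails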
  comp = a.numpy().astype(np.float32) @ b.numpy().astype(np.float32)
  nc = c.numpy()
  try:
    np.testing.assert_allclose(nc, comp, atol=ATOL, rtol=RTOL)
  except AssertionError as e:
    if getenv("DEBUG_VALUES") > 0:
      indices = np.where(~np.isclose(nc, comp, rtol=RTOL, atol=ATOL))
      non_matching_elements_nc = nc[indices]
      non_matching_elements_comp = comp[indices]
      print(indices)
      print("result      :", non_matching_elements_nc)
      print("ground truth:", non_matching_elements_comp)
    raise e
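
# Example invocations -- a sketch, assuming this file is saved as
# simple_matmul.py (the actual path in the repo may differ) and that the
# usual tinygrad DEBUG env var is available for per-kernel timing output:
#   python3 simple_matmul.py                       # fp32, 4096x4096 @ 4096x4096
#   HALF=1 N=2048 CNT=20 python3 simple_matmul.py  # fp16 inputs, smaller size
#   INT=1 RAND=1 python3 simple_matmul.py          # int8 inputs, fresh data each run
#   DEBUG=2 python3 simple_matmul.py               # show kernel timings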