Files
tinygrad/extra/optimization/test_beam_search.py
Ben Waldron ea1be2e4cd [bounty] Remove using reshape to register symbolic shape (#11771)
* Modify tests and start work towards removing symbolic reshape

* Refactor symbolic reshape

* fix small error

* much cleaner + fix more tests

* Can remove this now

* Update test_symbolic_ops and test_tiny

* Couple more tests

* Unused import

* More tests and add EXPAND to Tensor.empty

* Fix test beam search

* all int

* Fix rangeify by adding shrink

* Remove OOB check and so fix test_symbolic_jit

* test_symbolic_jit doesn't need OOB Context anymore either

* Should remove that test now

* Cleanups part 1

* fix linters

* Final cleanups

* Don't reassign inside for loop

---------

Co-authored-by: chenyu <chenyu@fastmail.com>
2025-08-28 12:30:49 -04:00

80 lines
2.4 KiB
Python

import unittest
import numpy as np
from tinygrad.helpers import BEAM, Timing, CI, Context
from tinygrad import Variable, Tensor
from tinygrad.nn import Conv2d
def rand(*shape):
  """Return a tinygrad Tensor of the given shape, filled with uniform random float32 values."""
  data = np.random.rand(*shape).astype(np.float32)
  return Tensor(data)
class TestBeamSearch(unittest.TestCase):
  """Correctness tests that run every kernel through BEAM search (BEAM=2).

  Prime-sized shapes (367) are used deliberately: they do not divide evenly,
  so any incorrect padding introduced by the searched kernels shows up in the
  numeric comparison against NumPy.
  """

  def setUp(self):
    # Force beam search on for every test; the previous value is restored in tearDown.
    self.old_beam = BEAM.value
    BEAM.value = 2

  def tearDown(self):
    BEAM.value = self.old_beam

  def test_variable_ast_beam(self):
    # A kernel whose AST contains a bound symbolic Variable must still beam-search cleanly.
    vi = Variable("a", 1, 10).bind(3)
    sliced = rand(10, 3)[:vi]
    (sliced + 1).realize()

  def test_big_prime_number(self):
    x, y = rand(367, 367), rand(367, 367)
    out = (x @ y).realize()
    np.testing.assert_allclose(out.numpy(), x.numpy() @ y.numpy(), atol=1e-4, rtol=1e-4)

  def test_big_prime_number_max(self):
    x = -rand(367, 367)
    y = rand(367, 367)
    # if incorrectly padded 0, the max would be 0 instead of a negative number
    out = (x * y).max(1)
    np.testing.assert_allclose(out.numpy(), (x.numpy() * y.numpy()).max(1), atol=1e-4, rtol=1e-4)

  def test_big_prime_number_sum(self):
    x = rand(367, 367)
    y = rand(367, 367)
    # if incorrectly padded 0, the sum would be inf
    out = (x / y).sum(1).realize()
    np.testing.assert_allclose(out.numpy(), (x.numpy() / y.numpy()).sum(1), atol=1e-4, rtol=1e-4)

  def test_variable_big_prime_number(self):
    # Symbolic inner dimension bound to a prime: matmul result must match the static slice.
    var = Variable("v", 1, 400).bind(367)
    x = rand(367, 400)
    y = rand(400, 367)
    out = (x[:, :var] @ y[:var, :]).realize()
    np.testing.assert_allclose(out.numpy(), x[:, :367].numpy() @ y[:367, :].numpy(), atol=1e-4, rtol=1e-4)

  def test_variable_shrink_prime_number(self):
    var = Variable("v", 1, 400).bind(367)
    x = rand(400, 367)
    out = (x.shrink(((0, var), None)) + 1).reshape(367, 367).realize()
    np.testing.assert_allclose(out.numpy(), x.numpy()[:367] + 1, atol=1e-4, rtol=1e-4)

  def test_no_mutate_rawbuffers(self):
    # assign() must not clobber the buffer that the expected value was computed from
    t = rand(3, 3).realize()
    desired = t.numpy() + 1
    t.assign(t + 1)
    np.testing.assert_allclose(t.numpy(), desired)

  @unittest.skipIf(CI, "flaky. CL_OUT_OF_RESOURCES")
  def test_conv_beam(self):
    conv = Conv2d(3, 16, (3, 3))
    inp = rand(1, 3, 32, 32)
    with Timing():
      conv(inp).realize()

  @unittest.skip("flaky, Fatal Python error: Floating point exception")
  def test_large_ast(self):
    # Build a deep expression tree (20 fused (a+a)*a steps) to stress the beam on a large AST.
    t = Tensor.rand(3, 3)
    for _outer in range(5):
      for _inner in range(4):
        t = (t + t) * t
    t.realize()
# Run the test suite when this file is executed as a script.
if __name__ == "__main__":
  unittest.main()