diff --git a/test/test_dtype.py b/test/test_dtype.py
index 53566f6fa8..67dd9983a9 100644
--- a/test/test_dtype.py
+++ b/test/test_dtype.py
@@ -620,10 +620,10 @@ class TestImplicitFunctionTypeChange(unittest.TestCase):
       t = func(Tensor([4.0, 3.0])).max() == func(Tensor([4.0, 3.0]))
       result.append(t.numpy().sum())
 
-    if Device.DEFAULT not in ["PYTHON", "CLANG"]:
+    if Device.DEFAULT not in ["PYTHON"]:
       assert all(result)
     else:
-      # CLANG and PYTHON function default returns in double, and comparison to float can fail
+      # PYTHON function default returns in double, and comparison to float can fail
       # TODO: fix this
       assert not all(result)
 
diff --git a/test/test_ops.py b/test/test_ops.py
index 43549426bf..ee5be0ecbd 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -793,7 +793,7 @@ class TestOps(unittest.TestCase):
     helper_test_op([(10,10,10)], lambda x: x.softmax(0), atol=1e-7, grad_atol=1e-7)
     helper_test_op([(10,10,10)], lambda x: x.softmax(1), atol=1e-7, grad_atol=1e-7)
     helper_test_op([(10,10,10)], lambda x: x.softmax(2), atol=1e-7, grad_atol=1e-7)
-  @unittest.skipIf(CI and Device.DEFAULT in ["CLANG", "PYTHON"], "Broken ISSUE #3552")
+  @unittest.skipIf(Device.DEFAULT in ["PYTHON"], "Broken ISSUE #3552")
   def test_softmax_argmax(self):
     helper_test_op([(45,65)], lambda x: x.softmax(0).argmax().type(torch.int32),
                    lambda x: x.softmax(0).argmax(), forward_only=True, atol=1e-7, grad_atol=1e-7)
diff --git a/tinygrad/runtime/ops_clang.py b/tinygrad/runtime/ops_clang.py
index 30caa52050..13607453c4 100644
--- a/tinygrad/runtime/ops_clang.py
+++ b/tinygrad/runtime/ops_clang.py
@@ -4,7 +4,7 @@ from tinygrad.helpers import cpu_time_execution
 from tinygrad.codegen.kernel import LinearizerOptions
 from tinygrad.renderer.cstyle import uops_to_cstyle, CStyleLanguage
 
-CLANG_PROGRAM_HEADER = '#include <math.h>\n#include <stdbool.h>\n#define max(x,y) ((x>y)?x:y)\n#define half __fp16\n'
+CLANG_PROGRAM_HEADER = '#include <tgmath.h>\n#include <stdbool.h>\n#define max(x,y) ((x>y)?x:y)\n#define half __fp16\n'
 
 class ClangCompiler(Compiler):
   linearizer_opts = LinearizerOptions("CLANG", supports_float4=False, has_local=False)
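
Why the ops_clang.py change fixes CLANG (assuming, consistent with the updated test comments, that the header swaps <math.h> for <tgmath.h>): with <math.h>, exp() and friends take and return double, so a float argument is promoted and the result carries double precision; <tgmath.h> instead provides type-generic macros that dispatch exp(float) to expf(), keeping the computation in float. That is exactly the failure mode of the max() == func(...) comparison in test_dtype.py. A minimal standalone C sketch of the difference (not part of the diff; whether the two values actually differ depends on the libm in use):

#include <stdio.h>
#include <math.h>

int main(void) {
  float x = 3.0f;
  // <math.h> path: compute exp in double, then round the result to float.
  float via_double = (float)exp((double)x);
  // <tgmath.h> path for a float argument: the generic exp() macro calls expf().
  float via_float = expf(x);
  printf("via double: %.9g\nvia float:  %.9g\nequal: %d\n",
         via_double, via_float, via_double == via_float);
  return 0;
}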