mirror of https://github.com/tinygrad/tinygrad.git
fix onnx attention permute (#14025)
* fix onnx attention permute
* skip test_attention_4d_fp16_cpu too
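For context, the attention tests toggled below exercise ONNX scaled dot-product attention over 4D (batch, num_heads, seq_len, head_size) tensors, where forming QK^T requires a permute of K's last two axes; that transpose is the kind of permute the commit title refers to. A minimal numpy sketch of the reference computation, assuming that 4D layout; attention_4d is a hypothetical illustration, not the code this commit changes:

import numpy as np

def attention_4d(q, k, v):
    # q, k, v: (batch, num_heads, seq_len, head_size)
    scale = 1.0 / np.sqrt(q.shape[-1])
    # the permute: swap K's last two axes so q @ k^T contracts over head_size
    scores = (q @ k.transpose(0, 1, 3, 2)) * scale
    # numerically stable softmax over the key axis
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights = weights / weights.sum(axis=-1, keepdims=True)
    return weights @ v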
test/external/external_test_onnx_backend.py (8 changed lines)
@@ -171,9 +171,11 @@ backend_test.exclude('test_tensorscatter_*')
 backend_test.exclude('test_l1normalization_*')
 backend_test.exclude('test_l2normalization_*')
 backend_test.exclude('test_lpnormalization_*')
 backend_test.exclude('test_mod_mixed_sign_float16_cpu')
-backend_test.exclude('test_attention_3d_*')
-backend_test.exclude('test_attention_4d_*')
-
+backend_test.exclude('test_attention_4d_diff_heads_mask4d_padded_kv_cpu') # needs nonpad_kv_seqlen handling
+backend_test.exclude('test_attention_4d_fp16_cpu') # fp16 numerical issues
+backend_test.exclude('test_attention_4d_fp16_expanded_cpu') # fp16 numerical issues
+backend_test.exclude('test_attention_4d_gqa_with_past_and_present_fp16_cpu') # fp16 numerical issues
+backend_test.exclude('test_attention_4d_gqa_with_past_and_present_fp16_expanded_cpu') # fp16 numerical issues
 
 # rest of the failing tests
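These exclusions plug into the stock onnx backend-test harness, whose exclude() treats each pattern as a regex matched against generated test names: removing the blanket 'test_attention_3d_*' and 'test_attention_4d_*' entries re-enables those families, while the new per-test entries skip only the named cases. A minimal sketch of that harness pattern, assuming the standard onnx.backend.test API; DummyBackend is a hypothetical stand-in for the real backend this file defines:

import onnx.backend.base
import onnx.backend.test

class DummyBackend(onnx.backend.base.Backend):
    # hypothetical stand-in: a real backend also overrides prepare()/run_model()
    # to actually execute each ONNX model
    @classmethod
    def supports_device(cls, device: str) -> bool:
        return device == "CPU"

backend_test = onnx.backend.test.BackendTest(DummyBackend, __name__)

# exclude() treats its argument as a regex over test names:
# 'test_attention_4d_*' matches every 4d attention case, while a full
# name such as this one skips a single case
backend_test.exclude('test_attention_4d_fp16_cpu')

# standard harness idiom: expose the surviving cases to the test runner
globals().update(backend_test.enable_report().test_cases)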