fix onnx attention permute (#14025)

* fix onnx attention permute

* skip test_attention_4d_fp16_cpu too
This commit is contained in:
chenyu
2026-01-05 08:58:50 -05:00
committed by GitHub
parent 5cff5698f7
commit 9497ec00f2
2 changed files with 11 additions and 8 deletions

View File

@@ -171,9 +171,11 @@ backend_test.exclude('test_tensorscatter_*')
# Exclusion list for the ONNX backend conformance-test runner: each call skips
# every test whose name matches the given glob pattern.
backend_test.exclude('test_l1normalization_*')
backend_test.exclude('test_l2normalization_*')
backend_test.exclude('test_lpnormalization_*')
backend_test.exclude('test_mod_mixed_sign_float16_cpu')
# NOTE(review): the two blanket attention excludes below subsume every specific
# attention exclude that follows, making those lines dead. This span was taken
# from a diff hunk without add/remove markers — presumably the blanket lines are
# the removed (pre-change) side and the specific excludes are the added side;
# confirm against the full file before trusting this list as-is.
backend_test.exclude('test_attention_3d_*')
backend_test.exclude('test_attention_4d_*')
backend_test.exclude('test_attention_4d_diff_heads_mask4d_padded_kv_cpu') # needs nonpad_kv_seqlen handling
backend_test.exclude('test_attention_4d_fp16_cpu') # fp16 numerical issues
backend_test.exclude('test_attention_4d_fp16_expanded_cpu') # fp16 numerical issues
backend_test.exclude('test_attention_4d_gqa_with_past_and_present_fp16_cpu') # fp16 numerical issues
backend_test.exclude('test_attention_4d_gqa_with_past_and_present_fp16_expanded_cpu') # fp16 numerical issues
# rest of the failing tests